// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocates buckets from the freelist, and
 * allocates space at sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_write.h"
#include "journal.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "trace.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
                                           struct mutex *lock)
{
        if (!mutex_trylock(lock)) {
                bch2_trans_unlock(trans);
                mutex_lock(lock);
        }
}
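
/*
 * Why trylock-then-unlock: blocking on a mutex while holding btree locks
 * can deadlock against a mutex holder waiting on those btree locks, so on
 * trylock failure the transaction's btree locks are dropped first.
 * Illustrative sketch (wp here is a hypothetical caller's write point):
 *
 *      bch2_trans_mutex_lock_norelock(trans, &wp->lock);
 *      ...                     // btree locks may have been dropped
 *      mutex_unlock(&wp->lock);
 */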

const char * const bch2_watermarks[] = {
#define x(t) #t,
        BCH_WATERMARKS()
#undef x
        NULL
};

/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
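
/*
 * Illustrative sketch of that ordering (hypothetical caller;
 * do_index_update() stands in for the caller's real btree update):
 *
 *      bch2_alloc_sectors_append_ptrs(c, wp, &k, sectors, false);
 *      bch2_open_bucket_get(c, wp, &obs);      // take refs to put later
 *      bch2_alloc_sectors_done(c, wp);
 *      ret = do_index_update(trans, &k);
 *      bch2_open_buckets_put(c, &obs);         // put refs only after update
 */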

void bch2_reset_alloc_cursors(struct bch_fs *c)
{
        rcu_read_lock();
        for_each_member_device_rcu(c, ca, NULL)
                memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
        rcu_read_unlock();
}

static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
{
        open_bucket_idx_t idx = ob - c->open_buckets;
        open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

        ob->hash = *slot;
        *slot = idx;
}

static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
{
        open_bucket_idx_t idx = ob - c->open_buckets;
        open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

        while (*slot != idx) {
                BUG_ON(!*slot);
                slot = &c->open_buckets[*slot].hash;
        }

        *slot = ob->hash;
        ob->hash = 0;
}
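
/*
 * Illustrative chain layout for the two helpers above (indices into
 * c->open_buckets, not pointers; 0 terminates, so index 0 is effectively
 * a sentinel):
 *
 *      *hashslot -> 7 -> 12 -> 3 -> 0
 *
 * hash_add() pushes at the head; hash_remove() walks ->hash links until
 * *slot == idx, then splices the entry out by writing ob->hash into *slot.
 */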

void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
        struct bch_dev *ca = ob_dev(c, ob);

        if (ob->ec) {
                ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
                return;
        }

        percpu_down_read(&c->mark_lock);
        spin_lock(&ob->lock);

        ob->valid = false;
        ob->data_type = 0;

        spin_unlock(&ob->lock);
        percpu_up_read(&c->mark_lock);

        spin_lock(&c->freelist_lock);
        bch2_open_bucket_hash_remove(c, ob);

        ob->freelist = c->open_buckets_freelist;
        c->open_buckets_freelist = ob - c->open_buckets;

        c->open_buckets_nr_free++;
        ca->nr_open_buckets--;
        spin_unlock(&c->freelist_lock);

        closure_wake_up(&c->open_buckets_wait);
}

void bch2_open_bucket_write_error(struct bch_fs *c,
                                  struct open_buckets *obs,
                                  unsigned dev)
{
        struct open_bucket *ob;
        unsigned i;

        open_bucket_for_each(c, obs, ob, i)
                if (ob->dev == dev && ob->ec)
                        bch2_ec_bucket_cancel(c, ob);
}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
        struct open_bucket *ob;

        BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

        ob = c->open_buckets + c->open_buckets_freelist;
        c->open_buckets_freelist = ob->freelist;
        atomic_set(&ob->pin, 1);
        ob->data_type = 0;

        c->open_buckets_nr_free--;
        return ob;
}

static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
{
        BUG_ON(c->open_buckets_partial_nr >=
               ARRAY_SIZE(c->open_buckets_partial));

        spin_lock(&c->freelist_lock);
        ob->on_partial_list = true;
        c->open_buckets_partial[c->open_buckets_partial_nr++] =
                ob - c->open_buckets;
        spin_unlock(&c->freelist_lock);

        closure_wake_up(&c->open_buckets_wait);
        closure_wake_up(&c->freelist_wait);
}

/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
        while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
                u64 b = ca->new_fs_bucket_idx++;

                if (!is_superblock_bucket(ca, b) &&
                    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
                        return b;
        }

        return -1;
}

static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
{
        switch (watermark) {
        case BCH_WATERMARK_interior_updates:
                return 0;
        case BCH_WATERMARK_reclaim:
                return OPEN_BUCKETS_COUNT / 6;
        case BCH_WATERMARK_btree:
        case BCH_WATERMARK_btree_copygc:
                return OPEN_BUCKETS_COUNT / 4;
        case BCH_WATERMARK_copygc:
                return OPEN_BUCKETS_COUNT / 3;
        default:
                return OPEN_BUCKETS_COUNT / 2;
        }
}
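
/*
 * Worked example (assuming OPEN_BUCKETS_COUNT is 1024, as in
 * alloc_types.h): normal-priority allocations fail with
 * -BCH_ERR_open_buckets_empty once 512 or fewer open buckets remain,
 * copygc once 341 or fewer remain, btree updates at 256, reclaim at 170,
 * while interior btree updates may take the very last one - so the paths
 * that free space always make progress ahead of the writers consuming it.
 */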

static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
                                              u64 bucket,
                                              enum bch_watermark watermark,
                                              const struct bch_alloc_v4 *a,
                                              struct bucket_alloc_state *s,
                                              struct closure *cl)
{
        struct open_bucket *ob;

        if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
                s->skipped_nouse++;
                return NULL;
        }

        if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
                s->skipped_open++;
                return NULL;
        }

        if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
                        c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
                s->skipped_need_journal_commit++;
                return NULL;
        }

        if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
                s->skipped_nocow++;
                return NULL;
        }

        spin_lock(&c->freelist_lock);

        if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
                if (cl)
                        closure_wait(&c->open_buckets_wait, cl);

                track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
                spin_unlock(&c->freelist_lock);
                return ERR_PTR(-BCH_ERR_open_buckets_empty);
        }

        /* Recheck under lock: */
        if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
                spin_unlock(&c->freelist_lock);
                s->skipped_open++;
                return NULL;
        }

        ob = bch2_open_bucket_alloc(c);

        spin_lock(&ob->lock);

        ob->valid       = true;
        ob->sectors_free = ca->mi.bucket_size;
        ob->dev         = ca->dev_idx;
        ob->gen         = a->gen;
        ob->bucket      = bucket;
        spin_unlock(&ob->lock);

        ca->nr_open_buckets++;
        bch2_open_bucket_hash_add(c, ob);

        track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false);
        track_event_change(&c->times[BCH_TIME_blocked_allocate], false);

        spin_unlock(&c->freelist_lock);
        return ob;
}

static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
                                            enum bch_watermark watermark, u64 free_entry,
                                            struct bucket_alloc_state *s,
                                            struct bkey_s_c freespace_k,
                                            struct closure *cl)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter = { NULL };
        struct bkey_s_c k;
        struct open_bucket *ob;
        struct bch_alloc_v4 a_convert;
        const struct bch_alloc_v4 *a;
        u64 b = free_entry & ~(~0ULL << 56);
        unsigned genbits = free_entry >> 56;
        struct printbuf buf = PRINTBUF;
        int ret;

        if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
                prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
                       "  freespace key ",
                        ca->mi.first_bucket, ca->mi.nbuckets);
                bch2_bkey_val_to_text(&buf, c, freespace_k);
                bch2_trans_inconsistent(trans, "%s", buf.buf);
                ob = ERR_PTR(-EIO);
                goto err;
        }

        k = bch2_bkey_get_iter(trans, &iter,
                               BTREE_ID_alloc, POS(ca->dev_idx, b),
                               BTREE_ITER_cached);
        ret = bkey_err(k);
        if (ret) {
                ob = ERR_PTR(ret);
                goto err;
        }

        a = bch2_alloc_to_v4(k, &a_convert);

        if (a->data_type != BCH_DATA_free) {
                if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
                        ob = NULL;
                        goto err;
                }

                prt_printf(&buf, "non free bucket in freespace btree\n"
                       "  freespace key ");
                bch2_bkey_val_to_text(&buf, c, freespace_k);
                prt_printf(&buf, "\n  ");
                bch2_bkey_val_to_text(&buf, c, k);
                bch2_trans_inconsistent(trans, "%s", buf.buf);
                ob = ERR_PTR(-EIO);
                goto err;
        }

        if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
            c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
                prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
                       "  freespace key ",
                       genbits, alloc_freespace_genbits(*a) >> 56);
                bch2_bkey_val_to_text(&buf, c, freespace_k);
                prt_printf(&buf, "\n  ");
                bch2_bkey_val_to_text(&buf, c, k);
                bch2_trans_inconsistent(trans, "%s", buf.buf);
                ob = ERR_PTR(-EIO);
                goto err;
        }

        if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
                struct bch_backpointer bp;
                struct bpos bp_pos = POS_MIN;

                ret = bch2_get_next_backpointer(trans, ca, POS(ca->dev_idx, b), -1,
                                                &bp_pos, &bp,
                                                BTREE_ITER_nopreserve);
                if (ret) {
                        ob = ERR_PTR(ret);
                        goto err;
                }
                if (!bkey_eq(bp_pos, POS_MAX)) {
                        /*
                         * Bucket may have data in it - we don't call
                         * bch2_trans_inconsistent() because fsck hasn't
                         * finished yet
                         */
                        ob = NULL;
                        goto err;
                }
        }

        ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
        if (!ob)
                bch2_set_btree_iter_dontneed(&iter);
err:
        if (iter.path)
                bch2_set_btree_iter_dontneed(&iter);
        bch2_trans_iter_exit(trans, &iter);
        printbuf_exit(&buf);
        return ob;
}
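
/*
 * Freespace keys pack 8 bits of generation ("genbits") into the top of
 * the 64 bit key offset, with the bucket number in the low 56 bits -
 * matching the masks in try_alloc_bucket() above. Sketch (illustrative
 * values only):
 *
 *      u64 free_entry = ((u64) genbits << 56) | bucket;
 *      u64 b          = free_entry & ~(~0ULL << 56);   // bucket number
 *      unsigned g     = free_entry >> 56;              // genbits
 */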

/*
 * This path is for before the freespace btree is initialized:
 *
 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
 */
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
                        struct bch_dev *ca,
                        enum bch_watermark watermark,
                        struct bucket_alloc_state *s,
                        struct closure *cl)
{
        struct btree_iter iter, citer;
        struct bkey_s_c k, ck;
        struct open_bucket *ob = NULL;
        u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
        u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
        u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
        u64 alloc_cursor = alloc_start;
        int ret;

        /*
         * Scan with an uncached iterator to avoid polluting the key cache. An
         * uncached iter will return a cached key if one exists, but if not
         * there is no other underlying protection for the associated key cache
         * slot. To avoid racing bucket allocations, look up the cached key slot
         * of any likely allocation candidate before attempting to proceed with
         * the allocation. This provides proper exclusion on the associated
         * bucket.
         */
again:
        for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
                           BTREE_ITER_slots, k, ret) {
                u64 bucket = k.k->p.offset;

                if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
                        break;

                if (ca->new_fs_bucket_idx &&
                    is_superblock_bucket(ca, k.k->p.offset))
                        continue;

                if (s->btree_bitmap != BTREE_BITMAP_ANY &&
                    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
                                bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
                        if (s->btree_bitmap == BTREE_BITMAP_YES &&
                            bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
                                break;

                        bucket = sector_to_bucket(ca,
                                        round_up(bucket_to_sector(ca, bucket) + 1,
                                                 1ULL << ca->mi.btree_bitmap_shift));
                        bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
                        s->buckets_seen++;
                        s->skipped_mi_btree_bitmap++;
                        continue;
                }

                struct bch_alloc_v4 a_convert;
                const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
                if (a->data_type != BCH_DATA_free)
                        continue;

                /* now check the cached key to serialize concurrent allocs of the bucket */
                ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
                ret = bkey_err(ck);
                if (ret)
                        break;

                a = bch2_alloc_to_v4(ck, &a_convert);
                if (a->data_type != BCH_DATA_free)
                        goto next;

                s->buckets_seen++;

                ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
next:
                bch2_set_btree_iter_dontneed(&citer);
                bch2_trans_iter_exit(trans, &citer);
                if (ob)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);

        alloc_cursor = iter.pos.offset;

        if (!ob && ret)
                ob = ERR_PTR(ret);

        if (!ob && alloc_start > first_bucket) {
                alloc_cursor = alloc_start = first_bucket;
                goto again;
        }

        *dev_alloc_cursor = alloc_cursor;

        return ob;
}

static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
                                                   struct bch_dev *ca,
                                                   enum bch_watermark watermark,
                                                   struct bucket_alloc_state *s,
                                                   struct closure *cl)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        struct open_bucket *ob = NULL;
        u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
        u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
        u64 alloc_cursor = alloc_start;
        int ret;

        BUG_ON(ca->new_fs_bucket_idx);
again:
        for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
                                     POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
                if (k.k->p.inode != ca->dev_idx)
                        break;

                for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
                     alloc_cursor < k.k->p.offset;
                     alloc_cursor++) {
                        ret = btree_trans_too_many_iters(trans);
                        if (ret) {
                                ob = ERR_PTR(ret);
                                break;
                        }

                        s->buckets_seen++;

                        u64 bucket = alloc_cursor & ~(~0ULL << 56);
                        if (s->btree_bitmap != BTREE_BITMAP_ANY &&
                            s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
                                        bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
                                if (s->btree_bitmap == BTREE_BITMAP_YES &&
                                    bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
                                        goto fail;

                                bucket = sector_to_bucket(ca,
                                                round_up(bucket_to_sector(ca, bucket) + 1,
                                                         1ULL << ca->mi.btree_bitmap_shift));
                                u64 genbits = alloc_cursor >> 56;
                                alloc_cursor = bucket | (genbits << 56);

                                if (alloc_cursor > k.k->p.offset)
                                        bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
                                s->skipped_mi_btree_bitmap++;
                                continue;
                        }

                        ob = try_alloc_bucket(trans, ca, watermark,
                                              alloc_cursor, s, k, cl);
                        if (ob) {
                                bch2_set_btree_iter_dontneed(&iter);
                                break;
                        }
                }

                if (ob || ret)
                        break;
        }
fail:
        bch2_trans_iter_exit(trans, &iter);

        if (!ob && ret)
                ob = ERR_PTR(ret);

        if (!ob && alloc_start > ca->mi.first_bucket) {
                alloc_cursor = alloc_start = ca->mi.first_bucket;
                goto again;
        }

        *dev_alloc_cursor = alloc_cursor;

        return ob;
}

static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
                                         enum bch_watermark watermark,
                                         enum bch_data_type data_type,
                                         struct closure *cl,
                                         struct bch_dev_usage *usage,
                                         struct bucket_alloc_state *s,
                                         struct open_bucket *ob)
{
        struct printbuf buf = PRINTBUF;

        printbuf_tabstop_push(&buf, 24);

        prt_printf(&buf, "dev\t%s (%u)\n",      ca->name, ca->dev_idx);
        prt_printf(&buf, "watermark\t%s\n",     bch2_watermarks[watermark]);
        prt_printf(&buf, "data type\t%s\n",     __bch2_data_types[data_type]);
        prt_printf(&buf, "blocking\t%u\n",      cl != NULL);
        prt_printf(&buf, "free\t%llu\n",        usage->d[BCH_DATA_free].buckets);
        prt_printf(&buf, "avail\t%llu\n",       dev_buckets_free(ca, *usage, watermark));
        prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
                   bch2_copygc_wait_amount(c),
                   c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now));
        prt_printf(&buf, "seen\t%llu\n",        s->buckets_seen);
        prt_printf(&buf, "open\t%llu\n",        s->skipped_open);
        prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
        prt_printf(&buf, "nocow\t%llu\n",       s->skipped_nocow);
        prt_printf(&buf, "nouse\t%llu\n",       s->skipped_nouse);
        prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap);

        if (!IS_ERR(ob)) {
                prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
                trace_bucket_alloc(c, buf.buf);
        } else {
                prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
                trace_bucket_alloc_fail(c, buf.buf);
        }

        printbuf_exit(&buf);
}

/**
 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
 * @trans:      transaction object
 * @ca:         device to allocate from
 * @watermark:  how important is this allocation?
 * @data_type:  BCH_DATA_journal, btree, user...
 * @cl:         if not NULL, closure to wait on if no buckets are available
 * @usage:      also filled in with the current device usage
 *
 * Returns:     an open_bucket on success, or an ERR_PTR() on failure.
 */
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
                                      struct bch_dev *ca,
                                      enum bch_watermark watermark,
                                      enum bch_data_type data_type,
                                      struct closure *cl,
                                      struct bch_dev_usage *usage)
{
        struct bch_fs *c = trans->c;
        struct open_bucket *ob = NULL;
        bool freespace = READ_ONCE(ca->mi.freespace_initialized);
        u64 avail;
        struct bucket_alloc_state s = {
                .btree_bitmap = data_type == BCH_DATA_btree,
        };
        bool waiting = false;
again:
        bch2_dev_usage_read_fast(ca, usage);
        avail = dev_buckets_free(ca, *usage, watermark);

        if (usage->d[BCH_DATA_need_discard].buckets > avail)
                bch2_do_discards(c);

        if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
                bch2_gc_gens_async(c);

        if (should_invalidate_buckets(ca, *usage))
                bch2_do_invalidates(c);

        if (!avail) {
                if (cl && !waiting) {
                        closure_wait(&c->freelist_wait, cl);
                        waiting = true;
                        goto again;
                }

                track_event_change(&c->times[BCH_TIME_blocked_allocate], true);

                ob = ERR_PTR(-BCH_ERR_freelist_empty);
                goto err;
        }

        if (waiting)
                closure_wake_up(&c->freelist_wait);
alloc:
        ob = likely(freespace)
                ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
                : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);

        if (s.skipped_need_journal_commit * 2 > avail)
                bch2_journal_flush_async(&c->journal, NULL);

        if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
                s.btree_bitmap = BTREE_BITMAP_ANY;
                goto alloc;
        }

        if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
                freespace = false;
                goto alloc;
        }
err:
        if (!ob)
                ob = ERR_PTR(-BCH_ERR_no_buckets_found);

        if (!IS_ERR(ob))
                ob->data_type = data_type;

        if (!IS_ERR(ob))
                count_event(c, bucket_alloc);
        else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
                count_event(c, bucket_alloc_fail);

        if (!IS_ERR(ob)
            ? trace_bucket_alloc_enabled()
            : trace_bucket_alloc_fail_enabled())
                trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob);

        return ob;
}

struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
                                      enum bch_watermark watermark,
                                      enum bch_data_type data_type,
                                      struct closure *cl)
{
        struct bch_dev_usage usage;
        struct open_bucket *ob;

        bch2_trans_do(c, NULL, NULL, 0,
                      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
                                                        data_type, cl, &usage)));
        return ob;
}
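
/*
 * Sketch of blocking usage (hypothetical caller; real write paths follow
 * this shape but differ in detail): on -BCH_ERR_freelist_empty the closure
 * has already been added to c->freelist_wait, so the caller can sleep and
 * retry:
 *
 *      struct closure cl;
 *      struct open_bucket *ob;
 *
 *      closure_init_stack(&cl);
 *      while (1) {
 *              ob = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
 *                                     BCH_DATA_user, &cl);
 *              if (!IS_ERR(ob) ||
 *                  PTR_ERR(ob) != -BCH_ERR_freelist_empty)
 *                      break;
 *              closure_sync(&cl);      // woken via c->freelist_wait
 *      }
 */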

static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
                            unsigned l, unsigned r)
{
        return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
                (stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
                                          struct dev_stripe_state *stripe,
                                          struct bch_devs_mask *devs)
{
        struct dev_alloc_list ret = { .nr = 0 };
        unsigned i;

        for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
                ret.devs[ret.nr++] = i;

        bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
        return ret;
}
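
/*
 * Devices sort ascending by next_alloc, so whichever device has received
 * the least weighted allocation comes first. E.g. (illustrative values)
 * with next_alloc = { dev0: 900, dev1: 100, dev2: 400 } the resulting
 * order is dev1, dev2, dev0.
 */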

static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
                               struct dev_stripe_state *stripe,
                               struct bch_dev_usage *usage)
{
        u64 *v = stripe->next_alloc + ca->dev_idx;
        u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
        u64 free_space_inv = free_space
                ? div64_u64(1ULL << 48, free_space)
                : 1ULL << 48;
        u64 scale = *v / 4;

        if (*v + free_space_inv >= *v)
                *v += free_space_inv;
        else
                *v = U64_MAX;

        for (v = stripe->next_alloc;
             v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
                *v = *v < scale ? 0 : *v - scale;
}
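
/*
 * Worked example (illustrative numbers): the increment is inversely
 * proportional to free space. With 2^20 free buckets, next_alloc grows by
 * 2^48 / 2^20 = 2^28 per allocation; a device with twice the free space
 * grows half as fast and is therefore picked roughly twice as often by
 * bch2_dev_alloc_list(). Subtracting scale (a quarter of the updated
 * counter) from every entry keeps the values bounded while mostly
 * preserving their relative order.
 */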

void bch2_dev_stripe_increment(struct bch_dev *ca,
                               struct dev_stripe_state *stripe)
{
        struct bch_dev_usage usage;

        bch2_dev_usage_read_fast(ca, &usage);
        bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
}

static int add_new_bucket(struct bch_fs *c,
                           struct open_buckets *ptrs,
                           struct bch_devs_mask *devs_may_alloc,
                           unsigned nr_replicas,
                           unsigned *nr_effective,
                           bool *have_cache,
                           unsigned flags,
                           struct open_bucket *ob)
{
        unsigned durability = ob_dev(c, ob)->mi.durability;

        BUG_ON(*nr_effective >= nr_replicas);

        __clear_bit(ob->dev, devs_may_alloc->d);
        *nr_effective   += durability;
        *have_cache     |= !durability;

        ob_push(c, ptrs, ob);

        if (*nr_effective >= nr_replicas)
                return 1;
        if (ob->ec)
                return 1;
        return 0;
}
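
/*
 * Note that durability, not bucket count, satisfies the replication goal:
 * e.g. (illustrative) with nr_replicas = 2, a single bucket on a
 * durability-2 device completes the allocation by itself, while a
 * durability-0 (cache) device adds nothing to nr_effective and only sets
 * *have_cache. A nonzero return tells the caller to stop allocating.
 */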

int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
                      struct open_buckets *ptrs,
                      struct dev_stripe_state *stripe,
                      struct bch_devs_mask *devs_may_alloc,
                      unsigned nr_replicas,
                      unsigned *nr_effective,
                      bool *have_cache,
                      unsigned flags,
                      enum bch_data_type data_type,
                      enum bch_watermark watermark,
                      struct closure *cl)
{
        struct bch_fs *c = trans->c;
        struct dev_alloc_list devs_sorted =
                bch2_dev_alloc_list(c, stripe, devs_may_alloc);
        int ret = -BCH_ERR_insufficient_devices;

        BUG_ON(*nr_effective >= nr_replicas);

        for (unsigned i = 0; i < devs_sorted.nr; i++) {
                struct bch_dev_usage usage;
                struct open_bucket *ob;

                unsigned dev = devs_sorted.devs[i];
                struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
                if (!ca)
                        continue;

                if (!ca->mi.durability && *have_cache) {
                        bch2_dev_put(ca);
                        continue;
                }

                ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, cl, &usage);
                if (!IS_ERR(ob))
                        bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
                bch2_dev_put(ca);

                if (IS_ERR(ob)) {
                        ret = PTR_ERR(ob);
                        if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
                                break;
                        continue;
                }

                if (add_new_bucket(c, ptrs, devs_may_alloc,
                                   nr_replicas, nr_effective,
                                   have_cache, flags, ob)) {
                        ret = 0;
                        break;
                }
        }

        return ret;
}

/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */

static int bucket_alloc_from_stripe(struct btree_trans *trans,
                         struct open_buckets *ptrs,
                         struct write_point *wp,
                         struct bch_devs_mask *devs_may_alloc,
                         u16 target,
                         unsigned nr_replicas,
                         unsigned *nr_effective,
                         bool *have_cache,
                         enum bch_watermark watermark,
                         unsigned flags,
                         struct closure *cl)
{
        struct bch_fs *c = trans->c;
        struct dev_alloc_list devs_sorted;
        struct ec_stripe_head *h;
        struct open_bucket *ob;
        unsigned i, ec_idx;
        int ret = 0;

        if (nr_replicas < 2)
                return 0;

        if (ec_open_bucket(c, ptrs))
                return 0;

        h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
        if (IS_ERR(h))
                return PTR_ERR(h);
        if (!h)
                return 0;

        devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

        for (i = 0; i < devs_sorted.nr; i++)
                for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
                        if (!h->s->blocks[ec_idx])
                                continue;

                        ob = c->open_buckets + h->s->blocks[ec_idx];
                        if (ob->dev == devs_sorted.devs[i] &&
                            !test_and_set_bit(ec_idx, h->s->blocks_allocated))
                                goto got_bucket;
                }
        goto out_put_head;
got_bucket:
        ob->ec_idx      = ec_idx;
        ob->ec          = h->s;
        ec_stripe_new_get(h->s, STRIPE_REF_io);

        ret = add_new_bucket(c, ptrs, devs_may_alloc,
                             nr_replicas, nr_effective,
                             have_cache, flags, ob);
out_put_head:
        bch2_ec_stripe_head_put(c, h);
        return ret;
}

/* Sector allocator */

static bool want_bucket(struct bch_fs *c,
                        struct write_point *wp,
                        struct bch_devs_mask *devs_may_alloc,
                        bool *have_cache, bool ec,
                        struct open_bucket *ob)
{
        struct bch_dev *ca = ob_dev(c, ob);

        if (!test_bit(ob->dev, devs_may_alloc->d))
                return false;

        if (ob->data_type != wp->data_type)
                return false;

        if (!ca->mi.durability &&
            (wp->data_type == BCH_DATA_btree || ec || *have_cache))
                return false;

        if (ec != (ob->ec != NULL))
                return false;

        return true;
}

static int bucket_alloc_set_writepoint(struct bch_fs *c,
                                       struct open_buckets *ptrs,
                                       struct write_point *wp,
                                       struct bch_devs_mask *devs_may_alloc,
                                       unsigned nr_replicas,
                                       unsigned *nr_effective,
                                       bool *have_cache,
                                       bool ec, unsigned flags)
{
        struct open_buckets ptrs_skip = { .nr = 0 };
        struct open_bucket *ob;
        unsigned i;
        int ret = 0;

        open_bucket_for_each(c, &wp->ptrs, ob, i) {
                if (!ret && want_bucket(c, wp, devs_may_alloc,
                                        have_cache, ec, ob))
                        ret = add_new_bucket(c, ptrs, devs_may_alloc,
                                       nr_replicas, nr_effective,
                                       have_cache, flags, ob);
                else
                        ob_push(c, &ptrs_skip, ob);
        }
        wp->ptrs = ptrs_skip;

        return ret;
}

static int bucket_alloc_set_partial(struct bch_fs *c,
                                    struct open_buckets *ptrs,
                                    struct write_point *wp,
                                    struct bch_devs_mask *devs_may_alloc,
                                    unsigned nr_replicas,
                                    unsigned *nr_effective,
                                    bool *have_cache, bool ec,
                                    enum bch_watermark watermark,
                                    unsigned flags)
{
        int i, ret = 0;

        if (!c->open_buckets_partial_nr)
                return 0;

        spin_lock(&c->freelist_lock);

        if (!c->open_buckets_partial_nr)
                goto unlock;

        for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
                struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];

                if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
                        struct bch_dev *ca = ob_dev(c, ob);
                        struct bch_dev_usage usage;
                        u64 avail;

                        bch2_dev_usage_read_fast(ca, &usage);
                        avail = dev_buckets_free(ca, usage, watermark);
                        if (!avail)
                                continue;

                        array_remove_item(c->open_buckets_partial,
                                          c->open_buckets_partial_nr,
                                          i);
                        ob->on_partial_list = false;

                        ret = add_new_bucket(c, ptrs, devs_may_alloc,
                                             nr_replicas, nr_effective,
                                             have_cache, flags, ob);
                        if (ret)
                                break;
                }
        }
unlock:
        spin_unlock(&c->freelist_lock);
        return ret;
}

static int __open_bucket_add_buckets(struct btree_trans *trans,
                        struct open_buckets *ptrs,
                        struct write_point *wp,
                        struct bch_devs_list *devs_have,
                        u16 target,
                        bool erasure_code,
                        unsigned nr_replicas,
                        unsigned *nr_effective,
                        bool *have_cache,
                        enum bch_watermark watermark,
                        unsigned flags,
                        struct closure *_cl)
{
        struct bch_fs *c = trans->c;
        struct bch_devs_mask devs;
        struct open_bucket *ob;
        struct closure *cl = NULL;
        unsigned i;
        int ret;

        devs = target_rw_devs(c, wp->data_type, target);

        /* Don't allocate from devices we already have pointers to: */
        darray_for_each(*devs_have, i)
                __clear_bit(*i, devs.d);

        open_bucket_for_each(c, ptrs, ob, i)
                __clear_bit(ob->dev, devs.d);

        if (erasure_code && ec_open_bucket(c, ptrs))
                return 0;

        ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
                                 nr_replicas, nr_effective,
                                 have_cache, erasure_code, flags);
        if (ret)
                return ret;

        ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
                                 nr_replicas, nr_effective,
                                 have_cache, erasure_code, watermark, flags);
        if (ret)
                return ret;

        if (erasure_code) {
                ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
                                         target,
                                         nr_replicas, nr_effective,
                                         have_cache,
                                         watermark, flags, _cl);
        } else {
retry_blocking:
                /*
                 * Try nonblocking first, so that if one device is full we'll try from
                 * other devices:
                 */
                ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
                                        nr_replicas, nr_effective, have_cache,
                                        flags, wp->data_type, watermark, cl);
                if (ret &&
                    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
                    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
                    !cl && _cl) {
                        cl = _cl;
                        goto retry_blocking;
                }
        }

        return ret;
}

static int open_bucket_add_buckets(struct btree_trans *trans,
                        struct open_buckets *ptrs,
                        struct write_point *wp,
                        struct bch_devs_list *devs_have,
                        u16 target,
                        unsigned erasure_code,
                        unsigned nr_replicas,
                        unsigned *nr_effective,
                        bool *have_cache,
                        enum bch_watermark watermark,
                        unsigned flags,
                        struct closure *cl)
{
        int ret;

        if (erasure_code) {
                ret = __open_bucket_add_buckets(trans, ptrs, wp,
                                devs_have, target, erasure_code,
                                nr_replicas, nr_effective, have_cache,
                                watermark, flags, cl);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
                    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
                    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
                    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
                        return ret;
                if (*nr_effective >= nr_replicas)
                        return 0;
        }

        ret = __open_bucket_add_buckets(trans, ptrs, wp,
                        devs_have, target, false,
                        nr_replicas, nr_effective, have_cache,
                        watermark, flags, cl);
        return ret < 0 ? ret : 0;
}

/**
 * should_drop_bucket - check if this open_bucket should go away
 * @ob:         open_bucket to predicate on
 * @c:          filesystem handle
 * @ca:         if set, we're killing buckets for a particular device
 * @ec:         if true, we're shutting down erasure coding and killing all ec
 *              open_buckets; if neither @ca nor @ec is set, all open_buckets
 *              are dropped
 *
 * Returns: true if we should kill this open_bucket
 *
 * We're killing open_buckets because we're shutting down a device, erasure
 * coding, or the entire filesystem - check if this open_bucket matches:
 */
static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
                               struct bch_dev *ca, bool ec)
{
        if (ec) {
                return ob->ec != NULL;
        } else if (ca) {
                bool drop = ob->dev == ca->dev_idx;
                struct open_bucket *ob2;
                unsigned i;

                if (!drop && ob->ec) {
                        unsigned nr_blocks;

                        mutex_lock(&ob->ec->lock);
                        nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;

                        for (i = 0; i < nr_blocks; i++) {
                                if (!ob->ec->blocks[i])
                                        continue;

                                ob2 = c->open_buckets + ob->ec->blocks[i];
                                drop |= ob2->dev == ca->dev_idx;
                        }
                        mutex_unlock(&ob->ec->lock);
                }

                return drop;
        } else {
                return true;
        }
}

static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
                                 bool ec, struct write_point *wp)
{
        struct open_buckets ptrs = { .nr = 0 };
        struct open_bucket *ob;
        unsigned i;

        mutex_lock(&wp->lock);
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                if (should_drop_bucket(ob, c, ca, ec))
                        bch2_open_bucket_put(c, ob);
                else
                        ob_push(c, &ptrs, ob);
        wp->ptrs = ptrs;
        mutex_unlock(&wp->lock);
}

void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
                            bool ec)
{
        unsigned i;

        /* Next, close write points that point to this device... */
        for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
                bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);

        bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
        bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
        bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);

        mutex_lock(&c->btree_reserve_cache_lock);
        while (c->btree_reserve_cache_nr) {
                struct btree_alloc *a =
                        &c->btree_reserve_cache[--c->btree_reserve_cache_nr];

                bch2_open_buckets_put(c, &a->ob);
        }
        mutex_unlock(&c->btree_reserve_cache_lock);

        spin_lock(&c->freelist_lock);
        i = 0;
        while (i < c->open_buckets_partial_nr) {
                struct open_bucket *ob =
                        c->open_buckets + c->open_buckets_partial[i];

                if (should_drop_bucket(ob, c, ca, ec)) {
                        --c->open_buckets_partial_nr;
                        swap(c->open_buckets_partial[i],
                             c->open_buckets_partial[c->open_buckets_partial_nr]);
                        ob->on_partial_list = false;
                        spin_unlock(&c->freelist_lock);
                        bch2_open_bucket_put(c, ob);
                        spin_lock(&c->freelist_lock);
                } else {
                        i++;
                }
        }
        spin_unlock(&c->freelist_lock);

        bch2_ec_stop_dev(c, ca);
}

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
                                                 unsigned long write_point)
{
        unsigned hash =
                hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

        return &c->write_points_hash[hash];
}

static struct write_point *__writepoint_find(struct hlist_head *head,
                                             unsigned long write_point)
{
        struct write_point *wp;

        rcu_read_lock();
        hlist_for_each_entry_rcu(wp, head, node)
                if (wp->write_point == write_point)
                        goto out;
        wp = NULL;
out:
        rcu_read_unlock();
        return wp;
}

static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
        u64 stranded    = c->write_points_nr * c->bucket_size_max;
        u64 free        = bch2_fs_usage_read_short(c).free;

        return stranded * factor > free;
}
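
/*
 * Worked example (illustrative numbers; stranded space and free space are
 * both in sectors): with 32 write points each holding up to one
 * bucket_size_max of partially-filled space, try_increase_writepoints()
 * (factor 32) stops adding write points once stranded space would exceed
 * 1/32 of free space, while try_decrease_writepoints() (factor 8) starts
 * trimming them once it exceeds 1/8 - the gap provides hysteresis.
 */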

static bool try_increase_writepoints(struct bch_fs *c)
{
        struct write_point *wp;

        if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
            too_many_writepoints(c, 32))
                return false;

        wp = c->write_points + c->write_points_nr++;
        hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
        return true;
}

static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
{
        struct bch_fs *c = trans->c;
        struct write_point *wp;
        struct open_bucket *ob;
        unsigned i;

        mutex_lock(&c->write_points_hash_lock);
        if (c->write_points_nr < old_nr) {
                mutex_unlock(&c->write_points_hash_lock);
                return true;
        }

        if (c->write_points_nr == 1 ||
            !too_many_writepoints(c, 8)) {
                mutex_unlock(&c->write_points_hash_lock);
                return false;
        }

        wp = c->write_points + --c->write_points_nr;

        hlist_del_rcu(&wp->node);
        mutex_unlock(&c->write_points_hash_lock);

        bch2_trans_mutex_lock_norelock(trans, &wp->lock);
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                open_bucket_free_unused(c, ob);
        wp->ptrs.nr = 0;
        mutex_unlock(&wp->lock);
        return true;
}

static struct write_point *writepoint_find(struct btree_trans *trans,
                                           unsigned long write_point)
{
        struct bch_fs *c = trans->c;
        struct write_point *wp, *oldest;
        struct hlist_head *head;

        if (!(write_point & 1UL)) {
                wp = (struct write_point *) write_point;
                bch2_trans_mutex_lock_norelock(trans, &wp->lock);
                return wp;
        }

        head = writepoint_hash(c, write_point);
restart_find:
        wp = __writepoint_find(head, write_point);
        if (wp) {
lock_wp:
                bch2_trans_mutex_lock_norelock(trans, &wp->lock);
                if (wp->write_point == write_point)
                        goto out;
                mutex_unlock(&wp->lock);
                goto restart_find;
        }
restart_find_oldest:
        oldest = NULL;
        for (wp = c->write_points;
             wp < c->write_points + c->write_points_nr; wp++)
                if (!oldest || time_before64(wp->last_used, oldest->last_used))
                        oldest = wp;

        bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
        bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
        if (oldest >= c->write_points + c->write_points_nr ||
            try_increase_writepoints(c)) {
                mutex_unlock(&c->write_points_hash_lock);
                mutex_unlock(&oldest->lock);
                goto restart_find_oldest;
        }

        wp = __writepoint_find(head, write_point);
        if (wp && wp != oldest) {
                mutex_unlock(&c->write_points_hash_lock);
                mutex_unlock(&oldest->lock);
                goto lock_wp;
        }

        wp = oldest;
        hlist_del_rcu(&wp->node);
        wp->write_point = write_point;
        hlist_add_head_rcu(&wp->node, head);
        mutex_unlock(&c->write_points_hash_lock);
out:
        wp->last_used = local_clock();
        return wp;
}

static noinline void
deallocate_extra_replicas(struct bch_fs *c,
                          struct open_buckets *ptrs,
                          struct open_buckets *ptrs_no_use,
                          unsigned extra_replicas)
{
        struct open_buckets ptrs2 = { 0 };
        struct open_bucket *ob;
        unsigned i;

        open_bucket_for_each(c, ptrs, ob, i) {
                unsigned d = ob_dev(c, ob)->mi.durability;

                if (d && d <= extra_replicas) {
                        extra_replicas -= d;
                        ob_push(c, ptrs_no_use, ob);
                } else {
                        ob_push(c, &ptrs2, ob);
                }
        }

        *ptrs = ptrs2;
}

/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
                             unsigned target,
                             unsigned erasure_code,
                             struct write_point_specifier write_point,
                             struct bch_devs_list *devs_have,
                             unsigned nr_replicas,
                             unsigned nr_replicas_required,
                             enum bch_watermark watermark,
                             unsigned flags,
                             struct closure *cl,
                             struct write_point **wp_ret)
{
        struct bch_fs *c = trans->c;
        struct write_point *wp;
        struct open_bucket *ob;
        struct open_buckets ptrs;
        unsigned nr_effective, write_points_nr;
        bool have_cache;
        int ret;
        int i;

        if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
                erasure_code = false;

        BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);

        BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
        ptrs.nr         = 0;
        nr_effective    = 0;
        write_points_nr = c->write_points_nr;
        have_cache      = false;

        *wp_ret = wp = writepoint_find(trans, write_point.v);

        ret = bch2_trans_relock(trans);
        if (ret)
                goto err;

        /* metadata may not allocate on cache devices: */
        if (wp->data_type != BCH_DATA_user)
                have_cache = true;

        if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
                ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
                                              &have_cache, watermark,
                                              flags, NULL);
                if (!ret ||
                    bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        goto alloc_done;

                /* Don't retry from all devices if we're out of open buckets: */
                if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
                        int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
                                              &have_cache, watermark,
                                              flags, cl);
                        if (!ret2 ||
                            bch2_err_matches(ret2, BCH_ERR_transaction_restart) ||
                            bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) {
                                ret = ret2;
                                goto alloc_done;
                        }
                }

                /*
                 * Only try to allocate cache (durability = 0 devices) from the
                 * specified target:
                 */
                have_cache = true;

                ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
                                              0, erasure_code,
                                              nr_replicas, &nr_effective,
                                              &have_cache, watermark,
                                              flags, cl);
        } else {
                ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
                                              &have_cache, watermark,
                                              flags, cl);
        }
alloc_done:
        BUG_ON(!ret && nr_effective < nr_replicas);

1466         if (erasure_code && !ec_open_bucket(c, &ptrs))
1467                 pr_debug("failed to get ec bucket: ret %i", ret);
1468
1469         if (ret == -BCH_ERR_insufficient_devices &&
1470             nr_effective >= nr_replicas_required)
1471                 ret = 0;
1472
1473         if (ret)
1474                 goto err;
1475
1476         if (nr_effective > nr_replicas)
1477                 deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);
1478
1479         /* Free buckets we didn't use: */
1480         open_bucket_for_each(c, &wp->ptrs, ob, i)
1481                 open_bucket_free_unused(c, ob);
1482
1483         wp->ptrs = ptrs;
1484
1485         wp->sectors_free = UINT_MAX;
1486
1487         open_bucket_for_each(c, &wp->ptrs, ob, i)
1488                 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1489
1490         BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1491
1492         return 0;
1493 err:
1494         open_bucket_for_each(c, &wp->ptrs, ob, i)
1495                 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
1496                         ob_push(c, &ptrs, ob);
1497                 else
1498                         open_bucket_free_unused(c, ob);
1499         wp->ptrs = ptrs;
1500
1501         mutex_unlock(&wp->lock);
1502
1503         if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
1504             try_decrease_writepoints(trans, write_points_nr))
1505                 goto retry;
1506
1507         if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
1508             bch2_err_matches(ret, BCH_ERR_freelist_empty))
1509                 return cl
1510                         ? -BCH_ERR_bucket_alloc_blocked
1511                         : -BCH_ERR_ENOSPC_bucket_alloc;
1512
1513         return ret;
1514 }
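/*
 * Rough caller sketch for the three-call API above (simplified - the real
 * write path in io_write.c also handles disk reservations, erasure coding
 * and retries; target, devs_have, nr_replicas, sectors, cl and k are
 * placeholders here):
 *
 *	struct write_point *wp;
 *	int ret = bch2_alloc_sectors_start_trans(trans, target, false,
 *			writepoint_hashed((unsigned long) current),
 *			&devs_have, nr_replicas, nr_replicas,
 *			BCH_WATERMARK_normal, 0, cl, &wp);
 *	if (!ret) {
 *		bch2_alloc_sectors_append_ptrs(c, wp, &k->k_i, sectors, false);
 *		bch2_alloc_sectors_done(c, wp);
 *	}
 */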
1515
1516 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
1517 {
1518         struct bch_dev *ca = ob_dev(c, ob);
1519
1520         return (struct bch_extent_ptr) {
1521                 .type   = 1 << BCH_EXTENT_ENTRY_ptr,
1522                 .gen    = ob->gen,
1523                 .dev    = ob->dev,
1524                 .offset = bucket_to_sector(ca, ob->bucket) +
1525                         ca->mi.bucket_size -
1526                         ob->sectors_free,
1527         };
1528 }
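/*
 * Offset arithmetic above, worked through: with a 512-sector bucket and
 * ob->sectors_free == 384, ptr.offset = bucket start + (512 - 384) = bucket
 * start + 128, i.e. pointing at the first sector not yet handed out from this
 * open bucket.
 */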
1529
1530 /*
1531  * Append pointers to the space we just allocated to @k, and mark @sectors of space as allocated out of @wp:
1532  */
1533 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1534                                     struct bkey_i *k, unsigned sectors,
1535                                     bool cached)
1536 {
1537         bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
1538 }
1539
1540 /* Put our references to @wp's open buckets, and unlock @wp: */
1541 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1542 {
1543         bch2_alloc_sectors_done_inlined(c, wp);
1544 }
1545
1546 static inline void writepoint_init(struct write_point *wp,
1547                                    enum bch_data_type type)
1548 {
1549         mutex_init(&wp->lock);
1550         wp->data_type = type;
1551
1552         INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
1553         INIT_LIST_HEAD(&wp->writes);
1554         spin_lock_init(&wp->writes_lock);
1555 }
1556
1557 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
1558 {
1559         struct open_bucket *ob;
1560         struct write_point *wp;
1561
1562         mutex_init(&c->write_points_hash_lock);
1563         c->write_points_nr = ARRAY_SIZE(c->write_points);
1564
1565         /* open bucket 0 is a sentinel NULL: */
1566         spin_lock_init(&c->open_buckets[0].lock);
1567
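        /*
         * Thread the remaining open buckets onto an intrusive freelist,
         * linked by array index (0 being the NULL sentinel above):
         */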
1568         for (ob = c->open_buckets + 1;
1569              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1570                 spin_lock_init(&ob->lock);
1571                 c->open_buckets_nr_free++;
1572
1573                 ob->freelist = c->open_buckets_freelist;
1574                 c->open_buckets_freelist = ob - c->open_buckets;
1575         }
1576
1577         writepoint_init(&c->btree_write_point,          BCH_DATA_btree);
1578         writepoint_init(&c->rebalance_write_point,      BCH_DATA_user);
1579         writepoint_init(&c->copygc_write_point,         BCH_DATA_user);
1580
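        /*
         * The default write points all hold user data; each is keyed by its
         * own address, which is what writepoint_find() looks up:
         */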
1581         for (wp = c->write_points;
1582              wp < c->write_points + c->write_points_nr; wp++) {
1583                 writepoint_init(wp, BCH_DATA_user);
1584
1585                 wp->last_used   = local_clock();
1586                 wp->write_point = (unsigned long) wp;
1587                 hlist_add_head_rcu(&wp->node,
1588                                    writepoint_hash(c, wp->write_point));
1589         }
1590 }
1591
1592 static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
1593 {
1594         struct bch_dev *ca = ob_dev(c, ob);
1595         unsigned data_type = ob->data_type;
1596         barrier(); /* READ_ONCE() doesn't work on bitfields */
1597
1598         prt_printf(out, "%zu ref %u ",
1599                    ob - c->open_buckets,
1600                    atomic_read(&ob->pin));
1601         bch2_prt_data_type(out, data_type);
1602         prt_printf(out, " %u:%llu gen %u allocated %u/%u",
1603                    ob->dev, ob->bucket, ob->gen,
1604                    ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
1605         if (ob->ec)
1606                 prt_printf(out, " ec idx %llu", ob->ec->idx);
1607         if (ob->on_partial_list)
1608                 prt_str(out, " partial");
1609         prt_newline(out);
1610 }
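/*
 * Illustrative output from the above (values invented):
 *
 *	34 ref 1 user 0:7936 gen 4 allocated 128/512
 */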
1611
1612 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1613 {
1614         struct open_bucket *ob;
1615
1616         out->atomic++;
1617
1618         for (ob = c->open_buckets;
1619              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1620              ob++) {
1621                 spin_lock(&ob->lock);
1622                 if (ob->valid && !ob->on_partial_list)
1623                         bch2_open_bucket_to_text(out, c, ob);
1624                 spin_unlock(&ob->lock);
1625         }
1626
1627         --out->atomic;
1628 }
1629
1630 void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
1631 {
1632         unsigned i;
1633
1634         out->atomic++;
1635         spin_lock(&c->freelist_lock);
1636
1637         for (i = 0; i < c->open_buckets_partial_nr; i++)
1638                 bch2_open_bucket_to_text(out, c,
1639                                 c->open_buckets + c->open_buckets_partial[i]);
1640
1641         spin_unlock(&c->freelist_lock);
1642         --out->atomic;
1643 }
1644
1645 static const char * const bch2_write_point_states[] = {
1646 #define x(n)    #n,
1647         WRITE_POINT_STATES()
1648 #undef x
1649         NULL
1650 };
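/*
 * The x() macro above stringifies each WRITE_POINT_STATES() entry, so
 * bch2_write_point_states[] maps a write point state enum to its name.
 */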
1651
1652 static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
1653                                      struct write_point *wp)
1654 {
1655         struct open_bucket *ob;
1656         unsigned i;
1657
1658         prt_printf(out, "%lu: ", wp->write_point);
1659         prt_human_readable_u64(out, wp->sectors_allocated);
1660
1661         prt_printf(out, " last wrote: ");
1662         bch2_pr_time_units(out, sched_clock() - wp->last_used);
1663
1664         for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
1665                 prt_printf(out, " %s: ", bch2_write_point_states[i]);
1666                 bch2_pr_time_units(out, wp->time[i]);
1667         }
1668
1669         prt_newline(out);
1670
1671         printbuf_indent_add(out, 2);
1672         open_bucket_for_each(c, &wp->ptrs, ob, i)
1673                 bch2_open_bucket_to_text(out, c, ob);
1674         printbuf_indent_sub(out, 2);
1675 }
1676
1677 void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
1678 {
1679         struct write_point *wp;
1680
1681         prt_str(out, "Foreground write points\n");
1682         for (wp = c->write_points;
1683              wp < c->write_points + ARRAY_SIZE(c->write_points);
1684              wp++)
1685                 bch2_write_point_to_text(out, c, wp);
1686
1687         prt_str(out, "Copygc write point\n");
1688         bch2_write_point_to_text(out, c, &c->copygc_write_point);
1689
1690         prt_str(out, "Rebalance write point\n");
1691         bch2_write_point_to_text(out, c, &c->rebalance_write_point);
1692
1693         prt_str(out, "Btree write point\n");
1694         bch2_write_point_to_text(out, c, &c->btree_write_point);
1695 }
1696
1697 void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
1698 {
1699         unsigned nr[BCH_DATA_NR];
1700
1701         memset(nr, 0, sizeof(nr));
1702
1703         for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
1704                 nr[c->open_buckets[i].data_type]++;
1705
1706         printbuf_tabstop_push(out, 24);
1707
1708         percpu_down_read(&c->mark_lock);
1709         prt_printf(out, "hidden\t%llu\n",                       bch2_fs_usage_read_one(c, &c->usage_base->b.hidden));
1710         prt_printf(out, "btree\t%llu\n",                        bch2_fs_usage_read_one(c, &c->usage_base->b.btree));
1711         prt_printf(out, "data\t%llu\n",                         bch2_fs_usage_read_one(c, &c->usage_base->b.data));
1712         prt_printf(out, "cached\t%llu\n",                       bch2_fs_usage_read_one(c, &c->usage_base->b.cached));
1713         prt_printf(out, "reserved\t%llu\n",                     bch2_fs_usage_read_one(c, &c->usage_base->b.reserved));
1714         prt_printf(out, "online_reserved\t%llu\n",              percpu_u64_get(c->online_reserved));
1715         prt_printf(out, "nr_inodes\t%llu\n",                    bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes));
1716         percpu_up_read(&c->mark_lock);
1717
1718         prt_newline(out);
1719         prt_printf(out, "freelist_wait\t%s\n",                  c->freelist_wait.list.first ? "waiting" : "empty");
1720         prt_printf(out, "open buckets allocated\t%i\n",         OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
1721         prt_printf(out, "open buckets total\t%u\n",             OPEN_BUCKETS_COUNT);
1722         prt_printf(out, "open_buckets_wait\t%s\n",              c->open_buckets_wait.list.first ? "waiting" : "empty");
1723         prt_printf(out, "open_buckets_btree\t%u\n",             nr[BCH_DATA_btree]);
1724         prt_printf(out, "open_buckets_user\t%u\n",              nr[BCH_DATA_user]);
1725         prt_printf(out, "btree reserve cache\t%u\n",            c->btree_reserve_cache_nr);
1726 }
1727
1728 void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
1729 {
1730         struct bch_fs *c = ca->fs;
1731         struct bch_dev_usage stats = bch2_dev_usage_read(ca);
1732         unsigned nr[BCH_DATA_NR];
1733
1734         memset(nr, 0, sizeof(nr));
1735
1736         for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
1737                 nr[c->open_buckets[i].data_type]++;
1738
1739         printbuf_tabstop_push(out, 12);
1740         printbuf_tabstop_push(out, 16);
1741         printbuf_tabstop_push(out, 16);
1742         printbuf_tabstop_push(out, 16);
1743         printbuf_tabstop_push(out, 16);
1744
1745         bch2_dev_usage_to_text(out, &stats);
1746
1747         prt_newline(out);
1748
1749         prt_printf(out, "reserves:\n");
1750         for (unsigned i = 0; i < BCH_WATERMARK_NR; i++)
1751                 prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i));
1752
1753         prt_newline(out);
1754
1755         printbuf_tabstops_reset(out);
1756         printbuf_tabstop_push(out, 12);
1757         printbuf_tabstop_push(out, 16);
1758
1759         prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
1760         prt_printf(out, "buckets to invalidate\t%llu\r\n",      should_invalidate_buckets(ca, stats));
1761 }
1762
1763 void bch2_print_allocator_stuck(struct bch_fs *c)
1764 {
1765         struct printbuf buf = PRINTBUF;
1766
1767         prt_printf(&buf, "Allocator stuck? Waited for 10 seconds\n");
1768
1769         prt_printf(&buf, "Allocator debug:\n");
1770         printbuf_indent_add(&buf, 2);
1771         bch2_fs_alloc_debug_to_text(&buf, c);
1772         printbuf_indent_sub(&buf, 2);
1773         prt_newline(&buf);
1774
1775         for_each_online_member(c, ca) {
1776                 prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
1777                 printbuf_indent_add(&buf, 2);
1778                 bch2_dev_alloc_debug_to_text(&buf, ca);
1779                 printbuf_indent_sub(&buf, 2);
1780                 prt_newline(&buf);
1781         }
1782
1783         prt_printf(&buf, "Copygc debug:\n");
1784         printbuf_indent_add(&buf, 2);
1785         bch2_copygc_wait_to_text(&buf, c);
1786         printbuf_indent_sub(&buf, 2);
1787         prt_newline(&buf);
1788
1789         prt_printf(&buf, "Journal debug:\n");
1790         printbuf_indent_add(&buf, 2);
1791         bch2_journal_debug_to_text(&buf, &c->journal);
1792         printbuf_indent_sub(&buf, 2);
1793
1794         bch2_print_string_as_lines(KERN_ERR, buf.buf);
1795         printbuf_exit(&buf);
1796 }