// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

static const char * const bch2_journal_errors[] = {
#define x(n)    #n,
        JOURNAL_ERRORS()
#undef x
        NULL
};

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
        return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
        return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
        return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
        return __journal_entry_is_open(j->reservations);
}

static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
{
        union journal_res_state s = READ_ONCE(j->reservations);
        unsigned i = seq & JOURNAL_BUF_MASK;
        struct journal_buf *buf = j->buf + i;

        prt_printf(out, "seq:\t%llu\n", seq);
        printbuf_indent_add(out, 2);

        prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i));

        prt_printf(out, "size:\t");
        prt_human_readable_u64(out, vstruct_bytes(buf->data));
        prt_newline(out);

        prt_printf(out, "expires:\t");
        prt_printf(out, "%li jiffies\n", buf->expires - jiffies);

        prt_printf(out, "flags:\t");
        if (buf->noflush)
                prt_str(out, "noflush ");
        if (buf->must_flush)
                prt_str(out, "must_flush ");
        if (buf->separate_flush)
                prt_str(out, "separate_flush ");
        if (buf->need_flush_to_write_buffer)
                prt_str(out, "need_flush_to_write_buffer ");
        if (buf->write_started)
                prt_str(out, "write_started ");
        if (buf->write_allocated)
                prt_str(out, "write_allocated ");
        if (buf->write_done)
                prt_str(out, "write_done");
        prt_newline(out);

        printbuf_indent_sub(out, 2);
}

static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
{
        if (!out->nr_tabstops)
                printbuf_tabstop_push(out, 24);

        for (u64 seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j);
             seq++)
                bch2_journal_buf_to_text(out, j, seq);
        prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
        struct journal_buf *buf = NULL;

        EBUG_ON(seq > journal_cur_seq(j));

        if (journal_seq_unwritten(j, seq)) {
                buf = j->buf + (seq & JOURNAL_BUF_MASK);
                EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
        }
        return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
        for (unsigned i = 0; i < ARRAY_SIZE(p->unflushed); i++)
                INIT_LIST_HEAD(&p->unflushed[i]);
        for (unsigned i = 0; i < ARRAY_SIZE(p->flushed); i++)
                INIT_LIST_HEAD(&p->flushed[i]);
        atomic_set(&p->count, count);
        p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, discard journal buckets, or acquire a priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool stuck = false;
        struct printbuf buf = PRINTBUF;

        if (!(error == JOURNAL_ERR_journal_full ||
              error == JOURNAL_ERR_journal_pin_full) ||
            nr_unwritten_journal_entries(j) ||
            (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
                return stuck;

        spin_lock(&j->lock);

        if (j->can_discard) {
                spin_unlock(&j->lock);
                return stuck;
        }

        stuck = true;

        /*
         * The journal shutdown path will set ->err_seq, but do it here first to
         * serialize against concurrent failures and avoid duplicate error
         * reports.
         */
        if (j->err_seq) {
                spin_unlock(&j->lock);
                return stuck;
        }
        j->err_seq = journal_cur_seq(j);
        spin_unlock(&j->lock);

        bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
                bch2_journal_errors[error]);
        bch2_journal_debug_to_text(&buf, j);
        bch_err(c, "%s", buf.buf);

        printbuf_reset(&buf);
        bch2_journal_pins_to_text(&buf, j);
        bch_err(c, "Journal pins:\n%s", buf.buf);
        printbuf_exit(&buf);

        bch2_fatal_error(c);
        dump_stack();

        return stuck;
}

void bch2_journal_do_writes(struct journal *j)
{
        for (u64 seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j);
             seq++) {
                unsigned idx = seq & JOURNAL_BUF_MASK;
                struct journal_buf *w = j->buf + idx;

                if (w->write_started && !w->write_allocated)
                        break;
                if (w->write_started)
                        continue;

                if (!journal_state_count(j->reservations, idx)) {
                        w->write_started = true;
                        closure_call(&w->io, bch2_journal_write, j->wq, NULL);
                }

                break;
        }
}

/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq)
{
        lockdep_assert_held(&j->lock);

        if (__bch2_journal_pin_put(j, seq))
                bch2_journal_reclaim_fast(j);
        bch2_journal_do_writes(j);

        /*
         * for __bch2_next_write_buffer_flush_journal_buf(), when quiescing an
         * open journal entry
         */
        wake_up(&j->wait);
}

/*
 * Close the current journal entry:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = journal_cur_buf(j);
        union journal_res_state old, new;
        unsigned sectors;

        BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
               closed_val != JOURNAL_ENTRY_ERROR_VAL);

        lockdep_assert_held(&j->lock);

        old.v = atomic64_read(&j->reservations.counter);
        do {
                new.v = old.v;
                new.cur_entry_offset = closed_val;

                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
                    old.cur_entry_offset == new.cur_entry_offset)
                        return;
        } while (!atomic64_try_cmpxchg(&j->reservations.counter,
                                       &old.v, new.v));

        if (!__journal_entry_is_open(old))
                return;

        if (old.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL)
                old.cur_entry_offset = j->cur_entry_offset_if_blocked;

        /* Close out old buffer: */
        buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);

        if (trace_journal_entry_close_enabled() && trace) {
                struct printbuf pbuf = PRINTBUF;
                pbuf.atomic++;

                prt_str(&pbuf, "entry size: ");
                prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
                prt_newline(&pbuf);
                bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
                trace_journal_entry_close(c, pbuf.buf);
                printbuf_exit(&pbuf);
        }

        sectors = vstruct_blocks_plus(buf->data, c->block_bits,
                                      buf->u64s_reserved) << c->block_bits;
        BUG_ON(sectors > buf->sectors);
        buf->sectors = sectors;

        /*
         * We have to set last_seq here, _before_ opening a new journal entry:
         *
         * A thread may replace an old pin with a new pin on its current
         * journal reservation - the expectation being that the journal will
         * contain either what the old pin protected or what the new pin
         * protects.
         *
         * After the old pin is dropped journal_last_seq() won't include the old
         * pin, so we can only write the updated last_seq on the entry that
         * contains whatever the new pin protects.
         *
         * Restated, we can _not_ update last_seq for a given entry if there
         * could be a newer entry open with reservations/pins that have been
         * taken against it.
         *
         * Hence, we want to update/set last_seq on the current journal entry
         * right before we open a new one:
         */
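        /*
         * (Illustrative example, an editorial addition rather than original
         * source: suppose entry 10 has just been closed while entry 11 is
         * open, and a thread holding a reservation against 11 replaces its
         * pin on seq 5 with a pin on seq 11. If entry 10's last_seq were
         * computed later, at write time, it could claim seq 5 is no longer
         * needed - but entry 10 does not contain what the new pin protects,
         * so a crash before 11 reached disk would lose the old pin's data.
         * Hence last_seq is captured here, at close time.)
         */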
        buf->last_seq           = journal_last_seq(j);
        buf->data->last_seq     = cpu_to_le64(buf->last_seq);
        BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

        cancel_delayed_work(&j->write_work);

        bch2_journal_space_available(j);

        __bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
}

void bch2_journal_halt(struct journal *j)
{
        spin_lock(&j->lock);
        __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
        if (!j->err_seq)
                j->err_seq = journal_cur_seq(j);
        journal_wake(j);
        spin_unlock(&j->lock);
}

void bch2_journal_halt_locked(struct journal *j)
{
        lockdep_assert_held(&j->lock);

        __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
        if (!j->err_seq)
                j->err_seq = journal_cur_seq(j);
        journal_wake(j);
}

static bool journal_entry_want_write(struct journal *j)
{
        bool ret = !journal_entry_is_open(j) ||
                journal_cur_seq(j) == journal_last_unwritten_seq(j);

        /* Don't close it yet if we already have a write in flight: */
        if (ret)
                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
        else if (nr_unwritten_journal_entries(j)) {
                struct journal_buf *buf = journal_cur_buf(j);

                if (!buf->flush_time) {
                        buf->flush_time = local_clock() ?: 1;
                        buf->expires = jiffies;
                }
        }

        return ret;
}

bool bch2_journal_entry_close(struct journal *j)
{
        bool ret;

        spin_lock(&j->lock);
        ret = journal_entry_want_write(j);
        spin_unlock(&j->lock);

        return ret;
}

/*
 * Should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = j->buf +
                ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
        union journal_res_state old, new;
        int u64s;

        lockdep_assert_held(&j->lock);
        BUG_ON(journal_entry_is_open(j));
        BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

        if (j->blocked)
                return JOURNAL_ERR_blocked;

        if (j->cur_entry_error)
                return j->cur_entry_error;

        if (bch2_journal_error(j))
                return JOURNAL_ERR_insufficient_devices; /* -EROFS */

        if (!fifo_free(&j->pin))
                return JOURNAL_ERR_journal_pin_full;

        if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
                return JOURNAL_ERR_max_in_flight;

        if (journal_cur_seq(j) >= JOURNAL_SEQ_MAX) {
                bch_err(c, "cannot start: journal seq overflow");
                if (bch2_fs_emergency_read_only_locked(c))
                        bch_err(c, "fatal error - emergency read only");
                return JOURNAL_ERR_insufficient_devices; /* -EROFS */
        }

        BUG_ON(!j->cur_entry_sectors);

        buf->expires            =
                (journal_cur_seq(j) == j->flushed_seq_ondisk
                 ? jiffies
                 : j->last_flush_write) +
                msecs_to_jiffies(c->opts.journal_flush_delay);

        buf->u64s_reserved      = j->entry_u64s_reserved;
        buf->disk_sectors       = j->cur_entry_sectors;
        buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);

        u64s = (int) (buf->sectors << 9) / sizeof(u64) -
                journal_entry_overhead(j);
        u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

        if (u64s <= (ssize_t) j->early_journal_entries.nr)
                return JOURNAL_ERR_journal_full;

        if (fifo_empty(&j->pin) && j->reclaim_thread)
                wake_up_process(j->reclaim_thread);

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for journal_last_seq() to be calculated correctly
         */
        atomic64_inc(&j->seq);
        journal_pin_list_init(fifo_push_ref(&j->pin), 1);

        BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

        BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

        bkey_extent_init(&buf->key);
        buf->noflush            = false;
        buf->must_flush         = false;
        buf->separate_flush     = false;
        buf->flush_time         = 0;
        buf->need_flush_to_write_buffer = true;
        buf->write_started      = false;
        buf->write_allocated    = false;
        buf->write_done         = false;

        memset(buf->data, 0, sizeof(*buf->data));
        buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
        buf->data->u64s = 0;

        if (j->early_journal_entries.nr) {
                memcpy(buf->data->_data, j->early_journal_entries.data,
                       j->early_journal_entries.nr * sizeof(u64));
                le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
        }

        /*
         * Must be set before marking the journal entry as open:
         */
        j->cur_entry_u64s = u64s;

        old.v = atomic64_read(&j->reservations.counter);
        do {
                new.v = old.v;

                BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

                new.idx++;
                BUG_ON(journal_state_count(new, new.idx));
                BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

                journal_state_inc(&new);

                /* Handle any already added entries */
                new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
        } while (!atomic64_try_cmpxchg(&j->reservations.counter,
                                       &old.v, new.v));

        if (nr_unwritten_journal_entries(j) == 1)
                mod_delayed_work(j->wq,
                                 &j->write_work,
                                 msecs_to_jiffies(c->opts.journal_flush_delay));
        journal_wake(j);

        if (j->early_journal_entries.nr)
                darray_exit(&j->early_journal_entries);
        return 0;
}

static bool journal_quiesced(struct journal *j)
{
        bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

        if (!ret)
                bch2_journal_entry_close(j);
        return ret;
}

static void journal_quiesce(struct journal *j)
{
        wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
        struct journal *j = container_of(work, struct journal, write_work.work);

        spin_lock(&j->lock);
        if (__journal_entry_is_open(j->reservations)) {
                long delta = journal_cur_buf(j)->expires - jiffies;

                if (delta > 0)
                        mod_delayed_work(j->wq, &j->write_work, delta);
                else
                        __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
        }
        spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
                             unsigned flags)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf;
        bool can_discard;
        int ret;
retry:
        if (journal_res_get_fast(j, res, flags))
                return 0;

        if (bch2_journal_error(j))
                return -BCH_ERR_erofs_journal_err;

        if (j->blocked)
                return -BCH_ERR_journal_res_get_blocked;

        if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
                ret = JOURNAL_ERR_journal_full;
                can_discard = j->can_discard;
                goto out;
        }

        if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
                ret = JOURNAL_ERR_max_in_flight;
                goto out;
        }

        spin_lock(&j->lock);

        /*
         * Recheck after taking the lock, so we don't race with another thread
         * that just did journal_entry_open() and call bch2_journal_entry_close()
         * unnecessarily
         */
        if (journal_res_get_fast(j, res, flags)) {
                ret = 0;
                goto unlock;
        }

        /*
         * If we couldn't get a reservation because the current buf filled up,
         * and we had room for a bigger entry on disk, signal that we want to
         * realloc the journal bufs:
         */
        buf = journal_cur_buf(j);
        if (journal_entry_is_open(j) &&
            buf->buf_size >> 9 < buf->disk_sectors &&
            buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
                j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

        __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
        ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
unlock:
        can_discard = j->can_discard;
        spin_unlock(&j->lock);
out:
        if (ret == JOURNAL_ERR_retry)
                goto retry;
        if (!ret)
                return 0;

        if (journal_error_check_stuck(j, ret, flags))
                ret = -BCH_ERR_journal_res_get_blocked;

        if (ret == JOURNAL_ERR_max_in_flight &&
            track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {

                struct printbuf buf = PRINTBUF;
                prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
                bch2_journal_bufs_to_text(&buf, j);
                trace_journal_entry_full(c, buf.buf);
                printbuf_exit(&buf);
                count_event(c, journal_entry_full);
        }

        /*
         * Journal is full - can't rely on reclaim from work item due to
         * freezing:
         */
        if ((ret == JOURNAL_ERR_journal_full ||
             ret == JOURNAL_ERR_journal_pin_full) &&
            !(flags & JOURNAL_RES_GET_NONBLOCK)) {
                if (can_discard) {
                        bch2_journal_do_discards(j);
                        goto retry;
                }

                if (mutex_trylock(&j->reclaim_lock)) {
                        bch2_journal_reclaim(j);
                        mutex_unlock(&j->reclaim_lock);
                }
        }

        return ret == JOURNAL_ERR_insufficient_devices
                ? -BCH_ERR_erofs_journal_err
                : -BCH_ERR_journal_res_get_blocked;
}

static unsigned max_dev_latency(struct bch_fs *c)
{
        u64 nsecs = 0;

        for_each_rw_member(c, ca)
                nsecs = max(nsecs, ca->io_latency[WRITE].stats.max_duration);

        return nsecs_to_jiffies(nsecs);
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * The journal write is the structure used to set up journal writes; the
 * calling function will then add its keys to the structure, queuing them for
 * the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
                                  unsigned flags,
                                  struct btree_trans *trans)
{
        int ret;

        if (closure_wait_event_timeout(&j->async_wait,
                   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
                   (flags & JOURNAL_RES_GET_NONBLOCK),
                   HZ))
                return ret;

        if (trans)
                bch2_trans_unlock_long(trans);

        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        int remaining_wait = max(max_dev_latency(c) * 2, HZ * 10);

        remaining_wait = max(0, remaining_wait - HZ);

        if (closure_wait_event_timeout(&j->async_wait,
                   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
                   (flags & JOURNAL_RES_GET_NONBLOCK),
                   remaining_wait))
                return ret;

        struct printbuf buf = PRINTBUF;
        bch2_journal_debug_to_text(&buf, j);
        bch_err(c, "Journal stuck? Waited for 10 seconds...\n%s",
                buf.buf);
        printbuf_exit(&buf);

        closure_wait_event(&j->async_wait,
                   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
                   (flags & JOURNAL_RES_GET_NONBLOCK));
        return ret;
}
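
/*
 * Illustrative sketch (an editorial addition, not original source): the
 * typical reservation pattern, as used by __bch2_journal_meta() below -
 * reserve space, fill in the entry, then drop the reservation:
 *
 *      struct journal_res res = {};
 *      int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0, NULL);
 *      if (ret)
 *              return ret;
 *      // ... write keys into the journal entry at res.offset ...
 *      bch2_journal_res_put(j, &res);
 */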

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
                                   struct journal_entry_res *res,
                                   unsigned new_u64s)
{
        union journal_res_state state;
        int d = new_u64s - res->u64s;

        spin_lock(&j->lock);

        j->entry_u64s_reserved += d;
        if (d <= 0)
                goto out;

        j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
        smp_mb();
        state = READ_ONCE(j->reservations);

        if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
            state.cur_entry_offset > j->cur_entry_u64s) {
                j->cur_entry_u64s += d;
                /*
                 * Not enough room in current journal entry, have to flush it:
                 */
                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
        } else {
                journal_cur_buf(j)->u64s_reserved += d;
        }
out:
        spin_unlock(&j->lock);
        res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j:          journal object
 * @seq:        seq to flush
 * @parent:     closure object to wait with
 * Returns:     1 if @seq has already been flushed, 0 if @seq is being flushed,
 *              -BCH_ERR_journal_flush_err if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
                                 struct closure *parent)
{
        struct journal_buf *buf;
        int ret = 0;

        if (seq <= j->flushed_seq_ondisk)
                return 1;

        spin_lock(&j->lock);

        if (WARN_ONCE(seq > journal_cur_seq(j),
                      "requested to flush journal seq %llu, but currently at %llu",
                      seq, journal_cur_seq(j)))
                goto out;

        /* Recheck under lock: */
        if (j->err_seq && seq >= j->err_seq) {
                ret = -BCH_ERR_journal_flush_err;
                goto out;
        }

        if (seq <= j->flushed_seq_ondisk) {
                ret = 1;
                goto out;
        }

        /* if seq was written, but not flushed - flush a newer one instead */
        seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
        if (seq > journal_cur_seq(j)) {
                struct journal_res res = { 0 };

                if (journal_entry_is_open(j))
                        __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

                spin_unlock(&j->lock);

                /*
                 * We're called from bch2_journal_flush_seq() -> wait_event();
                 * but this might block. We won't usually block, so we won't
                 * livelock:
                 */
                sched_annotate_sleep();
                ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
                if (ret)
                        return ret;

                seq = res.seq;
                buf = journal_seq_to_buf(j, seq);
                buf->must_flush = true;

                if (!buf->flush_time) {
                        buf->flush_time = local_clock() ?: 1;
                        buf->expires = jiffies;
                }

                if (parent && !closure_wait(&buf->wait, parent))
                        BUG();

                bch2_journal_res_put(j, &res);

                spin_lock(&j->lock);
                goto want_write;
        }

        /*
         * if write was kicked off without a flush, or if we promised it
         * wouldn't be a flush, flush the next sequence number instead
         */
        buf = journal_seq_to_buf(j, seq);
        if (buf->noflush) {
                seq++;
                goto recheck_need_open;
        }

        buf->must_flush = true;
        j->flushing_seq = max(j->flushing_seq, seq);

        if (parent && !closure_wait(&buf->wait, parent))
                BUG();
want_write:
        if (seq == journal_cur_seq(j))
                journal_entry_want_write(j);
out:
        spin_unlock(&j->lock);
        return ret;
}
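
/*
 * Illustrative sketch (an editorial addition, not original source): waiting
 * on the async flush with a stack closure, which the synchronous wrappers
 * below effectively do via j->wait:
 *
 *      struct closure cl;
 *      closure_init_stack(&cl);
 *      int ret = bch2_journal_flush_seq_async(j, seq, &cl);
 *      closure_sync(&cl);      // wait for the journal write, if one was started
 */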

int bch2_journal_flush_seq(struct journal *j, u64 seq, unsigned task_state)
{
        u64 start_time = local_clock();
        int ret, ret2;

        /*
         * Don't update time_stats when @seq is already flushed:
         */
        if (seq <= j->flushed_seq_ondisk)
                return 0;

        ret = wait_event_state(j->wait,
                               (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)),
                               task_state);

        if (!ret)
                bch2_time_stats_update(j->flush_seq_time, start_time);

        return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
        bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
        return bch2_journal_flush_seq(j, atomic64_read(&j->seq), TASK_UNINTERRUPTIBLE);
}

/*
 * bch2_journal_noflush_seq - ask the journal not to issue any flushes in the
 * range [start, end)
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        u64 unwritten_seq;
        bool ret = false;

        if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
                return false;

        if (c->journal.flushed_seq_ondisk >= start)
                return false;

        spin_lock(&j->lock);
        if (c->journal.flushed_seq_ondisk >= start)
                goto out;

        for (unwritten_seq = journal_last_unwritten_seq(j);
             unwritten_seq < end;
             unwritten_seq++) {
                struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

                /* journal flush already in flight, or flush requested */
                if (buf->must_flush)
                        goto out;

                buf->noflush = true;
        }

        ret = true;
out:
        spin_unlock(&j->lock);
        return ret;
}

static int __bch2_journal_meta(struct journal *j)
{
        struct journal_res res = {};
        int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
        if (ret)
                return ret;

        struct journal_buf *buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
        buf->must_flush = true;

        if (!buf->flush_time) {
                buf->flush_time = local_clock() ?: 1;
                buf->expires = jiffies;
        }

        bch2_journal_res_put(j, &res);

        return bch2_journal_flush_seq(j, res.seq, TASK_UNINTERRUPTIBLE);
}

int bch2_journal_meta(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);

        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_journal))
                return -EROFS;

        int ret = __bch2_journal_meta(j);
        bch2_write_ref_put(c, BCH_WRITE_REF_journal);
        return ret;
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
        spin_lock(&j->lock);
        if (!--j->blocked &&
            j->cur_entry_offset_if_blocked < JOURNAL_ENTRY_CLOSED_VAL &&
            j->reservations.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) {
                union journal_res_state old, new;

                old.v = atomic64_read(&j->reservations.counter);
                do {
                        new.v = old.v;
                        new.cur_entry_offset = j->cur_entry_offset_if_blocked;
                } while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));
        }
        spin_unlock(&j->lock);

        journal_wake(j);
}

static void __bch2_journal_block(struct journal *j)
{
        if (!j->blocked++) {
                union journal_res_state old, new;

                old.v = atomic64_read(&j->reservations.counter);
                do {
                        j->cur_entry_offset_if_blocked = old.cur_entry_offset;

                        if (j->cur_entry_offset_if_blocked >= JOURNAL_ENTRY_CLOSED_VAL)
                                break;

                        new.v = old.v;
                        new.cur_entry_offset = JOURNAL_ENTRY_BLOCKED_VAL;
                } while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));

                journal_cur_buf(j)->data->u64s = cpu_to_le32(old.cur_entry_offset);
        }
}

void bch2_journal_block(struct journal *j)
{
        spin_lock(&j->lock);
        __bch2_journal_block(j);
        spin_unlock(&j->lock);

        journal_quiesce(j);
}
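
/*
 * Illustrative sketch (an editorial addition, not original source):
 * block/unblock bracket operations that must not race with new journal
 * reservations, e.g. the superblock update in __bch2_set_nr_journal_buckets()
 * below:
 *
 *      bch2_journal_block(&c->journal);        // quiesces the journal
 *      // ... no new reservations can be taken here ...
 *      bch2_journal_unblock(&c->journal);
 */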

static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j,
                                                u64 max_seq, bool *blocked)
{
        struct journal_buf *ret = NULL;

        /* We're inside wait_event(), but using mutex_lock(): */
        sched_annotate_sleep();
        mutex_lock(&j->buf_lock);
        spin_lock(&j->lock);
        max_seq = min(max_seq, journal_cur_seq(j));

        for (u64 seq = journal_last_unwritten_seq(j);
             seq <= max_seq;
             seq++) {
                unsigned idx = seq & JOURNAL_BUF_MASK;
                struct journal_buf *buf = j->buf + idx;

                if (buf->need_flush_to_write_buffer) {
                        union journal_res_state s;
                        s.v = atomic64_read_acquire(&j->reservations.counter);

                        unsigned open = seq == journal_cur_seq(j) && __journal_entry_is_open(s);

                        if (open && !*blocked) {
                                __bch2_journal_block(j);
                                *blocked = true;
                        }

                        ret = journal_state_count(s, idx) > open
                                ? ERR_PTR(-EAGAIN)
                                : buf;
                        break;
                }
        }

        spin_unlock(&j->lock);
        if (IS_ERR_OR_NULL(ret))
                mutex_unlock(&j->buf_lock);
        return ret;
}

struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j,
                                                             u64 max_seq, bool *blocked)
{
        struct journal_buf *ret;
        *blocked = false;

        wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j,
                                                max_seq, blocked)) != ERR_PTR(-EAGAIN));
        if (IS_ERR_OR_NULL(ret) && *blocked)
                bch2_journal_unblock(j);

        return ret;
}

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
                                         bool new_fs, struct closure *cl)
{
        struct bch_fs *c = ca->fs;
        struct journal_device *ja = &ca->journal;
        u64 *new_bucket_seq = NULL, *new_buckets = NULL;
        struct open_bucket **ob = NULL;
        long *bu = NULL;
        unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
        int ret = 0;

        BUG_ON(nr <= ja->nr);

        bu              = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
        ob              = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
        new_buckets     = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        new_bucket_seq  = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        if (!bu || !ob || !new_buckets || !new_bucket_seq) {
                ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
                goto err_free;
        }

        for (nr_got = 0; nr_got < nr_want; nr_got++) {
                enum bch_watermark watermark = new_fs
                        ? BCH_WATERMARK_btree
                        : BCH_WATERMARK_normal;

                ob[nr_got] = bch2_bucket_alloc(c, ca, watermark,
                                               BCH_DATA_journal, cl);
                ret = PTR_ERR_OR_ZERO(ob[nr_got]);
                if (ret)
                        break;

                if (!new_fs) {
                        ret = bch2_trans_run(c,
                                bch2_trans_mark_metadata_bucket(trans, ca,
                                                ob[nr_got]->bucket, BCH_DATA_journal,
                                                ca->mi.bucket_size, BTREE_TRIGGER_transactional));
                        if (ret) {
                                bch2_open_bucket_put(c, ob[nr_got]);
                                bch_err_msg(c, ret, "marking new journal buckets");
                                break;
                        }
                }

                bu[nr_got] = ob[nr_got]->bucket;
        }

        if (!nr_got)
                goto err_free;

        /* Don't return an error if we successfully allocated some buckets: */
        ret = 0;

        if (c) {
                bch2_journal_flush_all_pins(&c->journal);
                bch2_journal_block(&c->journal);
                mutex_lock(&c->sb_lock);
        }

        memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
        memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));

        BUG_ON(ja->discard_idx > ja->nr);

        pos = ja->discard_idx ?: ja->nr;

        memmove(new_buckets + pos + nr_got,
                new_buckets + pos,
                sizeof(new_buckets[0]) * (ja->nr - pos));
        memmove(new_bucket_seq + pos + nr_got,
                new_bucket_seq + pos,
                sizeof(new_bucket_seq[0]) * (ja->nr - pos));

        for (i = 0; i < nr_got; i++) {
                new_buckets[pos + i] = bu[i];
                new_bucket_seq[pos + i] = 0;
        }

        nr = ja->nr + nr_got;

        ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
        if (ret)
                goto err_unblock;

        bch2_write_super(c);

        /* Commit: */
        if (c)
                spin_lock(&c->journal.lock);

        swap(new_buckets,       ja->buckets);
        swap(new_bucket_seq,    ja->bucket_seq);
        ja->nr = nr;

        if (pos <= ja->discard_idx)
                ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
        if (pos <= ja->dirty_idx_ondisk)
                ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
        if (pos <= ja->dirty_idx)
                ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
        if (pos <= ja->cur_idx)
                ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

        if (c)
                spin_unlock(&c->journal.lock);
err_unblock:
        if (c) {
                bch2_journal_unblock(&c->journal);
                mutex_unlock(&c->sb_lock);
        }

        if (ret && !new_fs)
                for (i = 0; i < nr_got; i++)
                        bch2_trans_run(c,
                                bch2_trans_mark_metadata_bucket(trans, ca,
                                                bu[i], BCH_DATA_free, 0,
                                                BTREE_TRIGGER_transactional));
err_free:
        for (i = 0; i < nr_got; i++)
                bch2_open_bucket_put(c, ob[i]);

        kfree(new_bucket_seq);
        kfree(new_buckets);
        kfree(ob);
        kfree(bu);
        return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
                                unsigned nr)
{
        struct journal_device *ja = &ca->journal;
        struct closure cl;
        int ret = 0;

        closure_init_stack(&cl);

        down_write(&c->state_lock);

        /* don't handle reducing nr of buckets yet: */
        if (nr < ja->nr)
                goto unlock;

        while (ja->nr < nr) {
                struct disk_reservation disk_res = { 0, 0, 0 };

                /*
                 * note: journal buckets aren't really counted as _sectors_ used yet, so
                 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
                 * when space used goes up without a reservation - but we do need the
                 * reservation to ensure we'll actually be able to allocate:
                 *
                 * XXX: that's not right, disk reservations only ensure a
                 * filesystem-wide allocation will succeed, this is a device
                 * specific allocation - we can hang here:
                 */

                ret = bch2_disk_reservation_get(c, &disk_res,
                                                bucket_to_sector(ca, nr - ja->nr), 1, 0);
                if (ret)
                        break;

                ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

                bch2_disk_reservation_put(c, &disk_res);

                closure_sync(&cl);

                if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
                        break;
        }

        bch_err_fn(c, ret);
unlock:
        up_write(&c->state_lock);
        return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
{
        unsigned nr;
        int ret;

        if (dynamic_fault("bcachefs:add:journal_alloc")) {
                ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
                goto err;
        }

        /* 1/128th of the device by default: */
        nr = ca->mi.nbuckets >> 7;

        /*
         * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
         * is smaller:
         */
        nr = clamp_t(unsigned, nr,
                     BCH_JOURNAL_BUCKETS_MIN,
                     min(1 << 13,
                         (1 << 24) / ca->mi.bucket_size));
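
        /*
         * (Worked example, an editorial addition: on a 1 TiB device with
         * 512 KiB buckets, nbuckets >> 7 = 16384, and the cap is
         * min(8192, (1 << 24) / 1024) = 8192 - i.e. an 8192-bucket,
         * 4 GiB journal.)
         */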

        ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL);
err:
        bch_err_fn(ca, ret);
        return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
        for_each_online_member(c, ca) {
                if (ca->journal.nr)
                        continue;

                int ret = bch2_dev_journal_alloc(ca, true);
                if (ret) {
                        percpu_ref_put(&ca->io_ref);
                        return ret;
                }
        }

        return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
        bool ret = false;
        u64 seq;

        spin_lock(&j->lock);
        for (seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j) && !ret;
             seq++) {
                struct journal_buf *buf = journal_seq_to_buf(j, seq);

                if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
                        ret = true;
        }
        spin_unlock(&j->lock);

        return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
        wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
        if (!test_bit(JOURNAL_running, &j->flags))
                return;

        bch2_journal_reclaim_stop(j);
        bch2_journal_flush_all_pins(j);

        wait_event(j->wait, bch2_journal_entry_close(j));

        /*
         * Always write a new journal entry, to make sure the clock hands are up
         * to date (and match the superblock)
         */
        __bch2_journal_meta(j);

        journal_quiesce(j);
        cancel_delayed_work_sync(&j->write_work);

        WARN(!bch2_journal_error(j) &&
             test_bit(JOURNAL_replay_done, &j->flags) &&
             j->last_empty_seq != journal_cur_seq(j),
             "journal shutdown error: cur seq %llu but last empty seq %llu",
             journal_cur_seq(j), j->last_empty_seq);

        if (!bch2_journal_error(j))
                clear_bit(JOURNAL_running, &j->flags);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        struct journal_replay *i, **_i;
        struct genradix_iter iter;
        bool had_entries = false;
        u64 last_seq = cur_seq, nr, seq;

        if (cur_seq >= JOURNAL_SEQ_MAX) {
                bch_err(c, "cannot start: journal seq overflow");
                return -EINVAL;
        }

        genradix_for_each_reverse(&c->journal_entries, iter, _i) {
                i = *_i;

                if (journal_replay_ignore(i))
                        continue;

                last_seq = le64_to_cpu(i->j.last_seq);
                break;
        }

        nr = cur_seq - last_seq;

        if (nr + 1 > j->pin.size) {
                free_fifo(&j->pin);
                init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
                if (!j->pin.data) {
                        bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
                        return -BCH_ERR_ENOMEM_journal_pin_fifo;
                }
        }

        j->replay_journal_seq   = last_seq;
        j->replay_journal_seq_end = cur_seq;
        j->last_seq_ondisk      = last_seq;
        j->flushed_seq_ondisk   = cur_seq - 1;
        j->seq_ondisk           = cur_seq - 1;
        j->pin.front            = last_seq;
        j->pin.back             = cur_seq;
        atomic64_set(&j->seq, cur_seq - 1);

        fifo_for_each_entry_ptr(p, &j->pin, seq)
                journal_pin_list_init(p, 1);

        genradix_for_each(&c->journal_entries, iter, _i) {
                i = *_i;

                if (journal_replay_ignore(i))
                        continue;

                seq = le64_to_cpu(i->j.seq);
                BUG_ON(seq >= cur_seq);

                if (seq < last_seq)
                        continue;

                if (journal_entry_empty(&i->j))
                        j->last_empty_seq = le64_to_cpu(i->j.seq);

                p = journal_seq_pin(j, seq);

                p->devs.nr = 0;
                darray_for_each(i->ptrs, ptr)
                        bch2_dev_list_add_dev(&p->devs, ptr->dev);

                had_entries = true;
        }

        if (!had_entries)
                j->last_empty_seq = cur_seq - 1; /* to match j->seq */

        spin_lock(&j->lock);

        set_bit(JOURNAL_running, &j->flags);
        j->last_flush_write = jiffies;

        j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
        j->reservations.unwritten_idx++;

        c->last_bucket_seq_cleanup = journal_cur_seq(j);

        bch2_journal_space_available(j);
        spin_unlock(&j->lock);

        return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
        struct journal_device *ja = &ca->journal;

        for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
                kfree(ja->bio[i]);
                ja->bio[i] = NULL;
        }

        kfree(ja->buckets);
        kfree(ja->bucket_seq);
        ja->buckets     = NULL;
        ja->bucket_seq  = NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
        struct journal_device *ja = &ca->journal;
        struct bch_sb_field_journal *journal_buckets =
                bch2_sb_field_get(sb, journal);
        struct bch_sb_field_journal_v2 *journal_buckets_v2 =
                bch2_sb_field_get(sb, journal_v2);

        ja->nr = 0;

        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

                for (unsigned i = 0; i < nr; i++)
                        ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
        } else if (journal_buckets) {
                ja->nr = bch2_nr_journal_buckets(journal_buckets);
        }

        ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->bucket_seq)
                return -BCH_ERR_ENOMEM_dev_journal_init;

        unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

        for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
                ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
                                     nr_bvecs), GFP_KERNEL);
                if (!ja->bio[i])
                        return -BCH_ERR_ENOMEM_dev_journal_init;

                ja->bio[i]->ca = ca;
                ja->bio[i]->buf_idx = i;
                bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
        }

        ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->buckets)
                return -BCH_ERR_ENOMEM_dev_journal_init;

        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
                unsigned dst = 0;

                for (unsigned i = 0; i < nr; i++)
                        for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
                                ja->buckets[dst++] =
                                        le64_to_cpu(journal_buckets_v2->d[i].start) + j;
        } else if (journal_buckets) {
                for (unsigned i = 0; i < ja->nr; i++)
                        ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
        }

        return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
        if (j->wq)
                destroy_workqueue(j->wq);

        darray_exit(&j->early_journal_entries);

        for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
                kvfree(j->buf[i].data);
        free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
        static struct lock_class_key res_key;

        mutex_init(&j->buf_lock);
        spin_lock_init(&j->lock);
        spin_lock_init(&j->err_lock);
        init_waitqueue_head(&j->wait);
        INIT_DELAYED_WORK(&j->write_work, journal_write_work);
        init_waitqueue_head(&j->reclaim_wait);
        init_waitqueue_head(&j->pin_flush_wait);
        mutex_init(&j->reclaim_lock);
        mutex_init(&j->discard_lock);

        lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

        atomic64_set(&j->reservations.counter,
                ((union journal_res_state)
                 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
                return -BCH_ERR_ENOMEM_journal_pin_fifo;

        for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
                j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
                j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
                if (!j->buf[i].data)
                        return -BCH_ERR_ENOMEM_journal_buf;
                j->buf[i].idx = i;
        }

        j->pin.front = j->pin.back = 1;

        j->wq = alloc_workqueue("bcachefs_journal",
                                WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
        if (!j->wq)
                return -BCH_ERR_ENOMEM_fs_other_alloc;
        return 0;
}

/* debug: */

static const char * const bch2_journal_flags_strs[] = {
#define x(n)    #n,
        JOURNAL_FLAGS()
#undef x
        NULL
};

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        union journal_res_state s;
        unsigned long now = jiffies;
        u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;

        printbuf_tabstops_reset(out);
        printbuf_tabstop_push(out, 28);
        out->atomic++;

        rcu_read_lock();
        s = READ_ONCE(j->reservations);

        prt_printf(out, "flags:\t");
        prt_bitflags(out, bch2_journal_flags_strs, j->flags);
        prt_newline(out);
        prt_printf(out, "dirty journal entries:\t%llu/%llu\n",  fifo_used(&j->pin), j->pin.size);
        prt_printf(out, "seq:\t%llu\n",                         journal_cur_seq(j));
        prt_printf(out, "seq_ondisk:\t%llu\n",                  j->seq_ondisk);
        prt_printf(out, "last_seq:\t%llu\n",                    journal_last_seq(j));
        prt_printf(out, "last_seq_ondisk:\t%llu\n",             j->last_seq_ondisk);
        prt_printf(out, "flushed_seq_ondisk:\t%llu\n",          j->flushed_seq_ondisk);
        prt_printf(out, "watermark:\t%s\n",                     bch2_watermarks[j->watermark]);
        prt_printf(out, "each entry reserved:\t%u\n",           j->entry_u64s_reserved);
        prt_printf(out, "nr flush writes:\t%llu\n",             j->nr_flush_writes);
        prt_printf(out, "nr noflush writes:\t%llu\n",           j->nr_noflush_writes);
        prt_printf(out, "average write size:\t");
        prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
        prt_newline(out);
        prt_printf(out, "nr direct reclaim:\t%llu\n",           j->nr_direct_reclaim);
        prt_printf(out, "nr background reclaim:\t%llu\n",       j->nr_background_reclaim);
        prt_printf(out, "reclaim kicked:\t%u\n",                j->reclaim_kicked);
        prt_printf(out, "reclaim runs in:\t%u ms\n",            time_after(j->next_reclaim, now)
               ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
        prt_printf(out, "blocked:\t%u\n",                       j->blocked);
        prt_printf(out, "current entry sectors:\t%u\n",         j->cur_entry_sectors);
        prt_printf(out, "current entry error:\t%s\n",           bch2_journal_errors[j->cur_entry_error]);
        prt_printf(out, "current entry:\t");

        switch (s.cur_entry_offset) {
        case JOURNAL_ENTRY_ERROR_VAL:
                prt_printf(out, "error\n");
                break;
        case JOURNAL_ENTRY_CLOSED_VAL:
                prt_printf(out, "closed\n");
                break;
        case JOURNAL_ENTRY_BLOCKED_VAL:
                prt_printf(out, "blocked\n");
                break;
        default:
                prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
                break;
        }

        prt_printf(out, "unwritten entries:\n");
        bch2_journal_bufs_to_text(out, j);

        prt_printf(out, "space:\n");
        printbuf_indent_add(out, 2);
        prt_printf(out, "discarded\t%u:%u\n",
               j->space[journal_space_discarded].next_entry,
               j->space[journal_space_discarded].total);
        prt_printf(out, "clean ondisk\t%u:%u\n",
               j->space[journal_space_clean_ondisk].next_entry,
               j->space[journal_space_clean_ondisk].total);
        prt_printf(out, "clean\t%u:%u\n",
               j->space[journal_space_clean].next_entry,
               j->space[journal_space_clean].total);
        prt_printf(out, "total\t%u:%u\n",
               j->space[journal_space_total].next_entry,
               j->space[journal_space_total].total);
        printbuf_indent_sub(out, 2);

        for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                if (!ca->mi.durability)
                        continue;

                struct journal_device *ja = &ca->journal;

                if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
                        continue;

                if (!ja->nr)
                        continue;

                prt_printf(out, "dev %u:\n",                    ca->dev_idx);
                prt_printf(out, "durability %u:\n",             ca->mi.durability);
                printbuf_indent_add(out, 2);
                prt_printf(out, "nr\t%u\n",                     ja->nr);
                prt_printf(out, "bucket size\t%u\n",            ca->mi.bucket_size);
                prt_printf(out, "available\t%u:%u\n",           bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
                prt_printf(out, "discard_idx\t%u\n",            ja->discard_idx);
                prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n",ja->dirty_idx_ondisk,   ja->bucket_seq[ja->dirty_idx_ondisk]);
                prt_printf(out, "dirty_idx\t%u (seq %llu)\n",   ja->dirty_idx,          ja->bucket_seq[ja->dirty_idx]);
                prt_printf(out, "cur_idx\t%u (seq %llu)\n",     ja->cur_idx,            ja->bucket_seq[ja->cur_idx]);
                printbuf_indent_sub(out, 2);
        }

        prt_printf(out, "replicas want %u need %u\n", c->opts.metadata_replicas, c->opts.metadata_replicas_required);

        rcu_read_unlock();

        --out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
        spin_lock(&j->lock);
        __bch2_journal_debug_to_text(out, j);
        spin_unlock(&j->lock);
}