drivers/md/raid5-cache.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 Shaohua Li <[email protected]>
4  * Copyright (C) 2016 Song Liu <[email protected]>
5  */
6 #include <linux/kernel.h>
7 #include <linux/wait.h>
8 #include <linux/blkdev.h>
9 #include <linux/slab.h>
10 #include <linux/raid/md_p.h>
11 #include <linux/crc32c.h>
12 #include <linux/random.h>
13 #include <linux/kthread.h>
14 #include <linux/types.h>
15 #include "md.h"
16 #include "raid5.h"
17 #include "md-bitmap.h"
18 #include "raid5-log.h"
19
20 /*
21  * metadata/data is stored on disk in 4k-sized units (blocks), regardless of
22  * the underlying hardware sector size. This only works with PAGE_SIZE == 4096.
23  */
24 #define BLOCK_SECTORS (8)
25 #define BLOCK_SECTOR_SHIFT (3)
26
27 /*
28  * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
29  *
30  * In write-through mode, reclaim runs once log->max_free_space of log space
31  * has been used. This keeps recovery from having to scan too much of the log.
32  */
33 #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
34 #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
35
36 /* wake up reclaim thread periodically */
37 #define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
38 /* start flush with these full stripes */
39 #define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
40 /* reclaim stripes in groups */
41 #define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
42
43 /*
44  * We only need 2 bios per I/O unit to make progress, but ensure we
45  * have a few more available to not get too tight.
46  */
47 #define R5L_POOL_SIZE   4
48
49 static char *r5c_journal_mode_str[] = {"write-through",
50                                        "write-back"};
51 /*
52  * raid5 cache state machine
53  *
54  * With the RAID cache, each stripe works in two phases:
55  *      - caching phase
56  *      - writing-out phase
57  *
58  * These two phases are controlled by bit STRIPE_R5C_CACHING:
59  *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
60  *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
61  *
62  * When there is no journal, or the journal is in write-through mode,
63  * the stripe is always in writing-out phase.
64  *
65  * For write-back journal, the stripe is sent to caching phase on write
66  * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
67  * the write-out phase by clearing STRIPE_R5C_CACHING.
68  *
69  * Stripes in caching phase do not write the raid disks. Instead, all
70  * writes are committed from the log device. Therefore, a stripe in
71  * caching phase handles writes as:
72  *      - write to log device
73  *      - return IO
74  *
75  * Stripes in writing-out phase handle writes as:
76  *      - calculate parity
77  *      - write pending data and parity to journal
78  *      - write data and parity to raid disks
79  *      - return IO for pending writes
80  */
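
/*
 * Illustrative sketch, not part of the original file: the phase of a stripe
 * is derived from the STRIPE_R5C_CACHING bit described above. The helper
 * name below is hypothetical; the driver simply tests the bit directly.
 */
static inline bool r5c_stripe_is_caching(struct stripe_head *sh)
{
	/* set => caching phase, clear => writing-out phase */
	return test_bit(STRIPE_R5C_CACHING, &sh->state);
}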
81
82 struct r5l_log {
83         struct md_rdev *rdev;
84
85         u32 uuid_checksum;
86
87         sector_t device_size;           /* log device size, rounded to
88                                          * BLOCK_SECTORS */
89         sector_t max_free_space;        /* reclaim runs if free space
90                                          * reaches this size */
91
92         sector_t last_checkpoint;       /* log tail. where recovery scan
93                                          * starts from */
94         u64 last_cp_seq;                /* log tail sequence */
95
96         sector_t log_start;             /* log head. where new data appends */
97         u64 seq;                        /* log head sequence */
98
99         sector_t next_checkpoint;
100
101         struct mutex io_mutex;
102         struct r5l_io_unit *current_io; /* current io_unit accepting new data */
103
104         spinlock_t io_list_lock;
105         struct list_head running_ios;   /* io_units which are still running,
106                                          * and have not yet been completely
107                                          * written to the log */
108         struct list_head io_end_ios;    /* io_units which have been completely
109                                          * written to the log but not yet written
110                                          * to the RAID */
111         struct list_head flushing_ios;  /* io_units which are waiting for log
112                                          * cache flush */
113         struct list_head finished_ios;  /* io_units which settle down in log disk */
114         struct bio flush_bio;
115
116         struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */
117
118         struct kmem_cache *io_kc;
119         mempool_t io_pool;
120         struct bio_set bs;
121         mempool_t meta_pool;
122
123         struct md_thread __rcu *reclaim_thread;
124         unsigned long reclaim_target;   /* amount of space that needs to be
125                                          * reclaimed.  if it's 0, reclaim space
126                                          * used by io_units which are in
127                                          * IO_UNIT_STRIPE_END state (i.e. reclaim
128                                          * doesn't wait for a specific io_unit
129                                          * to switch to IO_UNIT_STRIPE_END
130                                          * state) */
131         wait_queue_head_t iounit_wait;
132
133         struct list_head no_space_stripes; /* pending stripes, log has no space */
134         spinlock_t no_space_stripes_lock;
135
136         bool need_cache_flush;
137
138         /* for r5c_cache */
139         enum r5c_journal_mode r5c_journal_mode;
140
141         /* all stripes in r5cache, in the order of seq at sh->log_start */
142         struct list_head stripe_in_journal_list;
143
144         spinlock_t stripe_in_journal_lock;
145         atomic_t stripe_in_journal_count;
146
147         /* to submit async io_units, to fulfill ordering of flush */
148         struct work_struct deferred_io_work;
150         /* to disable the write back cache when the array is degraded */
150         struct work_struct disable_writeback_work;
151
152         /* for chunk_aligned_read in writeback mode, details below */
153         spinlock_t tree_lock;
154         struct radix_tree_root big_stripe_tree;
155 };
156
157 /*
158  * Enable chunk_aligned_read() with write back cache.
159  *
160  * Each chunk may contain more than one stripe (for example, a 256kB
161  * chunk contains 64 4kB pages, so such a chunk contains 64 stripes). For
162  * chunk_aligned_read, these stripes are grouped into one "big_stripe".
163  * For each big_stripe, we count how many stripes of this big_stripe
164  * are in the write back cache. These data are tracked in a radix tree
165  * (big_stripe_tree). We use radix_tree item pointer as the counter.
166  * r5c_tree_index() is used to calculate keys for the radix tree.
167  *
168  * chunk_aligned_read() calls r5c_big_stripe_cached() to look up
169  * big_stripe of each chunk in the tree. If this big_stripe is in the
170  * tree, chunk_aligned_read() aborts. This look up is protected by
171  * rcu_read_lock().
172  *
173  * It is necessary to remember whether a stripe is counted in
174  * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
175  * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
176  * two flags is set, the stripe is counted in big_stripe_tree. This
177  * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
178  * r5c_try_caching_write(); and moving clear_bit of
179  * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
180  * r5c_finish_stripe_write_out().
181  */
182
183 /*
184  * the radix tree requires the lowest 2 bits of the data pointer to be 2b'00.
185  * So it is necessary to left shift the counter by 2 bits before using it
186  * as the data pointer of the tree.
187  */
188 #define R5C_RADIX_COUNT_SHIFT 2
189
190 /*
191  * calculate key for big_stripe_tree
192  *
193  * sect: align_bi->bi_iter.bi_sector or sh->sector
194  */
195 static inline sector_t r5c_tree_index(struct r5conf *conf,
196                                       sector_t sect)
197 {
198         sector_div(sect, conf->chunk_sectors);
199         return sect;
200 }
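
/*
 * Illustrative sketch, not part of the original file: how the per-big_stripe
 * counter described above can be packed into and out of a radix tree item
 * pointer using R5C_RADIX_COUNT_SHIFT. The helper names are hypothetical;
 * the real accounting lives in r5c_try_caching_write() and
 * r5c_finish_stripe_write_out() later in this file.
 */
static inline void *r5c_count_to_item(unsigned long count)
{
	/* keep the low 2 bits clear, as the radix tree requires */
	return (void *)(count << R5C_RADIX_COUNT_SHIFT);
}

static inline unsigned long r5c_item_to_count(void *item)
{
	return (unsigned long)item >> R5C_RADIX_COUNT_SHIFT;
}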
201
202 /*
203  * An IO range starts at a metadata block and ends at the next metadata
204  * block. The io_unit's metadata block tracks the data/parity that follows it.
205  * The io_unit is written to the log disk with a normal write; as we always
206  * flush the log disk first and only then start moving data to the raid disks,
207  * there is no requirement to write the io_unit with FLUSH/FUA.
208  */
209 struct r5l_io_unit {
210         struct r5l_log *log;
211
212         struct page *meta_page; /* store meta block */
213         int meta_offset;        /* current offset in meta_page */
214
215         struct bio *current_bio;/* current_bio accepting new data */
216
217         atomic_t pending_stripe;/* how many stripes not flushed to raid */
218         u64 seq;                /* seq number of the metablock */
219         sector_t log_start;     /* where the io_unit starts */
220         sector_t log_end;       /* where the io_unit ends */
221         struct list_head log_sibling; /* log->running_ios */
222         struct list_head stripe_list; /* stripes added to the io_unit */
223
224         int state;
225         bool need_split_bio;
226         struct bio *split_bio;
227
228         unsigned int has_flush:1;               /* include flush request */
229         unsigned int has_fua:1;                 /* include fua request */
230         unsigned int has_null_flush:1;          /* include null flush request */
231         unsigned int has_flush_payload:1;       /* include flush payload  */
232         /*
233          * the io isn't submitted yet; a flush/fua request can only be
234          * submitted once it is the first IO in the running_ios list
235          */
236         unsigned int io_deferred:1;
237
238         struct bio_list flush_barriers;   /* size == 0 flush bios */
239 };
240
241 /* r5l_io_unit state */
242 enum r5l_io_unit_state {
243         IO_UNIT_RUNNING = 0,    /* accepting new IO */
244         IO_UNIT_IO_START = 1,   /* io_unit bio started writing to the log,
245                                  * doesn't accept new bios */
246         IO_UNIT_IO_END = 2,     /* io_unit bio finished writing to the log */
247         IO_UNIT_STRIPE_END = 3, /* stripe data finished writing to raid */
248 };
249
250 bool r5c_is_writeback(struct r5l_log *log)
251 {
252         return (log != NULL &&
253                 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
254 }
255
256 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
257 {
258         start += inc;
259         if (start >= log->device_size)
260                 start = start - log->device_size;
261         return start;
262 }
263
264 static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
265                                   sector_t end)
266 {
267         if (end >= start)
268                 return end - start;
269         else
270                 return end + log->device_size - start;
271 }
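
/*
 * Example (illustrative numbers): with device_size = 1000 sectors,
 * r5l_ring_add(log, 900, 200) wraps around to sector 100, and
 * r5l_ring_distance(log, 900, 100) returns 100 + 1000 - 900 = 200 sectors.
 */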
272
273 static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
274 {
275         sector_t used_size;
276
277         used_size = r5l_ring_distance(log, log->last_checkpoint,
278                                         log->log_start);
279
280         return log->device_size > used_size + size;
281 }
282
283 static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
284                                     enum r5l_io_unit_state state)
285 {
286         if (WARN_ON(io->state >= state))
287                 return;
288         io->state = state;
289 }
290
291 static void
292 r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
293 {
294         struct bio *wbi, *wbi2;
295
296         wbi = dev->written;
297         dev->written = NULL;
298         while (wbi && wbi->bi_iter.bi_sector <
299                dev->sector + RAID5_STRIPE_SECTORS(conf)) {
300                 wbi2 = r5_next_bio(conf, wbi, dev->sector);
301                 md_write_end(conf->mddev);
302                 bio_endio(wbi);
303                 wbi = wbi2;
304         }
305 }
306
307 void r5c_handle_cached_data_endio(struct r5conf *conf,
308                                   struct stripe_head *sh, int disks)
309 {
310         int i;
311
312         for (i = sh->disks; i--; ) {
313                 if (sh->dev[i].written) {
314                         set_bit(R5_UPTODATE, &sh->dev[i].flags);
315                         r5c_return_dev_pending_writes(conf, &sh->dev[i]);
316                         md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
317                                            RAID5_STRIPE_SECTORS(conf),
318                                            !test_bit(STRIPE_DEGRADED, &sh->state),
319                                            0);
320                 }
321         }
322 }
323
324 void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
325
326 /* Check whether we should flush some stripes to free up stripe cache */
327 void r5c_check_stripe_cache_usage(struct r5conf *conf)
328 {
329         int total_cached;
330         struct r5l_log *log = READ_ONCE(conf->log);
331
332         if (!r5c_is_writeback(log))
333                 return;
334
335         total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
336                 atomic_read(&conf->r5c_cached_full_stripes);
337
338         /*
339          * The following condition is true for either of the following:
340          *   - stripe cache pressure high:
341          *          total_cached > 3/4 min_nr_stripes ||
342          *          empty_inactive_list_nr > 0
343          *   - stripe cache pressure moderate:
344          *          total_cached > 1/2 min_nr_stripes
345          */
346         if (total_cached > conf->min_nr_stripes * 1 / 2 ||
347             atomic_read(&conf->empty_inactive_list_nr) > 0)
348                 r5l_wake_reclaim(log, 0);
349 }
350
351 /*
352  * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
353  * stripes in the cache
354  */
355 void r5c_check_cached_full_stripe(struct r5conf *conf)
356 {
357         struct r5l_log *log = READ_ONCE(conf->log);
358
359         if (!r5c_is_writeback(log))
360                 return;
361
362         /*
363          * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
364          * or a full stripe (chunk size / 4k stripes).
365          */
366         if (atomic_read(&conf->r5c_cached_full_stripes) >=
367             min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
368                 conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
369                 r5l_wake_reclaim(log, 0);
370 }
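
/*
 * Example (illustrative numbers): with max_nr_stripes = 256 and a 256kB
 * chunk (512 sectors, i.e. 64 4kB stripes), reclaim is woken once
 * min(256 / 4, 512 >> 3) = 64 full stripes are cached.
 */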
371
372 /*
373  * Total log space (in sectors) needed to flush all data in cache
374  *
375  * To avoid deadlock due to log space, it is necessary to reserve log
376  * space to flush critical stripes (stripes that occupy log space near
377  * last_checkpoint). This function helps check how much log space is
378  * required to flush all cached stripes.
379  *
380  * To reduce log space requirements, two mechanisms are used to give cache
381  * flush higher priorities:
382  *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
383  *       stripes ALREADY in journal can be flushed w/o pending writes;
384  *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
385  *       can be delayed (r5l_add_no_space_stripe).
386  *
387  * In cache flush, the stripe goes through 1 and then 2. For a stripe that
388  * already passed 1, flushing it requires at most (conf->max_degraded + 1)
389  * pages of journal space. For stripes that have not passed 1, flushing them
390  * requires (conf->raid_disks + 1) pages of journal space. There are at
391  * most (conf->group_cnt + 1) stripes that have passed 1. So the total journal space
392  * required to flush all cached stripes (in pages) is:
393  *
394  *     (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
395  *     (group_cnt + 1) * (raid_disks + 1)
396  * or
397  *     (stripe_in_journal_count) * (max_degraded + 1) +
398  *     (group_cnt + 1) * (raid_disks - max_degraded)
399  */
400 static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
401 {
402         struct r5l_log *log = READ_ONCE(conf->log);
403
404         if (!r5c_is_writeback(log))
405                 return 0;
406
407         return BLOCK_SECTORS *
408                 ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
409                  (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
410 }
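
/*
 * Worked example (illustrative numbers): for a RAID6 array with
 * raid_disks = 6, max_degraded = 2, group_cnt = 0 and 100 stripes in the
 * journal, the space required is
 *     BLOCK_SECTORS * ((2 + 1) * 100 + (6 - 2) * (0 + 1))
 *   = 8 * 304 = 2432 sectors.
 */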
411
412 /*
413  * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
414  *
415  * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
416  * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
417  * device is less than 2x of reclaim_required_space.
418  */
419 static inline void r5c_update_log_state(struct r5l_log *log)
420 {
421         struct r5conf *conf = log->rdev->mddev->private;
422         sector_t free_space;
423         sector_t reclaim_space;
424         bool wake_reclaim = false;
425
426         if (!r5c_is_writeback(log))
427                 return;
428
429         free_space = r5l_ring_distance(log, log->log_start,
430                                        log->last_checkpoint);
431         reclaim_space = r5c_log_required_to_flush_cache(conf);
432         if (free_space < 2 * reclaim_space)
433                 set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
434         else {
435                 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
436                         wake_reclaim = true;
437                 clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
438         }
439         if (free_space < 3 * reclaim_space)
440                 set_bit(R5C_LOG_TIGHT, &conf->cache_state);
441         else
442                 clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
443
444         if (wake_reclaim)
445                 r5l_wake_reclaim(log, 0);
446 }
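
/*
 * Example (illustrative, continuing the numbers above): with
 * reclaim_space = 2432 sectors, R5C_LOG_CRITICAL is set while free_space is
 * below 4864 sectors, and R5C_LOG_TIGHT while it is below 7296 sectors.
 */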
447
448 /*
449  * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
450  * This function should only be called in write-back mode.
451  */
452 void r5c_make_stripe_write_out(struct stripe_head *sh)
453 {
454         struct r5conf *conf = sh->raid_conf;
455         struct r5l_log *log = READ_ONCE(conf->log);
456
457         BUG_ON(!r5c_is_writeback(log));
458
459         WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
460         clear_bit(STRIPE_R5C_CACHING, &sh->state);
461
462         if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
463                 atomic_inc(&conf->preread_active_stripes);
464 }
465
466 static void r5c_handle_data_cached(struct stripe_head *sh)
467 {
468         int i;
469
470         for (i = sh->disks; i--; )
471                 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
472                         set_bit(R5_InJournal, &sh->dev[i].flags);
473                         clear_bit(R5_LOCKED, &sh->dev[i].flags);
474                 }
475         clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
476 }
477
478 /*
479  * this journal write must contain full parity,
480  * it may also contain some data pages
481  */
482 static void r5c_handle_parity_cached(struct stripe_head *sh)
483 {
484         int i;
485
486         for (i = sh->disks; i--; )
487                 if (test_bit(R5_InJournal, &sh->dev[i].flags))
488                         set_bit(R5_Wantwrite, &sh->dev[i].flags);
489 }
490
491 /*
492  * Setting proper flags after writing (or flushing) data and/or parity to the
493  * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
494  */
495 static void r5c_finish_cache_stripe(struct stripe_head *sh)
496 {
497         struct r5l_log *log = READ_ONCE(sh->raid_conf->log);
498
499         if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
500                 BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
501                 /*
502                  * Set R5_InJournal for parity dev[pd_idx]. This means
503                  * all data AND parity are in the journal. For RAID 6, it is
504                  * NOT necessary to set the flag for dev[qd_idx], as the
505                  * two parities are written out together.
506                  */
507                 set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
508         } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
509                 r5c_handle_data_cached(sh);
510         } else {
511                 r5c_handle_parity_cached(sh);
512                 set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
513         }
514 }
515
516 static void r5l_io_run_stripes(struct r5l_io_unit *io)
517 {
518         struct stripe_head *sh, *next;
519
520         list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
521                 list_del_init(&sh->log_list);
522
523                 r5c_finish_cache_stripe(sh);
524
525                 set_bit(STRIPE_HANDLE, &sh->state);
526                 raid5_release_stripe(sh);
527         }
528 }
529
530 static void r5l_log_run_stripes(struct r5l_log *log)
531 {
532         struct r5l_io_unit *io, *next;
533
534         lockdep_assert_held(&log->io_list_lock);
535
536         list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
537                 /* don't change list order */
538                 if (io->state < IO_UNIT_IO_END)
539                         break;
540
541                 list_move_tail(&io->log_sibling, &log->finished_ios);
542                 r5l_io_run_stripes(io);
543         }
544 }
545
546 static void r5l_move_to_end_ios(struct r5l_log *log)
547 {
548         struct r5l_io_unit *io, *next;
549
550         lockdep_assert_held(&log->io_list_lock);
551
552         list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
553                 /* don't change list order */
554                 if (io->state < IO_UNIT_IO_END)
555                         break;
556                 list_move_tail(&io->log_sibling, &log->io_end_ios);
557         }
558 }
559
560 static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
561 static void r5l_log_endio(struct bio *bio)
562 {
563         struct r5l_io_unit *io = bio->bi_private;
564         struct r5l_io_unit *io_deferred;
565         struct r5l_log *log = io->log;
566         unsigned long flags;
567         bool has_null_flush;
568         bool has_flush_payload;
569
570         if (bio->bi_status)
571                 md_error(log->rdev->mddev, log->rdev);
572
573         bio_put(bio);
574         mempool_free(io->meta_page, &log->meta_pool);
575
576         spin_lock_irqsave(&log->io_list_lock, flags);
577         __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
578
579         /*
580          * if the io does not have null_flush or a flush payload,
581          * it is not safe to access it after releasing io_list_lock.
582          * Therefore, it is necessary to check the condition with
583          * the lock held.
584          */
585         has_null_flush = io->has_null_flush;
586         has_flush_payload = io->has_flush_payload;
587
588         if (log->need_cache_flush && !list_empty(&io->stripe_list))
589                 r5l_move_to_end_ios(log);
590         else
591                 r5l_log_run_stripes(log);
592         if (!list_empty(&log->running_ios)) {
593                 /*
594                  * FLUSH/FUA io_unit is deferred because of ordering, now we
595                  * can dispatch it
596                  */
597                 io_deferred = list_first_entry(&log->running_ios,
598                                                struct r5l_io_unit, log_sibling);
599                 if (io_deferred->io_deferred)
600                         schedule_work(&log->deferred_io_work);
601         }
602
603         spin_unlock_irqrestore(&log->io_list_lock, flags);
604
605         if (log->need_cache_flush)
606                 md_wakeup_thread(log->rdev->mddev->thread);
607
608         /* finish flush-only io_units and PAYLOAD_FLUSH-only io_units */
609         if (has_null_flush) {
610                 struct bio *bi;
611
612                 WARN_ON(bio_list_empty(&io->flush_barriers));
613                 while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
614                         bio_endio(bi);
615                         if (atomic_dec_and_test(&io->pending_stripe)) {
616                                 __r5l_stripe_write_finished(io);
617                                 return;
618                         }
619                 }
620         }
621         /* decrease pending_stripe for flush payload */
622         if (has_flush_payload)
623                 if (atomic_dec_and_test(&io->pending_stripe))
624                         __r5l_stripe_write_finished(io);
625 }
626
627 static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
628 {
629         unsigned long flags;
630
631         spin_lock_irqsave(&log->io_list_lock, flags);
632         __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
633         spin_unlock_irqrestore(&log->io_list_lock, flags);
634
635         /*
636          * In case of journal device failure, submit_bio will get an error
637          * and call endio, and active stripes will then continue the write
638          * process. Therefore, it is not necessary to check the Faulty bit
639          * of the journal device here.
640          *
641          * We can't check split_bio after current_bio is submitted. If
642          * io->split_bio is null, after current_bio is submitted, current_bio
643          * might already be completed and the io_unit is freed. We submit
644          * split_bio first to avoid the issue.
645          */
646         if (io->split_bio) {
647                 if (io->has_flush)
648                         io->split_bio->bi_opf |= REQ_PREFLUSH;
649                 if (io->has_fua)
650                         io->split_bio->bi_opf |= REQ_FUA;
651                 submit_bio(io->split_bio);
652         }
653
654         if (io->has_flush)
655                 io->current_bio->bi_opf |= REQ_PREFLUSH;
656         if (io->has_fua)
657                 io->current_bio->bi_opf |= REQ_FUA;
658         submit_bio(io->current_bio);
659 }
660
661 /* deferred io_unit will be dispatched here */
662 static void r5l_submit_io_async(struct work_struct *work)
663 {
664         struct r5l_log *log = container_of(work, struct r5l_log,
665                                            deferred_io_work);
666         struct r5l_io_unit *io = NULL;
667         unsigned long flags;
668
669         spin_lock_irqsave(&log->io_list_lock, flags);
670         if (!list_empty(&log->running_ios)) {
671                 io = list_first_entry(&log->running_ios, struct r5l_io_unit,
672                                       log_sibling);
673                 if (!io->io_deferred)
674                         io = NULL;
675                 else
676                         io->io_deferred = 0;
677         }
678         spin_unlock_irqrestore(&log->io_list_lock, flags);
679         if (io)
680                 r5l_do_submit_io(log, io);
681 }
682
683 static void r5c_disable_writeback_async(struct work_struct *work)
684 {
685         struct r5l_log *log = container_of(work, struct r5l_log,
686                                            disable_writeback_work);
687         struct mddev *mddev = log->rdev->mddev;
688         struct r5conf *conf = mddev->private;
689
690         if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
691                 return;
692         pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
693                 mdname(mddev));
694
695         /* wait for the superblock change before suspend */
696         wait_event(mddev->sb_wait,
697                    !READ_ONCE(conf->log) ||
698                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
699
700         log = READ_ONCE(conf->log);
701         if (log) {
702                 mddev_suspend(mddev, false);
703                 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
704                 mddev_resume(mddev);
705         }
706 }
707
708 static void r5l_submit_current_io(struct r5l_log *log)
709 {
710         struct r5l_io_unit *io = log->current_io;
711         struct r5l_meta_block *block;
712         unsigned long flags;
713         u32 crc;
714         bool do_submit = true;
715
716         if (!io)
717                 return;
718
719         block = page_address(io->meta_page);
720         block->meta_size = cpu_to_le32(io->meta_offset);
721         crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
722         block->checksum = cpu_to_le32(crc);
723
724         log->current_io = NULL;
725         spin_lock_irqsave(&log->io_list_lock, flags);
726         if (io->has_flush || io->has_fua) {
727                 if (io != list_first_entry(&log->running_ios,
728                                            struct r5l_io_unit, log_sibling)) {
729                         io->io_deferred = 1;
730                         do_submit = false;
731                 }
732         }
733         spin_unlock_irqrestore(&log->io_list_lock, flags);
734         if (do_submit)
735                 r5l_do_submit_io(log, io);
736 }
737
738 static struct bio *r5l_bio_alloc(struct r5l_log *log)
739 {
740         struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
741                                            REQ_OP_WRITE, GFP_NOIO, &log->bs);
742
743         bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
744
745         return bio;
746 }
747
748 static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
749 {
750         log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
751
752         r5c_update_log_state(log);
753         /*
754          * If we filled up the log device, start from the beginning again,
755          * which will require a new bio.
756          *
757          * Note: for this to work properly the log size needs to be a multiple
758          * of BLOCK_SECTORS.
759          */
760         if (log->log_start == 0)
761                 io->need_split_bio = true;
762
763         io->log_end = log->log_start;
764 }
765
766 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
767 {
768         struct r5l_io_unit *io;
769         struct r5l_meta_block *block;
770
771         io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
772         if (!io)
773                 return NULL;
774         memset(io, 0, sizeof(*io));
775
776         io->log = log;
777         INIT_LIST_HEAD(&io->log_sibling);
778         INIT_LIST_HEAD(&io->stripe_list);
779         bio_list_init(&io->flush_barriers);
780         io->state = IO_UNIT_RUNNING;
781
782         io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
783         block = page_address(io->meta_page);
784         clear_page(block);
785         block->magic = cpu_to_le32(R5LOG_MAGIC);
786         block->version = R5LOG_VERSION;
787         block->seq = cpu_to_le64(log->seq);
788         block->position = cpu_to_le64(log->log_start);
789
790         io->log_start = log->log_start;
791         io->meta_offset = sizeof(struct r5l_meta_block);
792         io->seq = log->seq++;
793
794         io->current_bio = r5l_bio_alloc(log);
795         io->current_bio->bi_end_io = r5l_log_endio;
796         io->current_bio->bi_private = io;
797         __bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
798
799         r5_reserve_log_entry(log, io);
800
801         spin_lock_irq(&log->io_list_lock);
802         list_add_tail(&io->log_sibling, &log->running_ios);
803         spin_unlock_irq(&log->io_list_lock);
804
805         return io;
806 }
807
808 static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
809 {
810         if (log->current_io &&
811             log->current_io->meta_offset + payload_size > PAGE_SIZE)
812                 r5l_submit_current_io(log);
813
814         if (!log->current_io) {
815                 log->current_io = r5l_new_meta(log);
816                 if (!log->current_io)
817                         return -ENOMEM;
818         }
819
820         return 0;
821 }
822
823 static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
824                                     sector_t location,
825                                     u32 checksum1, u32 checksum2,
826                                     bool checksum2_valid)
827 {
828         struct r5l_io_unit *io = log->current_io;
829         struct r5l_payload_data_parity *payload;
830
831         payload = page_address(io->meta_page) + io->meta_offset;
832         payload->header.type = cpu_to_le16(type);
833         payload->header.flags = cpu_to_le16(0);
834         payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
835                                     (PAGE_SHIFT - 9));
836         payload->location = cpu_to_le64(location);
837         payload->checksum[0] = cpu_to_le32(checksum1);
838         if (checksum2_valid)
839                 payload->checksum[1] = cpu_to_le32(checksum2);
840
841         io->meta_offset += sizeof(struct r5l_payload_data_parity) +
842                 sizeof(__le32) * (1 + !!checksum2_valid);
843 }
844
845 static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
846 {
847         struct r5l_io_unit *io = log->current_io;
848
849         if (io->need_split_bio) {
850                 BUG_ON(io->split_bio);
851                 io->split_bio = io->current_bio;
852                 io->current_bio = r5l_bio_alloc(log);
853                 bio_chain(io->current_bio, io->split_bio);
854                 io->need_split_bio = false;
855         }
856
857         if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
858                 BUG();
859
860         r5_reserve_log_entry(log, io);
861 }
862
863 static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
864 {
865         struct mddev *mddev = log->rdev->mddev;
866         struct r5conf *conf = mddev->private;
867         struct r5l_io_unit *io;
868         struct r5l_payload_flush *payload;
869         int meta_size;
870
871         /*
872          * payload_flush requires extra writes to the journal.
873          * To avoid handling the extra IO in quiesce, just skip
874          * flush_payload
875          */
876         if (conf->quiesce)
877                 return;
878
879         mutex_lock(&log->io_mutex);
880         meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);
881
882         if (r5l_get_meta(log, meta_size)) {
883                 mutex_unlock(&log->io_mutex);
884                 return;
885         }
886
887         /* current implementation is one stripe per flush payload */
888         io = log->current_io;
889         payload = page_address(io->meta_page) + io->meta_offset;
890         payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
891         payload->header.flags = cpu_to_le16(0);
892         payload->size = cpu_to_le32(sizeof(__le64));
893         payload->flush_stripes[0] = cpu_to_le64(sect);
894         io->meta_offset += meta_size;
895         /* multiple flush payloads count as one pending_stripe */
896         if (!io->has_flush_payload) {
897                 io->has_flush_payload = 1;
898                 atomic_inc(&io->pending_stripe);
899         }
900         mutex_unlock(&log->io_mutex);
901 }
902
903 static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
904                            int data_pages, int parity_pages)
905 {
906         int i;
907         int meta_size;
908         int ret;
909         struct r5l_io_unit *io;
910
911         meta_size =
912                 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
913                  * data_pages) +
914                 sizeof(struct r5l_payload_data_parity) +
915                 sizeof(__le32) * parity_pages;
916
917         ret = r5l_get_meta(log, meta_size);
918         if (ret)
919                 return ret;
920
921         io = log->current_io;
922
923         if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
924                 io->has_flush = 1;
925
926         for (i = 0; i < sh->disks; i++) {
927                 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
928                     test_bit(R5_InJournal, &sh->dev[i].flags))
929                         continue;
930                 if (i == sh->pd_idx || i == sh->qd_idx)
931                         continue;
932                 if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
933                     log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
934                         io->has_fua = 1;
935                         /*
936                          * we need to flush the journal to make sure recovery
937                          * can reach the data with the fua flag
938                          */
939                         io->has_flush = 1;
940                 }
941                 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
942                                         raid5_compute_blocknr(sh, i, 0),
943                                         sh->dev[i].log_checksum, 0, false);
944                 r5l_append_payload_page(log, sh->dev[i].page);
945         }
946
947         if (parity_pages == 2) {
948                 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
949                                         sh->sector, sh->dev[sh->pd_idx].log_checksum,
950                                         sh->dev[sh->qd_idx].log_checksum, true);
951                 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
952                 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
953         } else if (parity_pages == 1) {
954                 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
955                                         sh->sector, sh->dev[sh->pd_idx].log_checksum,
956                                         0, false);
957                 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
958         } else  /* Just writing data, not parity, in caching phase */
959                 BUG_ON(parity_pages != 0);
960
961         list_add_tail(&sh->log_list, &io->stripe_list);
962         atomic_inc(&io->pending_stripe);
963         sh->log_io = io;
964
965         if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
966                 return 0;
967
968         if (sh->log_start == MaxSector) {
969                 BUG_ON(!list_empty(&sh->r5c));
970                 sh->log_start = io->log_start;
971                 spin_lock_irq(&log->stripe_in_journal_lock);
972                 list_add_tail(&sh->r5c,
973                               &log->stripe_in_journal_list);
974                 spin_unlock_irq(&log->stripe_in_journal_lock);
975                 atomic_inc(&log->stripe_in_journal_count);
976         }
977         return 0;
978 }
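
/*
 * Example (illustrative numbers): for a stripe with data_pages = 3 and
 * parity_pages = 2 (RAID6), r5l_log_stripe() appends
 *     3 * (sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) +
 *     sizeof(struct r5l_payload_data_parity) + 2 * sizeof(__le32)
 * bytes of metadata, which must fit in the remaining space of the 4k
 * meta_page (see r5l_get_meta()).
 */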
979
980 /* add stripe to no_space_stripes, and then wake up reclaim */
981 static inline void r5l_add_no_space_stripe(struct r5l_log *log,
982                                            struct stripe_head *sh)
983 {
984         spin_lock(&log->no_space_stripes_lock);
985         list_add_tail(&sh->log_list, &log->no_space_stripes);
986         spin_unlock(&log->no_space_stripes_lock);
987 }
988
989 /*
990  * running in raid5d, where reclaim could wait for raid5d too (when it flushes
991  * data from log to raid disks), so we shouldn't wait for reclaim here
992  */
993 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
994 {
995         struct r5conf *conf = sh->raid_conf;
996         int write_disks = 0;
997         int data_pages, parity_pages;
998         int reserve;
999         int i;
1000         int ret = 0;
1001         bool wake_reclaim = false;
1002
1003         if (!log)
1004                 return -EAGAIN;
1005         /* Don't support stripe batch */
1006         if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
1007             test_bit(STRIPE_SYNCING, &sh->state)) {
1008                 /* the stripe is written to log, we start writing it to raid */
1009                 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
1010                 return -EAGAIN;
1011         }
1012
1013         WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
1014
1015         for (i = 0; i < sh->disks; i++) {
1016                 void *addr;
1017
1018                 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
1019                     test_bit(R5_InJournal, &sh->dev[i].flags))
1020                         continue;
1021
1022                 write_disks++;
1023                 /* checksum is already calculated in last run */
1024                 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
1025                         continue;
1026                 addr = kmap_atomic(sh->dev[i].page);
1027                 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
1028                                                     addr, PAGE_SIZE);
1029                 kunmap_atomic(addr);
1030         }
1031         parity_pages = 1 + !!(sh->qd_idx >= 0);
1032         data_pages = write_disks - parity_pages;
1033
1034         set_bit(STRIPE_LOG_TRAPPED, &sh->state);
1035         /*
1036          * The stripe must enter state machine again to finish the write, so
1037          * don't delay.
1038          */
1039         clear_bit(STRIPE_DELAYED, &sh->state);
1040         atomic_inc(&sh->count);
1041
1042         mutex_lock(&log->io_mutex);
1043         /* meta + data */
1044         reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
1045
1046         if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1047                 if (!r5l_has_free_space(log, reserve)) {
1048                         r5l_add_no_space_stripe(log, sh);
1049                         wake_reclaim = true;
1050                 } else {
1051                         ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1052                         if (ret) {
1053                                 spin_lock_irq(&log->io_list_lock);
1054                                 list_add_tail(&sh->log_list,
1055                                               &log->no_mem_stripes);
1056                                 spin_unlock_irq(&log->io_list_lock);
1057                         }
1058                 }
1059         } else {  /* R5C_JOURNAL_MODE_WRITE_BACK */
1060                 /*
1061                  * log space critical, do not process stripes that are
1062                  * not in cache yet (sh->log_start == MaxSector).
1063                  */
1064                 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
1065                     sh->log_start == MaxSector) {
1066                         r5l_add_no_space_stripe(log, sh);
1067                         wake_reclaim = true;
1068                         reserve = 0;
1069                 } else if (!r5l_has_free_space(log, reserve)) {
1070                         if (sh->log_start == log->last_checkpoint)
1071                                 BUG();
1072                         else
1073                                 r5l_add_no_space_stripe(log, sh);
1074                 } else {
1075                         ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1076                         if (ret) {
1077                                 spin_lock_irq(&log->io_list_lock);
1078                                 list_add_tail(&sh->log_list,
1079                                               &log->no_mem_stripes);
1080                                 spin_unlock_irq(&log->io_list_lock);
1081                         }
1082                 }
1083         }
1084
1085         mutex_unlock(&log->io_mutex);
1086         if (wake_reclaim)
1087                 r5l_wake_reclaim(log, reserve);
1088         return 0;
1089 }
1090
1091 void r5l_write_stripe_run(struct r5l_log *log)
1092 {
1093         if (!log)
1094                 return;
1095         mutex_lock(&log->io_mutex);
1096         r5l_submit_current_io(log);
1097         mutex_unlock(&log->io_mutex);
1098 }
1099
1100 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
1101 {
1102         if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1103                 /*
1104                  * in write through (journal only)
1105                  * we flush log disk cache first, then write stripe data to
1106                  * raid disks. So if bio is finished, the log disk cache is
1107                  * flushed already. The recovery guarantees we can recovery
1108                  * the bio from log disk, so we don't need to flush again
1109                  */
1110                 if (bio->bi_iter.bi_size == 0) {
1111                         bio_endio(bio);
1112                         return 0;
1113                 }
1114                 bio->bi_opf &= ~REQ_PREFLUSH;
1115         } else {
1116                 /* write back (with cache) */
1117                 if (bio->bi_iter.bi_size == 0) {
1118                         mutex_lock(&log->io_mutex);
1119                         r5l_get_meta(log, 0);
1120                         bio_list_add(&log->current_io->flush_barriers, bio);
1121                         log->current_io->has_flush = 1;
1122                         log->current_io->has_null_flush = 1;
1123                         atomic_inc(&log->current_io->pending_stripe);
1124                         r5l_submit_current_io(log);
1125                         mutex_unlock(&log->io_mutex);
1126                         return 0;
1127                 }
1128         }
1129         return -EAGAIN;
1130 }
1131
1132 /* This will run after log space is reclaimed */
1133 static void r5l_run_no_space_stripes(struct r5l_log *log)
1134 {
1135         struct stripe_head *sh;
1136
1137         spin_lock(&log->no_space_stripes_lock);
1138         while (!list_empty(&log->no_space_stripes)) {
1139                 sh = list_first_entry(&log->no_space_stripes,
1140                                       struct stripe_head, log_list);
1141                 list_del_init(&sh->log_list);
1142                 set_bit(STRIPE_HANDLE, &sh->state);
1143                 raid5_release_stripe(sh);
1144         }
1145         spin_unlock(&log->no_space_stripes_lock);
1146 }
1147
1148 /*
1149  * calculate new last_checkpoint
1150  * for write through mode, returns log->next_checkpoint
1151  * for write back, returns log_start of first sh in stripe_in_journal_list
1152  */
1153 static sector_t r5c_calculate_new_cp(struct r5conf *conf)
1154 {
1155         struct stripe_head *sh;
1156         struct r5l_log *log = READ_ONCE(conf->log);
1157         sector_t new_cp;
1158         unsigned long flags;
1159
1160         if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
1161                 return log->next_checkpoint;
1162
1163         spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1164         if (list_empty(&log->stripe_in_journal_list)) {
1165                 /* all stripes flushed */
1166                 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1167                 return log->next_checkpoint;
1168         }
1169         sh = list_first_entry(&log->stripe_in_journal_list,
1170                               struct stripe_head, r5c);
1171         new_cp = sh->log_start;
1172         spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1173         return new_cp;
1174 }
1175
1176 static sector_t r5l_reclaimable_space(struct r5l_log *log)
1177 {
1178         struct r5conf *conf = log->rdev->mddev->private;
1179
1180         return r5l_ring_distance(log, log->last_checkpoint,
1181                                  r5c_calculate_new_cp(conf));
1182 }
1183
1184 static void r5l_run_no_mem_stripe(struct r5l_log *log)
1185 {
1186         struct stripe_head *sh;
1187
1188         lockdep_assert_held(&log->io_list_lock);
1189
1190         if (!list_empty(&log->no_mem_stripes)) {
1191                 sh = list_first_entry(&log->no_mem_stripes,
1192                                       struct stripe_head, log_list);
1193                 list_del_init(&sh->log_list);
1194                 set_bit(STRIPE_HANDLE, &sh->state);
1195                 raid5_release_stripe(sh);
1196         }
1197 }
1198
1199 static bool r5l_complete_finished_ios(struct r5l_log *log)
1200 {
1201         struct r5l_io_unit *io, *next;
1202         bool found = false;
1203
1204         lockdep_assert_held(&log->io_list_lock);
1205
1206         list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
1207                 /* don't change list order */
1208                 if (io->state < IO_UNIT_STRIPE_END)
1209                         break;
1210
1211                 log->next_checkpoint = io->log_start;
1212
1213                 list_del(&io->log_sibling);
1214                 mempool_free(io, &log->io_pool);
1215                 r5l_run_no_mem_stripe(log);
1216
1217                 found = true;
1218         }
1219
1220         return found;
1221 }
1222
1223 static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
1224 {
1225         struct r5l_log *log = io->log;
1226         struct r5conf *conf = log->rdev->mddev->private;
1227         unsigned long flags;
1228
1229         spin_lock_irqsave(&log->io_list_lock, flags);
1230         __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
1231
1232         if (!r5l_complete_finished_ios(log)) {
1233                 spin_unlock_irqrestore(&log->io_list_lock, flags);
1234                 return;
1235         }
1236
1237         if (r5l_reclaimable_space(log) > log->max_free_space ||
1238             test_bit(R5C_LOG_TIGHT, &conf->cache_state))
1239                 r5l_wake_reclaim(log, 0);
1240
1241         spin_unlock_irqrestore(&log->io_list_lock, flags);
1242         wake_up(&log->iounit_wait);
1243 }
1244
1245 void r5l_stripe_write_finished(struct stripe_head *sh)
1246 {
1247         struct r5l_io_unit *io;
1248
1249         io = sh->log_io;
1250         sh->log_io = NULL;
1251
1252         if (io && atomic_dec_and_test(&io->pending_stripe))
1253                 __r5l_stripe_write_finished(io);
1254 }
1255
1256 static void r5l_log_flush_endio(struct bio *bio)
1257 {
1258         struct r5l_log *log = container_of(bio, struct r5l_log,
1259                 flush_bio);
1260         unsigned long flags;
1261         struct r5l_io_unit *io;
1262
1263         if (bio->bi_status)
1264                 md_error(log->rdev->mddev, log->rdev);
1265         bio_uninit(bio);
1266
1267         spin_lock_irqsave(&log->io_list_lock, flags);
1268         list_for_each_entry(io, &log->flushing_ios, log_sibling)
1269                 r5l_io_run_stripes(io);
1270         list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1271         spin_unlock_irqrestore(&log->io_list_lock, flags);
1272 }
1273
1274 /*
1275  * Start dispatching IO to the raid disks.
1276  * The log is a sequence of io_units (meta blocks). One situation we want to
1277  * avoid: a broken meta block in the middle of the log keeps recovery from
1278  * finding the meta blocks after it. So if an operation requires a meta block
1279  * to be persistent in the log, the meta blocks before it must be persistent
1280  * in the log too. A case is: stripe data/parity is in the log and we start
1281  * writing the stripe to the raid disks; the data/parity must be persistent
1282  * in the log before that write.
1283  *
1284  * The solution is to strictly maintain io_unit list order: only write the
1285  * stripes of an io_unit to the raid disks once it is the first io_unit
1286  * whose data/parity is in the log.
1287  */
1288 void r5l_flush_stripe_to_raid(struct r5l_log *log)
1289 {
1290         bool do_flush;
1291
1292         if (!log || !log->need_cache_flush)
1293                 return;
1294
1295         spin_lock_irq(&log->io_list_lock);
1296         /* flush bio is running */
1297         if (!list_empty(&log->flushing_ios)) {
1298                 spin_unlock_irq(&log->io_list_lock);
1299                 return;
1300         }
1301         list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1302         do_flush = !list_empty(&log->flushing_ios);
1303         spin_unlock_irq(&log->io_list_lock);
1304
1305         if (!do_flush)
1306                 return;
1307         bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
1308                   REQ_OP_WRITE | REQ_PREFLUSH);
1309         log->flush_bio.bi_end_io = r5l_log_flush_endio;
1310         submit_bio(&log->flush_bio);
1311 }
1312
1313 static void r5l_write_super(struct r5l_log *log, sector_t cp);
1314 static void r5l_write_super_and_discard_space(struct r5l_log *log,
1315         sector_t end)
1316 {
1317         struct block_device *bdev = log->rdev->bdev;
1318         struct mddev *mddev;
1319
1320         r5l_write_super(log, end);
1321
1322         if (!bdev_max_discard_sectors(bdev))
1323                 return;
1324
1325         mddev = log->rdev->mddev;
1326         /*
1327          * Discard could zero data, so before discard we must make sure
1328          * superblock is updated to new log tail. Updating superblock (either
1329          * directly calling md_update_sb() or depending on the md thread) must
1330          * hold the reconfig mutex. On the other hand, raid5_quiesce is called
1331          * with reconfig_mutex held. The first step of raid5_quiesce() is waiting
1332          * for all IO to finish, hence waiting for the reclaim thread, while the
1333          * reclaim thread is calling this function and waiting for the reconfig
1334          * mutex. So there is a deadlock. We work around it with a trylock.
1335          * FIXME: we could miss discard if we can't take reconfig mutex
1336          */
1337         set_mask_bits(&mddev->sb_flags, 0,
1338                 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1339         if (!mddev_trylock(mddev))
1340                 return;
1341         md_update_sb(mddev, 1);
1342         mddev_unlock(mddev);
1343
1344         /* discard IO error really doesn't matter, ignore it */
1345         if (log->last_checkpoint < end) {
1346                 blkdev_issue_discard(bdev,
1347                                 log->last_checkpoint + log->rdev->data_offset,
1348                                 end - log->last_checkpoint, GFP_NOIO);
1349         } else {
1350                 blkdev_issue_discard(bdev,
1351                                 log->last_checkpoint + log->rdev->data_offset,
1352                                 log->device_size - log->last_checkpoint,
1353                                 GFP_NOIO);
1354                 blkdev_issue_discard(bdev, log->rdev->data_offset, end,
1355                                 GFP_NOIO);
1356         }
1357 }
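
/*
 * Example (illustrative numbers): if last_checkpoint = 900 and end = 100 on
 * a 1000-sector log, the reclaimed range wraps, so two discards are issued:
 * sectors [900, 1000) and [0, 100), both offset by rdev->data_offset.
 */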
1358
1359 /*
1360  * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
1361  * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
1362  *
1363  * must hold conf->device_lock
1364  */
1365 static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
1366 {
1367         BUG_ON(list_empty(&sh->lru));
1368         BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
1369         BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
1370
1371         /*
1372          * The stripe is not ON_RELEASE_LIST, so it is safe to call
1373          * raid5_release_stripe() while holding conf->device_lock
1374          */
1375         BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
1376         lockdep_assert_held(&conf->device_lock);
1377
1378         list_del_init(&sh->lru);
1379         atomic_inc(&sh->count);
1380
1381         set_bit(STRIPE_HANDLE, &sh->state);
1382         atomic_inc(&conf->active_stripes);
1383         r5c_make_stripe_write_out(sh);
1384
1385         if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
1386                 atomic_inc(&conf->r5c_flushing_partial_stripes);
1387         else
1388                 atomic_inc(&conf->r5c_flushing_full_stripes);
1389         raid5_release_stripe(sh);
1390 }
1391
1392 /*
1393  * if num == 0, flush all full stripes
1394  * if num > 0, flush all full stripes. If less than num full stripes are
1395  *             flushed, flush some partial stripes until a total of num stripes
1396  *             are flushed, or there are no more cached stripes.
1397  */
1398 void r5c_flush_cache(struct r5conf *conf, int num)
1399 {
1400         int count;
1401         struct stripe_head *sh, *next;
1402
1403         lockdep_assert_held(&conf->device_lock);
1404         if (!READ_ONCE(conf->log))
1405                 return;
1406
1407         count = 0;
1408         list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
1409                 r5c_flush_stripe(conf, sh);
1410                 count++;
1411         }
1412
1413         if (count >= num)
1414                 return;
1415         list_for_each_entry_safe(sh, next,
1416                                  &conf->r5c_partial_stripe_list, lru) {
1417                 r5c_flush_stripe(conf, sh);
1418                 if (++count >= num)
1419                         break;
1420         }
1421 }
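
/*
 * Usage sketch (illustrative; conf->device_lock must already be held, as
 * in r5c_do_reclaim()). With 3 cached full stripes and 5 cached partial
 * stripes:
 *
 *      r5c_flush_cache(conf, 0);    // flushes the 3 full stripes only
 *      r5c_flush_cache(conf, 4);    // flushes 3 full + 1 partial stripe
 *      r5c_flush_cache(conf, 8);    // flushes 3 full + 5 partial stripes
 */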
1422
1423 static void r5c_do_reclaim(struct r5conf *conf)
1424 {
1425         struct r5l_log *log = READ_ONCE(conf->log);
1426         struct stripe_head *sh;
1427         int count = 0;
1428         unsigned long flags;
1429         int total_cached;
1430         int stripes_to_flush;
1431         int flushing_partial, flushing_full;
1432
1433         if (!r5c_is_writeback(log))
1434                 return;
1435
1436         flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
1437         flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
1438         total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
1439                 atomic_read(&conf->r5c_cached_full_stripes) -
1440                 flushing_full - flushing_partial;
1441
1442         if (total_cached > conf->min_nr_stripes * 3 / 4 ||
1443             atomic_read(&conf->empty_inactive_list_nr) > 0)
1444                 /*
1445                  * if stripe cache pressure is high, flush all full stripes and
1446                  * some partial stripes
1447                  */
1448                 stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
1449         else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
1450                  atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
1451                  R5C_FULL_STRIPE_FLUSH_BATCH(conf))
1452                 /*
1453                  * if stripe cache pressure is moderate, or if there are many
1454                  * full stripes, flush all full stripes
1455                  */
1456                 stripes_to_flush = 0;
1457         else
1458                 /* no need to flush */
1459                 stripes_to_flush = -1;
1460
1461         if (stripes_to_flush >= 0) {
1462                 spin_lock_irqsave(&conf->device_lock, flags);
1463                 r5c_flush_cache(conf, stripes_to_flush);
1464                 spin_unlock_irqrestore(&conf->device_lock, flags);
1465         }
1466
1467         /* if log space is tight, flush stripes on stripe_in_journal_list */
1468         if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
1469                 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1470                 spin_lock(&conf->device_lock);
1471                 list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
1472                         /*
1473                          * stripes on stripe_in_journal_list could be in any
1474                          * state of the stripe_cache state machine. In this
1475                          * case, we only want to flush stripes on
1476                          * r5c_cached_full/partial_stripes. The following
1477                          * condition makes sure the stripe is on one of the
1478                          * two lists.
1479                          */
1480                         if (!list_empty(&sh->lru) &&
1481                             !test_bit(STRIPE_HANDLE, &sh->state) &&
1482                             atomic_read(&sh->count) == 0) {
1483                                 r5c_flush_stripe(conf, sh);
1484                                 if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
1485                                         break;
1486                         }
1487                 }
1488                 spin_unlock(&conf->device_lock);
1489                 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1490         }
1491
1492         if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
1493                 r5l_run_no_space_stripes(log);
1494
1495         md_wakeup_thread(conf->mddev->thread);
1496 }
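
/*
 * Threshold example for the above (illustrative, made-up numbers): with
 * min_nr_stripes = 256, total_cached > 192 (3/4) or any empty inactive
 * list counts as high pressure and flushes a group of
 * R5C_RECLAIM_STRIPE_GROUP stripes; total_cached > 128 (1/2) or more than
 * R5C_FULL_STRIPE_FLUSH_BATCH(conf) cached full stripes counts as moderate
 * pressure and flushes only the full stripes (num == 0); below that,
 * nothing is flushed.
 */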
1497
1498 static void r5l_do_reclaim(struct r5l_log *log)
1499 {
1500         struct r5conf *conf = log->rdev->mddev->private;
1501         sector_t reclaim_target = xchg(&log->reclaim_target, 0);
1502         sector_t reclaimable;
1503         sector_t next_checkpoint;
1504         bool write_super;
1505
1506         spin_lock_irq(&log->io_list_lock);
1507         write_super = r5l_reclaimable_space(log) > log->max_free_space ||
1508                 reclaim_target != 0 || !list_empty(&log->no_space_stripes);
1509         /*
1510          * Move eligible io_units to the reclaim list. We must not change the
1511          * order: reclaimable and unreclaimable io_units can be mixed in the
1512          * list, and we shouldn't reuse space of an unreclaimable io_unit.
1513          */
1514         while (1) {
1515                 reclaimable = r5l_reclaimable_space(log);
1516                 if (reclaimable >= reclaim_target ||
1517                     (list_empty(&log->running_ios) &&
1518                      list_empty(&log->io_end_ios) &&
1519                      list_empty(&log->flushing_ios) &&
1520                      list_empty(&log->finished_ios)))
1521                         break;
1522
1523                 md_wakeup_thread(log->rdev->mddev->thread);
1524                 wait_event_lock_irq(log->iounit_wait,
1525                                     r5l_reclaimable_space(log) > reclaimable,
1526                                     log->io_list_lock);
1527         }
1528
1529         next_checkpoint = r5c_calculate_new_cp(conf);
1530         spin_unlock_irq(&log->io_list_lock);
1531
1532         if (reclaimable == 0 || !write_super)
1533                 return;
1534
1535         /*
1536          * write_super will flush the cache of each raid disk. We must write
1537          * the super here, because the log area might be reused soon and we
1538          * don't want to confuse recovery.
1539          */
1540         r5l_write_super_and_discard_space(log, next_checkpoint);
1541
1542         mutex_lock(&log->io_mutex);
1543         log->last_checkpoint = next_checkpoint;
1544         r5c_update_log_state(log);
1545         mutex_unlock(&log->io_mutex);
1546
1547         r5l_run_no_space_stripes(log);
1548 }
1549
1550 static void r5l_reclaim_thread(struct md_thread *thread)
1551 {
1552         struct mddev *mddev = thread->mddev;
1553         struct r5conf *conf = mddev->private;
1554         struct r5l_log *log = READ_ONCE(conf->log);
1555
1556         if (!log)
1557                 return;
1558         r5c_do_reclaim(conf);
1559         r5l_do_reclaim(log);
1560 }
1561
1562 void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
1563 {
1564         unsigned long target;
1565         unsigned long new = (unsigned long)space; /* overflow in theory */
1566
1567         if (!log)
1568                 return;
1569
1570         target = READ_ONCE(log->reclaim_target);
1571         do {
1572                 if (new < target)
1573                         return;
1574         } while (!try_cmpxchg(&log->reclaim_target, &target, new));
1575         md_wakeup_thread(log->reclaim_thread);
1576 }
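
/*
 * The cmpxchg loop above is a lock-free "monotonic max": reclaim_target is
 * only ever raised. A sketch of the same pattern in plain C (illustrative,
 * not driver code):
 *
 *      old = READ_ONCE(*target);
 *      do {
 *              if (new < old)
 *                      return;         // a larger request already won
 *      } while (!try_cmpxchg(target, &old, new));
 *
 * Note the driver keeps going when new == target, so an equal request
 * still wakes the reclaim thread.
 */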
1577
1578 void r5l_quiesce(struct r5l_log *log, int quiesce)
1579 {
1580         struct mddev *mddev = log->rdev->mddev;
1581         struct md_thread *thread = rcu_dereference_protected(
1582                 log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex));
1583
1584         if (quiesce) {
1585                 /* make sure r5l_write_super_and_discard_space exits */
1586                 wake_up(&mddev->sb_wait);
1587                 kthread_park(thread->tsk);
1588                 r5l_wake_reclaim(log, MaxSector);
1589                 r5l_do_reclaim(log);
1590         } else
1591                 kthread_unpark(thread->tsk);
1592 }
1593
1594 bool r5l_log_disk_error(struct r5conf *conf)
1595 {
1596         struct r5l_log *log = READ_ONCE(conf->log);
1597
1598         /* don't allow write if journal disk is missing */
1599         if (!log)
1600                 return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
1601         else
1602                 return test_bit(Faulty, &log->rdev->flags);
1603 }
1604
1605 #define R5L_RECOVERY_PAGE_POOL_SIZE 256
1606
1607 struct r5l_recovery_ctx {
1608         struct page *meta_page;         /* current meta */
1609         sector_t meta_total_blocks;     /* total size of current meta and data */
1610         sector_t pos;                   /* recovery position */
1611         u64 seq;                        /* recovery position seq */
1612         int data_parity_stripes;        /* number of data_parity stripes */
1613         int data_only_stripes;          /* number of data_only stripes */
1614         struct list_head cached_list;
1615
1616         /*
1617          * read ahead page pool (ra_pool)
1618          * during recovery, the log is read sequentially. It is not efficient
1619          * to read every page with sync_page_io(). The read ahead page pool
1620          * reads multiple pages with one IO, so further log reads can
1621          * just copy data from the pool.
1622          */
1623         struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
1624         struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
1625         sector_t pool_offset;   /* offset of first page in the pool */
1626         int total_pages;        /* total allocated pages */
1627         int valid_pages;        /* pages with valid data */
1628 };
1629
1630 static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
1631                                             struct r5l_recovery_ctx *ctx)
1632 {
1633         struct page *page;
1634
1635         ctx->valid_pages = 0;
1636         ctx->total_pages = 0;
1637         while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
1638                 page = alloc_page(GFP_KERNEL);
1639
1640                 if (!page)
1641                         break;
1642                 ctx->ra_pool[ctx->total_pages] = page;
1643                 ctx->total_pages += 1;
1644         }
1645
1646         if (ctx->total_pages == 0)
1647                 return -ENOMEM;
1648
1649         ctx->pool_offset = 0;
1650         return 0;
1651 }
1652
1653 static void r5l_recovery_free_ra_pool(struct r5l_log *log,
1654                                         struct r5l_recovery_ctx *ctx)
1655 {
1656         int i;
1657
1658         for (i = 0; i < ctx->total_pages; ++i)
1659                 put_page(ctx->ra_pool[i]);
1660 }
1661
1662 /*
1663  * fetch up to ctx->total_pages pages starting at offset
1664  * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
1665  * However, if the offset is close to the end of the journal device,
1666  * ctx->valid_pages could be smaller than ctx->total_pages
1667  */
1668 static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
1669                                       struct r5l_recovery_ctx *ctx,
1670                                       sector_t offset)
1671 {
1672         struct bio bio;
1673         int ret;
1674
1675         bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
1676                  R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
1677         bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
1678
1679         ctx->valid_pages = 0;
1680         ctx->pool_offset = offset;
1681
1682         while (ctx->valid_pages < ctx->total_pages) {
1683                 __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
1684                                0);
1685                 ctx->valid_pages += 1;
1686
1687                 offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
1688
1689                 if (offset == 0)  /* reached end of the device */
1690                         break;
1691         }
1692
1693         ret = submit_bio_wait(&bio);
1694         bio_uninit(&bio);
1695         return ret;
1696 }
1697
1698 /*
1699  * try to read a page from the read ahead page pool; if the page is not in
1700  * the pool, call r5l_recovery_fetch_ra_pool to refill the pool
1701  */
1702 static int r5l_recovery_read_page(struct r5l_log *log,
1703                                   struct r5l_recovery_ctx *ctx,
1704                                   struct page *page,
1705                                   sector_t offset)
1706 {
1707         int ret;
1708
1709         if (offset < ctx->pool_offset ||
1710             offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
1711                 ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1712                 if (ret)
1713                         return ret;
1714         }
1715
1716         BUG_ON(offset < ctx->pool_offset ||
1717                offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
1718
1719         memcpy(page_address(page),
1720                page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
1721                                          BLOCK_SECTOR_SHIFT]),
1722                PAGE_SIZE);
1723         return 0;
1724 }
1725
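
/*
 * Index arithmetic example (illustrative): with pool_offset = 80 and a hit
 * at offset = 96, the pool index is (96 - 80) >> BLOCK_SECTOR_SHIFT =
 * 16 / 8 = 2, i.e. the third page in ra_pool. A miss, i.e. an offset
 * outside [pool_offset, pool_offset + valid_pages * BLOCK_SECTORS), first
 * refills the pool starting at the requested offset.
 */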
1726 static int r5l_recovery_read_meta_block(struct r5l_log *log,
1727                                         struct r5l_recovery_ctx *ctx)
1728 {
1729         struct page *page = ctx->meta_page;
1730         struct r5l_meta_block *mb;
1731         u32 crc, stored_crc;
1732         int ret;
1733
1734         ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1735         if (ret != 0)
1736                 return ret;
1737
1738         mb = page_address(page);
1739         stored_crc = le32_to_cpu(mb->checksum);
1740         mb->checksum = 0;
1741
1742         if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
1743             le64_to_cpu(mb->seq) != ctx->seq ||
1744             mb->version != R5LOG_VERSION ||
1745             le64_to_cpu(mb->position) != ctx->pos)
1746                 return -EINVAL;
1747
1748         crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1749         if (stored_crc != crc)
1750                 return -EINVAL;
1751
1752         if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
1753                 return -EINVAL;
1754
1755         ctx->meta_total_blocks = BLOCK_SECTORS;
1756
1757         return 0;
1758 }
1759
1760 static void
1761 r5l_recovery_create_empty_meta_block(struct r5l_log *log,
1762                                      struct page *page,
1763                                      sector_t pos, u64 seq)
1764 {
1765         struct r5l_meta_block *mb;
1766
1767         mb = page_address(page);
1768         clear_page(mb);
1769         mb->magic = cpu_to_le32(R5LOG_MAGIC);
1770         mb->version = R5LOG_VERSION;
1771         mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1772         mb->seq = cpu_to_le64(seq);
1773         mb->position = cpu_to_le64(pos);
1774 }
1775
1776 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1777                                           u64 seq)
1778 {
1779         struct page *page;
1780         struct r5l_meta_block *mb;
1781
1782         page = alloc_page(GFP_KERNEL);
1783         if (!page)
1784                 return -ENOMEM;
1785         r5l_recovery_create_empty_meta_block(log, page, pos, seq);
1786         mb = page_address(page);
1787         mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
1788                                              mb, PAGE_SIZE));
1789         if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
1790                           REQ_SYNC | REQ_FUA, false)) {
1791                 __free_page(page);
1792                 return -EIO;
1793         }
1794         __free_page(page);
1795         return 0;
1796 }
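
/*
 * Checksum convention (as used above and in r5l_recovery_read_meta_block):
 * the CRC is computed over the whole page while mb->checksum is still zero
 * and only then stored into mb->checksum. The reader therefore saves the
 * stored value, zeroes the field, and recomputes the CRC before comparing.
 */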
1797
1798 /*
1799  * r5l_recovery_load_data and r5l_recovery_load_parity use the R5_Wantwrite
1800  * flag to mark valid (potentially not flushed) data in the journal.
1801  *
1802  * We already verified the checksums in r5l_recovery_verify_data_checksum_for_mb,
1803  * so there should not be any mismatch here.
1804  */
1805 static void r5l_recovery_load_data(struct r5l_log *log,
1806                                    struct stripe_head *sh,
1807                                    struct r5l_recovery_ctx *ctx,
1808                                    struct r5l_payload_data_parity *payload,
1809                                    sector_t log_offset)
1810 {
1811         struct mddev *mddev = log->rdev->mddev;
1812         struct r5conf *conf = mddev->private;
1813         int dd_idx;
1814
1815         raid5_compute_sector(conf,
1816                              le64_to_cpu(payload->location), 0,
1817                              &dd_idx, sh);
1818         r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1819         sh->dev[dd_idx].log_checksum =
1820                 le32_to_cpu(payload->checksum[0]);
1821         ctx->meta_total_blocks += BLOCK_SECTORS;
1822
1823         set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
1824         set_bit(STRIPE_R5C_CACHING, &sh->state);
1825 }
1826
1827 static void r5l_recovery_load_parity(struct r5l_log *log,
1828                                      struct stripe_head *sh,
1829                                      struct r5l_recovery_ctx *ctx,
1830                                      struct r5l_payload_data_parity *payload,
1831                                      sector_t log_offset)
1832 {
1833         struct mddev *mddev = log->rdev->mddev;
1834         struct r5conf *conf = mddev->private;
1835
1836         ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
1837         r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1838         sh->dev[sh->pd_idx].log_checksum =
1839                 le32_to_cpu(payload->checksum[0]);
1840         set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
1841
1842         if (sh->qd_idx >= 0) {
1843                 r5l_recovery_read_page(
1844                         log, ctx, sh->dev[sh->qd_idx].page,
1845                         r5l_ring_add(log, log_offset, BLOCK_SECTORS));
1846                 sh->dev[sh->qd_idx].log_checksum =
1847                         le32_to_cpu(payload->checksum[1]);
1848                 set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
1849         }
1850         clear_bit(STRIPE_R5C_CACHING, &sh->state);
1851 }
1852
1853 static void r5l_recovery_reset_stripe(struct stripe_head *sh)
1854 {
1855         int i;
1856
1857         sh->state = 0;
1858         sh->log_start = MaxSector;
1859         for (i = sh->disks; i--; )
1860                 sh->dev[i].flags = 0;
1861 }
1862
1863 static void
1864 r5l_recovery_replay_one_stripe(struct r5conf *conf,
1865                                struct stripe_head *sh,
1866                                struct r5l_recovery_ctx *ctx)
1867 {
1868         struct md_rdev *rdev, *rrdev;
1869         int disk_index;
1870         int data_count = 0;
1871
1872         for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1873                 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1874                         continue;
1875                 if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
1876                         continue;
1877                 data_count++;
1878         }
1879
1880         /*
1881          * stripes that only have parity must have been flushed
1882          * before the crash that we are now recovering from, so
1883          * there is nothing more to recover.
1884          */
1885         if (data_count == 0)
1886                 goto out;
1887
1888         for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1889                 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1890                         continue;
1891
1892                 /* in case device is broken */
1893                 rcu_read_lock();
1894                 rdev = rcu_dereference(conf->disks[disk_index].rdev);
1895                 if (rdev) {
1896                         atomic_inc(&rdev->nr_pending);
1897                         rcu_read_unlock();
1898                         sync_page_io(rdev, sh->sector, PAGE_SIZE,
1899                                      sh->dev[disk_index].page, REQ_OP_WRITE,
1900                                      false);
1901                         rdev_dec_pending(rdev, rdev->mddev);
1902                         rcu_read_lock();
1903                 }
1904                 rrdev = rcu_dereference(conf->disks[disk_index].replacement);
1905                 if (rrdev) {
1906                         atomic_inc(&rrdev->nr_pending);
1907                         rcu_read_unlock();
1908                         sync_page_io(rrdev, sh->sector, PAGE_SIZE,
1909                                      sh->dev[disk_index].page, REQ_OP_WRITE,
1910                                      false);
1911                         rdev_dec_pending(rrdev, rrdev->mddev);
1912                         rcu_read_lock();
1913                 }
1914                 rcu_read_unlock();
1915         }
1916         ctx->data_parity_stripes++;
1917 out:
1918         r5l_recovery_reset_stripe(sh);
1919 }
1920
1921 static struct stripe_head *
1922 r5c_recovery_alloc_stripe(
1923                 struct r5conf *conf,
1924                 sector_t stripe_sect,
1925                 int noblock)
1926 {
1927         struct stripe_head *sh;
1928
1929         sh = raid5_get_active_stripe(conf, NULL, stripe_sect,
1930                                      noblock ? R5_GAS_NOBLOCK : 0);
1931         if (!sh)
1932                 return NULL;  /* no more stripes available */
1933
1934         r5l_recovery_reset_stripe(sh);
1935
1936         return sh;
1937 }
1938
1939 static struct stripe_head *
1940 r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
1941 {
1942         struct stripe_head *sh;
1943
1944         list_for_each_entry(sh, list, lru)
1945                 if (sh->sector == sect)
1946                         return sh;
1947         return NULL;
1948 }
1949
1950 static void
1951 r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
1952                           struct r5l_recovery_ctx *ctx)
1953 {
1954         struct stripe_head *sh, *next;
1955
1956         list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
1957                 r5l_recovery_reset_stripe(sh);
1958                 list_del_init(&sh->lru);
1959                 raid5_release_stripe(sh);
1960         }
1961 }
1962
1963 static void
1964 r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
1965                             struct r5l_recovery_ctx *ctx)
1966 {
1967         struct stripe_head *sh, *next;
1968
1969         list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
1970                 if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
1971                         r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
1972                         list_del_init(&sh->lru);
1973                         raid5_release_stripe(sh);
1974                 }
1975 }
1976
1977 /* if the checksum matches, return 0; otherwise return -EINVAL */
1978 static int
1979 r5l_recovery_verify_data_checksum(struct r5l_log *log,
1980                                   struct r5l_recovery_ctx *ctx,
1981                                   struct page *page,
1982                                   sector_t log_offset, __le32 log_checksum)
1983 {
1984         void *addr;
1985         u32 checksum;
1986
1987         r5l_recovery_read_page(log, ctx, page, log_offset);
1988         addr = kmap_atomic(page);
1989         checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
1990         kunmap_atomic(addr);
1991         return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
1992 }
1993
1994 /*
1995  * before loading data into the stripe cache, we must verify checksums for
1996  * all data; if any data page mismatches, we drop all data in the meta block
1997  */
1998 static int
1999 r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
2000                                          struct r5l_recovery_ctx *ctx)
2001 {
2002         struct mddev *mddev = log->rdev->mddev;
2003         struct r5conf *conf = mddev->private;
2004         struct r5l_meta_block *mb = page_address(ctx->meta_page);
2005         sector_t mb_offset = sizeof(struct r5l_meta_block);
2006         sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2007         struct page *page;
2008         struct r5l_payload_data_parity *payload;
2009         struct r5l_payload_flush *payload_flush;
2010
2011         page = alloc_page(GFP_KERNEL);
2012         if (!page)
2013                 return -ENOMEM;
2014
2015         while (mb_offset < le32_to_cpu(mb->meta_size)) {
2016                 payload = (void *)mb + mb_offset;
2017                 payload_flush = (void *)mb + mb_offset;
2018
2019                 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2020                         if (r5l_recovery_verify_data_checksum(
2021                                     log, ctx, page, log_offset,
2022                                     payload->checksum[0]) < 0)
2023                                 goto mismatch;
2024                 } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
2025                         if (r5l_recovery_verify_data_checksum(
2026                                     log, ctx, page, log_offset,
2027                                     payload->checksum[0]) < 0)
2028                                 goto mismatch;
2029                         if (conf->max_degraded == 2 && /* q for RAID 6 */
2030                             r5l_recovery_verify_data_checksum(
2031                                     log, ctx, page,
2032                                     r5l_ring_add(log, log_offset,
2033                                                  BLOCK_SECTORS),
2034                                     payload->checksum[1]) < 0)
2035                                 goto mismatch;
2036                 } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2037                         /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
2038                 } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2039                         goto mismatch;
2040
2041                 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2042                         mb_offset += sizeof(struct r5l_payload_flush) +
2043                                 le32_to_cpu(payload_flush->size);
2044                 } else {
2045                         /* DATA or PARITY payload */
2046                         log_offset = r5l_ring_add(log, log_offset,
2047                                                   le32_to_cpu(payload->size));
2048                         mb_offset += sizeof(struct r5l_payload_data_parity) +
2049                                 sizeof(__le32) *
2050                                 (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2051                 }
2052
2053         }
2054
2055         put_page(page);
2056         return 0;
2057
2058 mismatch:
2059         put_page(page);
2060         return -EINVAL;
2061 }
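
/*
 * Payload walk example (illustrative): a DATA payload covering one 4k
 * block has payload->size == BLOCK_SECTORS == 8 sectors, so it carries
 * 8 >> (PAGE_SHIFT - 9) = 1 checksum and mb_offset advances by
 * sizeof(struct r5l_payload_data_parity) + 1 * sizeof(__le32). A RAID6
 * PARITY payload covers P and Q (16 sectors) and carries two checksums.
 */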
2062
2063 /*
2064  * Analyze all data/parity pages in one meta block
2065  * Returns:
2066  * 0 for success
2067  * -EINVAL for an unknown payload type
2068  * -EAGAIN for a checksum mismatch of a data page
2069  * -ENOMEM when out of memory (alloc_page failed or ran out of stripes)
2070  */
2071 static int
2072 r5c_recovery_analyze_meta_block(struct r5l_log *log,
2073                                 struct r5l_recovery_ctx *ctx,
2074                                 struct list_head *cached_stripe_list)
2075 {
2076         struct mddev *mddev = log->rdev->mddev;
2077         struct r5conf *conf = mddev->private;
2078         struct r5l_meta_block *mb;
2079         struct r5l_payload_data_parity *payload;
2080         struct r5l_payload_flush *payload_flush;
2081         int mb_offset;
2082         sector_t log_offset;
2083         sector_t stripe_sect;
2084         struct stripe_head *sh;
2085         int ret;
2086
2087         /*
2088          * on a mismatch in data blocks, we will drop all data in this mb, but
2089          * we will still read the next mb for other data with the FLUSH flag, as
2090          * io_unit could finish out of order.
2091          */
2092         ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2093         if (ret == -EINVAL)
2094                 return -EAGAIN;
2095         else if (ret)
2096                 return ret;   /* -ENOMEM due to alloc_page() failure */
2097
2098         mb = page_address(ctx->meta_page);
2099         mb_offset = sizeof(struct r5l_meta_block);
2100         log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2101
2102         while (mb_offset < le32_to_cpu(mb->meta_size)) {
2103                 int dd;
2104
2105                 payload = (void *)mb + mb_offset;
2106                 payload_flush = (void *)mb + mb_offset;
2107
2108                 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2109                         int i, count;
2110
2111                         count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
2112                         for (i = 0; i < count; ++i) {
2113                                 stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
2114                                 sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2115                                                                 stripe_sect);
2116                                 if (sh) {
2117                                         WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2118                                         r5l_recovery_reset_stripe(sh);
2119                                         list_del_init(&sh->lru);
2120                                         raid5_release_stripe(sh);
2121                                 }
2122                         }
2123
2124                         mb_offset += sizeof(struct r5l_payload_flush) +
2125                                 le32_to_cpu(payload_flush->size);
2126                         continue;
2127                 }
2128
2129                 /* DATA or PARITY payload */
2130                 stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
2131                         raid5_compute_sector(
2132                                 conf, le64_to_cpu(payload->location), 0, &dd,
2133                                 NULL)
2134                         : le64_to_cpu(payload->location);
2135
2136                 sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2137                                                 stripe_sect);
2138
2139                 if (!sh) {
2140                         sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
2141                         /*
2142                          * cannot get stripe from raid5_get_active_stripe
2143                          * try replaying some stripes
2144                          */
2145                         if (!sh) {
2146                                 r5c_recovery_replay_stripes(
2147                                         cached_stripe_list, ctx);
2148                                 sh = r5c_recovery_alloc_stripe(
2149                                         conf, stripe_sect, 1);
2150                         }
2151                         if (!sh) {
2152                                 int new_size = conf->min_nr_stripes * 2;
2153                                 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data from the journal.\n",
2154                                         mdname(mddev),
2155                                         new_size);
2156                                 ret = raid5_set_cache_size(mddev, new_size);
2157                                 if (conf->min_nr_stripes <= new_size / 2) {
2158                                         pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
2159                                                 mdname(mddev),
2160                                                 ret,
2161                                                 new_size,
2162                                                 conf->min_nr_stripes,
2163                                                 conf->max_nr_stripes);
2164                                         return -ENOMEM;
2165                                 }
2166                                 sh = r5c_recovery_alloc_stripe(
2167                                         conf, stripe_sect, 0);
2168                         }
2169                         if (!sh) {
2170                                 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
2171                                         mdname(mddev));
2172                                 return -ENOMEM;
2173                         }
2174                         list_add_tail(&sh->lru, cached_stripe_list);
2175                 }
2176
2177                 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2178                         if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
2179                             test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
2180                                 r5l_recovery_replay_one_stripe(conf, sh, ctx);
2181                                 list_move_tail(&sh->lru, cached_stripe_list);
2182                         }
2183                         r5l_recovery_load_data(log, sh, ctx, payload,
2184                                                log_offset);
2185                 } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
2186                         r5l_recovery_load_parity(log, sh, ctx, payload,
2187                                                  log_offset);
2188                 else
2189                         return -EINVAL;
2190
2191                 log_offset = r5l_ring_add(log, log_offset,
2192                                           le32_to_cpu(payload->size));
2193
2194                 mb_offset += sizeof(struct r5l_payload_data_parity) +
2195                         sizeof(__le32) *
2196                         (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2197         }
2198
2199         return 0;
2200 }
2201
2202 /*
2203  * Load the stripe into cache. The stripe will be written out later by
2204  * the stripe cache state machine.
2205  */
2206 static void r5c_recovery_load_one_stripe(struct r5l_log *log,
2207                                          struct stripe_head *sh)
2208 {
2209         struct r5dev *dev;
2210         int i;
2211
2212         for (i = sh->disks; i--; ) {
2213                 dev = sh->dev + i;
2214                 if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
2215                         set_bit(R5_InJournal, &dev->flags);
2216                         set_bit(R5_UPTODATE, &dev->flags);
2217                 }
2218         }
2219 }
2220
2221 /*
2222  * Scan through the log for all to-be-flushed data
2223  *
2224  * For stripes with data and parity, namely Data-Parity stripe
2225  * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
2226  *
2227  * For stripes with only data, namely Data-Only stripe
2228  * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine.
2229  *
2230  * For a stripe, if we see data after parity, we should discard all previous
2231  * data and parity for this stripe, as that data has already been flushed to
2232  * the array.
2233  *
2234  * At the end of the scan, we return the new journal_tail, which points to
2235  * first data-only stripe on the journal device, or next invalid meta block.
2236  */
2237 static int r5c_recovery_flush_log(struct r5l_log *log,
2238                                   struct r5l_recovery_ctx *ctx)
2239 {
2240         struct stripe_head *sh;
2241         int ret = 0;
2242
2243         /* scan through the log */
2244         while (1) {
2245                 if (r5l_recovery_read_meta_block(log, ctx))
2246                         break;
2247
2248                 ret = r5c_recovery_analyze_meta_block(log, ctx,
2249                                                       &ctx->cached_list);
2250                 /*
2251                  * -EAGAIN means a mismatch in a data block; in this case, we
2252                  * still try to scan the next meta block
2253                  */
2254                 if (ret && ret != -EAGAIN)
2255                         break;   /* ret == -EINVAL or -ENOMEM */
2256                 ctx->seq++;
2257                 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2258         }
2259
2260         if (ret == -ENOMEM) {
2261                 r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2262                 return ret;
2263         }
2264
2265         /* replay data-parity stripes */
2266         r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2267
2268         /* load data-only stripes to stripe cache */
2269         list_for_each_entry(sh, &ctx->cached_list, lru) {
2270                 WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2271                 r5c_recovery_load_one_stripe(log, sh);
2272                 ctx->data_only_stripes++;
2273         }
2274
2275         return 0;
2276 }
2277
2278 /*
2279  * We did a recovery. Now ctx.pos points to an invalid meta block. The new
2280  * log will start here, but we can't let the superblock point to the last
2281  * valid meta block. The log might look like:
2282  * | meta 1| meta 2| meta 3|
2283  * meta 1 is valid, meta 2 is invalid and meta 3 could be valid. If the
2284  * superblock points to meta 1 and we write a new valid meta 2n, then if a
2285  * crash happens again, the new recovery will start from meta 1. Since meta
2286  * 2n is valid now, recovery will think meta 3 is valid, which is wrong.
2287  * The solution is to create a new meta in meta 2 with its seq == meta
2288  * 1's seq + 10000 and let the superblock point to meta 2. The same recovery
2289  * will not think meta 3 is a valid meta, because its seq doesn't match.
2290  */
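
/*
 * Concrete sequence example (illustrative): if recovery stops with
 * ctx->seq == 105, we continue writing from seq 10105 and up. A stale
 * "meta 3" left over from before the crash would carry seq 106, which no
 * longer matches the expected sequence, so a second recovery stops at the
 * right place.
 */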
2291
2292 /*
2293  * Before recovery, the log looks like the following
2294  *
2295  *   ---------------------------------------------
2296  *   |           valid log        | invalid log  |
2297  *   ---------------------------------------------
2298  *   ^
2299  *   |- log->last_checkpoint
2300  *   |- log->last_cp_seq
2301  *
2302  * Now we scan through the log until we see invalid entry
2303  *
2304  *   ---------------------------------------------
2305  *   |           valid log        | invalid log  |
2306  *   ---------------------------------------------
2307  *   ^                            ^
2308  *   |- log->last_checkpoint      |- ctx->pos
2309  *   |- log->last_cp_seq          |- ctx->seq
2310  *
2311  * From this point, we need to increase the seq number by 10000 to avoid
2312  * confusing the next recovery.
2313  *
2314  *   ---------------------------------------------
2315  *   |           valid log        | invalid log  |
2316  *   ---------------------------------------------
2317  *   ^                              ^
2318  *   |- log->last_checkpoint        |- ctx->pos+1
2319  *   |- log->last_cp_seq            |- ctx->seq+10001
2320  *
2321  * However, it is not safe to start the state machine yet, because data only
2322  * stripes are not yet secured in RAID. To save these data only stripes, we
2323  * rewrite them starting from seq+10001.
2324  *
2325  *   -----------------------------------------------------------------
2326  *   |           valid log        | data only stripes | invalid log  |
2327  *   -----------------------------------------------------------------
2328  *   ^                                                ^
2329  *   |- log->last_checkpoint                          |- ctx->pos+n
2330  *   |- log->last_cp_seq                              |- ctx->seq+10000+n
2331  *
2332  * If failure happens again during this process, the recovery can safely
2333  * start again from log->last_checkpoint.
2334  *
2335  * Once data only stripes are rewritten to journal, we move log_tail
2336  *
2337  *   -----------------------------------------------------------------
2338  *   |     old log        |    data only stripes    | invalid log  |
2339  *   -----------------------------------------------------------------
2340  *                        ^                         ^
2341  *                        |- log->last_checkpoint   |- ctx->pos+n
2342  *                        |- log->last_cp_seq       |- ctx->seq+10000+n
2343  *
2344  * Then we can safely start the state machine. If failure happens from this
2345  * point on, the recovery will start from new log->last_checkpoint.
2346  */
2347 static int
2348 r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2349                                        struct r5l_recovery_ctx *ctx)
2350 {
2351         struct stripe_head *sh;
2352         struct mddev *mddev = log->rdev->mddev;
2353         struct page *page;
2354         sector_t next_checkpoint = MaxSector;
2355
2356         page = alloc_page(GFP_KERNEL);
2357         if (!page) {
2358                 pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
2359                        mdname(mddev));
2360                 return -ENOMEM;
2361         }
2362
2363         WARN_ON(list_empty(&ctx->cached_list));
2364
2365         list_for_each_entry(sh, &ctx->cached_list, lru) {
2366                 struct r5l_meta_block *mb;
2367                 int i;
2368                 int offset;
2369                 sector_t write_pos;
2370
2371                 WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2372                 r5l_recovery_create_empty_meta_block(log, page,
2373                                                      ctx->pos, ctx->seq);
2374                 mb = page_address(page);
2375                 offset = le32_to_cpu(mb->meta_size);
2376                 write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2377
2378                 for (i = sh->disks; i--; ) {
2379                         struct r5dev *dev = &sh->dev[i];
2380                         struct r5l_payload_data_parity *payload;
2381                         void *addr;
2382
2383                         if (test_bit(R5_InJournal, &dev->flags)) {
2384                                 payload = (void *)mb + offset;
2385                                 payload->header.type = cpu_to_le16(
2386                                         R5LOG_PAYLOAD_DATA);
2387                                 payload->size = cpu_to_le32(BLOCK_SECTORS);
2388                                 payload->location = cpu_to_le64(
2389                                         raid5_compute_blocknr(sh, i, 0));
2390                                 addr = kmap_atomic(dev->page);
2391                                 payload->checksum[0] = cpu_to_le32(
2392                                         crc32c_le(log->uuid_checksum, addr,
2393                                                   PAGE_SIZE));
2394                                 kunmap_atomic(addr);
2395                                 sync_page_io(log->rdev, write_pos, PAGE_SIZE,
2396                                              dev->page, REQ_OP_WRITE, false);
2397                                 write_pos = r5l_ring_add(log, write_pos,
2398                                                          BLOCK_SECTORS);
2399                                 offset += sizeof(__le32) +
2400                                         sizeof(struct r5l_payload_data_parity);
2401
2402                         }
2403                 }
2404                 mb->meta_size = cpu_to_le32(offset);
2405                 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
2406                                                      mb, PAGE_SIZE));
2407                 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2408                              REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
2409                 sh->log_start = ctx->pos;
2410                 list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2411                 atomic_inc(&log->stripe_in_journal_count);
2412                 ctx->pos = write_pos;
2413                 ctx->seq += 1;
2414                 next_checkpoint = sh->log_start;
2415         }
2416         log->next_checkpoint = next_checkpoint;
2417         __free_page(page);
2418         return 0;
2419 }
2420
2421 static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2422                                                  struct r5l_recovery_ctx *ctx)
2423 {
2424         struct mddev *mddev = log->rdev->mddev;
2425         struct r5conf *conf = mddev->private;
2426         struct stripe_head *sh, *next;
2427         bool cleared_pending = false;
2428
2429         if (ctx->data_only_stripes == 0)
2430                 return;
2431
2432         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2433                 cleared_pending = true;
2434                 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2435         }
2436         log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2437
2438         list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2439                 r5c_make_stripe_write_out(sh);
2440                 set_bit(STRIPE_HANDLE, &sh->state);
2441                 list_del_init(&sh->lru);
2442                 raid5_release_stripe(sh);
2443         }
2444
2445         /* reuse conf->wait_for_quiescent in recovery */
2446         wait_event(conf->wait_for_quiescent,
2447                    atomic_read(&conf->active_stripes) == 0);
2448
2449         log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2450         if (cleared_pending)
2451                 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2452 }
2453
2454 static int r5l_recovery_log(struct r5l_log *log)
2455 {
2456         struct mddev *mddev = log->rdev->mddev;
2457         struct r5l_recovery_ctx *ctx;
2458         int ret;
2459         sector_t pos;
2460
2461         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2462         if (!ctx)
2463                 return -ENOMEM;
2464
2465         ctx->pos = log->last_checkpoint;
2466         ctx->seq = log->last_cp_seq;
2467         INIT_LIST_HEAD(&ctx->cached_list);
2468         ctx->meta_page = alloc_page(GFP_KERNEL);
2469
2470         if (!ctx->meta_page) {
2471                 ret =  -ENOMEM;
2472                 goto meta_page;
2473         }
2474
2475         if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2476                 ret = -ENOMEM;
2477                 goto ra_pool;
2478         }
2479
2480         ret = r5c_recovery_flush_log(log, ctx);
2481
2482         if (ret)
2483                 goto error;
2484
2485         pos = ctx->pos;
2486         ctx->seq += 10000;
2487
2488         if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
2489                 pr_info("md/raid:%s: starting from clean shutdown\n",
2490                          mdname(mddev));
2491         else
2492                 pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
2493                          mdname(mddev), ctx->data_only_stripes,
2494                          ctx->data_parity_stripes);
2495
2496         if (ctx->data_only_stripes == 0) {
2497                 log->next_checkpoint = ctx->pos;
2498                 r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2499                 ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2500         } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
2501                 pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
2502                        mdname(mddev));
2503                 ret =  -EIO;
2504                 goto error;
2505         }
2506
2507         log->log_start = ctx->pos;
2508         log->seq = ctx->seq;
2509         log->last_checkpoint = pos;
2510         r5l_write_super(log, pos);
2511
2512         r5c_recovery_flush_data_only_stripes(log, ctx);
2513         ret = 0;
2514 error:
2515         r5l_recovery_free_ra_pool(log, ctx);
2516 ra_pool:
2517         __free_page(ctx->meta_page);
2518 meta_page:
2519         kfree(ctx);
2520         return ret;
2521 }
2522
2523 static void r5l_write_super(struct r5l_log *log, sector_t cp)
2524 {
2525         struct mddev *mddev = log->rdev->mddev;
2526
2527         log->rdev->journal_tail = cp;
2528         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2529 }
2530
2531 static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
2532 {
2533         struct r5conf *conf;
2534         int ret;
2535
2536         ret = mddev_lock(mddev);
2537         if (ret)
2538                 return ret;
2539
2540         conf = mddev->private;
2541         if (!conf || !conf->log)
2542                 goto out_unlock;
2543
2544         switch (conf->log->r5c_journal_mode) {
2545         case R5C_JOURNAL_MODE_WRITE_THROUGH:
2546                 ret = snprintf(
2547                         page, PAGE_SIZE, "[%s] %s\n",
2548                         r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2549                         r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2550                 break;
2551         case R5C_JOURNAL_MODE_WRITE_BACK:
2552                 ret = snprintf(
2553                         page, PAGE_SIZE, "%s [%s]\n",
2554                         r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2555                         r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2556                 break;
2557         default:
2558                 ret = 0;
2559         }
2560
2561 out_unlock:
2562         mddev_unlock(mddev);
2563         return ret;
2564 }
2565
2566 /*
2567  * Set journal cache mode on @mddev (external API initially needed by dm-raid).
2568  *
2569  * @mode as defined in 'enum r5c_journal_mode'.
2570  *
2571  */
2572 int r5c_journal_mode_set(struct mddev *mddev, int mode)
2573 {
2574         struct r5conf *conf;
2575
2576         if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
2577             mode > R5C_JOURNAL_MODE_WRITE_BACK)
2578                 return -EINVAL;
2579
2580         conf = mddev->private;
2581         if (!conf || !conf->log)
2582                 return -ENODEV;
2583
2584         if (raid5_calc_degraded(conf) > 0 &&
2585             mode == R5C_JOURNAL_MODE_WRITE_BACK)
2586                 return -EINVAL;
2587
2588         conf->log->r5c_journal_mode = mode;
2589
2590         pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
2591                  mdname(mddev), mode, r5c_journal_mode_str[mode]);
2592         return 0;
2593 }
2594 EXPORT_SYMBOL(r5c_journal_mode_set);
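
/*
 * Usage sketch for the exported API (illustrative caller, e.g. what a
 * dm-raid style consumer might do; locking and error handling elided):
 *
 *      int err = r5c_journal_mode_set(mddev, R5C_JOURNAL_MODE_WRITE_BACK);
 *      if (err)
 *              pr_warn("journal mode unchanged: %d\n", err);
 *
 * The call returns -EINVAL for an out-of-range mode or when write-back is
 * requested on a degraded array, and -ENODEV when there is no journal.
 */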
2595
2596 static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2597                                       const char *page, size_t length)
2598 {
2599         int mode = ARRAY_SIZE(r5c_journal_mode_str);
2600         size_t len = length;
2601         int ret;
2602
2603         if (len < 2)
2604                 return -EINVAL;
2605
2606         if (page[len - 1] == '\n')
2607                 len--;
2608
2609         while (mode--)
2610                 if (strlen(r5c_journal_mode_str[mode]) == len &&
2611                     !strncmp(page, r5c_journal_mode_str[mode], len))
2612                         break;
2613         ret = mddev_suspend_and_lock(mddev);
2614         if (ret)
2615                 return ret;
2616         ret = r5c_journal_mode_set(mddev, mode);
2617         mddev_unlock_and_resume(mddev);
2618         return ret ?: length;
2619 }
2620
2621 struct md_sysfs_entry
2622 r5c_journal_mode = __ATTR(journal_mode, 0644,
2623                           r5c_journal_mode_show, r5c_journal_mode_store);
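
/*
 * The sysfs knob defined above lives under the md directory of the array,
 * e.g. (illustrative, device name assumed):
 *
 *      # cat /sys/block/md0/md/journal_mode
 *      [write-through] write-back
 *      # echo write-back > /sys/block/md0/md/journal_mode
 */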
2624
2625 /*
2626  * Try to handle a write operation in the caching phase. This function
2627  * should only be called in write-back mode.
2628  *
2629  * If all outstanding writes can be handled in the caching phase, returns 0.
2630  * If the writes require the write-out phase, calls r5c_make_stripe_write_out()
2631  * and returns -EAGAIN.
2632  */
2633 int r5c_try_caching_write(struct r5conf *conf,
2634                           struct stripe_head *sh,
2635                           struct stripe_head_state *s,
2636                           int disks)
2637 {
2638         struct r5l_log *log = READ_ONCE(conf->log);
2639         int i;
2640         struct r5dev *dev;
2641         int to_cache = 0;
2642         void __rcu **pslot;
2643         sector_t tree_index;
2644         int ret;
2645         uintptr_t refcount;
2646
2647         BUG_ON(!r5c_is_writeback(log));
2648
2649         if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
2650                 /*
2651                  * There are two different scenarios here:
2652                  *  1. The stripe has some data cached, and it is sent to
2653                  *     write-out phase for reclaim
2654                  *  2. The stripe is clean, and this is the first write
2655                  *
2656                  * For 1, return -EAGAIN, so we continue with
2657                  * handle_stripe_dirtying().
2658                  *
2659                  * For 2, set STRIPE_R5C_CACHING and continue with caching
2660                  * write.
2661                  */
2662
2663                 /* case 1: anything in s->injournal or s->written */
2664                 if (s->injournal > 0 || s->written > 0)
2665                         return -EAGAIN;
2666                 /* case 2 */
2667                 set_bit(STRIPE_R5C_CACHING, &sh->state);
2668         }
2669
2670         /*
2671          * When run in degraded mode, the array is set to write-through mode.
2672          * This check helps drain pending writes safely in the transition to
2673          * write-through mode.
2674          *
2675          * When a stripe is syncing, the write is also handled in write
2676          * through mode.
2677          */
2678         if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
2679                 r5c_make_stripe_write_out(sh);
2680                 return -EAGAIN;
2681         }
2682
2683         for (i = disks; i--; ) {
2684                 dev = &sh->dev[i];
2685                 /* if non-overwrite, use writing-out phase */
2686                 if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
2687                     !test_bit(R5_InJournal, &dev->flags)) {
2688                         r5c_make_stripe_write_out(sh);
2689                         return -EAGAIN;
2690                 }
2691         }
2692
2693         /* if the stripe is not counted in big_stripe_tree, add it now */
2694         if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
2695             !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2696                 tree_index = r5c_tree_index(conf, sh->sector);
2697                 spin_lock(&log->tree_lock);
2698                 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2699                                                tree_index);
2700                 if (pslot) {
2701                         refcount = (uintptr_t)radix_tree_deref_slot_protected(
2702                                 pslot, &log->tree_lock) >>
2703                                 R5C_RADIX_COUNT_SHIFT;
2704                         radix_tree_replace_slot(
2705                                 &log->big_stripe_tree, pslot,
2706                                 (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
2707                 } else {
2708                         /*
2709                          * this radix_tree_insert can fail safely, so no
2710                          * need to call radix_tree_preload()
2711                          */
2712                         ret = radix_tree_insert(
2713                                 &log->big_stripe_tree, tree_index,
2714                                 (void *)(1 << R5C_RADIX_COUNT_SHIFT));
2715                         if (ret) {
2716                                 spin_unlock(&log->tree_lock);
2717                                 r5c_make_stripe_write_out(sh);
2718                                 return -EAGAIN;
2719                         }
2720                 }
2721                 spin_unlock(&log->tree_lock);
2722
2723                 /*
2724                  * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is
2725                  * counted in the radix tree
2726                  */
2727                 set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
2728                 atomic_inc(&conf->r5c_cached_partial_stripes);
2729         }
2730
2731         for (i = disks; i--; ) {
2732                 dev = &sh->dev[i];
2733                 if (dev->towrite) {
2734                         set_bit(R5_Wantwrite, &dev->flags);
2735                         set_bit(R5_Wantdrain, &dev->flags);
2736                         set_bit(R5_LOCKED, &dev->flags);
2737                         to_cache++;
2738                 }
2739         }
2740
2741         if (to_cache) {
2742                 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2743                 /*
2744                  * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
2745                  * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
2746                  * r5c_handle_data_cached()
2747                  */
2748                 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
2749         }
2750
2751         return 0;
2752 }
2753
2754 /*
2755  * free extra pages (orig_page) we allocated for prexor
2756  */
2757 void r5c_release_extra_page(struct stripe_head *sh)
2758 {
2759         struct r5conf *conf = sh->raid_conf;
2760         int i;
2761         bool using_disk_info_extra_page;
2762
2763         using_disk_info_extra_page =
2764                 sh->dev[0].orig_page == conf->disks[0].extra_page;
2765
2766         for (i = sh->disks; i--; )
2767                 if (sh->dev[i].page != sh->dev[i].orig_page) {
2768                         struct page *p = sh->dev[i].orig_page;
2769
2770                         sh->dev[i].orig_page = sh->dev[i].page;
2771                         clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2772
2773                         if (!using_disk_info_extra_page)
2774                                 put_page(p);
2775                 }
2776
2777         if (using_disk_info_extra_page) {
2778                 clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
2779                 md_wakeup_thread(conf->mddev->thread);
2780         }
2781 }
2782
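/*
 * Point each device's orig_page at the per-disk conf->disks[i].extra_page,
 * dropping any privately allocated orig_page first. This appears to be the
 * fallback for when separate pages for prexor cannot be allocated; the
 * extra pages are guarded by R5C_EXTRA_PAGE_IN_USE, which
 * r5c_release_extra_page() above clears again.
 */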
2783 void r5c_use_extra_page(struct stripe_head *sh)
2784 {
2785         struct r5conf *conf = sh->raid_conf;
2786         int i;
2787         struct r5dev *dev;
2788
2789         for (i = sh->disks; i--; ) {
2790                 dev = &sh->dev[i];
2791                 if (dev->orig_page != dev->page)
2792                         put_page(dev->orig_page);
2793                 dev->orig_page = conf->disks[i].extra_page;
2794         }
2795 }
2796
2797 /*
2798  * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
2799  * stripe is committed to RAID disks.
2800  */
2801 void r5c_finish_stripe_write_out(struct r5conf *conf,
2802                                  struct stripe_head *sh,
2803                                  struct stripe_head_state *s)
2804 {
2805         struct r5l_log *log = READ_ONCE(conf->log);
2806         int i;
2807         int do_wakeup = 0;
2808         sector_t tree_index;
2809         void __rcu **pslot;
2810         uintptr_t refcount;
2811
2812         if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
2813                 return;
2814
2815         WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2816         clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
2817
2818         if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
2819                 return;
2820
2821         for (i = sh->disks; i--; ) {
2822                 clear_bit(R5_InJournal, &sh->dev[i].flags);
2823                 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2824                         do_wakeup = 1;
2825         }
2826
2827         /*
2828          * analyse_stripe() runs before r5c_finish_stripe_write_out().
2829          * R5_InJournal was just cleared above, so update s->injournal to match.
2830          */
2831         s->injournal = 0;
2832
2833         if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2834                 if (atomic_dec_and_test(&conf->pending_full_writes))
2835                         md_wakeup_thread(conf->mddev->thread);
2836
2837         if (do_wakeup)
2838                 wake_up(&conf->wait_for_overlap);
2839
2840         spin_lock_irq(&log->stripe_in_journal_lock);
2841         list_del_init(&sh->r5c);
2842         spin_unlock_irq(&log->stripe_in_journal_lock);
2843         sh->log_start = MaxSector;
2844
2845         atomic_dec(&log->stripe_in_journal_count);
2846         r5c_update_log_state(log);
2847
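        /*
         * Undo the accounting done in r5c_try_caching_write(): drop this
         * stripe's contribution to the big stripe's count and delete the
         * radix tree slot once the count reaches zero.
         */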
2848         /* stop counting this stripe in big_stripe_tree */
2849         if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
2850             test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2851                 tree_index = r5c_tree_index(conf, sh->sector);
2852                 spin_lock(&log->tree_lock);
2853                 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2854                                                tree_index);
2855                 BUG_ON(pslot == NULL);
2856                 refcount = (uintptr_t)radix_tree_deref_slot_protected(
2857                         pslot, &log->tree_lock) >>
2858                         R5C_RADIX_COUNT_SHIFT;
2859                 if (refcount == 1)
2860                         radix_tree_delete(&log->big_stripe_tree, tree_index);
2861                 else
2862                         radix_tree_replace_slot(
2863                                 &log->big_stripe_tree, pslot,
2864                                 (void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
2865                 spin_unlock(&log->tree_lock);
2866         }
2867
2868         if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
2869                 BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
2870                 atomic_dec(&conf->r5c_flushing_partial_stripes);
2871                 atomic_dec(&conf->r5c_cached_partial_stripes);
2872         }
2873
2874         if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2875                 BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
2876                 atomic_dec(&conf->r5c_flushing_full_stripes);
2877                 atomic_dec(&conf->r5c_cached_full_stripes);
2878         }
2879
2880         r5l_append_flush_payload(log, sh->sector);
2881         /* stripe is flushed to raid disks, we can do resync now */
2882         if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2883                 set_bit(STRIPE_HANDLE, &sh->state);
2884 }
2885
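/*
 * Write the dirty data pages of a stripe to the journal (caching phase):
 * checksum every page that will be written, reserve journal space for one
 * meta block plus the data pages, and hand the stripe to r5l_log_stripe().
 * Stripes that cannot get space or memory are parked on the no_space /
 * no_mem lists to be retried later.
 */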
2886 int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
2887 {
2888         struct r5conf *conf = sh->raid_conf;
2889         int pages = 0;
2890         int reserve;
2891         int i;
2892         int ret = 0;
2893
2894         BUG_ON(!log);
2895
2896         for (i = 0; i < sh->disks; i++) {
2897                 void *addr;
2898
2899                 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
2900                         continue;
2901                 addr = kmap_atomic(sh->dev[i].page);
2902                 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
2903                                                     addr, PAGE_SIZE);
2904                 kunmap_atomic(addr);
2905                 pages++;
2906         }
2907         WARN_ON(pages == 0);
2908
2909         /*
2910          * The stripe must enter the state machine again to call endio,
2911          * so don't delay it.
2912          */
2913         clear_bit(STRIPE_DELAYED, &sh->state);
2914         atomic_inc(&sh->count);
2915
2916         mutex_lock(&log->io_mutex);
2917         /* space needed in 512-byte sectors: one meta block + the data pages */
2918         reserve = (1 + pages) << (PAGE_SHIFT - 9);
2919
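        /*
         * Three cases:
         *  - the log is critically low on space and this stripe holds no
         *    journal space yet (log_start == MaxSector): defer it to the
         *    no_space_stripes list for reclaim to make room;
         *  - not enough free space: a stripe already at the log tail
         *    (last_checkpoint) cannot be deferred, presumably because
         *    reclaim could not advance past it, hence the BUG(); any other
         *    stripe is deferred to no_space_stripes;
         *  - otherwise log the stripe, queueing it on no_mem_stripes if
         *    r5l_log_stripe() fails with an allocation error.
         */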
2920         if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
2921             sh->log_start == MaxSector)
2922                 r5l_add_no_space_stripe(log, sh);
2923         else if (!r5l_has_free_space(log, reserve)) {
2924                 if (sh->log_start == log->last_checkpoint)
2925                         BUG();
2926                 else
2927                         r5l_add_no_space_stripe(log, sh);
2928         } else {
2929                 ret = r5l_log_stripe(log, sh, pages, 0);
2930                 if (ret) {
2931                         spin_lock_irq(&log->io_list_lock);
2932                         list_add_tail(&sh->log_list, &log->no_mem_stripes);
2933                         spin_unlock_irq(&log->io_list_lock);
2934                 }
2935         }
2936
2937         mutex_unlock(&log->io_mutex);
2938         return 0;
2939 }
2940
2941 /* check whether this big stripe is in the write-back cache */
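/*
 * A minimal usage sketch (hypothetical caller): the lookup is lockless, so
 * it must run inside an RCU read-side critical section:
 *
 *      rcu_read_lock();
 *      cached = r5c_big_stripe_cached(conf, raid_sector);
 *      rcu_read_unlock();
 */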
2942 bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
2943 {
2944         struct r5l_log *log = READ_ONCE(conf->log);
2945         sector_t tree_index;
2946         void *slot;
2947
2948         if (!log)
2949                 return false;
2950
2951         WARN_ON_ONCE(!rcu_read_lock_held());
2952         tree_index = r5c_tree_index(conf, sect);
2953         slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
2954         return slot != NULL;
2955 }
2956
2957 static int r5l_load_log(struct r5l_log *log)
2958 {
2959         struct md_rdev *rdev = log->rdev;
2960         struct page *page;
2961         struct r5l_meta_block *mb;
2962         sector_t cp = log->rdev->journal_tail;
2963         u32 stored_crc, expected_crc;
2964         bool create_super = false;
2965         int ret = 0;
2966
2967         /* Make sure it's valid */
2968         if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
2969                 cp = 0;
2970         page = alloc_page(GFP_KERNEL);
2971         if (!page)
2972                 return -ENOMEM;
2973
2974         if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
2975                 ret = -EIO;
2976                 goto ioerr;
2977         }
2978         mb = page_address(page);
2979
2980         if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
2981             mb->version != R5LOG_VERSION) {
2982                 create_super = true;
2983                 goto create;
2984         }
2985         stored_crc = le32_to_cpu(mb->checksum);
2986         mb->checksum = 0;
2987         expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
2988         if (stored_crc != expected_crc) {
2989                 create_super = true;
2990                 goto create;
2991         }
2992         if (le64_to_cpu(mb->position) != cp) {
2993                 create_super = true;
2994                 goto create;
2995         }
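        /*
         * Any validation failure above (unrecognised magic/version, checksum
         * mismatch, or a meta block that does not record its own position)
         * means the recorded journal tail is unusable, so start a fresh,
         * empty log instead.
         */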
2996 create:
2997         if (create_super) {
2998                 log->last_cp_seq = get_random_u32();
2999                 cp = 0;
3000                 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
3001                 /*
3002                  * Make sure the super block points to the correct address.
3003                  * The log may receive data very soon; if the super block
3004                  * lacks the correct log tail address, recovery can't find the log.
3005                  */
3006                 r5l_write_super(log, cp);
3007         } else
3008                 log->last_cp_seq = le64_to_cpu(mb->seq);
3009
3010         log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
3011         log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
3012         if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
3013                 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
3014         log->last_checkpoint = cp;
3015
3016         __free_page(page);
3017
3018         if (create_super) {
3019                 log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3020                 log->seq = log->last_cp_seq + 1;
3021                 log->next_checkpoint = cp;
3022         } else
3023                 ret = r5l_recovery_log(log);
3024
3025         r5c_update_log_state(log);
3026         return ret;
3027 ioerr:
3028         __free_page(page);
3029         return ret;
3030 }
3031
3032 int r5l_start(struct r5l_log *log)
3033 {
3034         int ret;
3035
3036         if (!log)
3037                 return 0;
3038
3039         ret = r5l_load_log(log);
3040         if (ret) {
3041                 struct mddev *mddev = log->rdev->mddev;
3042                 struct r5conf *conf = mddev->private;
3043
3044                 r5l_exit_log(conf);
3045         }
3046         return ret;
3047 }
3048
3049 void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
3050 {
3051         struct r5conf *conf = mddev->private;
3052         struct r5l_log *log = READ_ONCE(conf->log);
3053
3054         if (!log)
3055                 return;
3056
3057         if ((raid5_calc_degraded(conf) > 0 ||
3058              test_bit(Journal, &rdev->flags)) &&
3059             log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
3060                 schedule_work(&log->disable_writeback_work);
3061 }
3062
3063 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
3064 {
3065         struct r5l_log *log;
3066         struct md_thread *thread;
3067         int ret;
3068
3069         pr_debug("md/raid:%s: using device %pg as journal\n",
3070                  mdname(conf->mddev), rdev->bdev);
3071
3072         if (PAGE_SIZE != 4096)
3073                 return -EINVAL;
3074
3075         /*
3076          * PAGE_SIZE must be big enough to hold one r5l_meta_block plus one
3077          * r5l_payload_data_parity (and its __le32 checksum) per raid disk.
3078          *
3079          * The write journal/cache therefore does not work for very big
3080          * arrays (raid_disks > 203 with the current on-disk layout).
3081          */
3082         if (sizeof(struct r5l_meta_block) +
3083             ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
3084              conf->raid_disks) > PAGE_SIZE) {
3085                 pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
3086                        mdname(conf->mddev), conf->raid_disks);
3087                 return -EINVAL;
3088         }
3089
3090         log = kzalloc(sizeof(*log), GFP_KERNEL);
3091         if (!log)
3092                 return -ENOMEM;
3093         log->rdev = rdev;
3094         log->need_cache_flush = bdev_write_cache(rdev->bdev);
3095         log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3096                                        sizeof(rdev->mddev->uuid));
3097
3098         mutex_init(&log->io_mutex);
3099
3100         spin_lock_init(&log->io_list_lock);
3101         INIT_LIST_HEAD(&log->running_ios);
3102         INIT_LIST_HEAD(&log->io_end_ios);
3103         INIT_LIST_HEAD(&log->flushing_ios);
3104         INIT_LIST_HEAD(&log->finished_ios);
3105
3106         log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3107         if (!log->io_kc)
3108                 goto io_kc;
3109
3110         ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3111         if (ret)
3112                 goto io_pool;
3113
3114         ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3115         if (ret)
3116                 goto io_bs;
3117
3118         ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3119         if (ret)
3120                 goto out_mempool;
3121
3122         spin_lock_init(&log->tree_lock);
3123         INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
3124
3125         thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
3126                                     "reclaim");
3127         if (!thread)
3128                 goto reclaim_thread;
3129
3130         thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
3131         rcu_assign_pointer(log->reclaim_thread, thread);
3132
3133         init_waitqueue_head(&log->iounit_wait);
3134
3135         INIT_LIST_HEAD(&log->no_mem_stripes);
3136
3137         INIT_LIST_HEAD(&log->no_space_stripes);
3138         spin_lock_init(&log->no_space_stripes_lock);
3139
3140         INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
3141         INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
3142
3143         log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3144         INIT_LIST_HEAD(&log->stripe_in_journal_list);
3145         spin_lock_init(&log->stripe_in_journal_lock);
3146         atomic_set(&log->stripe_in_journal_count, 0);
3147
3148         WRITE_ONCE(conf->log, log);
3149
3150         set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
3151         return 0;
3152
3153 reclaim_thread:
3154         mempool_exit(&log->meta_pool);
3155 out_mempool:
3156         bioset_exit(&log->bs);
3157 io_bs:
3158         mempool_exit(&log->io_pool);
3159 io_pool:
3160         kmem_cache_destroy(log->io_kc);
3161 io_kc:
3162         kfree(log);
3163         return -EINVAL;
3164 }
3165
3166 void r5l_exit_log(struct r5conf *conf)
3167 {
3168         struct r5l_log *log = conf->log;
3169
3170         md_unregister_thread(conf->mddev, &log->reclaim_thread);
3171
3172         /*
3173          * 'reconfig_mutex' is held by the caller; set 'conf->log' to NULL to
3174          * ensure disable_writeback_work wakes up and exits.
3175          */
3176         WRITE_ONCE(conf->log, NULL);
3177         wake_up(&conf->mddev->sb_wait);
3178         flush_work(&log->disable_writeback_work);
3179
3180         mempool_exit(&log->meta_pool);
3181         bioset_exit(&log->bs);
3182         mempool_exit(&log->io_pool);
3183         kmem_cache_destroy(log->io_kc);
3184         kfree(log);
3185 }