/*
 * Main bcache entry point - handle a read or a write request and decide what
 * to do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>
#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90
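/*
 * Both cutoffs are compared against gc_stats.in_use, the percentage of cache
 * buckets currently in use: above CUTOFF_CACHE_ADD, check_should_bypass()
 * stops adding new data to the cache; above CUTOFF_CACHE_READA,
 * cached_dev_cache_miss() stops extending misses with readahead.
 */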
struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);
/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
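/*
 * A cache_mode of -1 means "no per-cgroup override"; cache_mode() below then
 * falls back to the mode stored in the backing device's superblock.
 */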
static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;

	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);

	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}
static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}
static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}
static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};
static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}
struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif
static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, we might already be waiting on another journal
	 * write - we can't wait on more than one journal write at a time.
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH)
		op->replace_collision = true;
	else if (ret)
		op->insert_data_done = true;

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, bcache_wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}
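/*
 * Bypass/invalidate path: rather than writing the data to the cache,
 * bch_data_invalidate() below walks the bio's range and queues up keys with
 * no pointers. Inserting those keys into the btree overwrites whatever was
 * cached for that range, which is exactly the invalidation the bypass case
 * needs (compare the comment in bch_data_insert_error()).
 */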
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, bcache_wq);
}
static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys
	 * to insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}
static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, bcache_wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if op->writeback is true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write
		 * while we wait for buckets to be freed up, so just
		 * invalidate the rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, bcache_wq);
		else
			closure_return(cl);
	}
}
/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a
 * flash-only volume - it's also used by the moving garbage collector to
 * compact data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset, and
 * op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->bio, op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}
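/*
 * Typical call site (a sketch based on the callers later in this file, not a
 * new API): the submitter fills in the data_insert_op embedded in its search -
 * op->c, op->bio, op->inode, op->writeback, op->bypass and friends - and then
 * runs it as a closure:
 *
 *	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 *
 * as cached_dev_write(), cached_dev_read_done() and flash_dev_make_request()
 * do below.
 */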
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;

	i += atomic_read(&c->congested);

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}
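/*
 * The value bch_get_congested() returns is used by check_should_bypass()
 * below as a sector threshold: when the cache set is congested, sequential
 * streams longer than this many sectors are sent straight to the backing
 * device. The random bitmap_weight() subtraction above appears to be there
 * only to add a little jitter to that threshold (a reading of the code, not
 * a documented guarantee).
 */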
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
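/*
 * check_should_bypass() is the admission policy: a request is sent straight
 * to the backing device (bypassing the cache) when the device is detaching,
 * the cache is more than CUTOFF_CACHE_ADD percent full, the request is a
 * discard, the cache mode is none (or writearound for writes), the I/O is
 * unaligned to the cache's block size, the task's sequential stream exceeds
 * sequential_cutoff, or the cache set is congested. Everything else is
 * cached, and the cached I/O is accounted via bch_rescale_priorities().
 */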
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}
/* Stack frame for bio_complete */
struct search {
	struct closure		cl;
	struct bcache_device	*d;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;

	unsigned		insert_bio_sectors;

	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
	struct bio_vec		bv[BIO_MAX_PAGES];
};
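/*
 * One struct search is allocated per request from the cache set's search
 * mempool in search_alloc() and torn down again in search_free(); s->cl is
 * the parent closure that the per-request work (lookup, miss fill, insert)
 * hangs off.
 */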
static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but not error so it doesn't
	 * get counted against the cache device, but we'll still reread the
	 * data from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}
/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
				     KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			  GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}
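/*
 * cache_lookup() drives the read path: bch_btree_map_keys() walks the extent
 * btree from the request's start sector and calls cache_lookup_fn() for each
 * key it finds, which either fills the gap via the device's cache_miss hook
 * or submits a read from the cache. MAP_DONE means the whole bio has been
 * dispatched; an -EAGAIN from the btree walk just requeues the lookup.
 */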
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}
/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}
static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}
static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio);
	bio->bi_io_vec		= s->bv;
	bio->bi_max_vecs	= BIO_MAX_PAGES;
	__bio_clone(bio, s->orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	atomic_set(&bio->bi_cnt, 3);
}
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	bio_complete(s);
	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, iop.insert_keys));

	__closure_init(&s->cl, NULL);

	s->iop.inode		= d->id;
	s->iop.c		= d->c;
	s->d			= d;
	s->orig_bio		= bio;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

	return s;
}
static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}
static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be
	 * inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	cache_bio->bi_bdev		= miss->bi_bdev;
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}
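/*
 * Read path overview: cache_lookup() walks the btree; whatever isn't cached
 * is read from the backing device by cached_dev_cache_miss() above,
 * optionally extended with readahead, and cached_dev_read_done() later copies
 * the data out to the caller and inserts it into the cache. The replace_key /
 * bch_btree_insert_check_key() pair appears to exist so that the later insert
 * can detect a write that raced with the miss and avoid inserting stale data
 * over newer data.
 */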
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}
static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}
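/*
 * Write path: cached_dev_write() picks one of three dispositions for the
 * request -
 *
 *  - bypass: the data isn't cached; the original bio goes straight to the
 *    backing device and the insert op merely invalidates any cached data in
 *    that range.
 *  - writeback: the data is written only to the cache and marked dirty, to be
 *    flushed to the backing device later; only a flush (if requested) is sent
 *    to the backing device now.
 *  - writethrough: a clone of the bio is inserted into the cache while the
 *    original is submitted to the backing device.
 *
 * All three paths finish through bch_data_insert() and
 * cached_dev_write_complete().
 */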
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
					      dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}
/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}
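/*
 * Note the remapping above: the cached device's data starts at
 * dc->sb.data_offset within the backing device, so every bio is shifted by
 * that offset before being cached or passed through. When cached_dev_get()
 * fails (e.g. the device is going away), the request is simply passed through
 * to the backing device, except for discards the backing device doesn't
 * support, which are completed immediately.
 */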
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}
		cached_dev_put(dc);
	}

	return ret;
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}
/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);
	}

	bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}
static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}
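/*
 * Flash-only volumes have no backing device, so writes are always "writeback"
 * writes into the cache itself (s->iop.writeback above), discards become
 * invalidations via the bypass flag, and cache misses on reads are satisfied
 * by zero-filling in flash_dev_cache_miss().
 */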
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}
void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn	= flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}
void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif

	return 0;
}