/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>
#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void check_should_skip(struct cached_dev *, struct search *);

/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;
	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);
	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}

static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}
static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}
static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};
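/*
 * Note: the cgroup core normally prefixes a subsystem's control files with
 * its name, so these should appear as e.g. "bcache.cache_mode" in the cgroup
 * filesystem (an assumption about the default prefixing, not verified here).
 */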
static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif
static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
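/*
 * The checksum lands in the u64 slot immediately after the key's last
 * pointer - k->ptr[KEY_PTRS(k)] - masked down to 63 bits; callers must
 * reserve room for it, which is the "1 for the chksum" in the keylist
 * reservation in bch_insert_data_loop() below.
 */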
/* Insert data into cache */
static void bio_invalidate(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct bio *bio = op->cache_bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned len = min(bio_sectors(bio), 1U << 14);

		if (bch_keylist_realloc(&op->keys, 0, op->c))
			goto out;

		bio->bi_sector	+= len;
		bio->bi_size	-= len << 9;

		bch_keylist_add(&op->keys,
				&KEY(op->inode, bio->bi_sector, len));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_journal, bcache_wq);
}
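/*
 * Note that bi_sector is advanced before the key is built: bcache keys
 * address the end of the extent they cover, so invalidating e.g. 32
 * sectors starting at sector 1000 produces KEY(inode, 1032, 32).
 */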
struct open_bucket {
	struct list_head	list;
	struct task_struct	*last;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}
/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    struct task_struct *task,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last == task)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}
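/*
 * Lookup order in the function above, spelled out: a bucket whose last
 * write ended exactly where this write begins (bkey_cmp() == 0) wins;
 * failing that, the bucket most recently used by the same task; failing
 * that, the coldest bucket in the LRU, refilled from @alloc if it's empty.
 */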
/*
 * Allocates some space in the cache to write to, and sets k to point to the
 * newly allocated space; KEY_SIZE(k) and KEY_OFFSET(k) are updated to point
 * to the end of the newly allocated space.
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
			      struct search *s)
{
	struct cache_set *c = s->op.c;
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	struct closure cl, *w = NULL;
	unsigned i;

	if (s->writeback) {
		closure_init_stack(&cl);
		w = &cl;
	}

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
		unsigned watermark = s->op.write_prio
			? WATERMARK_MOVINGGC
			: WATERMARK_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		__bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last = s->task;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * pick_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}
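/*
 * Typical caller pattern (see bch_insert_data_loop() below): set up a key
 * with the bio's inode and starting sector, let bch_alloc_sectors() shrink
 * it to however much actually fit, then split the bio at KEY_SIZE(k) and
 * loop until the whole bio has been written.
 */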
static void bch_insert_data_error(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;

	while (src != op->keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->keys.top = dst;

	bch_journal(cl);
}

static void bch_insert_data_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (s->writeback)
			s->error = error;
		else if (s->write)
			set_closure_fn(cl, bch_insert_data_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
static void bch_insert_data_loop(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);
	struct bio *bio = op->cache_bio, *n;

	if (op->skip)
		return bio_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		bch_queue_gc(op->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = s->d
			? s->d->bio_split : op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_journal, bcache_wq);

		k = op->keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_insert_data_endio;
		n->bi_private	= cl;

		if (s->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_journal, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(s->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take awhile and
	 * we might be starving btree writes for gc or something.
	 */

	if (s->write) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->skip = true;
		return bio_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->keys))
			continue_at(cl, bch_journal, bcache_wq);
		else
			closure_return(cl);
	}
}
/**
 * bch_insert_data - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->skip is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->cache_bio and op->inode.
 */
void bch_insert_data(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	bch_keylist_init(&op->keys);
	bio_get(op->cache_bio);
	bch_insert_data_loop(cl);
}
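/*
 * Callers drive this through the closure machinery rather than calling it
 * directly - e.g., from request_write() and flash_dev_make_request():
 *
 *	s->op.cache_bio = bio;
 *	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
 */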
void bch_btree_insert_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (bch_btree_insert(op, op->c)) {
		s->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (op->insert_data_done) {
		bch_keylist_free(&op->keys);
		closure_return(cl);
	} else
		continue_at(cl, bch_insert_data_loop, bcache_wq);
}
/* Common code for the make_request functions */
static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (error)
		s->error = error;
	else if (ptr_stale(s->op.c, &b->key, 0)) {
		atomic_long_inc(&s->op.c->cache_read_races);
		s->error = -EINTR;
	}

	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
}
static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s, s->orig_bio);
		bio_endio(s->orig_bio, s->error);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io	= request_endio;
	bio->bi_private	= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->op.cache_bio)
		bio_put(s->op.cache_bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct bio_vec *bv;
	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, op.keys));

	__closure_init(&s->cl, NULL);

	s->op.inode		= d->id;
	s->op.c			= d->c;
	s->d			= d;
	s->op.lock		= -1;
	s->task			= current;
	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}
static void btree_read_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	int ret = btree_root(search_recurse, op->c, op);

	if (ret == -EAGAIN)
		continue_at(cl, btree_read_async, bcache_wq);

	closure_return(cl);
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}
/* Process reads */

static void cached_dev_read_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->op.insert_collision)
		bch_mark_cache_miss_collision(s);

	if (s->op.cache_bio) {
		int i;
		struct bio_vec *bv;

		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}
static void request_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}
static void request_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * s->op.cache_bio != NULL implies that we had a cache miss; cache_bio
	 * now contains data ready to be inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->op.cache_bio) {
		bio_reset(s->op.cache_bio);
		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
		bch_bio_map(s->op.cache_bio, NULL);

		bio_copy_data(s->cache_miss, s->op.cache_bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable)
		bch_data_verify(s);

	bio_complete(s);

	if (s->op.cache_bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
		s->op.type = BTREE_REPLACE;
		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}
static void request_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);

	if (s->error)
		continue_at_nobarrier(cl, request_read_error, bcache_wq);
	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, request_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
}
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = 0;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
	if (miss == bio)
		s->op.lookup_done = true;

	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;

	if (s->cache_miss || s->op.skip)
		goto out_submit;

	if (miss != bio ||
	    (bio->bi_rw & REQ_RAHEAD) ||
	    (bio->bi_rw & REQ_META) ||
	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
		reada = 0;
	else {
		reada = min(dc->readahead >> 9,
			    sectors - bio_sectors(miss));

		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
			reada = bdev_sectors(miss->bi_bdev) -
				bio_end_sector(miss);
	}

	s->cache_bio_sectors = bio_sectors(miss) + reada;
	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);

	if (!s->op.cache_bio)
		goto out_submit;

	s->op.cache_bio->bi_sector	= miss->bi_sector;
	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;

	s->op.cache_bio->bi_end_io	= request_endio;
	s->op.cache_bio->bi_private	= &s->cl;

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = -EINTR;
	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
		goto out_put;

	bch_bio_map(s->op.cache_bio, NULL);
	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss = miss;
	bio_get(s->op.cache_bio);

	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(s->op.cache_bio);
	s->op.cache_bio = NULL;
out_submit:
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}
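/*
 * Ordering note for the function above: the placeholder key goes into the
 * btree via bch_btree_insert_check_key() *before* cache_bio is mapped and
 * submitted, and any failure after the split falls back to reading just
 * the missed range from the backing device (out_submit).
 */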
static void request_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	check_should_skip(dc, s);
	closure_call(&s->op.cl, btree_read_async, NULL, cl);

	continue_at(cl, request_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}
static void request_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start, end;
	start = KEY(dc->disk.id, bio->bi_sector, 0);
	end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);

	check_should_skip(dc, s);
	down_read_non_owner(&dc->writeback_lock);

	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		s->op.skip	= false;
		s->writeback	= true;
	}

	if (bio->bi_rw & REQ_DISCARD)
		goto skip;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->op.skip)) {
		s->op.skip = false;
		s->writeback = true;
	}

	if (s->op.skip)
		goto skip;

	trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);

	if (!s->writeback) {
		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
						   dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	} else {
		bch_writeback_add(dc);

		if (s->op.flush_journal) {
			/* Also need to send a flush to the backing device */
			s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
							   dc->disk.bio_split);

			bio->bi_size = 0;
			bio->bi_vcnt = 0;
			closure_bio_submit(bio, cl, s->d);
		} else {
			s->op.cache_bio = bio;
		}
	}
out:
	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
skip:
	s->op.skip = true;
	s->op.cache_bio = s->orig_bio;
	bio_get(s->op.cache_bio);

	if ((bio->bi_rw & REQ_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		goto out;

	closure_bio_submit(bio, cl, s->d);
	goto out;
}
static void request_nodata(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;

	if (bio->bi_rw & REQ_DISCARD) {
		request_write(dc, s);
		return;
	}

	if (s->op.flush_journal)
		bch_journal_meta(s->op.c, cl);

	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}
/* Cached devices - read & write stuff */
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}
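/*
 * Rough shape of the result, assuming the thresholds are configured: the
 * closer c->congested is to -CONGESTED_MAX (heavily congested), the
 * smaller the value returned. check_should_skip() bypasses the cache once
 * a request's sequential-size estimate reaches this threshold, so a more
 * congested cache bypasses more IO; the random popcount subtraction just
 * dithers the cutoff.
 */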
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
static void check_should_skip(struct cached_dev *dc, struct search *s)
{
	struct cache_set *c = s->op.c;
	struct bio *bio = &s->bio.bio;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);

	if (atomic_read(&dc->disk.detaching) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	if (dc->sequential_merge) {
		struct io *i;

		spin_lock(&dc->io_lock);

		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
			if (i->last == bio->bi_sector &&
			    time_before(jiffies, i->jiffies))
				goto found;

		i = list_first_entry(&dc->io_lru, struct io, lru);

		add_sequential(s->task);
		i->sequential = 0;
found:
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

		i->last			 = bio_end_sector(bio);
		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
		s->task->sequential_io	 = i->sequential;

		hlist_del(&i->hash);
		hlist_add_head(&i->hash, iohash(dc, i->last));
		list_move_tail(&i->lru, &dc->io_lru);

		spin_unlock(&dc->io_lock);
	} else {
		s->task->sequential_io = bio->bi_size;

		add_sequential(s->task);
	}

	sectors = max(s->task->sequential_io,
		      s->task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(s->orig_bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(s->orig_bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return;
skip:
	bch_mark_sectors_bypassed(s, bio_sectors(bio));
	s->op.skip = true;
}
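/*
 * Example of the sequential cutoff above: with dc->sequential_cutoff at,
 * say, 4 MiB, a task whose tracked sequential IO (or its EWMA) reaches
 * 4 MiB gets sent straight to the backing device, on the theory that big
 * streaming IO gains little from caching and would only evict hotter data.
 */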
static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev	= dc->bdev;
	bio->bi_sector	+= dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s, bio);

		if (!bio_has_data(bio))
			request_nodata(dc, s);
		else if (rw)
			request_write(dc, s);
		else
			request_read(dc, s);
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn	= cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}
/* Flash backed devices */
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec *bv;
	int i;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		sectors	-= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_size));

	if (!bio->bi_size)
		s->op.lookup_done = true;

	return 0;
}
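/*
 * Flash-only volumes have no backing device to fall back on, so a "miss"
 * is just an unallocated hole: the range is zero-filled in place rather
 * than redirected anywhere.
 */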
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s, bio);

	if (bio_has_data(bio) && !rw) {
		closure_call(&s->op.cl, btree_read_async, NULL, cl);
	} else if (bio_has_data(bio) || s->op.skip) {
		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
					&KEY(d->id, bio->bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->writeback	= true;
		s->op.cache_bio	= bio;

		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	} else {
		/* No data - probably a cache flush */
		if (s->op.flush_journal)
			bch_journal_meta(s->op.c, cl);
	}

	continue_at(cl, search_free, NULL);
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn	= flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}
void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif

	return 0;
}