/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>
/* Bios with headers */
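
/*
 * A bbio is a bio with a small header in front of it: the bkey the IO
 * is being issued against, plus a submission timestamp.  Roughly (the
 * real definition lives in bcache.h):
 *
 *	struct bbio {
 *		unsigned	submit_time_us;
 *		union {
 *			struct bkey	key;
 *			uint64_t	_pad[3];
 *		};
 *		struct bio	bio;
 *	};
 *
 * They come from the bio_meta mempool so metadata IO can always make
 * forward progress, even under memory pressure.
 */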
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}
struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_max_vecs	= bucket_pages(c);
	bio->bi_io_vec		= bio->bi_inline_vecs;

	return bio;
}
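
/*
 * __bch_submit_bbio() expects b->key to already hold the pointer being
 * read or written: the first pointer's offset becomes the starting
 * sector and its device becomes the target block device.  The
 * submission timestamp feeds the congestion accounting below, and
 * closure_bio_submit() ties completion into the closure stashed in
 * bi_private.
 */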
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private);
}
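
/*
 * bch_submit_bbio() is the caller-facing wrapper: it copies the
 * requested pointer out of the key into the bbio's header key, then
 * hands off to __bch_submit_bbio().
 */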
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}
/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */
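	/*
	 * That is: every error_decay IOs ("refresh") the error count is
	 * multiplied by 127/128, so after n refreshes it has decayed to
	 * (127/128)^n of its value; (127/128)^n = 1/2 at
	 * n = log(1/2)/log(127/128) ~= 88.4 refreshes.
	 */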

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */
			count = atomic_cmpxchg(&ca->io_count, old, new);
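
			/*
			 * Lockless decay: only the thread whose cmpxchg
			 * actually lowered io_count gets to rescale
			 * io_errors; everyone else retries with the
			 * count it read back.
			 */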
			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;
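
		/*
		 * io_errors is fixed point: the low IO_ERROR_SHIFT bits
		 * are the fraction the 127/128 decay above eats away at,
		 * so one real error adds 1 << IO_ERROR_SHIFT and the
		 * shift converts back to whole errors for the limit
		 * check.
		 */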
		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}
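
/*
 * Latency accounting for congestion control: IOs that complete slower
 * than the read/write threshold drive c->congested negative (by up to
 * CONGESTED_MAX), and each fast IO while it is negative nudges it back
 * toward zero.  The cache-bypass heuristics read this value to decide
 * when to send traffic straight to the backing device.
 */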
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}
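
/*
 * Common completion path for bbios: account errors and latency, then
 * drop the bio's reference and the ref the IO held on its parent
 * closure.
 */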
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}