/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

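/*
 * A brief orientation note: the flush state machine (see blk-flush.c)
 * double-buffers pending flushes using the two flush_queue[] lists below.
 * flush_pending_idx selects the list on which new requests are staged,
 * flush_running_idx the list whose flush is currently in flight, and the
 * single-bit indices are toggled as flushes are issued and completed.
 */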
struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

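/*
 * Worked example (illustrative values, not from the original source): with
 * a 64K segment boundary, mask = 0xffff.  Take two physically contiguous
 * vecs, addr1 = 0xf000 with bv_len = 0x1000 and addr2 = 0x10000 with
 * bv_len = 0x1000.  The contiguity test passes (0xf000 + 0x1000 == 0x10000),
 * but (addr1 | mask) = 0xffff while ((addr2 + 0x1000 - 1) | mask) = 0x1ffff,
 * so the merged segment would straddle the 64K boundary and the vecs are
 * not mergeable.
 */
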
static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

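/*
 * Worked example (illustrative values): with a 4K virt boundary,
 * queue_virt_boundary(q) = 0xfff.  A previous vec with bv_offset = 0 and
 * bv_len = 0x1000 ends exactly on the boundary (0x1000 & 0xfff == 0), so a
 * new vec starting at a boundary-aligned offset (offset & 0xfff == 0)
 * merges without a gap.  Had the previous vec ended at 0xe00 instead,
 * 0xe00 & 0xfff != 0 would report a gap and the merge is rejected.
 */
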
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 * 1) If max_discard_segments > 1, the driver treats every bio as a range
 *    and sends the bios to the controller together. The ranges don't need
 *    to be contiguous.
 * 2) Otherwise, the request will be a normal read/write request and the
 *    ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

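/*
 * Multi-range capable hardware takes branch 1) above: NVMe, for instance,
 * advertises up to 256 DSM ranges per Dataset Management command, so its
 * queues report max_discard_segments > 1 and discard bios are merged as
 * separate ranges without having to be contiguous.
 */
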
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_print_req_error(struct request *req, blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, bool *same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

void __blk_account_io_start(struct request *req);
void __blk_account_io_done(struct request *req, u64 now);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_rqs(q);
	__elevator_exit(q, e);
}

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but
	 * compared to the performance impact of cloned bios themselves the
	 * check below doesn't matter anyway.
	 */
	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

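/*
 * For example, a bio carrying a single 512-byte vec at bv_offset 0, on a
 * queue without a chunk_sectors limit, fails all three conditions above
 * (chunk_sectors == 0, bi_vcnt == 1, 512 + 0 <= PAGE_SIZE), so the
 * submission path can skip __blk_queue_split() for it entirely.
 */
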
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 * a) it's attached to a gendisk, and
 * b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;
}

static inline void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a normal
	 * IO on either queueing or completion; accounting the containing
	 * request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ))
		__blk_account_io_done(req, now);
}

static inline void blk_account_io_start(struct request *req)
{
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it must also be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

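/*
 * Worked example: with 512-byte logical blocks,
 * round_down(UINT_MAX, 512) = 4294966784, so the function returns
 * 4294966784 >> 9 = 8388607 sectors, i.e. just under 4 GiB per bio.
 * With 4096-byte logical blocks it returns 8388600 sectors.
 */
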
/*
 * The max bio size which is aligned to q->limits.discard_granularity.
 * This is a hint for splitting large discard bios in the generic block
 * layer; if the device driver then needs to split the discard bio into
 * smaller ones, their bi_size can easily be aligned to the
 * discard_granularity of the device's queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}

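/*
 * Worked example: with discard_granularity = 1 MiB,
 * round_down(UINT_MAX, 1 << 20) = 4293918720 bytes (4095 MiB), i.e.
 * 8386560 sectors, so a discard bio split at this limit stays aligned
 * to the device's discard granularity.
 */
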
/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

struct request_queue *blk_alloc_queue(int node_id);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);

extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

static inline void bio_clear_polled(struct bio *bio)
{
	/* can't support alloc cache if we turn off polling */
	bio_clear_flag(bio, BIO_PERCPU_CACHE);
	bio->bi_opf &= ~REQ_POLLED;
}

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

#endif /* BLK_INTERNAL_H */