#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

/*
 * Per-CPU software queue context.
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
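
/*
 * Illustrative sketch (not part of this header): ctx->lock protects
 * ctx->rq_list, so queueing a request onto a software queue looks roughly
 * like
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_list);
 *	spin_unlock(&ctx->lock);
 */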

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
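
/*
 * Illustrative sketch (not part of this header): a tag map and the requests
 * backing it are set up and torn down in pairs, along the lines of
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, nr_tags, reserved_tags);
 *	blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 *
 * The argument names above are just the parameter names from the
 * declarations; error handling is omitted.
 */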

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
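
/*
 * Illustrative sketch (not part of this header): q->mq_map[] stores one
 * hardware queue index per possible CPU, so finding the hctx that services
 * a given software queue is a plain array lookup, e.g.
 *
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 */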

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
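
/*
 * Illustrative sketch (not part of this header): blk_mq_get_ctx() and
 * blk_mq_put_ctx() come in pairs and bracket a preemption-disabled region
 * (get_cpu()/put_cpu()), so no sleeping in between:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	...short, non-blocking work on ctx...
 *	blk_mq_put_ctx(ctx);
 */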

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue	*q;
	unsigned int		flags;
	unsigned int		shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx	*ctx;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Requests allocated through an I/O scheduler (BLK_MQ_REQ_INTERNAL) come
 * from the scheduler tags; everything else uses the driver tags.
 */
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
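
/*
 * Illustrative sketch (not part of this header): callers typically fill in
 * the input fields and resolve ctx/hctx before picking a tag set, roughly
 *
 *	struct blk_mq_alloc_data data = { .q = q, .flags = flags };
 *
 *	data.ctx = blk_mq_get_ctx(q);
 *	data.hctx = blk_mq_map_queue(q, data.ctx->cpu);
 *	tags = blk_mq_tags_from_data(&data);
 *
 * "flags" and "tags" here are just placeholders.
 */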

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif