#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

enum {
	ICQ_EXITED		= 1 << 2,
};

/*
 * An io_cq (icq) is an association between an io_context (ioc) and a
 * request_queue (q).  This is used by elevators which need to track
 * information per ioc-q pair.
 *
 * An elevator can request use of icq's by setting elevator_type->icq_size
 * and ->icq_align.  Both size and align must be larger than those of
 * struct io_cq, and the elevator can use the tail area for private
 * information.  The recommended way to do this is to define a struct
 * which contains io_cq as its first member, followed by the private
 * members, and to use that struct's size and align.  For example,
 *
 *	struct snail_io_cq {
 *		struct io_cq	icq;
 *		int		poke_snail;
 *		int		feed_snail;
 *	};
 *
 *	struct elevator_type snail_elv_type = {
 *		.ops =		{ ... },
 *		.icq_size =	sizeof(struct snail_io_cq),
 *		.icq_align =	__alignof__(struct snail_io_cq),
 *		...
 *	};
 *
 * If icq_size is set, block core will manage icq's.  Each request will
 * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn()
 * is called and will hold a reference to the associated io_context.
 *
 * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
 * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
 * are called with both the associated io_context and queue locks held.
 *
 * An elevator is allowed to look up an icq using ioc_lookup_icq() while
 * holding the queue lock, but the returned icq is valid only until the
 * queue lock is released.  Elevators cannot and should not try to create
 * or destroy icq's.
 *
 * As icq's are linked from both ioc and q, the locking rules are a bit
 * complex.
 *
 * - ioc lock nests inside q lock.
 *
 * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
 *   q->icq_list and icq->q_node by q lock.
 *
 * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
 *   itself is protected by q lock.  However, both the indexes and icq
 *   itself are also RCU managed and lookup can be performed holding only
 *   the q lock.
 *
 * - icq's are not reference counted.  They are destroyed when either the
 *   ioc or q goes away.  Each request with icq set holds an extra
 *   reference to ioc to ensure it stays until the request is completed.
 *
 * - Linking and unlinking icq's are performed while holding both ioc and q
 *   locks.  Due to the lock ordering, q exit is simple but ioc exit
 *   requires a reverse-order double lock dance; see the sketch below.
 */
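
/*
 * A minimal sketch of that reverse-order dance, loosely modeled on the
 * release path in blk-ioc.c (ioc_release_fn()).  Because ioc lock nests
 * inside q lock, an exiting ioc cannot take a q lock while holding its
 * own lock; it has to trylock and back off on failure.  Note that
 * ioc_destroy_icq() is internal to blk-ioc.c and appears here only for
 * illustration:
 *
 *	spin_lock_irq(&ioc->lock);
 *	while (!hlist_empty(&ioc->icq_list)) {
 *		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
 *						struct io_cq, ioc_node);
 *		struct request_queue *q = icq->q;
 *
 *		if (spin_trylock(q->queue_lock)) {
 *			ioc_destroy_icq(icq);
 *			spin_unlock(q->queue_lock);
 *		} else {
 *			spin_unlock_irq(&ioc->lock);
 *			cpu_relax();
 *			spin_lock_irq(&ioc->lock);
 *		}
 *	}
 *	spin_unlock_irq(&ioc->lock);
 */
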
struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node and ioc_node link io_cq through icq_list of q and ioc
	 * respectively.  Both fields are unused once ioc_exit_icq() is
	 * called and are then shared with __rcu_icq_cache and __rcu_head,
	 * which are used for RCU free of io_cq.
	 */
	union {
		struct list_head	q_node;
		struct kmem_cache	*__rcu_icq_cache;
	};
	union {
		struct hlist_node	ioc_node;
		struct rcu_head		__rcu_head;
	};

	unsigned int		flags;
};
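
/*
 * With icq_size set as in the snail example above, an elevator gets its
 * private structure back from a bare io_cq pointer with container_of().
 * The helper below is hypothetical and only mirrors the struct sketched
 * in the comment:
 *
 *	static inline struct snail_io_cq *to_snail_icq(struct io_cq *icq)
 *	{
 *		return container_of(icq, struct snail_io_cq, icq);
 *	}
 */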

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed, and may be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t active_ref;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;

	/*
	 * For request batching
	 */
	int nr_batch_requests;	/* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	struct radix_tree_root	icq_tree;
	struct io_cq __rcu	*icq_hint;
	struct hlist_head	icq_list;

	struct work_struct release_work;
};
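
/*
 * A sketch of how icq_tree and icq_hint cooperate on lookup, in the
 * spirit of ioc_lookup_icq() in blk-ioc.c: check the last-hit hint
 * first, fall back to the radix tree indexed by q->id, and refresh the
 * hint on a tree hit.  Both are RCU managed, so holding the q lock is
 * enough, and the hint update may race harmlessly:
 *
 *	rcu_read_lock();
 *	icq = rcu_dereference(ioc->icq_hint);
 *	if (!icq || icq->q != q) {
 *		icq = radix_tree_lookup(&ioc->icq_tree, q->id);
 *		if (icq && icq->q == q)
 *			rcu_assign_pointer(ioc->icq_hint, icq);
 *		else
 *			icq = NULL;
 *	}
 *	rcu_read_unlock();
 */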

/**
 * get_io_context_active - get active reference on ioc
 * @ioc: ioc of interest
 *
 * Only iocs with an active reference can issue new IOs.  This function
 * acquires an active reference on @ioc.  The caller must already have an
 * active reference on @ioc.
 */
static inline void get_io_context_active(struct io_context *ioc)
{
	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
	atomic_long_inc(&ioc->refcount);
	atomic_inc(&ioc->active_ref);
}

static inline void ioc_task_link(struct io_context *ioc)
{
	get_io_context_active(ioc);

	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
	atomic_inc(&ioc->nr_tasks);
}
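
/*
 * ioc_task_link() is intended for the clone path: a child created with
 * CLONE_IO shares its parent's io_context and must be accounted for in
 * both the active reference and task counts.  A minimal sketch, along
 * the lines of copy_io() in kernel/fork.c, where tsk and clone_flags
 * are the usual fork arguments:
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc && (clone_flags & CLONE_IO)) {
 *		ioc_task_link(ioc);
 *		tsk->io_context = ioc;
 *	}
 *
 * exit_io_context() drops both counts again when the task exits.
 */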

struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif
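
/*
 * get_task_io_context() returns @task's io_context, allocating one on
 * first use, with its refcount already elevated; callers pair it with
 * put_io_context().  A hedged sketch of the usual pattern, similar to
 * set_task_ioprio() in fs/ioprio.c:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 *	if (ioc) {
 *		ioc->ioprio = ioprio;
 *		put_io_context(ioc);
 *	}
 */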

#endif