/*
 * io-wq.h — interface to the io_uring async worker pool (io-wq).
 */
1 | #ifndef INTERNAL_IO_WQ_H |
2 | #define INTERNAL_IO_WQ_H | |
3 | ||
e941894e | 4 | #include <linux/refcount.h> |
98447d65 | 5 | |
struct io_wq;

/* Per-work flags (struct io_wq_work.flags). */
enum {
	IO_WQ_WORK_CANCEL	= 1,	/* work is marked for cancellation */
	IO_WQ_WORK_HASHED	= 2,	/* serialise by hash key (io_wq_hash_work()) */
	IO_WQ_WORK_UNBOUND	= 4,	/* queue to the unbounded worker pool */
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

/* Result of a cancellation attempt (io_wq_cancel_cb()). */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};
22 | ||
/*
 * Singly-linked work queue node; embedded in struct io_wq_work.
 */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

/*
 * Work list tracking both head and tail, so tail insertion is O(1).
 */
struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

/*
 * Insert @node directly after @pos in @list, updating the tail pointer
 * when @pos was the last entry.
 */
static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	node->next = pos->next;
	pos->next = node;
	if (!node->next)
		list->last = node;
}
43 | ||
6206f0e1 JA |
44 | static inline void wq_list_add_tail(struct io_wq_work_node *node, |
45 | struct io_wq_work_list *list) | |
46 | { | |
47 | if (!list->first) { | |
e995d512 JA |
48 | list->last = node; |
49 | WRITE_ONCE(list->first, node); | |
6206f0e1 JA |
50 | } else { |
51 | list->last->next = node; | |
52 | list->last = node; | |
53 | } | |
0020ef04 | 54 | node->next = NULL; |
6206f0e1 JA |
55 | } |
56 | ||
86f3cd1b PB |
57 | static inline void wq_list_cut(struct io_wq_work_list *list, |
58 | struct io_wq_work_node *last, | |
6206f0e1 JA |
59 | struct io_wq_work_node *prev) |
60 | { | |
86f3cd1b PB |
61 | /* first in the list, if prev==NULL */ |
62 | if (!prev) | |
63 | WRITE_ONCE(list->first, last->next); | |
64 | else | |
65 | prev->next = last->next; | |
66 | ||
67 | if (last == list->last) | |
6206f0e1 | 68 | list->last = prev; |
86f3cd1b PB |
69 | last->next = NULL; |
70 | } | |
71 | ||
/*
 * Remove a single @node from @list.  @prev must be the node immediately
 * before @node (NULL if @node is the head), as tracked by
 * wq_list_for_each().
 */
static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}
78 | ||
/* Walk @head; @prv tracks the previous node so wq_list_del() can unlink. */
#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

/* Lockless emptiness check; pairs with WRITE_ONCE() of ->first above. */
#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {				\
	(list)->first = NULL;					\
	(list)->last = NULL;					\
} while (0)
87 | ||
/* One unit of work queued for execution by an io-wq worker. */
struct io_wq_work {
	struct io_wq_work_node list;	/* linkage in the work queue */
	const struct cred *creds;	/* NOTE(review): creds the work runs under, presumably — see io-wq.c */
	unsigned flags;			/* IO_WQ_WORK_* flags */
};
93 | ||
86f3cd1b PB |
94 | static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) |
95 | { | |
96 | if (!work->list.next) | |
97 | return NULL; | |
98 | ||
99 | return container_of(work->list.next, struct io_wq_work, list); | |
100 | } | |
101 | ||
/* Frees a finished work item; NOTE(review): return value appears to be
 * follow-on work to process — confirm against io-wq.c usage. */
typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
/* Executes one queued work item. */
typedef void (io_wq_work_fn)(struct io_wq_work *);

/*
 * Refcounted state for hashed-work serialisation, shareable across wq
 * instances via io_wq_data.
 */
struct io_wq_hash {
	refcount_t refs;		/* released by io_wq_put_hash() */
	unsigned long map;		/* NOTE(review): busy-bucket bitmap, presumably — verify in io-wq.c */
	struct wait_queue_head wait;	/* woken when hashed work can proceed */
};

/* Drop a reference to @hash, freeing it on the final put. */
static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}
116 | ||
/* Creation parameters for io_wq_create(). */
struct io_wq_data {
	struct io_wq_hash *hash;	/* shared hashed-work state */
	io_wq_work_fn *do_work;		/* callback that runs a work item */
	free_work_fn *free_work;	/* callback that releases a work item */
};

/* Create a worker pool with @bounded bounded workers; see io-wq.c. */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
/* Drop a reference to @wq. */
void io_wq_put(struct io_wq *wq);
/* Drop the reference and wait for the wq to shut down. */
void io_wq_put_and_exit(struct io_wq *wq);

/* Queue @work for asynchronous execution on @wq. */
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
/* Mark @work as hashed on key @val so matching work runs serially. */
void io_wq_hash_work(struct io_wq_work *work, void *val);

130 | static inline bool io_wq_is_hashed(struct io_wq_work *work) | |
131 | { | |
132 | return work->flags & IO_WQ_WORK_HASHED; | |
133 | } | |

/* Predicate: return true if @work (matched against @data) should be cancelled. */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

/*
 * Cancel queued/running work for which @cancel(@work, @data) returns
 * true; @cancel_all cancels every match instead of only the first.
 */
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

#if defined(CONFIG_IO_WQ)
/* Scheduler hooks: called when an io-wq worker blocks / resumes. */
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
/* No-op stubs so callers compile when io-wq is configured out. */
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif
771b53d0 | 151 | |
525b305d JA |
152 | static inline bool io_wq_current_is_worker(void) |
153 | { | |
3bfe6106 JA |
154 | return in_task() && (current->flags & PF_IO_WORKER) && |
155 | current->pf_io_worker; | |
525b305d JA |
156 | } |
157 | #endif |