#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/io_uring.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_NO_CANCEL	= 8,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_WORK_FILES	= 32,
	IO_WQ_WORK_FS		= 64,
	IO_WQ_WORK_MM		= 128,
	IO_WQ_WORK_CREDS	= 256,
	IO_WQ_WORK_BLKCG	= 512,
	IO_WQ_WORK_FSIZE	= 1024,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}

static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}

static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}

#define wq_list_for_each(pos, prv, head)			\
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {				\
	(list)->first = NULL;					\
	(list)->last = NULL;					\
} while (0)

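/*
 * Illustrative sketch, not part of the original header: the helpers above
 * implement a NULL-terminated singly linked list with a tail pointer, so
 * unlinking a node requires the previously visited node as well. With
 * hypothetical locals 'list' (an initialized io_wq_work_list) and 'target'
 * (a node known to be on it), a walk-and-unlink might look like:
 *
 *	struct io_wq_work_node *pos, *prev;
 *
 *	wq_list_for_each(pos, prev, &list) {
 *		if (pos == target) {
 *			wq_list_del(&list, pos, prev);
 *			break;
 *		}
 *	}
 */
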
struct io_wq_work {
	struct io_wq_work_node list;
	struct io_identity *identity;
	unsigned flags;
};

static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}

typedef void (free_work_fn)(struct io_wq_work *);
typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);

struct io_wq_data {
	struct user_struct *user;

	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

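/*
 * Illustrative sketch, not part of the original header: a caller fills in
 * struct io_wq_data with its work and free handlers before creating the
 * workqueue. The handler names ('my_do_work', 'my_free_work') and the
 * bounded-worker count below are hypothetical:
 *
 *	struct io_wq_data data = {
 *		.do_work	= my_do_work,
 *		.free_work	= my_free_work,
 *	};
 *	struct io_wq *wq;
 *
 *	wq = io_wq_create(4, &data);
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 */
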
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

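/*
 * Illustrative sketch, not part of the original header: hashing a work item
 * on a key serializes it against other work hashed on the same key (the key
 * lands in the upper bits above IO_WQ_HASH_SHIFT). 'wq', 'work' and 'inode'
 * below are hypothetical:
 *
 *	io_wq_hash_work(&work, inode);
 *	io_wq_enqueue(wq, &work);
 *
 *	if (io_wq_is_hashed(&work))
 *		...;
 */
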
void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

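/*
 * Illustrative sketch, not part of the original header: io_wq_cancel_cb()
 * runs the supplied predicate against pending and running work and cancels
 * the matches (all of them if 'cancel_all' is set). A hypothetical matcher
 * that cancels by identity pointer:
 *
 *	static bool match_identity(struct io_wq_work *work, void *data)
 *	{
 *		return work->identity == data;
 *	}
 *
 *	ret = io_wq_cancel_cb(wq, match_identity, identity, true);
 */
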
struct task_struct *io_wq_get_task(struct io_wq *wq);

#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER);
}
#endif