#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/io_uring.h>

/* Opaque worker-pool handle; the definition lives in io-wq.c. */
struct io_wq;

/*
 * Flags stored in io_wq_work->flags.  The low bits are individual
 * power-of-two state/identity flags; the bits at and above
 * IO_WQ_HASH_SHIFT carry the hash key for hashed (serialised) work
 * (see io_wq_hash_work()/io_wq_is_hashed() below).
 */
enum {
	IO_WQ_WORK_CANCEL	= 1,	/* presumably: work marked for cancellation — confirm in io-wq.c */
	IO_WQ_WORK_HASHED	= 2,	/* serialise against other work with the same hash key */
	IO_WQ_WORK_UNBOUND	= 4,	/* run from the unbounded worker pool */
	IO_WQ_WORK_NO_CANCEL	= 8,
	IO_WQ_WORK_CONCURRENT	= 16,

	/*
	 * Identity pieces the worker must assume before running this
	 * work item (files, fs, mm, creds, blkcg, rlimit fsize) —
	 * NOTE(review): inferred from names; verify against io_uring.c.
	 */
	IO_WQ_WORK_FILES	= 32,
	IO_WQ_WORK_FS		= 64,
	IO_WQ_WORK_MM		= 128,
	IO_WQ_WORK_CREDS	= 256,
	IO_WQ_WORK_BLKCG	= 512,
	IO_WQ_WORK_FSIZE	= 1024,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
24 | ||
/* Result of a cancellation attempt (io_wq_cancel_cb()). */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};
30 | ||
6206f0e1 JA |
/* Intrusive singly-linked list node, embedded in struct io_wq_work. */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

/* Singly-linked list with O(1) tail append via the cached last pointer. */
struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
39 | ||
86f3cd1b PB |
40 | static inline void wq_list_add_after(struct io_wq_work_node *node, |
41 | struct io_wq_work_node *pos, | |
42 | struct io_wq_work_list *list) | |
43 | { | |
44 | struct io_wq_work_node *next = pos->next; | |
45 | ||
46 | pos->next = node; | |
47 | node->next = next; | |
48 | if (!next) | |
49 | list->last = node; | |
50 | } | |
51 | ||
/*
 * Append @node to the tail of @list.
 *
 * For an empty list, ->last is set before ->first is published with
 * WRITE_ONCE(): ->first is read locklessly via wq_list_empty()
 * (READ_ONCE below), so the store order is deliberate — do not reorder.
 * NOTE(review): writers are presumably serialised by an external lock;
 * confirm against the callers in io-wq.c.
 */
static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
	/* Terminate after linking; @node may carry a stale ->next. */
	node->next = NULL;
}
64 | ||
/*
 * Detach the segment from the head-side neighbour of @last up to and
 * including @last from @list.  @prev is the node preceding the cut
 * segment, or NULL when the segment starts at the list head (then
 * ->first is updated with WRITE_ONCE to pair with the lockless
 * READ_ONCE in wq_list_empty()).
 */
static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	/* Cutting through the tail makes @prev the new tail (or empties). */
	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}
79 | ||
80 | static inline void wq_list_del(struct io_wq_work_list *list, | |
81 | struct io_wq_work_node *node, | |
82 | struct io_wq_work_node *prev) | |
83 | { | |
84 | wq_list_cut(list, node, prev); | |
6206f0e1 JA |
85 | } |
86 | ||
87 | #define wq_list_for_each(pos, prv, head) \ | |
88 | for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next) | |
89 | ||
e995d512 | 90 | #define wq_list_empty(list) (READ_ONCE((list)->first) == NULL) |
6206f0e1 JA |
91 | #define INIT_WQ_LIST(list) do { \ |
92 | (list)->first = NULL; \ | |
93 | (list)->last = NULL; \ | |
94 | } while (0) | |
95 | ||
/*
 * One unit of deferred work.  Embedded in the issuing request;
 * recovered from a list node via container_of (see wq_next_work()).
 */
struct io_wq_work {
	struct io_wq_work_node list;	/* linkage into a wq work list */
	struct io_identity *identity;	/* credentials/context to assume — defined elsewhere */
	unsigned flags;			/* IO_WQ_WORK_* plus hash key in the upper bits */
};
101 | ||
86f3cd1b PB |
102 | static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) |
103 | { | |
104 | if (!work->list.next) | |
105 | return NULL; | |
106 | ||
107 | return container_of(work->list.next, struct io_wq_work, list); | |
108 | } | |
109 | ||
/* Frees/releases a completed work item. */
typedef void (free_work_fn)(struct io_wq_work *);
/* Executes a work item; returns the next linked work to run, if any. */
typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);

/* Creation parameters for io_wq_create(). */
struct io_wq_data {
	struct user_struct *user;	/* user to account workers against — TODO confirm accounting semantics in io-wq.c */

	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};
119 | ||
/* Create a worker pool with @bounded max bounded workers; NULL/ERR on failure — confirm error convention in io-wq.c. */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
/* Take a reference on an existing @wq if @data is compatible; false otherwise. */
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

/* Queue @work for asynchronous execution on @wq. */
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
/* Mark @work hashed, keyed by @val, so same-key work runs serially. */
void io_wq_hash_work(struct io_wq_work *work, void *val);
127 | static inline bool io_wq_is_hashed(struct io_wq_work *work) | |
128 | { | |
129 | return work->flags & IO_WQ_WORK_HASHED; | |
130 | } | |
771b53d0 | 131 | |
/* Predicate: return true if the given work item should be cancelled. */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

/*
 * Cancel work matching @cancel(@work, @data); with @cancel_all, keep
 * going after the first match.  Return value reports the strongest
 * outcome found (see enum io_wq_cancel).
 */
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

/* Task backing @wq — presumably the manager task; confirm in io-wq.c. */
struct task_struct *io_wq_get_task(struct io_wq *wq);
/*
 * Scheduler hooks: called when an io-wq worker blocks/resumes so the
 * pool can spin up replacements.  Compiled out (empty stubs) when
 * CONFIG_IO_WQ is not set.
 */
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif
771b53d0 | 150 | |
525b305d JA |
151 | static inline bool io_wq_current_is_worker(void) |
152 | { | |
153 | return in_task() && (current->flags & PF_IO_WORKER); | |
154 | } | |
155 | #endif |