/* fs/io-wq.h: internal definitions for io_uring's async work queue (io-wq) */
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>
#include <linux/io_uring.h>

struct io_wq;

enum {
        IO_WQ_WORK_CANCEL       = 1,
        IO_WQ_WORK_HASHED       = 2,
        IO_WQ_WORK_UNBOUND      = 4,
        IO_WQ_WORK_CONCURRENT   = 16,

        IO_WQ_HASH_SHIFT        = 24,   /* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
        IO_WQ_CANCEL_OK,        /* cancelled before started */
        IO_WQ_CANCEL_RUNNING,   /* found, running, and attempted cancelled */
        IO_WQ_CANCEL_NOTFOUND,  /* work not found */
};

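/*
 * Singly linked work list helpers.  struct io_wq_work_node and
 * struct io_wq_work_list (a first/last pointer pair) come from the included
 * <linux/io_uring.h>.  wq_list_add_after() links @node right after @pos and
 * updates list->last when @pos was the tail.
 */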
static inline void wq_list_add_after(struct io_wq_work_node *node,
                                     struct io_wq_work_node *pos,
                                     struct io_wq_work_list *list)
{
        struct io_wq_work_node *next = pos->next;

        pos->next = node;
        node->next = next;
        if (!next)
                list->last = node;
}

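/*
 * Append @node at the tail.  list->first is published with WRITE_ONCE() so
 * lockless checks through wq_list_empty() (READ_ONCE) see a consistent
 * pointer.
 */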
static inline void wq_list_add_tail(struct io_wq_work_node *node,
                                    struct io_wq_work_list *list)
{
        if (!list->first) {
                list->last = node;
                WRITE_ONCE(list->first, node);
        } else {
                list->last->next = node;
                list->last = node;
        }
        node->next = NULL;
}

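/*
 * Unlink the segment that ends at @last from the list.  @prev is the node
 * preceding the segment, or NULL when the segment starts at list->first.
 */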
static inline void wq_list_cut(struct io_wq_work_list *list,
                               struct io_wq_work_node *last,
                               struct io_wq_work_node *prev)
{
        /* first in the list, if prev==NULL */
        if (!prev)
                WRITE_ONCE(list->first, last->next);
        else
                prev->next = last->next;

        if (last == list->last)
                list->last = prev;
        last->next = NULL;
}

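/*
 * Remove a single node.  @prev is its predecessor, or NULL if @node is the
 * first entry; a singly linked list cannot look it up on its own.
 */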
static inline void wq_list_del(struct io_wq_work_list *list,
                               struct io_wq_work_node *node,
                               struct io_wq_work_node *prev)
{
        wq_list_cut(list, node, prev);
}

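/*
 * Iteration keeps the previous node in @prv so callers can cut or delete
 * entries mid-walk without rescanning the list.
 */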
#define wq_list_for_each(pos, prv, head)                        \
        for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)     (READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)      do {                            \
        (list)->first = NULL;                                   \
        (list)->last = NULL;                                    \
} while (0)

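/*
 * One unit of queued work.  The submitter embeds this in its own request
 * (io_uring embeds it in struct io_kiocb) and recovers the request with
 * container_of() inside the work callbacks.  A minimal sketch of a
 * hypothetical user:
 *
 *      struct my_req {
 *              struct io_wq_work work;
 *              ...
 *      };
 *
 *      static void my_do_work(struct io_wq_work *work)
 *      {
 *              struct my_req *req = container_of(work, struct my_req, work);
 *              ...
 *      }
 */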
struct io_wq_work {
        struct io_wq_work_node list;
        const struct cred *creds;
        unsigned flags;
};

static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
        if (!work->list.next)
                return NULL;

        return container_of(work->list.next, struct io_wq_work, list);
}

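/*
 * Callbacks supplied by the creator: io_wq_work_fn runs one work item;
 * free_work_fn releases a completed item and, as its signature suggests,
 * may hand back a follow-up work item to process (or NULL).
 */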
typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

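/*
 * Refcounted state for serialising hashed work: @map tracks hash keys that
 * are currently in flight and @wait is woken when one is released.  The
 * refcount lets more than one io_wq share the same hash; io_wq_put_hash()
 * drops a reference and frees the structure on the final put.
 */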
struct io_wq_hash {
        refcount_t refs;
        unsigned long map;
        struct wait_queue_head wait;
};

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
        if (refcount_dec_and_test(&hash->refs))
                kfree(hash);
}

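/*
 * Creation parameters for io_wq_create(): the shared hash plus the two
 * callbacks above.
 */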
struct io_wq_data {
        struct io_wq_hash *hash;
        io_wq_work_fn *do_work;
        free_work_fn *free_work;
};

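/*
 * io_wq_create() allocates a workqueue running at most @bounded workers for
 * work not marked IO_WQ_WORK_UNBOUND.  io_wq_put() drops a reference;
 * io_wq_put_and_exit() additionally signals the workers to exit before
 * dropping it.
 */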
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_put(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

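/*
 * io_wq_enqueue() queues @work for execution by a worker.  io_wq_hash_work()
 * tags @work with a hash key derived from @val (stored in the upper bits of
 * work->flags, see IO_WQ_HASH_SHIFT) so that items hashed to the same key
 * run one at a time; io_uring uses this to serialise writes to the same file.
 */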
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
        return work->flags & IO_WQ_WORK_HASHED;
}

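/*
 * Cancellation: the callback decides whether a given work item matches
 * @data.  With @cancel_all set, every match is cancelled instead of stopping
 * at the first one.
 */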
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
                                  void *data, bool cancel_all);

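/*
 * Hooks invoked by the scheduler when an io-wq worker (PF_IO_WORKER task)
 * is about to sleep or starts running again, so io-wq can keep enough
 * workers active; they compile away when CONFIG_IO_WQ is not set.
 */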
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

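/*
 * True only when called from an io-wq worker task: PF_IO_WORKER is set and
 * the worker state is attached via current->pf_io_worker.
 */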
static inline bool io_wq_current_is_worker(void)
{
        return in_task() && (current->flags & PF_IO_WORKER) &&
                current->pf_io_worker;
}
#endif