/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

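/* sk_msg_sg tracks message payload as a small ring of scatterlist entries:
 * start/curr/end index into data[] and wrap at NR_MSG_FRAG_IDS, size counts
 * the payload bytes, and a set bit in copy[] marks an element whose memory
 * must be copied rather than handed to BPF directly (see
 * sk_msg_compute_data_pointers() below).
 */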
struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and back sections when the list
	 *    becomes partitioned (e.g. end < start). The crypto APIs require
	 *    the chaining;
	 * 2) to chain tailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	struct sk_buff			*skb;
	u32				len;
	u32				off;
};

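/* One sk_psock is attached per socket (reachable via sk_user_data, see
 * sk_psock() below). It holds the attached BPF programs, the ingress skb/msg
 * queues for redirected data, the saved protocol callbacks restored on
 * detach, and a refcount that keeps it alive while in use.
 */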
struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct work_struct		work;
	struct rcu_work			rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

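/* Example (illustrative only; assumes MAX_SKB_FRAGS == 17, so
 * NR_MSG_FRAG_IDS == 18): with a wrapped ring where start == 16 and
 * end == 2, sk_msg_iter_dist(16, 2) == 2 + (18 - 16) == 4 elements in use.
 */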
#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length = size;
	dst->sg.size += size;
	src->sg.size -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

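/* The pointers computed below back the data/data_end window that a
 * BPF_PROG_TYPE_SK_MSG program sees (the UAPI dependency noted above
 * struct sk_msg); both stay NULL while the start element has its copy
 * bit set.
 */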
static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

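/* Append {page, offset, len} at sg.end: take a page reference, fill the
 * scatterlist element, set its copy bit, grow sg.size and advance sg.end
 * around the ring.
 */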
static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

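/* sk_psock() is an RCU dereference of sk->sk_user_data; call it under
 * rcu_read_lock() (or with the socket lock held) and use sk_psock_get()
 * below when a reference that outlives the RCU section is needed.
 */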
static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk,
							 SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
		list_add_tail(&msg->list, &psock->ingress_msg);
	else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

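/* The ingress queue helpers above all serialize on psock->ingress_lock.
 * sk_psock_queue_msg() only accepts a message while SK_PSOCK_TX_ENABLED is
 * set; otherwise the message is freed on the spot, so producers need no
 * separate state re-check after queueing.
 */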
static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock, bool wait);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

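/* Typical refcounted access pattern (illustrative sketch only):
 *
 *	struct sk_psock *psock = sk_psock_get(sk);
 *
 *	if (psock) {
 *		... use psock ...
 *		sk_psock_put(sk, psock);
 *	}
 *
 * sk_psock_get() returns NULL both when no psock is attached and when the
 * attached psock is already draining (its refcount has hit zero).
 */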
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

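/* psock_set_prog() unconditionally installs @prog (NULL detaches) and drops
 * the reference to whatever was installed before; psock_replace_prog() only
 * succeeds if @old is still current, returning -ENOENT otherwise, which
 * gives callers compare-and-swap style updates.
 */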
static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

static inline bool sk_is_udp(const struct sock *sk)
{
	return sk->sk_type == SOCK_DGRAM &&
	       sk->sk_protocol == IPPROTO_UDP;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)

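/* skb->_sk_redir carries the redirect socket pointer with the BPF_F_INGRESS
 * and BPF_F_STRPARSER flags folded into its two low bits; socket pointers
 * are at least word aligned, so those bits are otherwise zero and
 * BPF_F_PTR_MASK recovers the pointer in skb_bpf_redirect_fetch().
 */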
static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */