// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <trace/events/sock.h>

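/* A sk_msg carries its payload in a ring of scatterlist entries
 * (sg.data), with sg.start/sg.end bounding the occupied region and
 * sg.curr/sg.copybreak tracking copy progress; end < start means the
 * ring has wrapped. The helper below decides whether newly allocated
 * bytes may be coalesced into the last occupied entry.
 */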
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
        if (msg->sg.end > msg->sg.start &&
            elem_first_coalesce < msg->sg.end)
                return true;

        if (msg->sg.end < msg->sg.start &&
            (elem_first_coalesce > msg->sg.start ||
             elem_first_coalesce < msg->sg.end))
                return true;

        return false;
}

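/* Grow @msg to hold @len bytes, allocating from the socket's page_frag
 * cache. Contiguous bytes in the same page are coalesced into the last
 * sg entry when allowed; otherwise a fresh ring slot is consumed. On
 * failure the message is trimmed back to its original size.
 */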
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
                 int elem_first_coalesce)
{
        struct page_frag *pfrag = sk_page_frag(sk);
        u32 osize = msg->sg.size;
        int ret = 0;

        len -= msg->sg.size;
        while (len > 0) {
                struct scatterlist *sge;
                u32 orig_offset;
                int use, i;

                if (!sk_page_frag_refill(sk, pfrag)) {
                        ret = -ENOMEM;
                        goto msg_trim;
                }

                orig_offset = pfrag->offset;
                use = min_t(int, len, pfrag->size - orig_offset);
                if (!sk_wmem_schedule(sk, use)) {
                        ret = -ENOMEM;
                        goto msg_trim;
                }

                i = msg->sg.end;
                sk_msg_iter_var_prev(i);
                sge = &msg->sg.data[i];

                if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
                    sg_page(sge) == pfrag->page &&
                    sge->offset + sge->length == orig_offset) {
                        sge->length += use;
                } else {
                        if (sk_msg_full(msg)) {
                                ret = -ENOSPC;
                                break;
                        }

                        sge = &msg->sg.data[msg->sg.end];
                        sg_unmark_end(sge);
                        sg_set_page(sge, pfrag->page, use, orig_offset);
                        get_page(pfrag->page);
                        sk_msg_iter_next(msg, end);
                }

                sk_mem_charge(sk, use);
                msg->sg.size += use;
                pfrag->offset += use;
                len -= use;
        }

        return ret;

msg_trim:
        sk_msg_trim(sk, msg, osize);
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

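/* Share @len bytes starting at @off from @src into @dst without copying:
 * pages are reference counted via sk_msg_page_add() and the bytes are
 * charged to @sk. Returns -ENOSPC if @src runs out of data or @dst runs
 * out of ring slots.
 */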
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
                 u32 off, u32 len)
{
        int i = src->sg.start;
        struct scatterlist *sge = sk_msg_elem(src, i);
        struct scatterlist *sgd = NULL;
        u32 sge_len, sge_off;

        while (off) {
                if (sge->length > off)
                        break;
                off -= sge->length;
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && off)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        while (len) {
                sge_len = sge->length - off;
                if (sge_len > len)
                        sge_len = len;

                if (dst->sg.end)
                        sgd = sk_msg_elem(dst, dst->sg.end - 1);

                if (sgd &&
                    (sg_page(sge) == sg_page(sgd)) &&
                    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
                        sgd->length += sge_len;
                        dst->sg.size += sge_len;
                } else if (!sk_msg_full(dst)) {
                        sge_off = sge->offset + off;
                        sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
                } else {
                        return -ENOSPC;
                }

                off = 0;
                len -= sge_len;
                sk_mem_charge(sk, sge_len);
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && len)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

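/* Hand send-queue memory charges back to the socket. sk_msg_return_zero
 * also consumes the returned bytes by advancing the sg entries past
 * them; sk_msg_return only uncharges the accounting.
 */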
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = sk_msg_elem(msg, i);

                if (bytes < sge->length) {
                        sge->length -= bytes;
                        sge->offset += bytes;
                        sk_mem_uncharge(sk, bytes);
                        break;
                }

                sk_mem_uncharge(sk, sge->length);
                bytes -= sge->length;
                sge->length = 0;
                sge->offset = 0;
                sk_msg_iter_var_next(i);
        } while (bytes && i != msg->sg.end);
        msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = &msg->sg.data[i];
                int uncharge = (bytes < sge->length) ? bytes : sge->length;

                sk_mem_uncharge(sk, uncharge);
                bytes -= uncharge;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
                            bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        u32 len = sge->length;

        /* When the skb owns the memory we free it from consume_skb path. */
        if (!msg->skb) {
                if (charge)
                        sk_mem_uncharge(sk, len);
                put_page(sg_page(sge));
        }
        memset(sge, 0, sizeof(*sge));
        return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
                         bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        int freed = 0;

        while (msg->sg.size) {
                msg->sg.size -= sge->length;
                freed += sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, msg->sg.size);
                sge = sk_msg_elem(msg, i);
        }
        consume_skb(msg->skb);
        sk_msg_init(msg);
        return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

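/* Free only the first @bytes of @msg: fully consumed entries are
 * released and sg.start advanced past them; a partially consumed entry
 * is shrunk in place.
 */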
static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes, bool charge)
{
        struct scatterlist *sge;
        u32 i = msg->sg.start;

        while (bytes) {
                sge = sk_msg_elem(msg, i);
                if (!sge->length)
                        break;
                if (bytes < sge->length) {
                        if (charge)
                                sk_mem_uncharge(sk, bytes);
                        sge->length -= bytes;
                        sge->offset += bytes;
                        msg->sg.size -= bytes;
                        break;
                }

                msg->sg.size -= sge->length;
                bytes -= sge->length;
                sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, bytes);
        }
        msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, false);
}

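/* Trim @msg down to @len bytes from the tail, releasing whole sg
 * entries where possible and uncharging the freed memory. sg.curr and
 * sg.copybreak are pulled back when they point into the trimmed region.
 */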
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
        int trim = msg->sg.size - len;
        u32 i = msg->sg.end;

        if (trim <= 0) {
                WARN_ON(trim < 0);
                return;
        }

        sk_msg_iter_var_prev(i);
        msg->sg.size = len;
        while (msg->sg.data[i].length &&
               trim >= msg->sg.data[i].length) {
                trim -= msg->sg.data[i].length;
                sk_msg_free_elem(sk, msg, i, true);
                sk_msg_iter_var_prev(i);
                if (!trim)
                        goto out;
        }

        msg->sg.data[i].length -= trim;
        sk_mem_uncharge(sk, trim);
        /* Adjust copybreak if it falls into the trimmed part of last buf */
        if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
                msg->sg.copybreak = msg->sg.data[i].length;
out:
        sk_msg_iter_var_next(i);
        msg->sg.end = i;

        /* If we trimmed a full sg elem before the curr pointer, update
         * copybreak and curr so that any future copy operations start at
         * the new copy location.
         * Trimmed data that has not yet been used in a copy op does not
         * require an update.
         */
        if (!msg->sg.size) {
                msg->sg.curr = msg->sg.start;
                msg->sg.copybreak = 0;
        } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
                   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
                sk_msg_iter_var_prev(i);
                msg->sg.curr = i;
                msg->sg.copybreak = msg->sg.data[i].length;
        }
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

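/* Pin user pages from @from directly into @msg's sg ring: memory is
 * charged but the payload is never copied. On error the iterator is
 * reverted; the caller must trim @msg itself if the partially built
 * message should also be dropped.
 */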
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes)
{
        int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
        const int to_max_pages = MAX_MSG_FRAGS;
        struct page *pages[MAX_MSG_FRAGS];
        ssize_t orig, copied, use, offset;

        orig = msg->sg.size;
        while (bytes > 0) {
                i = 0;
                maxpages = to_max_pages - num_elems;
                if (maxpages == 0) {
                        ret = -EFAULT;
                        goto out;
                }

                copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
                                             &offset);
                if (copied <= 0) {
                        ret = -EFAULT;
                        goto out;
                }

                bytes -= copied;
                msg->sg.size += copied;

                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);
                        sg_set_page(&msg->sg.data[msg->sg.end],
                                    pages[i], use, offset);
                        sg_unmark_end(&msg->sg.data[msg->sg.end]);
                        sk_mem_charge(sk, use);

                        offset = 0;
                        copied -= use;
                        sk_msg_iter_next(msg, end);
                        num_elems++;
                        i++;
                }
                /* When zerocopy is mixed with sk_msg_*copy* operations we
                 * may have a copybreak set; in that case clear it and
                 * prefer the zerocopy remainder when possible.
                 */
                msg->sg.copybreak = 0;
                msg->sg.curr = msg->sg.end;
        }
out:
        /* Revert iov_iter updates; the msg will need to use 'trim' later
         * if it also needs to be cleared.
         */
        if (ret)
                iov_iter_revert(from, msg->sg.size - orig);
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

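/* Copy @bytes from @from into sg entries that sk_msg_alloc() already
 * populated, resuming at sg.curr/sg.copybreak where a previous copy
 * left off.
 */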
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                             struct sk_msg *msg, u32 bytes)
{
        int ret = -ENOSPC, i = msg->sg.curr;
        u32 copy, buf_size, copied = 0;
        struct scatterlist *sge;
        void *to;

        do {
                sge = sk_msg_elem(msg, i);
                /* This is possible if a trim operation shrunk the buffer */
                if (msg->sg.copybreak >= sge->length) {
                        msg->sg.copybreak = 0;
                        sk_msg_iter_var_next(i);
                        if (i == msg->sg.end)
                                break;
                        sge = sk_msg_elem(msg, i);
                }

                buf_size = sge->length - msg->sg.copybreak;
                copy = (buf_size > bytes) ? bytes : buf_size;
                to = sg_virt(sge) + msg->sg.copybreak;
                msg->sg.copybreak += copy;
                if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
                        ret = copy_from_iter_nocache(to, copy, from);
                else
                        ret = copy_from_iter(to, copy, from);
                if (ret != copy) {
                        ret = -EFAULT;
                        goto out;
                }
                bytes -= copy;
                copied += copy;
                if (!bytes)
                        break;
                msg->sg.copybreak = 0;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
        msg->sg.curr = i;
out:
        return (ret < 0) ? ret : copied;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
                   int len, int flags)
{
        struct iov_iter *iter = &msg->msg_iter;
        int peek = flags & MSG_PEEK;
        struct sk_msg *msg_rx;
        int i, copied = 0;

        msg_rx = sk_psock_peek_msg(psock);
        while (copied != len) {
                struct scatterlist *sge;

                if (unlikely(!msg_rx))
                        break;

                i = msg_rx->sg.start;
                do {
                        struct page *page;
                        int copy;

                        sge = sk_msg_elem(msg_rx, i);
                        copy = sge->length;
                        page = sg_page(sge);
                        if (copied + copy > len)
                                copy = len - copied;
                        copy = copy_page_to_iter(page, sge->offset, copy, iter);
                        if (!copy) {
                                copied = copied ? copied : -EFAULT;
                                goto out;
                        }

                        copied += copy;
                        if (likely(!peek)) {
                                sge->offset += copy;
                                sge->length -= copy;
                                if (!msg_rx->skb) {
                                        sk_mem_uncharge(sk, copy);
                                        atomic_sub(copy, &sk->sk_rmem_alloc);
                                }
                                msg_rx->sg.size -= copy;

                                if (!sge->length) {
                                        sk_msg_iter_var_next(i);
                                        if (!msg_rx->skb)
                                                put_page(page);
                                }
                        } else {
                                /* Let's not optimize the peek case: if
                                 * copy_page_to_iter didn't copy the entire
                                 * length, just break out.
                                 */
                                if (copy != sge->length)
                                        goto out;
                                sk_msg_iter_var_next(i);
                        }

                        if (copied == len)
                                break;
                } while ((i != msg_rx->sg.end) && !sg_is_last(sge));

                if (unlikely(peek)) {
                        msg_rx = sk_psock_next_msg(psock, msg_rx);
                        if (!msg_rx)
                                break;
                        continue;
                }

                msg_rx->sg.start = i;
                if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
                        msg_rx = sk_psock_dequeue_msg(psock);
                        kfree_sk_msg(msg_rx);
                }
                msg_rx = sk_psock_peek_msg(psock);
        }
out:
        return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
        struct sk_psock *psock;
        bool empty = true;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (psock)
                empty = list_empty(&psock->ingress_msg);
        rcu_read_unlock();
        return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

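/* Ingress path: skbs accepted or redirected by a BPF verdict program
 * are wrapped in a sk_msg and queued on psock->ingress_msg, where
 * sk_msg_recvmsg() picks them up.
 */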
static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
        struct sk_msg *msg;

        msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
        if (unlikely(!msg))
                return NULL;
        sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
        return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
                                                  struct sk_buff *skb)
{
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                return NULL;

        if (!sk_rmem_schedule(sk, skb, skb->truesize))
                return NULL;

        return alloc_sk_msg(GFP_KERNEL);
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
                                        u32 off, u32 len,
                                        struct sk_psock *psock,
                                        struct sock *sk,
                                        struct sk_msg *msg)
{
        int num_sge, copied;

        num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
        if (num_sge < 0) {
                /* skb linearize may fail with ENOMEM, but let's simply try
                 * again later if this happens. Under memory pressure we
                 * don't want to drop the skb. We need to linearize the skb
                 * so that the mapping in skb_to_sgvec cannot error.
                 */
                if (skb_linearize(skb))
                        return -EAGAIN;

                num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
                if (unlikely(num_sge < 0))
                        return num_sge;
        }

        copied = len;
        msg->sg.start = 0;
        msg->sg.size = copied;
        msg->sg.end = num_sge;
        msg->skb = skb;

        sk_psock_queue_msg(psock, msg);
        sk_psock_data_ready(sk, psock);
        return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
                                     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
                                u32 off, u32 len)
{
        struct sock *sk = psock->sk;
        struct sk_msg *msg;
        int err;

        /* If we are receiving on the same sock skb->sk is already assigned,
         * skip memory accounting and owner transition seeing it already set
         * correctly.
         */
        if (unlikely(skb->sk == sk))
                return sk_psock_skb_ingress_self(psock, skb, off, len);
        msg = sk_psock_create_ingress_msg(sk, skb);
        if (!msg)
                return -EAGAIN;

        /* This will transition ownership of the data from the socket where
         * the BPF program was run initiating the redirect to the socket
         * we will eventually receive this data on. The data will be released
         * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
         * copied into user buffers.
         */
        skb_set_owner_r(skb, sk);
        err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
        if (err < 0)
                kfree(msg);
        return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
                                     u32 off, u32 len)
{
        struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
        struct sock *sk = psock->sk;
        int err;

        if (unlikely(!msg))
                return -EAGAIN;
        skb_set_owner_r(skb, sk);
        err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
        if (err < 0)
                kfree(msg);
        return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
                               u32 off, u32 len, bool ingress)
{
        int err = 0;

        if (!ingress) {
                if (!sock_writeable(psock->sk))
                        return -EAGAIN;
                return skb_send_sock(psock->sk, skb, off, len);
        }
        skb_get(skb);
        err = sk_psock_skb_ingress(psock, skb, off, len);
        if (err < 0)
                kfree_skb(skb);
        return err;
}

static void sk_psock_skb_state(struct sk_psock *psock,
                               struct sk_psock_work_state *state,
                               int len, int off)
{
        spin_lock_bh(&psock->ingress_lock);
        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                state->len = len;
                state->off = off;
        }
        spin_unlock_bh(&psock->ingress_lock);
}

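/* Backlog worker: drain psock->ingress_skb, transmitting each skb for
 * the egress direction or moving it onto the ingress_msg queue. On
 * -EAGAIN progress is saved in work_state and the work is rescheduled
 * with a small delay; any other error tears down the pipe.
 */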
static void sk_psock_backlog(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
        struct sk_buff *skb = NULL;
        u32 len = 0, off = 0;
        bool ingress;
        int ret;

        mutex_lock(&psock->work_mutex);
        if (unlikely(state->len)) {
                len = state->len;
                off = state->off;
        }

        while ((skb = skb_peek(&psock->ingress_skb))) {
                len = skb->len;
                off = 0;
                if (skb_bpf_strparser(skb)) {
                        struct strp_msg *stm = strp_msg(skb);

                        off = stm->offset;
                        len = stm->full_len;
                }
                ingress = skb_bpf_ingress(skb);
                skb_bpf_redirect_clear(skb);
                do {
                        ret = -EIO;
                        if (!sock_flag(psock->sk, SOCK_DEAD))
                                ret = sk_psock_handle_skb(psock, skb, off,
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
                                        sk_psock_skb_state(psock, state, len, off);

                                        /* Delay slightly to prioritize any
                                         * other work that might be here.
                                         */
                                        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                                                schedule_delayed_work(&psock->work, 1);
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
                                sk_psock_report_error(psock, ret ? -ret : EPIPE);
                                sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
                                goto end;
                        }
                        off += ret;
                        len -= ret;
                } while (len);

                skb = skb_dequeue(&psock->ingress_skb);
                kfree_skb(skb);
        }
end:
        mutex_unlock(&psock->work_mutex);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
        struct sk_psock *psock;
        struct proto *prot;

        write_lock_bh(&sk->sk_callback_lock);

        if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
                psock = ERR_PTR(-EINVAL);
                goto out;
        }

        if (sk->sk_user_data) {
                psock = ERR_PTR(-EBUSY);
                goto out;
        }

        psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
        if (!psock) {
                psock = ERR_PTR(-ENOMEM);
                goto out;
        }

        prot = READ_ONCE(sk->sk_prot);
        psock->sk = sk;
        psock->eval = __SK_NONE;
        psock->sk_proto = prot;
        psock->saved_unhash = prot->unhash;
        psock->saved_destroy = prot->destroy;
        psock->saved_close = prot->close;
        psock->saved_write_space = sk->sk_write_space;

        INIT_LIST_HEAD(&psock->link);
        spin_lock_init(&psock->link_lock);

        INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
        mutex_init(&psock->work_mutex);
        INIT_LIST_HEAD(&psock->ingress_msg);
        spin_lock_init(&psock->ingress_lock);
        skb_queue_head_init(&psock->ingress_skb);

        sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
        refcount_set(&psock->refcnt, 1);

        __rcu_assign_sk_user_data_with_flags(sk, psock,
                                             SK_USER_DATA_NOCOPY |
                                             SK_USER_DATA_PSOCK);
        sock_hold(sk);

out:
        write_unlock_bh(&sk->sk_callback_lock);
        return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
        struct sk_psock_link *link;

        spin_lock_bh(&psock->link_lock);
        link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
                                        list);
        if (link)
                list_del(&link->list);
        spin_unlock_bh(&psock->link_lock);
        return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
        struct sk_msg *msg, *tmp;

        list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
                list_del(&msg->list);
                if (!msg->skb)
                        atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
                sk_msg_free(psock->sk, msg);
                kfree(msg);
        }
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
                skb_bpf_redirect_clear(skb);
                sock_drop(psock->sk, skb);
        }
        __sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
        struct sk_psock_link *link, *tmp;

        list_for_each_entry_safe(link, tmp, &psock->link, list) {
                list_del(&link->list);
                sk_psock_free_link(link);
        }
}

void sk_psock_stop(struct sk_psock *psock)
{
        spin_lock_bh(&psock->ingress_lock);
        sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
        sk_psock_cork_free(psock);
        spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
        struct sk_psock *psock = container_of(to_rcu_work(work),
                                              struct sk_psock, rwork);
        /* No sk_callback_lock since already detached. */

        sk_psock_done_strp(psock);

        cancel_delayed_work_sync(&psock->work);
        __sk_psock_zap_ingress(psock);
        mutex_destroy(&psock->work_mutex);

        psock_progs_drop(&psock->progs);

        sk_psock_link_destroy(psock);
        sk_psock_cork_free(psock);

        if (psock->sk_redir)
                sock_put(psock->sk_redir);
        if (psock->sk_pair)
                sock_put(psock->sk_pair);
        sock_put(psock->sk);
        kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
        write_lock_bh(&sk->sk_callback_lock);
        sk_psock_restore_proto(sk, psock);
        rcu_assign_sk_user_data(sk, NULL);
        if (psock->progs.stream_parser)
                sk_psock_stop_strp(sk, psock);
        else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
                sk_psock_stop_verdict(sk, psock);
        write_unlock_bh(&sk->sk_callback_lock);

        sk_psock_stop(psock);

        INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
        queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

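/* Map a BPF program's SK_PASS/SK_DROP return code onto the internal
 * actions: SK_PASS with a recorded redirect target becomes
 * __SK_REDIRECT, plain SK_PASS stays __SK_PASS, and everything else is
 * __SK_DROP.
 */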
static int sk_psock_map_verd(int verdict, bool redir)
{
        switch (verdict) {
        case SK_PASS:
                return redir ? __SK_REDIRECT : __SK_PASS;
        case SK_DROP:
        default:
                break;
        }

        return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
                         struct sk_msg *msg)
{
        struct bpf_prog *prog;
        int ret;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.msg_parser);
        if (unlikely(!prog)) {
                ret = __SK_PASS;
                goto out;
        }

        sk_msg_compute_data_pointers(msg);
        msg->sk = sk;
        ret = bpf_prog_run_pin_on_cpu(prog, msg);
        ret = sk_psock_map_verd(ret, msg->sk_redir);
        psock->apply_bytes = msg->apply_bytes;
        if (ret == __SK_REDIRECT) {
                if (psock->sk_redir) {
                        sock_put(psock->sk_redir);
                        psock->sk_redir = NULL;
                }
                if (!msg->sk_redir) {
                        ret = __SK_DROP;
                        goto out;
                }
                psock->redir_ingress = sk_msg_to_ingress(msg);
                psock->sk_redir = msg->sk_redir;
                sock_hold(psock->sk_redir);
        }
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
        struct sk_psock *psock_other;
        struct sock *sk_other;

        sk_other = skb_bpf_redirect_fetch(skb);
        /* This error indicates a buggy BPF program: it returned a redirect
         * verdict but then didn't set a redirect target.
         */
        if (unlikely(!sk_other)) {
                skb_bpf_redirect_clear(skb);
                sock_drop(from->sk, skb);
                return -EIO;
        }
        psock_other = sk_psock(sk_other);
        /* This error indicates the socket is being torn down or had another
         * error that caused the pipe to break. We can't send a packet on
         * a socket that is in this state so we drop the skb.
         */
        if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
                skb_bpf_redirect_clear(skb);
                sock_drop(from->sk, skb);
                return -EIO;
        }
        spin_lock_bh(&psock_other->ingress_lock);
        if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
                spin_unlock_bh(&psock_other->ingress_lock);
                skb_bpf_redirect_clear(skb);
                sock_drop(from->sk, skb);
                return -EIO;
        }

        skb_queue_tail(&psock_other->ingress_skb, skb);
        schedule_delayed_work(&psock_other->work, 0);
        spin_unlock_bh(&psock_other->ingress_lock);
        return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
                                       struct sk_psock *from, int verdict)
{
        switch (verdict) {
        case __SK_REDIRECT:
                sk_psock_skb_redirect(from, skb);
                break;
        case __SK_PASS:
        case __SK_DROP:
        default:
                break;
        }
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
        struct bpf_prog *prog;
        int ret = __SK_PASS;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (likely(prog)) {
                skb->sk = psock->sk;
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
        sk_psock_tls_verdict_apply(skb, psock, ret);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

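/* Apply a verdict to an skb on the local socket: __SK_PASS queues it
 * for ingress (directly when ingress_skb is empty, otherwise via the
 * backlog worker to preserve ordering), __SK_REDIRECT hands it to the
 * chosen peer socket, and anything else drops it.
 */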
static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
                                  int verdict)
{
        struct sock *sk_other;
        int err = 0;
        u32 len, off;

        switch (verdict) {
        case __SK_PASS:
                err = -EIO;
                sk_other = psock->sk;
                if (sock_flag(sk_other, SOCK_DEAD) ||
                    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        goto out_free;

                skb_bpf_set_ingress(skb);

                /* If the queue is empty then we can submit directly
                 * into the msg queue. If it is not empty we have to
                 * queue work, otherwise we may get out-of-order data.
                 * Errors from sk_psock_skb_ingress are handled by
                 * retrying later from the workqueue.
                 */
                if (skb_queue_empty(&psock->ingress_skb)) {
                        len = skb->len;
                        off = 0;
                        if (skb_bpf_strparser(skb)) {
                                struct strp_msg *stm = strp_msg(skb);

                                off = stm->offset;
                                len = stm->full_len;
                        }
                        err = sk_psock_skb_ingress_self(psock, skb, off, len);
                }
                if (err < 0) {
                        spin_lock_bh(&psock->ingress_lock);
                        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                                skb_queue_tail(&psock->ingress_skb, skb);
                                schedule_delayed_work(&psock->work, 0);
                                err = 0;
                        }
                        spin_unlock_bh(&psock->ingress_lock);
                        if (err < 0)
                                goto out_free;
                }
                break;
        case __SK_REDIRECT:
                tcp_eat_skb(psock->sk, skb);
                err = sk_psock_skb_redirect(psock, skb);
                break;
        case __SK_DROP:
        default:
out_free:
                skb_bpf_redirect_clear(skb);
                tcp_eat_skb(psock->sk, skb);
                sock_drop(psock->sk, skb);
        }

        return err;
}

static void sk_psock_write_space(struct sock *sk)
{
        struct sk_psock *psock;
        void (*write_space)(struct sock *sk) = NULL;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        schedule_delayed_work(&psock->work, 0);
                write_space = psock->saved_write_space;
        }
        rcu_read_unlock();
        if (write_space)
                write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        struct sock *sk;

        rcu_read_lock();
        sk = strp->sk;
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                sock_drop(sk, skb);
                goto out;
        }
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (likely(prog)) {
                skb->sk = sk;
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                skb_bpf_set_strparser(skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
        sk_psock_verdict_apply(psock, skb, ret);
out:
        rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
        return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
        struct bpf_prog *prog;
        int ret = skb->len;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.stream_parser);
        if (likely(prog)) {
                skb->sk = psock->sk;
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                skb->sk = NULL;
        }
        rcu_read_unlock();
        return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
        struct sk_psock *psock;

        trace_sk_data_ready(sk);

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (tls_sw_has_ctx_rx(sk)) {
                        psock->saved_data_ready(sk);
                } else {
                        read_lock_bh(&sk->sk_callback_lock);
                        strp_data_ready(&psock->strp);
                        read_unlock_bh(&sk->sk_callback_lock);
                }
        }
        rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
        int ret;

        static const struct strp_callbacks cb = {
                .rcv_msg        = sk_psock_strp_read,
                .read_sock_done = sk_psock_strp_read_done,
                .parse_msg      = sk_psock_strp_parse,
        };

        ret = strp_init(&psock->strp, sk, &cb);
        if (!ret)
                sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
        return ret;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
        if (psock->saved_data_ready)
                return;

        psock->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_strp_data_ready;
        sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
        psock_set_prog(&psock->progs.stream_parser, NULL);

        if (!psock->saved_data_ready)
                return;

        sk->sk_data_ready = psock->saved_data_ready;
        psock->saved_data_ready = NULL;
        strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
        /* Parser has been stopped */
        if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
                strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

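/* Verdict-only mode (no stream parser): the data_ready hook below uses
 * the protocol's read_skb() to feed every skb to the verdict program.
 */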
static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        int len = skb->len;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                len = 0;
                tcp_eat_skb(sk, skb);
                sock_drop(sk, skb);
                goto out;
        }
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (!prog)
                prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
        }
        ret = sk_psock_verdict_apply(psock, skb, ret);
        if (ret < 0)
                len = ret;
out:
        rcu_read_unlock();
        return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
        struct socket *sock = sk->sk_socket;
        const struct proto_ops *ops;
        int copied;

        trace_sk_data_ready(sk);

        if (unlikely(!sock))
                return;
        ops = READ_ONCE(sock->ops);
        if (!ops || !ops->read_skb)
                return;
        copied = ops->read_skb(sk, sk_psock_verdict_recv);
        if (copied >= 0) {
                struct sk_psock *psock;

                rcu_read_lock();
                psock = sk_psock(sk);
                if (psock)
                        sk_psock_data_ready(sk, psock);
                rcu_read_unlock();
        }
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
        if (psock->saved_data_ready)
                return;

        psock->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_verdict_data_ready;
        sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
        psock_set_prog(&psock->progs.stream_verdict, NULL);
        psock_set_prog(&psock->progs.skb_verdict, NULL);

        if (!psock->saved_data_ready)
                return;

        sk->sk_data_ready = psock->saved_data_ready;
        psock->saved_data_ready = NULL;
}