// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>

#if IS_ENABLED(CONFIG_PSAMPLE)
#include <net/psample.h>
#endif

#include <net/sctp/checksum.h>

#include "datapath.h"
#include "drop.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};
#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};
static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)

struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};
struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};
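/* Sizing note: exec_actions_level counts nesting depth starting at 1, so
 * with OVS_RECURSION_LIMIT == 5 and OVS_DEFERRED_ACTION_THRESHOLD == 3,
 * levels 1-3 can take a per-CPU key slot (key[level - 1] in clone_key()
 * below) while deeper levels must fall back to the deferred-action FIFO.
 */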
static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);
/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key spaces.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}
static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
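/* The indices only move forward: the FIFO is drained and then reset via
 * action_fifo_init() once per packet.  Since action_fifo_put() refuses
 * entries once head reaches DEFERRED_ACTION_FIFO_SIZE - 1, at most nine
 * actions can be deferred for a single packet with the current sizing.
 */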
/* Return the queued entry, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
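/* Illustrative example, assuming OVS_SET_MASKED(old, new, mask) keeps the
 * old bits wherever the mask is clear (with 'src' pre-masked by the caller):
 *
 *	dst  = 00:11:22:33:44:55
 *	src  = aa:bb:00:00:00:00
 *	mask = ff:ff:00:00:00:00
 *	=>     aa:bb:22:33:44:55
 *
 * Doing the copy as three u16 chunks keeps this cheap on the fast path.
 */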
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}
/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}
static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	int err;

	err = skb_eth_push(skb, ethh->addresses.eth_dst,
			   ethh->addresses.eth_src);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}
static noinline_for_stack int push_nsh(struct sk_buff *skb,
				       struct sw_flow_key *key,
				       const struct nlattr *a)
{
	u8 buffer[NSH_HDR_MAX_LEN];
	struct nshhdr *nh = (struct nshhdr *)buffer;
	int err;

	err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
	if (err)
		return err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}
static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	*addr = new_addr;
}
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}
static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
			     (__force __wsum)(ipv6_tclass << 12));

	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}
static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
	u32 ofl;

	ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
	fl = OVS_MASKED(ofl, fl, mask);

	/* Bits 21-24 are always unmasked, so this retains their values. */
	nh->flow_lbl[0] = (u8)(fl >> 16);
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[2] = (u8)fl;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}
static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}
static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				       length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
			    OVS_MASKED(nh->md1.context[i], key.context[i],
				       mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}
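/* A zero UDP checksum over IPv4 means "no checksum", which is why the
 * incremental fixup above runs only when a real checksum is present and the
 * skb is not CHECKSUM_PARTIAL.  A recomputed value that folds to zero is
 * stored as CSUM_MANGLED_0 (0xffff) so it is not mistaken for "no checksum".
 */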
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
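/* Worked example of the xor trick above, by plain xor algebra: if the packet
 * arrived with checksum C = correct_old ^ delta for some error term delta,
 * the stored result is C ^ correct_old ^ correct_new = correct_new ^ delta.
 * A packet that was already corrupt stays exactly as corrupt after the port
 * rewrite, rather than being silently repaired.
 */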
static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};
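/* The IPv4/IPv6 fragmentation paths size fragments from the dst attached to
 * the skb (further capped by frag_max_size below).  OVS has no real route at
 * this point, so ovs_fragment() attaches a minimal on-stack dst whose only
 * meaningful operation is .mtu, resolved here to the egress device's MTU.
 */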
/* prepare_frag() is called once per larger-than-MTU frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	enum ovs_drop_reason reason;
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		reason = OVS_DROP_FRAG_L2_TOO_LONG;
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		reason = OVS_DROP_FRAG_INVALID_PROTO;
		goto err;
	}

	return;
err:
	ovs_kfree_skb_reason(skb, reason);
}
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport && netif_carrier_ok(vport->dev))) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		/* Need to set the pkt_type to involve the routing layer. The
		 * packet movement through the OVS datapath doesn't generally
		 * use routing, but this is needed for tunnel cases.
		 */
		skb->pkt_type = PACKET_OUTGOING;

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
		}
	} else {
		kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
	}
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			if (dp->user_features &
			    OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
				upcall.portid =
				   ovs_dp_get_upcall_portid(dp,
							    smp_processor_id());
			else
				upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info =
						skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}
static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{
	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
	struct nlattr *actions = nla_data(attr);

	if (nla_len(actions))
		return clone_execute(dp, skb, key, 0, nla_data(actions),
				     nla_len(actions), true, false);

	ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
	return 0;
}
/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	u32 init_probability;
	bool clone_flow_key;
	int err;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);
	init_probability = OVS_CB(skb)->probability;

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || get_random_u32() > arg->probability)) {
		if (last)
			ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
		return 0;
	}

	OVS_CB(skb)->probability = arg->probability;

	clone_flow_key = !arg->exec;
	err = clone_execute(dp, skb, key, 0, actions, rem, last,
			    clone_flow_key);

	if (!last)
		OVS_CB(skb)->probability = init_probability;

	return err;
}
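/* The probability is a uniform draw over the full u32 range: U32_MAX always
 * samples, 0 never samples, and e.g. a value of U32_MAX / 2 samples roughly
 * half of the packets, since execution proceeds only when
 * get_random_u32() <= arg->probability.
 */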
/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
		/* OVS_HASH_ALG_L4 hashing type. */
		hash = skb_get_hash(skb);
	} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
		/* OVS_HASH_ALG_SYM_L4 hashing type. NOTE: this doesn't
		 * extend past an encapsulated header.
		 */
		hash = __skb_get_hash_symmetric(skb);
	}

	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}
/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
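/* Layout assumed by get_mask(): the masked-set attribute carries the key
 * immediately followed by an equally sized mask, so pointer arithmetic
 * ("+ 1" on a pointer of the key's type) lands on the mask:
 *
 *	nla_data(a): [ struct ovs_key_ipv4 key ][ struct ovs_key_ipv4 mask ]
 *
 * e.g. get_mask(a, struct ovs_key_ipv4 *) points just past the key.
 */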
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}
static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}
static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}
#if IS_ENABLED(CONFIG_PSAMPLE)
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct psample_group psample_group = {};
	struct psample_metadata md = {};
	const struct nlattr *a;
	u32 rate;
	int rem;

	nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(a)) {
		case OVS_PSAMPLE_ATTR_GROUP:
			psample_group.group_num = nla_get_u32(a);
			break;

		case OVS_PSAMPLE_ATTR_COOKIE:
			md.user_cookie = nla_data(a);
			md.user_cookie_len = nla_len(a);
			break;
		}
	}

	psample_group.net = ovs_dp_get_net(dp);
	md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
	md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
	md.rate_as_probability = 1;

	rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;

	psample_sample_packet(&psample_group, skb, rate, &md);
}
#else
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{}
#endif
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (trace_ovs_do_execute_action_enabled())
			trace_ovs_do_execute_action(dp, skb, key, a, rem);

		/* Actions that rightfully have to consume the skb should do it
		 * and return directly.
		 */
		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'. In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output.
				 */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}
		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH:
			err = push_nsh(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				ovs_kfree_skb_reason(skb, OVS_DROP_METER);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH)
				return dec_ttl_exception_handler(dp, skb,
								 key, a);
			break;

		case OVS_ACTION_ATTR_DROP: {
			enum ovs_drop_reason reason = nla_get_u32(a)
				? OVS_DROP_EXPLICIT_WITH_ERROR
				: OVS_DROP_EXPLICIT;

			ovs_kfree_skb_reason(skb, reason);
			return 0;
		}

		case OVS_ACTION_ATTR_PSAMPLE:
			execute_psample(dp, skb, a);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;
		}

		if (unlikely(err)) {
			ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
			return err;
		}
	}

	ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
	return 0;
}
/* Execute the actions on the clone of the packet. The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions can not be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action.
		 */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}

		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
					ovs_dp_name(dp), recirc_id);
			}
		}
	}
	return 0;
}
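/* clone_execute() thus has three outcomes, in order of preference: run the
 * actions inline with a cloned key, queue them on the per-CPU deferred FIFO
 * with a copy of the caller's key, or drop with OVS_DROP_DEFERRED_LIMIT when
 * even the FIFO is full.  Only the inline path can report a non-zero error.
 */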
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO when there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}
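/* Only the outermost invocation (level == 1) drains the deferred FIFO, so
 * deferred work queued anywhere in the call tree still runs before this
 * function returns to the caller.  For example, a packet that recirculates
 * three times reaches level 4, where clone_key() starts returning NULL and
 * clone_execute() begins deferring instead of recursing.
 */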
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}
void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}