/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
 */
#ifndef ENA_XDP_H
#define ENA_XDP_H
#include "ena_netdev.h"
#include <linux/bpf_trace.h>
/* The max MTU size is configured to be the ethernet frame size without
 * the overhead of the ethernet header, which can have a VLAN header, and
 * a frame check sequence (FCS).
 * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
 */
#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
			 VLAN_HLEN - XDP_PACKET_HEADROOM - \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
/* True when @index falls inside the adapter's dedicated XDP TX ring range
 * [xdp_first_ring, xdp_first_ring + xdp_num_queues).
 */
#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
	((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
24 enum ENA_XDP_ACTIONS {
27 ENA_XDP_REDIRECT = BIT(1),
31 #define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
33 int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter);
34 void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
35 struct bpf_prog *prog,
36 int first, int count);
37 int ena_xdp_io_poll(struct napi_struct *napi, int budget);
38 int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
39 struct ena_adapter *adapter,
40 struct xdp_frame *xdpf,
42 int ena_xdp_xmit(struct net_device *dev, int n,
43 struct xdp_frame **frames, u32 flags);
44 int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf);
45 int ena_xdp_register_rxq_info(struct ena_ring *rx_ring);
46 void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring);
/* Reasons why attaching an XDP program may be rejected; ENA_XDP_ALLOWED
 * (zero) means attachment is permitted. See ena_xdp_allowed().
 */
enum ena_xdp_errors_t {
	ENA_XDP_ALLOWED = 0,
	ENA_XDP_CURRENT_MTU_TOO_LARGE,
	ENA_XDP_NO_ENOUGH_QUEUES,
};
54 static inline bool ena_xdp_present(struct ena_adapter *adapter)
56 return !!adapter->xdp_bpf_prog;
59 static inline bool ena_xdp_present_ring(struct ena_ring *ring)
61 return !!ring->xdp_bpf_prog;
64 static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
67 return 2 * queues <= adapter->max_num_io_queues;
70 static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
72 enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
74 if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
75 rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
76 else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
77 rc = ENA_XDP_NO_ENOUGH_QUEUES;
82 static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
84 u32 verdict = ENA_XDP_PASS;
85 struct bpf_prog *xdp_prog;
86 struct ena_ring *xdp_ring;
87 struct xdp_frame *xdpf;
90 xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
92 verdict = bpf_prog_run_xdp(xdp_prog, xdp);
96 xdpf = xdp_convert_buff_to_frame(xdp);
97 if (unlikely(!xdpf)) {
98 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
99 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
100 verdict = ENA_XDP_DROP;
104 /* Find xmit queue */
105 xdp_ring = rx_ring->xdp_ring;
107 /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
108 spin_lock(&xdp_ring->xdp_tx_lock);
110 if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
112 xdp_return_frame(xdpf);
114 spin_unlock(&xdp_ring->xdp_tx_lock);
115 xdp_stat = &rx_ring->rx_stats.xdp_tx;
116 verdict = ENA_XDP_TX;
119 if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
120 xdp_stat = &rx_ring->rx_stats.xdp_redirect;
121 verdict = ENA_XDP_REDIRECT;
124 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
125 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
126 verdict = ENA_XDP_DROP;
129 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
130 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
131 verdict = ENA_XDP_DROP;
134 xdp_stat = &rx_ring->rx_stats.xdp_drop;
135 verdict = ENA_XDP_DROP;
138 xdp_stat = &rx_ring->rx_stats.xdp_pass;
139 verdict = ENA_XDP_PASS;
142 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
143 xdp_stat = &rx_ring->rx_stats.xdp_invalid;
144 verdict = ENA_XDP_DROP;
147 ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
#endif /* ENA_XDP_H */