/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 bool multicore, unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);
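
/* Lifecycle of a queued packet: it enters the crypt queues as UNCRYPTED, is
 * marked CRYPTED (or DEAD on failure) by the per-device crypto workers, and
 * is then delivered or dropped by the per-peer consumer once it observes that
 * final state.
 */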
enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
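
/* Returns true only when the protocol the skb claims to carry (skb->protocol)
 * matches what its header bytes actually parse as, so mismatched or
 * unparsable packets can be rejected early.
 */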
static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);
	return real_protocol && skb->protocol == real_protocol;
}
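
/* Scrubs all per-flow and per-device metadata from the skb so that nothing
 * leaks between the tunnel and the underlying interface. When encapsulating,
 * the flow hash is preserved so steering of the outer UDP flow stays
 * consistent with the inner flow.
 */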
static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;
	skb_scrub_packet(skb, true);
	memset(&skb->headers_start, 0,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}
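
/* Returns the CPU cached in *stored_cpu as long as it is still online;
 * otherwise deterministically picks an online CPU from id and caches it, so
 * that work tagged with the same id keeps landing on the same CPU.
 */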
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu == nr_cpumask_bits ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}

/* This function is racy, in the sense that next is unlocked, so it could return
 * the same CPU twice. A race-free version of this would be to instead store an
 * atomic sequence number, do an increment-and-return, and then iterate through
 * every possible CPU until we get to that index -- choose_cpu. However that's
 * a bit slower, and it doesn't seem like this potential race actually
 * introduces any performance loss, so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
	int cpu = *next;

	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	return cpu;
}
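
/* Puts the skb on both queues that back the multicore crypto model: the
 * per-peer ring, which preserves delivery order, and the per-device ring,
 * whose per-CPU workers perform the actual crypto in parallel. Returns 0 on
 * success or a negative errno if either ring is full.
 */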
static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * dequeueing it.
	 */
	if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
		return -ENOSPC;
	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(next_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}
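
/* Publishes the packet's final state and kicks the peer's serial worker on
 * its cached CPU; that worker drains the per-peer ring in order.
 */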
static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
					     struct sk_buff *skb,
					     enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
					       peer->internal_id),
		      peer->device->packet_crypt_wq, &queue->work);
	wg_peer_put(peer);
}
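
/* Receive-path variant of the above: instead of a workqueue, the peer's NAPI
 * instance consumes the per-peer ring.
 */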
static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
						  enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */