// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <[email protected]>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
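/* The __xfrm_*_prep() helpers below adjust the mac/transport header
 * offsets and pull the skb so that a packet leaving through ESP hardware
 * offload is laid out as the offload xmit callback expects for the given
 * outer mode.
 */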
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

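/* Tunnel mode variant: @hsize is the size of the outer IPv4/IPv6 header. */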
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

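/* BEET mode variant: account for the IPv4 pseudo header (and a possible
 * IPv4 to IPv6 family change) when computing how much to pull.
 */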
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}

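/* Called from the core transmit path for packets that carry an ESP offload
 * state: defer the packet when the per-CPU xfrm backlog is busy,
 * software-segment GSO packets that were rerouted to a device without ESP
 * GSO support, and hand each resulting skb to the state's
 * type_offload->xmit() callback.
 */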
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			atomic_long_inc(&dev->tx_dropped);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

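/* Bind a state to an offload-capable net_device and ask the driver to
 * install it via xdo_dev_state_add(). Returning 0 with xso->dev left NULL
 * means the state falls back to software IPsec.
 */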
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->real_dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->num_exthdrs = 0;
		xso->flags = 0;
		xso->dev = NULL;
		xso->real_dev = NULL;
		dev_put(dev);

		if (err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

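/* Decide whether this skb may be sent through the state's offload device:
 * the dst path must end at that device (if one is bound), there must be no
 * nested xfrm, and the packet (or its GSO segments) must fit the state MTU.
 * The driver gets a final veto through xdo_dev_offload_ok().
 */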
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

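/* Retransmit an skb handed back by the offload/async path. If the TX queue
 * is frozen or the driver is busy, park the skb on the per-CPU xfrm backlog
 * and raise the TX softirq to retry later.
 */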
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

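/* Run from the TX softirq: splice the per-CPU xfrm backlog onto a private
 * list and resume transmission of every queued skb.
 */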
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

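/* Make sure a device only advertises ESP offload features it can back up
 * with the matching xfrmdev_ops callbacks (or none at all when the kernel
 * is built without XFRM offload support).
 */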
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

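/* Netdevice notifier: validate offload capabilities when a device registers
 * or changes features, and flush its offloaded states on down/unregister.
 */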
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

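/* The notifier below is registered once at boot from xfrm_dev_init(). */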
static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}