/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <[email protected]>
 * Copyright (c) 2015 David Ahern <[email protected]>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

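/* A VRF device is paired with a routing table and created with iproute2,
 * for example (device and table names here are only illustrative):
 *
 *	ip link add vrf-blue type vrf table 10
 *	ip link set dev vrf-blue up
 *	ip link set dev eth1 master vrf-blue
 *
 * Routes of enslaved interfaces are moved to the VRF table, and sockets
 * bound to the VRF device have their lookups scoped to that table.
 */
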
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */

static unsigned int vrf_net_id;

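/* per-VRF private data: cached IPv4/IPv6 dst entries used to steer output
 * traffic back through this device, plus the FIB table id backing the VRF
 */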
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info	__rcu	*rt6;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};

static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

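/* fold the per-cpu counters into rtnl_link_stats64 for ndo_get_stats64 */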
static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}

/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);
	skb_dst_force(skb);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (dst->dev == dev)
		return vrf_local_xmit(skb, dev, dst);

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
		.flowi4_proto = ip4h->protocol,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (rt->dst.dev == vrf_dev)
		return vrf_local_xmit(skb, vrf_dev, &rt->dst);

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

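/* ndo_start_xmit: route the frame via the VRF table and account the result */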
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

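/* If packet taps are attached to the VRF device, push a dummy ethernet
 * header so they see a complete frame, then strip it again before the
 * packet continues down the real output path.
 */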
static int vrf_finish_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	return 1;
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	nf_reset(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IPV6);

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output6_direct);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		nf_reset(skb);
	else
		skb = NULL;

	return skb;
}

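/* IPv6 l3mdev output hook: leave link-local and multicast traffic alone;
 * otherwise use the direct path when the VRF device has no qdisc, else
 * redirect the skb through the cached VRF dst.
 */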
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev))
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}

/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output = vrf_output6;

	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	nf_reset(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
	}

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);
	return ret;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output_direct);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		nf_reset(skb);
	else
		skb = NULL;

	return skb;
}

static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev))
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}

/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	rth->dst.output = vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

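/* enslave port_dev to the VRF: mark it as an L3 slave, link it as a lower
 * device and cycle it so its addresses and routes land in the VRF table
 */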
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev)
		return -EOPNOTSUPP;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev);

	return 0;

err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

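/* ndo_init: allocate per-cpu stats and the default IPv4/IPv6 dsts that
 * point back at this device
 */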
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

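/* run the given netfilter hook with the skb attributed to the VRF device;
 * any verdict other than accept means the skb is no longer ours (freeing
 * is handled by the netfilter code)
 */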
static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct fib6_table *table = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	/* fib6_table does not have a refcnt and can not be freed */
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6))
		table = rt6->rt6i_table;

	rcu_read_unlock();

	if (!table)
		return NULL;

	return ip6_pol_route(net, table, ifindex, fl6, flags);
}

static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_iif	= ifindex,
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

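/* IPv4 receive path: attribute the skb to the VRF device, deliver it to
 * any taps and run PRE_ROUTING; multicast and looped-back local traffic
 * bypass the taps and the netfilter hook
 */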
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	if (!list_empty(&vrf_dev->ptype_all)) {
		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);
	}

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */

	return sz;
}

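/* add or remove the l3mdev FIB rule for this VRF by building a netlink
 * message locally and feeding it to the same handlers used by 'ip rule'
 */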
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u32(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh, NULL);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh, NULL);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6, false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}

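/* rtnl_link_ops setup: ethernet-like device with a random MAC, no qdisc
 * by default, locked to its namespace and with software offloads enabled
 */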
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	unregister_netdevice_queue(dev, head);
}

static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_vrf *vrf = netdev_priv(dev);
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE]) {
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
		return -EINVAL;
	}

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
				    "Invalid VRF table id");
		return -EINVAL;
	}

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	net = dev_net(dev);
	add_fib_rules = net_generic(net, vrf_net_id);
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	bool *add_fib_rules = net_generic(net, vrf_net_id);

	*add_fib_rules = true;

	return 0;
}

static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.id   = &vrf_net_id,
	.size = sizeof(bool),
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0) {
		unregister_pernet_subsys(&vrf_net_ops);
		goto error;
	}

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);