// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <[email protected]>
 * Copyright (c) 2015 David Ahern <[email protected]>
 *
 * Based on dummy, team and ipvlan drivers
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/sch_generic.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/inet_dscp.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.1"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */

#define HT_MAP_BITS	4
#define HASH_INITVAL	((u32)0xcafef00d)

struct vrf_map {
	DECLARE_HASHTABLE(ht, HT_MAP_BITS);
	spinlock_t vmap_lock;

	/* shared_tables:
	 * count how many distinct tables do not comply with the strict mode
	 * requirement.
	 * shared_tables value must be 0 in order to enable the strict mode.
	 *
	 * example of the evolution of shared_tables:
	 *                                                | time
	 * add  vrf0 --> table 100        shared_tables = 0 | t0
	 * add  vrf1 --> table 101        shared_tables = 0 | t1
	 * add  vrf2 --> table 100        shared_tables = 1 | t2
	 * add  vrf3 --> table 100        shared_tables = 1 | t3
	 * add  vrf4 --> table 101        shared_tables = 2 v t4
	 *
	 * shared_tables is a "step function" (or "staircase function")
	 * and it is increased by one when the second vrf is associated to a
	 * table.
	 *
	 * at t2, vrf0 and vrf2 are bound to table 100: shared_tables = 1.
	 *
	 * at t3, another dev (vrf3) is bound to the same table 100 but the
	 * value of shared_tables is still 1.
	 * This means that no matter how many new vrfs will register on the
	 * table 100, the shared_tables will not increase (considering only
	 * table 100).
	 *
	 * at t4, vrf4 is bound to table 101, and shared_tables = 2.
	 *
	 * Looking at the value of shared_tables we can immediately know if
	 * the strict_mode can or cannot be enforced. Indeed, strict_mode
	 * can be enforced iff shared_tables = 0.
	 *
	 * Conversely, shared_tables is decreased when a vrf is de-associated
	 * from a table with exactly two associated vrfs.
	 */
	u32 shared_tables;

	bool strict_mode;
};

struct vrf_map_elem {
	struct hlist_node hnode;
	struct list_head vrf_list;  /* VRFs registered to this table */

	u32 table_id;
	int users;
	int ifindex;
};

static unsigned int vrf_net_id;

/* per netns vrf data */
struct netns_vrf {
	/* protected by rtnl lock */
	bool add_fib_rules;

	struct vrf_map vmap;
	struct ctl_table_header *ctl_hdr;
};

struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info	__rcu	*rt6;
#if IS_ENABLED(CONFIG_IPV6)
	struct fib6_table	*fib6_table;
#endif
	u32			tb_id;

	struct list_head	me_list;	/* entry in vrf_map_elem */
	int			ifindex;
};

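/* update the per-CPU receive counters for packets accepted by the VRF device */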
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	u64_stats_inc(&dstats->rx_packets);
	u64_stats_add(&dstats->rx_bytes, len);
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

static struct vrf_map *netns_vrf_map(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);

	return &nn_vrf->vmap;
}

static struct vrf_map *netns_vrf_map_by_dev(struct net_device *dev)
{
	return netns_vrf_map(dev_net(dev));
}

static int vrf_map_elem_get_vrf_ifindex(struct vrf_map_elem *me)
{
	struct list_head *me_head = &me->vrf_list;
	struct net_vrf *vrf;

	if (list_empty(me_head))
		return -ENODEV;

	vrf = list_first_entry(me_head, struct net_vrf, me_list);

	return vrf->ifindex;
}

static struct vrf_map_elem *vrf_map_elem_alloc(gfp_t flags)
{
	struct vrf_map_elem *me;

	me = kmalloc(sizeof(*me), flags);
	if (!me)
		return NULL;

	return me;
}

static void vrf_map_elem_free(struct vrf_map_elem *me)
{
	kfree(me);
}

static void vrf_map_elem_init(struct vrf_map_elem *me, int table_id,
			      int ifindex, int users)
{
	me->table_id = table_id;
	me->ifindex = ifindex;
	me->users = users;
	INIT_LIST_HEAD(&me->vrf_list);
}

static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap,
						u32 table_id)
{
	struct vrf_map_elem *me;
	u32 key;

	key = jhash_1word(table_id, HASH_INITVAL);
	hash_for_each_possible(vmap->ht, me, hnode, key) {
		if (me->table_id == table_id)
			return me;
	}

	return NULL;
}

static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me)
{
	u32 table_id = me->table_id;
	u32 key;

	key = jhash_1word(table_id, HASH_INITVAL);
	hash_add(vmap->ht, &me->hnode, key);
}

static void vrf_map_del_elem(struct vrf_map_elem *me)
{
	hash_del(&me->hnode);
}

static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)
{
	spin_lock(&vmap->vmap_lock);
}

static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock)
{
	spin_unlock(&vmap->vmap_lock);
}

/* called with rtnl lock held */
static int
vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
	struct net_vrf *vrf = netdev_priv(dev);
	struct vrf_map_elem *new_me, *me;
	u32 table_id = vrf->tb_id;
	bool free_new_me = false;
	int users;
	int res;

	/* we pre-allocate elements used in the spin-locked section (so that we
	 * keep the spinlock as short as possible).
	 */
	new_me = vrf_map_elem_alloc(GFP_KERNEL);
	if (!new_me)
		return -ENOMEM;

	vrf_map_elem_init(new_me, table_id, dev->ifindex, 0);

	vrf_map_lock(vmap);

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me) {
		me = new_me;
		vrf_map_add_elem(vmap, me);
		goto link_vrf;
	}

	/* we already have an entry in the vrf_map, so it means there is (at
	 * least) a vrf registered on the specific table.
	 */
	free_new_me = true;
	if (vmap->strict_mode) {
		/* vrfs cannot share the same table */
		NL_SET_ERR_MSG(extack, "Table is used by another VRF");
		res = -EBUSY;
		goto unlock;
	}

link_vrf:
	users = ++me->users;
	if (users == 2)
		++vmap->shared_tables;

	list_add(&vrf->me_list, &me->vrf_list);

	res = 0;

unlock:
	vrf_map_unlock(vmap);

	/* clean-up, if needed */
	if (free_new_me)
		vrf_map_elem_free(new_me);

	return res;
}

/* called with rtnl lock held */
static void vrf_map_unregister_dev(struct net_device *dev)
{
	struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
	struct net_vrf *vrf = netdev_priv(dev);
	u32 table_id = vrf->tb_id;
	struct vrf_map_elem *me;
	int users;

	vrf_map_lock(vmap);

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me)
		goto unlock;

	list_del(&vrf->me_list);

	users = --me->users;
	if (users == 1) {
		--vmap->shared_tables;
	} else if (users == 0) {
		vrf_map_del_elem(me);

		/* no one will refer to this element anymore */
		vrf_map_elem_free(me);
	}

unlock:
	vrf_map_unlock(vmap);
}

/* return the vrf device index associated with the table_id */
static int vrf_ifindex_lookup_by_table_id(struct net *net, u32 table_id)
{
	struct vrf_map *vmap = netns_vrf_map(net);
	struct vrf_map_elem *me;
	int ifindex;

	vrf_map_lock(vmap);

	if (!vmap->strict_mode) {
		ifindex = -EPERM;
		goto unlock;
	}

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me) {
		ifindex = -ENODEV;
		goto unlock;
	}

	ifindex = vrf_map_elem_get_vrf_ifindex(me);

unlock:
	vrf_map_unlock(vmap);

	return ifindex;
}

/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
		vrf_rx_stats(dev, len);
	} else {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		u64_stats_inc(&dstats->rx_drops);
		u64_stats_update_end(&dstats->syncp);
	}

	return NETDEV_TX_OK;
}

static void vrf_nf_set_untracked(struct sk_buff *skb)
{
	if (skb_get_nfct(skb) == 0)
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
}

static void vrf_nf_reset_ct(struct sk_buff *skb)
{
	if (skb_get_nfct(skb) == IP_CT_UNTRACKED)
		nf_reset_ct(skb);
}

#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	vrf_nf_reset_ct(skb);

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph;
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6;
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
		goto err;

	iph = ipv6_hdr(skb);

	memset(&fl6, 0, sizeof(fl6));
	/* needed to match OIF rule */
	fl6.flowi6_l3mdev = dev->ifindex;
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = iph->nexthdr;

	dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
	if (IS_ERR(dst) || dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is the VRF device again this is locally originated traffic
	 * destined to a local address. Short circuit to Rx path.
	 */
	if (dst->dev == dev)
		return vrf_local_xmit(skb, dev, dst);

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	vrf_nf_reset_ct(skb);

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h;
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4;
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		goto err;

	ip4h = ip_hdr(skb);

	memset(&fl4, 0, sizeof(fl4));
	/* needed to match OIF rule */
	fl4.flowi4_l3mdev = vrf_dev->ifindex;
	fl4.flowi4_iif = LOOPBACK_IFINDEX;
	fl4.flowi4_tos = ip4h->tos & INET_DSCP_MASK;
	fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
	fl4.flowi4_proto = ip4h->protocol;
	fl4.daddr = ip4h->daddr;
	fl4.saddr = ip4h->saddr;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is the VRF device again this is locally originated traffic
	 * destined to a local address. Short circuit to Rx path.
	 */
	if (rt->dst.dev == vrf_dev)
		return vrf_local_xmit(skb, vrf_dev, &rt->dst);

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

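/* ndo_start_xmit handler: route the frame through the VRF table and account
 * the result in the per-CPU dstats counters.
 */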
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	u64_stats_update_begin(&dstats->syncp);
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		u64_stats_inc(&dstats->tx_packets);
		u64_stats_add(&dstats->tx_bytes, len);
	} else {
		u64_stats_inc(&dstats->tx_drops);
	}
	u64_stats_update_end(&dstats->syncp);

	return ret;
}

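/* push a temporary ethernet header so packet taps listening on the VRF
 * device see the frame, then strip it again before transmission.
 */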
static void vrf_finish_direct(struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	vrf_nf_reset_ct(skb);
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	const struct in6_addr *nexthop;
	struct neighbour *neigh;
	int ret;

	vrf_nf_reset_ct(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock();
	nexthop = rt6_nexthop(dst_rt6_info(dst), &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb, false);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	vrf_finish_direct(skb);

	return vrf_ip6_local_out(net, sk, skb);
}

static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	int err = 1;

	skb->protocol = htons(ETH_P_IPV6);

	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
		err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb->dev, vrf_output6_direct_finish);

	if (likely(err == 1))
		vrf_finish_direct(skb);

	return err;
}

static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	int err;

	err = vrf_output6_direct(net, sk, skb);
	if (likely(err == 1))
		err = vrf_ip6_local_out(net, sk, skb);

	return err;
}

static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	if (likely(err == 1))
		return skb;

	return NULL;
}

static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	vrf_nf_set_untracked(skb);

	if (qdisc_tx_is_default(vrf_dev) ||
	    IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}

/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		netdev_ref_replace(dst->dev, net->loopback_dev,
				   &dst->dev_tracker, GFP_KERNEL);
		dst->dev = net->loopback_dev;
		dst_release(dst);
	}
}

static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	vrf->fib6_table = fib6_new_table(net, vrf->tb_id);
	if (!vrf->fib6_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->dst.output	= vrf_output6;

	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = dst_rtable(dst);
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	vrf_nf_reset_ct(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		skb = skb_expand_head(skb, hh_len);
		if (!skb) {
			dev->stats.tx_errors++;
			return -ENOMEM;
		}
	}

	rcu_read_lock();

	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int ret;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		ret = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock();
		return ret;
	}

	rcu_read_unlock();
	vrf_tx_error(skb->dev, skb);
	return -EINVAL;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output_direct_finish(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
{
	vrf_finish_direct(skb);

	return vrf_ip_local_out(net, sk, skb);
}

static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err = 1;

	skb->protocol = htons(ETH_P_IP);

	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
		err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb->dev, vrf_output_direct_finish);

	if (likely(err == 1))
		vrf_finish_direct(skb);

	return err;
}

static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
{
	int err;

	err = vrf_output_direct(net, sk, skb);
	if (likely(err == 1))
		err = vrf_ip_local_out(net, sk, skb);

	return err;
}

static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_ip_out_direct_finish);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	if (likely(err == 1))
		return skb;

	return NULL;
}

static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast or local broadcast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
		return skb;

	vrf_nf_set_untracked(skb);

	if (qdisc_tx_is_default(vrf_dev) ||
	    IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}

/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		netdev_ref_replace(dst->dev, net->loopback_dev,
				   &dst->dev_tracker, GFP_KERNEL);
		dst->dev = net->loopback_dev;
		dst_release(dst);
	}
}

static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1);
	if (!rth)
		return -ENOMEM;

	rth->dst.output	= vrf_output;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev,
			 struct netlink_ext_ack *extack)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP, extack);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags, extack);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			    struct netlink_ext_ack *extack)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave loopback device to a VRF");
		return -EOPNOTSUPP;
	}

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev, extack);

	return 0;

err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	if (netif_is_l3_master(port_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave an L3 master device to a VRF");
		return -EINVAL;
	}

	if (netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev, extack);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev, NULL);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);
}

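/* ndo_init: allocate the IPv4 and IPv6 dsts that point back at the VRF
 * device and set the initial device flags and operational state.
 */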
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_nomem;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}

static int vrf_prepare_mac_header(struct sk_buff *skb,
				  struct net_device *vrf_dev, u16 proto)
{
	struct ethhdr *eth;
	int err;

	/* in general, we do not know if there is enough space in the head of
	 * the packet for hosting the mac header.
	 */
	err = skb_cow_head(skb, LL_RESERVED_SPACE(vrf_dev));
	if (unlikely(err))
		/* no space in the skb head */
		return -ENOBUFS;

	__skb_push(skb, ETH_HLEN);
	eth = (struct ethhdr *)skb->data;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* we set the ethernet destination and the source addresses to the
	 * address of the VRF device.
	 */
	ether_addr_copy(eth->h_dest, vrf_dev->dev_addr);
	ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
	eth->h_proto = htons(proto);

	/* the destination address of the Ethernet frame corresponds to the
	 * address set on the VRF interface; therefore, the packet is intended
	 * to be processed locally.
	 */
	skb->protocol = eth->h_proto;
	skb->pkt_type = PACKET_HOST;

	skb_postpush_rcsum(skb, skb->data, ETH_HLEN);

	skb_pull_inline(skb, ETH_HLEN);

	return 0;
}

/* prepare and add the mac header to the packet if it was not set previously.
 * In this way, packet sniffers such as tcpdump can parse the packet correctly.
 * If the mac header was already set, the original mac header is left
 * untouched and the function returns immediately.
 */
static int vrf_add_mac_header_if_unset(struct sk_buff *skb,
				       struct net_device *vrf_dev,
				       u16 proto, struct net_device *orig_dev)
{
	if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev))
		return 0;

	return vrf_prepare_mac_header(skb, vrf_dev, proto);
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     const struct sk_buff *skb,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags);
}

static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_iif     = ifindex,
		.flowi6_mark    = skb->mark,
		.flowi6_proto   = iph->nexthdr,
		.daddr          = iph->daddr,
		.saddr          = iph->saddr,
		.flowlabel      = ip6_flowinfo(iph),
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

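/* IPv6 receive path: switch the skb to the VRF device, deliver a copy to
 * any packet taps on it and run the NF_INET_PRE_ROUTING hook.
 */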
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	bool is_ndisc = ipv6_ndisc_frame(skb);

	/* loopback, multicast & non-ND link-local traffic; do not push through
	 * packet taps again. Reset pkt_type for upper layers to process skb.
	 * For non-loopback strict packets, determine the dst using the original
	 * ifindex.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;

		if (skb->pkt_type == PACKET_LOOPBACK)
			skb->pkt_type = PACKET_HOST;
		else
			vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

		goto out;
	}

	/* if packet is NDISC then keep the ingress interface */
	if (!is_ndisc) {
		struct net_device *orig_dev = skb->dev;

		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			int err;

			err = vrf_add_mac_header_if_unset(skb, vrf_dev,
							  ETH_P_IPV6,
							  orig_dev);
			if (likely(!err)) {
				skb_push(skb, skb->mac_len);
				dev_queue_xmit_nit(skb, vrf_dev);
				skb_pull(skb, skb->mac_len);
			}
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	struct net_device *orig_dev = skb->dev;

	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	if (!list_empty(&vrf_dev->ptype_all)) {
		int err;

		err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP,
						  orig_dev);
		if (likely(!err)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}
	}

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 * Note: Caller to this function must hold rcu_read_lock() and no refcnt
 * is taken on the dst by this function.
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_DST_NOREF;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */
	sz += nla_total_size(sizeof(u8));	/* FRA_PROTOCOL */

	return sz;
}

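/* add or remove the l3mdev FIB rule for @family by building a netlink
 * message and feeding it to fib_nl_newrule()/fib_nl_delrule().
 */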
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) &&
	    !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL))
		goto nla_put_failure;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh, NULL);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh, NULL);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET,  true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true);
	if (err < 0)
		goto ip6mr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
ip6mr_err:
	vrf_fib_rule(dev, RTNL_FAMILY_IPMR,  false);
#endif

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6,  false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET,  false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}

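/* rtnl_link_ops setup handler: initialize the net_device fields, flags and
 * offload features of a new VRF device.
 */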
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->lltx = true;

	/* don't allow vrf devices to change network namespaces. */
	dev->netns_local = true;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features   |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features   |= NETIF_F_GSO_SOFTWARE;
	dev->features   |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
	dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_NO_RX_HANDLER;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* VRF devices do not care about MTU, but if the MTU is set
	 * too low then the ipv4 and ipv6 protocols are disabled
	 * which breaks networking.
	 */
	dev->min_mtu = IPV6_MIN_MTU;
	dev->max_mtu = IP6_MAX_MTU;
	dev->mtu = dev->max_mtu;

	dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	vrf_map_unregister_dev(dev);

	unregister_netdevice_queue(dev, head);
}

static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct netns_vrf *nn_vrf;
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE]) {
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
		return -EINVAL;
	}

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
				    "Invalid VRF table id");
		return -EINVAL;
	}

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	/* mapping between table_id and vrf;
	 * note: such binding could not be done in the dev init function
	 * because dev->ifindex id is not available yet.
	 */
	vrf->ifindex = dev->ifindex;

	err = vrf_map_register_dev(dev, extack);
	if (err) {
		unregister_netdevice(dev);
		goto out;
	}

	net = dev_net(dev);
	nn_vrf = net_generic(net, vrf_net_id);

	add_fib_rules = &nn_vrf->add_fib_rules;
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			vrf_map_unregister_dev(dev);
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int vrf_map_init(struct vrf_map *vmap)
{
	spin_lock_init(&vmap->vmap_lock);
	hash_init(vmap->ht);

	vmap->strict_mode = false;

	return 0;
}

#ifdef CONFIG_SYSCTL
static bool vrf_strict_mode(struct vrf_map *vmap)
{
	bool strict_mode;

	vrf_map_lock(vmap);
	strict_mode = vmap->strict_mode;
	vrf_map_unlock(vmap);

	return strict_mode;
}

static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode)
{
	bool *cur_mode;
	int res = 0;

	vrf_map_lock(vmap);

	cur_mode = &vmap->strict_mode;
	if (*cur_mode == new_mode)
		goto unlock;

	if (*cur_mode) {
		/* disable strict mode */
		*cur_mode = false;
	} else {
		if (vmap->shared_tables) {
			/* we cannot allow strict_mode because there are some
			 * vrfs that share one or more tables.
			 */
			res = -EBUSY;
			goto unlock;
		}

		/* no tables are shared among vrfs, so we can go back
		 * to 1:1 association between a vrf with its table.
		 */
		*cur_mode = true;
	}

unlock:
	vrf_map_unlock(vmap);

	return res;
}

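/* sysctl handler for net/vrf/strict_mode: reads report the current mode,
 * writes try to toggle it via vrf_strict_mode_change().
 */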
static int vrf_shared_table_handler(const struct ctl_table *table, int write,
				    void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)table->extra1;
	struct vrf_map *vmap = netns_vrf_map(net);
	int proc_strict_mode = 0;
	struct ctl_table tmp = {
		.procname	= table->procname,
		.data		= &proc_strict_mode,
		.maxlen		= sizeof(int),
		.mode		= table->mode,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};
	int ret;

	if (!write)
		proc_strict_mode = vrf_strict_mode(vmap);

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0)
		ret = vrf_strict_mode_change(vmap, (bool)proc_strict_mode);

	return ret;
}

static const struct ctl_table vrf_table[] = {
	{
		.procname	= "strict_mode",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= vrf_shared_table_handler,
		/* set by the vrf_netns_init */
		.extra1		= NULL,
	},
};

static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
{
	struct ctl_table *table;

	table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* init the extra1 parameter with the reference to current netns */
	table[0].extra1 = net;

	nn_vrf->ctl_hdr = register_net_sysctl_sz(net, "net/vrf", table,
						 ARRAY_SIZE(vrf_table));
	if (!nn_vrf->ctl_hdr) {
		kfree(table);
		return -ENOMEM;
	}

	return 0;
}

static void vrf_netns_exit_sysctl(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
	const struct ctl_table *table;

	table = nn_vrf->ctl_hdr->ctl_table_arg;
	unregister_net_sysctl_table(nn_vrf->ctl_hdr);
	kfree(table);
}
#else
static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
{
	return 0;
}

static void vrf_netns_exit_sysctl(struct net *net)
{
}
#endif

/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);

	nn_vrf->add_fib_rules = true;
	vrf_map_init(&nn_vrf->vmap);

	return vrf_netns_init_sysctl(net, nn_vrf);
}

static void __net_exit vrf_netns_exit(struct net *net)
{
	vrf_netns_exit_sysctl(net);
}

static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.exit = vrf_netns_exit,
	.id   = &vrf_net_id,
	.size = sizeof(struct netns_vrf),
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
					  vrf_ifindex_lookup_by_table_id);
	if (rc < 0)
		goto unreg_pernet;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto table_lookup_unreg;

	return 0;

table_lookup_unreg:
	l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF,
				       vrf_ifindex_lookup_by_table_id);

unreg_pernet:
	unregister_pernet_subsys(&vrf_net_ops);

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);