// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Handle firewalling
 *	Linux ethernet bridge
 *
 *	Lennert dedicates this file to Kerstin Wurdinger.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <uapi/linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/rculist.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>
#include <net/netns/generic.h>

#include <linux/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

static unsigned int brnf_net_id __read_mostly;

struct brnf_net {
	bool enabled;

#ifdef CONFIG_SYSCTL
	struct ctl_table_header *ctl_hdr;
#endif

	/* default value is 1 */
	int call_iptables;
	int call_ip6tables;
	int call_arptables;

	/* default value is 0 */
	int filter_vlan_tagged;
	int filter_pppoe_tagged;
	int pass_vlan_indev;
};

#define IS_IP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))

#define IS_IPV6(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))

#define IS_ARP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))

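/* Return the protocol carried inside the VLAN tag, whether the tag has been
 * stripped into skb metadata (hardware accelerated) or is still in the
 * payload; 0 if the frame is not VLAN tagged.
 */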
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}

static inline bool is_vlan_ip(const struct sk_buff *skb, const struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	return vlan_proto(skb) == htons(ETH_P_IP) && brnet->filter_vlan_tagged;
}

static inline bool is_vlan_ipv6(const struct sk_buff *skb,
				const struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	return vlan_proto(skb) == htons(ETH_P_IPV6) &&
	       brnet->filter_vlan_tagged;
}

static inline bool is_vlan_arp(const struct sk_buff *skb, const struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	return vlan_proto(skb) == htons(ETH_P_ARP) && brnet->filter_vlan_tagged;
}

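/* Read the PPP protocol field that immediately follows the PPPoE session
 * header in the MAC header area; used by is_pppoe_ip()/is_pppoe_ipv6().
 */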
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}

static inline bool is_pppoe_ip(const struct sk_buff *skb, const struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	return skb->protocol == htons(ETH_P_PPP_SES) &&
	       pppoe_proto(skb) == htons(PPP_IP) && brnet->filter_pppoe_tagged;
}

static inline bool is_pppoe_ipv6(const struct sk_buff *skb,
				 const struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	return skb->protocol == htons(ETH_P_PPP_SES) &&
	       pppoe_proto(skb) == htons(PPP_IPV6) &&
	       brnet->filter_pppoe_tagged;
}

/* largest possible L2 header, see br_nf_dev_queue_xmit() */
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)

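/* PPPOE_SES_HLEN (8) + ETH_HLEN (14) = 22 bytes: enough scratch space in
 * brnf_frag_data.mac below to stash whatever L2 header has to be restored
 * after refragmentation.
 */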
struct brnf_frag_data {
	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
	u8 encap_size;
	u8 size;
	u16 vlan_tci;
	__be16 vlan_proto;
};

static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);

static void nf_bridge_info_free(struct sk_buff *skb)
{
	skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
}

static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}

static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	return skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
}

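/* Number of bytes of encapsulation (802.1Q tag or PPPoE session header)
 * sitting between the Ethernet header and the IP header of this skb.
 */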
unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __cpu_to_be16(ETH_P_8021Q):
		return VLAN_HLEN;
	case __cpu_to_be16(ETH_P_PPP_SES):
		return PPPOE_SES_HLEN;
	default:
		return 0;
	}
}

static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}

/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */
static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
{
	const struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	/* We should really parse IP options here but until
	 * somebody who actually uses IP options complains to
	 * us we'll just silently ignore the options because
	 * we're lazy!
	 */
	return 0;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}

void nf_bridge_update_protocol(struct sk_buff *skb)
{
	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	switch (nf_bridge->orig_proto) {
	case BRNF_PROTO_8021Q:
		skb->protocol = htons(ETH_P_8021Q);
		break;
	case BRNF_PROTO_PPPOE:
		skb->protocol = htons(ETH_P_PPP_SES);
		break;
	case BRNF_PROTO_UNCHANGED:
		break;
	}
}

/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct neighbour *neigh;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (neigh) {
		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
		int ret;

		if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
			neigh_hh_bridge(&neigh->hh, skb);
			skb->dev = nf_bridge->physindev;
			ret = br_handle_frame_finish(net, sk, skb);
		} else {
			/* the neighbour function below overwrites the complete
			 * MAC header, so we save the Ethernet source address and
			 * protocol number.
			 */
			skb_copy_from_linear_data_offset(skb,
							 -(ETH_HLEN - ETH_ALEN),
							 nf_bridge->neigh_header,
							 ETH_HLEN - ETH_ALEN);
			/* tell br_dev_xmit to continue with forwarding */
			nf_bridge->bridged_dnat = 1;
			/* FIXME Need to refragment */
			ret = neigh->output(neigh, skb);
		}
		neigh_release(neigh);
		return ret;
	}
free_skb:
	kfree_skb(skb);
	return 0;
}

static bool
br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
			     const struct nf_bridge_info *nf_bridge)
{
	return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 * This is also true when SNAT takes place (for the reply direction).
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct rtable *rt;
	int err;

	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}
	nf_bridge->in_prerouting = 0;
	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(net, iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 * require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
						  net, sk, skb, skb->dev,
						  NULL,
						  br_nf_pre_routing_finish_bridge);
				return 0;
			}
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
			  br_handle_frame_finish);

	return 0;
}

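/* Pick the "logical" input device handed to the IP hooks: the VLAN upper
 * device on top of the bridge when bridge-nf-pass-vlan-input-dev is set and
 * the frame carries a VLAN tag, otherwise the bridge device itself.
 */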
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb,
					       const struct net_device *dev,
					       const struct net *net)
{
	struct net_device *vlan, *br;
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	br = bridge_parent(dev);

	if (brnet->pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
		return br;

	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
					skb_vlan_tag_get(skb) & VLAN_VID_MASK);

	return vlan ? vlan : br;
}

/* Some common code for IPv4/IPv6 */
struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge->in_prerouting = 1;
	nf_bridge->physindev = skb->dev;
	skb->dev = brnf_get_logical_dev(skb, skb->dev, net);

	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->orig_proto = BRNF_PROTO_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->orig_proto = BRNF_PROTO_PPPOE;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);
	return skb->dev;
}

/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular. Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);
	struct brnf_net *brnet;

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(state->in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	brnet = net_generic(state->net, brnf_net_id);
	if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
	    is_pppoe_ipv6(skb, state->net)) {
		if (!brnet->call_ip6tables &&
		    !br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
			return NF_ACCEPT;
		if (!ipv6_mod_enabled()) {
			pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported.");
			return NF_DROP;
		}

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(priv, skb, state);
	}

	if (!brnet->call_iptables && !br_opt_get(br, BROPT_NF_CALL_IPTABLES))
		return NF_ACCEPT;

	if (!IS_IP(skb) && !is_vlan_ip(skb, state->net) &&
	    !is_pppoe_ip(skb, state->net))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_validate_ipv4(state->net, skb))
		return NF_DROP;

	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb, state->net))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;

	skb->protocol = htons(ETH_P_IP);
	skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
		skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}

/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *in;

	if (!IS_ARP(skb) && !is_vlan_arp(skb, net)) {

		if (skb->protocol == htons(ETH_P_IP))
			nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

		if (skb->protocol == htons(ETH_P_IPV6))
			nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;

		in = nf_bridge->physindev;
		if (nf_bridge->pkt_otherhost) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->pkt_otherhost = false;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	br_nf_hook_thresh(NF_BR_FORWARD, net, sk, skb, in, skb->dev,
			  br_forward_finish);
	return 0;
}

/* This is the 'purely bridged' case. For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(void *priv,
				     struct sk_buff *skb,
				     const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	nf_bridge = nf_bridge_info_get(skb);
	if (!nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	if (!nf_bridge)
		return NF_DROP;

	parent = bridge_parent(state->out);
	if (!parent)
		return NF_DROP;

	if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
	    is_pppoe_ip(skb, state->net))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
		 is_pppoe_ipv6(skb, state->net))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	if (pf == NFPROTO_IPV4) {
		if (br_validate_ipv4(state->net, skb))
			return NF_DROP;
		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
	}

	if (pf == NFPROTO_IPV6) {
		if (br_validate_ipv6(state->net, skb))
			return NF_DROP;
		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
	}

	nf_bridge->physoutdev = skb->dev;
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
		brnf_get_logical_dev(skb, state->in, state->net),
		parent, br_nf_forward_finish);

	return NF_STOLEN;
}

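/* On the bridge FORWARD hook, feed bridged ARP traffic to arptables'
 * FORWARD chain. Only IPv4 ARP (4-byte protocol addresses) is passed on;
 * everything else is accepted unmodified.
 */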
static unsigned int br_nf_forward_arp(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);
	struct brnf_net *brnet;

	p = br_port_get_rcu(state->out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	brnet = net_generic(state->net, brnf_net_id);
	if (!brnet->call_arptables && !br_opt_get(br, BROPT_NF_CALL_ARPTABLES))
		return NF_ACCEPT;

	if (!IS_ARP(skb)) {
		if (!is_vlan_arp(skb, state->net))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
		return NF_DROP;

	if (arp_hdr(skb)->ar_pln != 4) {
		if (is_vlan_arp(skb, state->net))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = state->in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
		state->in, state->out, br_nf_forward_finish);

	return NF_STOLEN;
}

static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct brnf_frag_data *data;
	int err;

	data = this_cpu_ptr(&brnf_frag_data_storage);
	err = skb_cow_head(skb, data->size);

	if (err) {
		kfree_skb(skb);
		return 0;
	}

	if (data->vlan_proto)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
	__skb_push(skb, data->encap_size);

	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
}

static int
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		  int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	unsigned int mtu = ip_skb_dst_mtu(sk, skb);
	struct iphdr *iph = ip_hdr(skb);

	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

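/* Extra bytes the bridge will prepend on transmit (the PPPoE session header,
 * if the original frame was PPPoE encapsulated); accounted against the
 * device MTU before deciding whether to refragment.
 */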
static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
		return PPPOE_SES_HLEN;
	return 0;
}

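/* Transmit helper used as okfn of the POST_ROUTING hook below: if the
 * (possibly conntrack-defragmented) packet still fits the outgoing MTU it is
 * queued as-is, otherwise it is refragmented, honouring the recorded
 * frag_max_size, and the saved L2 header is restored in br_nf_push_frag_xmit().
 */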
static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	unsigned int mtu, mtu_reserved;

	mtu_reserved = nf_bridge_mtu_reduction(skb);
	mtu = skb->dev->mtu;

	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}

	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
		mtu = nf_bridge->frag_max_size;

	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
		nf_bridge_info_free(skb);
		return br_dev_queue_push_xmit(net, sk, skb);
	}

	/* This is wrong! We should preserve the original fragment
	 * boundaries by preserving frag_list rather than refragmenting.
	 */
	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) &&
	    skb->protocol == htons(ETH_P_IP)) {
		struct brnf_frag_data *data;

		if (br_validate_ipv4(net, skb))
			goto drop;

		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;

		nf_bridge_update_protocol(skb);

		data = this_cpu_ptr(&brnf_frag_data_storage);

		if (skb_vlan_tag_present(skb)) {
			data->vlan_tci = skb->vlan_tci;
			data->vlan_proto = skb->vlan_proto;
		} else {
			data->vlan_proto = 0;
		}

		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
	}
	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
	    skb->protocol == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		struct brnf_frag_data *data;

		if (br_validate_ipv6(net, skb))
			goto drop;

		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;

		nf_bridge_update_protocol(skb);

		data = this_cpu_ptr(&brnf_frag_data_storage);
		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		if (v6ops)
			return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);

		goto drop;
	}
	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
drop:
	kfree_skb(skb);
	return 0;
}

/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(void *priv,
				       struct sk_buff *skb,
				       const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	/* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
	 * on a bridge, but was delivered locally and is now being routed:
	 *
	 * POST_ROUTING was already invoked from the ip stack.
	 */
	if (!nf_bridge || !nf_bridge->physoutdev)
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
	    is_pppoe_ip(skb, state->net))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
		 is_pppoe_ipv6(skb, state->net))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge_pull_encap_header(skb);
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, state->net, state->sk, skb,
		NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}

/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(void *priv,
				   struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (nf_bridge && !nf_bridge->in_prerouting &&
	    !netif_is_l3_master(skb->dev) &&
	    !netif_is_l3_slave(skb->dev)) {
		state->okfn(state->net, state->sk, skb);
		return NF_STOLEN;
	}

	return NF_ACCEPT;
}

/* This is called when br_netfilter has called into iptables/netfilter,
 * and DNAT has taken place on a bridge-forwarded packet.
 *
 * neigh->output has created a new MAC header, with local br0 MAC
 * as saddr.
 *
 * This restores the original MAC saddr of the bridged packet
 * before invoking bridge forward logic to transmit the packet.
 */
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	skb_pull(skb, ETH_HLEN);
	nf_bridge->bridged_dnat = 0;

	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));

	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
				       nf_bridge->neigh_header,
				       ETH_HLEN - ETH_ALEN);
	skb->dev = nf_bridge->physindev;

	nf_bridge->physoutdev = NULL;
	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
}

static int br_nf_dev_xmit(struct sk_buff *skb)
{
	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (nf_bridge && nf_bridge->bridged_dnat) {
		br_nf_pre_routing_finish_bridge_slow(skb);
		return 1;
	}
	return 0;
}

static const struct nf_br_ops br_ops = {
	.br_dev_xmit_hook =	br_nf_dev_xmit,
};

/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static const struct nf_hook_ops br_nf_ops[] = {
	{
		.hook = br_nf_pre_routing,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_forward_ip,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};

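/* Netdevice notifier: the bridge netfilter hooks above are registered
 * lazily, per network namespace, when the first bridge device
 * (IFF_EBRIDGE) is registered in that namespace.
 */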
static int brnf_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct brnf_net *brnet;
	struct net *net;
	int ret;

	if (event != NETDEV_REGISTER || !(dev->priv_flags & IFF_EBRIDGE))
		return NOTIFY_DONE;

	ASSERT_RTNL();

	net = dev_net(dev);
	brnet = net_generic(net, brnf_net_id);
	if (brnet->enabled)
		return NOTIFY_OK;

	ret = nf_register_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret)
		return NOTIFY_BAD;

	brnet->enabled = true;
	return NOTIFY_OK;
}

static struct notifier_block brnf_notifier __read_mostly = {
	.notifier_call = brnf_device_event,
};

/* recursively invokes nf_hook_slow (again), skipping already-called
 * hooks (< NF_BR_PRI_BRNF).
 *
 * Called with rcu read lock held. */
int br_nf_hook_thresh(unsigned int hook, struct net *net,
		      struct sock *sk, struct sk_buff *skb,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct net *, struct sock *,
				  struct sk_buff *))
{
	const struct nf_hook_entries *e;
	struct nf_hook_state state;
	struct nf_hook_ops **ops;
	unsigned int i;
	int ret;

	e = rcu_dereference(net->nf.hooks_bridge[hook]);
	if (!e)
		return okfn(net, sk, skb);

	ops = nf_hook_entries_get_hook_ops(e);
	for (i = 0; i < e->num_hook_entries &&
	      ops[i]->priority <= NF_BR_PRI_BRNF; i++)
		;

	nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
			   sk, net, okfn);

	ret = nf_hook_slow(skb, &state, e, i);
	if (ret == 1)
		ret = okfn(net, sk, skb);

	return ret;
}

#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
			    void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}

static struct ctl_table brnf_table[] = {
	{
		.procname	= "bridge-nf-call-arptables",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-iptables",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-ip6tables",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-vlan-tagged",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-pppoe-tagged",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-pass-vlan-input-dev",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{ }
};

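/* The knobs above appear under /proc/sys/net/bridge once the module is
 * loaded, e.g. (illustrative):
 *
 *   sysctl -w net.bridge.bridge-nf-call-iptables=1
 *   sysctl -w net.bridge.bridge-nf-filter-vlan-tagged=0
 *
 * brnf_sysctl_call_tables() clamps any non-zero write to 1, so the values
 * behave as booleans; defaults come from br_netfilter_sysctl_default().
 */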
static inline void br_netfilter_sysctl_default(struct brnf_net *brnf)
{
	brnf->call_iptables = 1;
	brnf->call_ip6tables = 1;
	brnf->call_arptables = 1;
	brnf->filter_vlan_tagged = 0;
	brnf->filter_pppoe_tagged = 0;
	brnf->pass_vlan_indev = 0;
}

static int br_netfilter_sysctl_init_net(struct net *net)
{
	struct ctl_table *table = brnf_table;
	struct brnf_net *brnet;

	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(brnf_table), GFP_KERNEL);
		if (!table)
			return -ENOMEM;
	}

	brnet = net_generic(net, brnf_net_id);
	table[0].data = &brnet->call_arptables;
	table[1].data = &brnet->call_iptables;
	table[2].data = &brnet->call_ip6tables;
	table[3].data = &brnet->filter_vlan_tagged;
	table[4].data = &brnet->filter_pppoe_tagged;
	table[5].data = &brnet->pass_vlan_indev;

	br_netfilter_sysctl_default(brnet);

	brnet->ctl_hdr = register_net_sysctl(net, "net/bridge", table);
	if (!brnet->ctl_hdr) {
		if (!net_eq(net, &init_net))
			kfree(table);

		return -ENOMEM;
	}

	return 0;
}

static void br_netfilter_sysctl_exit_net(struct net *net,
					 struct brnf_net *brnet)
{
	struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg;

	unregister_net_sysctl_table(brnet->ctl_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static int __net_init brnf_init_net(struct net *net)
{
	return br_netfilter_sysctl_init_net(net);
}
#endif

static void __net_exit brnf_exit_net(struct net *net)
{
	struct brnf_net *brnet;

	brnet = net_generic(net, brnf_net_id);
	if (brnet->enabled) {
		nf_unregister_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
		brnet->enabled = false;
	}

#ifdef CONFIG_SYSCTL
	br_netfilter_sysctl_exit_net(net, brnet);
#endif
}

static struct pernet_operations brnf_net_ops __read_mostly = {
#ifdef CONFIG_SYSCTL
	.init = brnf_init_net,
#endif
	.exit = brnf_exit_net,
	.id   = &brnf_net_id,
	.size = sizeof(struct brnf_net),
};

static int __init br_netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&brnf_net_ops);
	if (ret < 0)
		return ret;

	ret = register_netdevice_notifier(&brnf_notifier);
	if (ret < 0) {
		unregister_pernet_subsys(&brnf_net_ops);
		return ret;
	}

	RCU_INIT_POINTER(nf_br_ops, &br_ops);
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
}

static void __exit br_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_br_ops, NULL);
	unregister_netdevice_notifier(&brnf_notifier);
	unregister_pernet_subsys(&brnf_net_ops);
}

module_init(br_netfilter_init);
module_exit(br_netfilter_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge");