#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/protocol.h>
#include <net/udp_tunnel.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>
        struct list_head list;

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

        struct udp_port_cfg udp_config;

static unsigned int fou_net_id;

        struct list_head fou_list;
        struct mutex fou_lock;
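
/* A FOU socket's sk_user_data points at its struct fou; fou_create() installs
 * it via udp_tunnel_sock_cfg.sk_user_data.
 */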
static inline struct fou *fou_from_sock(struct sock *sk)
{
        return sk->sk_user_data;
}
static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
        /* Remove 'len' bytes from the packet (UDP header and
         * FOU header if present).
         */
        if (fou->family == AF_INET)
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
        else
                ipv6_hdr(skb)->payload_len =
                        htons(ntohs(ipv6_hdr(skb)->payload_len) - len);

        skb_postpull_rcsum(skb, udp_hdr(skb), len);
        skb_reset_transport_header(skb);
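        /* Encapsulation/GSO state on the skb referred to the outer headers
         * that were just pulled; iptunnel_pull_offloads() clears that state.
         */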
        return iptunnel_pull_offloads(skb);
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
        struct fou *fou = fou_from_sock(sk);

        if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
                goto drop;

        return -fou->protocol;
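        /* A negative return value from an encap_rcv handler tells the IP
         * receive path to resubmit the now-decapsulated packet to the
         * protocol given by the negated value (here fou->protocol).
         */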
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
                                  void *data, size_t hdrlen, u8 ipproto,
                                  bool nopartial)
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = sizeof(struct udphdr) + hdrlen +
                      max_t(size_t, offset + sizeof(u16), start);

        if (skb->remcsum_offload)
                return guehdr;

        if (!pskb_may_pull(skb, plen))
                return NULL;

        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        skb_remcsum_process(skb, (void *)guehdr + hdrlen,
                            start, offset, nopartial);
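        /* skb_remcsum_process() completes GUE remote checksum offload: it
         * patches the inner checksum field at 'offset' using the checksum
         * already computed over the outer UDP packet, so the inner payload
         * does not have to be checksummed separately.
         */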
static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        struct fou *fou = fou_from_sock(sk);
        size_t len, optlen, hdrlen;
        struct guehdr *guehdr;
        void *data;
        u16 doffset = 0;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);
        if (!pskb_may_pull(skb, len))
                goto drop;

        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        optlen = guehdr->hlen << 2;
        len += optlen;

        if (!pskb_may_pull(skb, len))
                goto drop;

        /* guehdr may change after pull */
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        hdrlen = sizeof(struct guehdr) + optlen;

        if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
                goto drop;

        hdrlen = sizeof(struct guehdr) + optlen;

        if (fou->family == AF_INET)
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
        else
                ipv6_hdr(skb)->payload_len =
                        htons(ntohs(ipv6_hdr(skb)->payload_len) - len);

        /* Pull csum through the guehdr now. This can be used if
         * there is a remote checksum offload.
         */
        skb_postpull_rcsum(skb, udp_hdr(skb), len);

        data = &guehdr[1];

        if (guehdr->flags & GUE_FLAG_PRIV) {
                __be32 flags = *(__be32 *)(data + doffset);

                doffset += GUE_LEN_PRIV;

                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_remcsum(skb, guehdr, data + doffset,
                                             hdrlen, guehdr->proto_ctype,
                                             !!(fou->flags &
                                                FOU_F_REMCSUM_NOPARTIAL));

                        doffset += GUE_PLEN_REMCSUM;
                }
        }

        if (unlikely(guehdr->control))
                return gue_control_message(skb, guehdr);

        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);

        if (iptunnel_pull_offloads(skb))
                goto drop;

        return -guehdr->proto_ctype;
static struct sk_buff **fou_gro_receive(struct sock *sk,
                                        struct sk_buff **head,
                                        struct sk_buff *skb)
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        u8 proto = fou_from_sock(sk)->protocol;
        const struct net_offload **offloads;

        /* We can clear the encap_mark for FOU as we are essentially doing
         * one of two possible things.  We are either adding an L4 tunnel
         * header to the outer L3 tunnel header, or we are simply
         * treating the GRE tunnel header as though it is a UDP protocol
         * specific header such as VXLAN or GENEVE.
         */
        NAPI_GRO_CB(skb)->encap_mark = 0;

        /* Flag this frame as already having an outer encap header */
        NAPI_GRO_CB(skb)->is_fou = 1;

        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive)
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);
static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
                            int nhoff)
        const struct net_offload *ops;
        u8 proto = fou_from_sock(sk)->protocol;
        const struct net_offload **offloads;

        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff);

        skb_set_inner_mac_header(skb, nhoff);
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                                      struct guehdr *guehdr, void *data,
                                      size_t hdrlen, struct gro_remcsum *grc,
                                      bool nopartial)
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);

        if (skb->remcsum_offload)
                return guehdr;

        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;

        guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
                                         start, offset, grc, nopartial);

        skb->remcsum_offload = 1;
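        /* Mark the skb so the normal receive path (gue_remcsum()) does not
         * redo the remote checksum processing once the merged packet is
         * handed up the stack.
         */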
static struct sk_buff **gue_gro_receive(struct sock *sk,
                                        struct sk_buff **head,
                                        struct sk_buff *skb)
        const struct net_offload **offloads;
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        struct guehdr *guehdr;
        size_t len, optlen, hdrlen, off;
        void *data;
        u16 doffset = 0;
        int flush = 1;
        struct fou *fou = fou_from_sock(sk);
        struct gro_remcsum grc;

        skb_gro_remcsum_init(&grc);

        off = skb_gro_offset(skb);
        len = off + sizeof(*guehdr);

        guehdr = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, len)) {
                guehdr = skb_gro_header_slow(skb, len, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        optlen = guehdr->hlen << 2;
        len += optlen;

        if (skb_gro_header_hard(skb, len)) {
                guehdr = skb_gro_header_slow(skb, len, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        if (unlikely(guehdr->control) || guehdr->version != 0 ||
            validate_gue_flags(guehdr, optlen))
                goto out;

        hdrlen = sizeof(*guehdr) + optlen;

        /* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr;
         * this is needed if there is a remote checksum offload.
         */
        skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

        data = &guehdr[1];

        if (guehdr->flags & GUE_FLAG_PRIV) {
                __be32 flags = *(__be32 *)(data + doffset);

                doffset += GUE_LEN_PRIV;

                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_gro_remcsum(skb, off, guehdr,
                                                 data + doffset, hdrlen, &grc,
                                                 !!(fou->flags &
                                                    FOU_F_REMCSUM_NOPARTIAL));

                        doffset += GUE_PLEN_REMCSUM;
                }
        }

        skb_gro_pull(skb, hdrlen);

        for (p = *head; p; p = p->next) {
                const struct guehdr *guehdr2;

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                guehdr2 = (struct guehdr *)(p->data + off);

                /* Compare base GUE header to be equal (covers
                 * hlen, version, proto_ctype, and flags).
                 */
                if (guehdr->word != guehdr2->word) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                /* Compare optional fields to be the same. */
                if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
                                           guehdr->hlen << 2)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        /* We can clear the encap_mark for GUE as we are essentially doing
         * one of two possible things.  We are either adding an L4 tunnel
         * header to the outer L3 tunnel header, or we are simply
         * treating the GRE tunnel header as though it is a UDP protocol
         * specific header such as VXLAN or GENEVE.
         */
        NAPI_GRO_CB(skb)->encap_mark = 0;

        /* Flag this frame as already having an outer encap header */
        NAPI_GRO_CB(skb)->is_fou = 1;

        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[guehdr->proto_ctype]);
        if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);

        NAPI_GRO_CB(skb)->flush |= flush;
        skb_gro_remcsum_cleanup(skb, &grc);
static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
        const struct net_offload **offloads;
        struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
        const struct net_offload *ops;
        unsigned int guehlen;
        u8 proto;

        proto = guehdr->proto_ctype;

        guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

        skb_set_inner_mac_header(skb, nhoff + guehlen);
static int fou_add_to_port_list(struct net *net, struct fou *fou)
        struct fou_net *fn = net_generic(net, fou_net_id);
        struct fou *fout;

        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fout, &fn->fou_list, list) {
                if (fou->port == fout->port &&
                    fou->family == fout->family) {
                        mutex_unlock(&fn->fou_lock);
                        return -EALREADY;
                }
        }

        list_add(&fou->list, &fn->fou_list);
        mutex_unlock(&fn->fou_lock);

        return 0;
static void fou_release(struct fou *fou)
        struct socket *sock = fou->sock;

        list_del(&fou->list);
        udp_tunnel_sock_release(sock);
static int fou_create(struct net *net, struct fou_cfg *cfg,
                      struct socket **sockp)
        struct socket *sock = NULL;
        struct fou *fou = NULL;
        struct udp_tunnel_sock_cfg tunnel_cfg;

        /* Open UDP socket */
        err = udp_sock_create(net, &cfg->udp_config, &sock);

        /* Allocate FOU port structure */
        fou = kzalloc(sizeof(*fou), GFP_KERNEL);

        fou->port = cfg->udp_config.local_udp_port;
        fou->family = cfg->udp_config.family;
        fou->flags = cfg->flags;
        fou->type = cfg->type;

        memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
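        /* A non-zero encap_type is what enables the encap_rcv hook on the
         * tunnel socket's UDP receive path.
         */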
        tunnel_cfg.encap_type = 1;
        tunnel_cfg.sk_user_data = fou;
        tunnel_cfg.encap_destroy = NULL;

        /* Initialize based on FOU encap type */
        switch (cfg->type) {
        case FOU_ENCAP_DIRECT:
                tunnel_cfg.encap_rcv = fou_udp_recv;
                tunnel_cfg.gro_receive = fou_gro_receive;
                tunnel_cfg.gro_complete = fou_gro_complete;
                fou->protocol = cfg->protocol;
                break;
        case FOU_ENCAP_GUE:
                tunnel_cfg.encap_rcv = gue_udp_recv;
                tunnel_cfg.gro_receive = gue_gro_receive;
                tunnel_cfg.gro_complete = gue_gro_complete;
                break;
        }

        setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
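        /* The tunnel socket is driven from the packet receive path (softirq
         * context), so allocations on its behalf must not sleep.
         */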
        sk->sk_allocation = GFP_ATOMIC;

        err = fou_add_to_port_list(net, fou);

        udp_tunnel_sock_release(sock);
static int fou_destroy(struct net *net, struct fou_cfg *cfg)
        struct fou_net *fn = net_generic(net, fou_net_id);
        __be16 port = cfg->udp_config.local_udp_port;
        u8 family = cfg->udp_config.family;
        struct fou *fou;

        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fou, &fn->fou_list, list) {
                if (fou->port == port && fou->family == family) {
                        fou_release(fou);
                        break;
                }
        }
        mutex_unlock(&fn->fou_lock);
static struct genl_family fou_nl_family = {
        .id = GENL_ID_GENERATE,
        .name = FOU_GENL_NAME,
        .version = FOU_GENL_VERSION,
        .maxattr = FOU_ATTR_MAX,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
        [FOU_ATTR_PORT] = { .type = NLA_U16, },
        [FOU_ATTR_AF] = { .type = NLA_U8, },
        [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
        [FOU_ATTR_TYPE] = { .type = NLA_U8, },
        [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
};
static int parse_nl_config(struct genl_info *info,
                           struct fou_cfg *cfg)
        memset(cfg, 0, sizeof(*cfg));

        cfg->udp_config.family = AF_INET;

        if (info->attrs[FOU_ATTR_AF]) {
                u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

                        cfg->udp_config.ipv6_v6only = 1;
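                        /* Presumably v6-only so that an AF_INET FOU listener
                         * can be configured on the same port independently.
                         */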
                        return -EAFNOSUPPORT;

                cfg->udp_config.family = family;

        if (info->attrs[FOU_ATTR_PORT]) {
                __be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);

                cfg->udp_config.local_udp_port = port;
        }

        if (info->attrs[FOU_ATTR_IPPROTO])
                cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

        if (info->attrs[FOU_ATTR_TYPE])
                cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

        if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
                cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

        return 0;
static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
        struct net *net = genl_info_net(info);
        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_create(net, &cfg, NULL);

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
        struct net *net = genl_info_net(info);
        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_destroy(net, &cfg);
static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
        if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
            nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
            nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
            nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
                return -1;

        if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
                if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
                        return -1;

        return 0;

static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
                         u32 flags, struct sk_buff *skb, u8 cmd)
        hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
        if (!hdr)
                return -ENOMEM;

        if (fou_fill_info(fou, skb) < 0)
                goto nla_put_failure;

        genlmsg_end(skb, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
        struct net *net = genl_info_net(info);
        struct fou_net *fn = net_generic(net, fou_net_id);

        ret = parse_nl_config(info, &cfg);

        port = cfg.udp_config.local_udp_port;

        family = cfg.udp_config.family;
        if (family != AF_INET && family != AF_INET6)
                return -EINVAL;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fout, &fn->fou_list, list) {
                if (port == fout->port && family == fout->family) {
                        ret = fou_dump_info(fout, info->snd_portid,
                                            info->snd_seq, 0, msg,
        mutex_unlock(&fn->fou_lock);

        return genlmsg_reply(msg, info);
static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
        struct net *net = sock_net(skb->sk);
        struct fou_net *fn = net_generic(net, fou_net_id);

        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fout, &fn->fou_list, list) {
                if (idx++ < cb->args[0])
                        continue;
                ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
                                    cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                    skb, FOU_CMD_GET);
                if (ret)
                        break;
        }
        mutex_unlock(&fn->fou_lock);
static const struct genl_ops fou_nl_ops[] = {
        {
                .cmd = FOU_CMD_ADD,
                .doit = fou_nl_cmd_add_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = FOU_CMD_DEL,
                .doit = fou_nl_cmd_rm_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = FOU_CMD_GET,
                .doit = fou_nl_cmd_get_port,
                .dumpit = fou_nl_dump,
                .policy = fou_nl_policy,
        },
};
size_t fou_encap_hlen(struct ip_tunnel_encap *e)
        return sizeof(struct udphdr);
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
        size_t len;
        bool need_priv = false;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);

        if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
                len += GUE_PLEN_REMCSUM;
                need_priv = true;
        }

        len += need_priv ? GUE_LEN_PRIV : 0;

        return len;
EXPORT_SYMBOL(gue_encap_hlen);

static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
                          struct flowi4 *fl4, u8 *protocol, __be16 sport)
        struct udphdr *uh;

        skb_push(skb, sizeof(struct udphdr));
        skb_reset_transport_header(skb);

        uh = udp_hdr(skb);

        uh->dest = e->dport;
        uh->source = sport;
        uh->len = htons(skb->len);
        udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
                     fl4->saddr, fl4->daddr, skb->len);
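
        /* The caller's IP protocol is rewritten to UDP below; the original
         * inner protocol now travels inside the FOU/GUE encapsulation.
         */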
        *protocol = IPPROTO_UDP;
int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                       u8 *protocol, __be16 *sport, int type)
        err = iptunnel_handle_offloads(skb, type);
        if (err)
                return err;

        *sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                                skb, 0, 0, false);
EXPORT_SYMBOL(__fou_build_header);

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                     u8 *protocol, struct flowi4 *fl4)
        int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
                                                       SKB_GSO_UDP_TUNNEL;

        err = __fou_build_header(skb, e, protocol, &sport, type);
        if (err)
                return err;

        fou_build_udp(skb, e, fl4, protocol, sport);
EXPORT_SYMBOL(fou_build_header);
int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                       u8 *protocol, __be16 *sport, int type)
        struct guehdr *guehdr;
        size_t hdrlen, optlen = 0;
        void *data;
        bool need_priv = false;
        int err;

        if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
            skb->ip_summed == CHECKSUM_PARTIAL) {
                optlen += GUE_PLEN_REMCSUM;
                type |= SKB_GSO_TUNNEL_REMCSUM;
                need_priv = true;
        }

        optlen += need_priv ? GUE_LEN_PRIV : 0;

        err = iptunnel_handle_offloads(skb, type);
        if (err)
                return err;

        /* Get source port (based on flow hash) before skb_push */
        *sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                                skb, 0, 0, false);

        hdrlen = sizeof(struct guehdr) + optlen;

        skb_push(skb, hdrlen);

        guehdr = (struct guehdr *)skb->data;

        guehdr->hlen = optlen >> 2;
        guehdr->flags = 0;
        guehdr->proto_ctype = *protocol;

        data = &guehdr[1];

        if (need_priv) {
                __be32 *flags = data;

                guehdr->flags |= GUE_FLAG_PRIV;
                *flags = 0;
                data += GUE_LEN_PRIV;

                if (type & SKB_GSO_TUNNEL_REMCSUM) {
                        u16 csum_start = skb_checksum_start_offset(skb);
                        __be16 *pd = data;

                        if (csum_start < hdrlen)
                                return -EINVAL;
                        csum_start -= hdrlen;
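                        /* pd[0] is the checksum start and pd[1] the offset of
                         * the checksum field, both relative to the end of the
                         * GUE header (i.e. the start of the encapsulated
                         * packet).
                         */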
                        pd[0] = htons(csum_start);
                        pd[1] = htons(csum_start + skb->csum_offset);

                        if (!skb_is_gso(skb)) {
                                skb->ip_summed = CHECKSUM_NONE;
                                skb->encapsulation = 0;
                        }

                        *flags |= GUE_PFLAG_REMCSUM;
                        data += GUE_PLEN_REMCSUM;
                }
        }

        return 0;
EXPORT_SYMBOL(__gue_build_header);

int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                     u8 *protocol, struct flowi4 *fl4)
        int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
                                                       SKB_GSO_UDP_TUNNEL;

        err = __gue_build_header(skb, e, protocol, &sport, type);
        if (err)
                return err;

        fou_build_udp(skb, e, fl4, protocol, sport);
EXPORT_SYMBOL(gue_build_header);
#ifdef CONFIG_NET_FOU_IP_TUNNELS
static const struct ip_tunnel_encap_ops fou_iptun_ops = {
        .encap_hlen = fou_encap_hlen,
        .build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
        .encap_hlen = gue_encap_hlen,
        .build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
        ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        if (ret < 0) {
                pr_err("can't add fou ops\n");
                return ret;
        }

        ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
        if (ret < 0) {
                pr_err("can't add gue ops\n");
                ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        }

static void ip_tunnel_encap_del_fou_ops(void)
        ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);

#else

static int ip_tunnel_encap_add_fou_ops(void)
static void ip_tunnel_encap_del_fou_ops(void)

#endif
static __net_init int fou_init_net(struct net *net)
        struct fou_net *fn = net_generic(net, fou_net_id);

        INIT_LIST_HEAD(&fn->fou_list);
        mutex_init(&fn->fou_lock);
        return 0;

static __net_exit void fou_exit_net(struct net *net)
        struct fou_net *fn = net_generic(net, fou_net_id);
        struct fou *fou, *next;

        /* Close all the FOU sockets */
        mutex_lock(&fn->fou_lock);
        list_for_each_entry_safe(fou, next, &fn->fou_list, list)
                fou_release(fou);
        mutex_unlock(&fn->fou_lock);

static struct pernet_operations fou_net_ops = {
        .init = fou_init_net,
        .exit = fou_exit_net,
        .id = &fou_net_id,
        .size = sizeof(struct fou_net),
};
static int __init fou_init(void)
        ret = register_pernet_device(&fou_net_ops);

        ret = genl_register_family_with_ops(&fou_nl_family,
                                            fou_nl_ops);

        ret = ip_tunnel_encap_add_fou_ops();

        genl_unregister_family(&fou_nl_family);
        unregister_pernet_device(&fou_net_ops);

static void __exit fou_fini(void)
        ip_tunnel_encap_del_fou_ops();
        genl_unregister_family(&fou_nl_family);
        unregister_pernet_device(&fou_net_ops);

module_init(fou_init);
module_exit(fou_fini);

MODULE_LICENSE("GPL");