/*
 *	Internet Control Message Protocol (ICMPv6)
 *	Linux INET6 implementation
 *
 *	Based on net/ipv4/icmp.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *
 *	Andi Kleen		:	exception handling
 *	Andi Kleen		:	added rate limits; never reply to an ICMP error,
 *					added more length checks and other fixes.
 *	yoshfuji		:	ensure to send parameter problem for
 *					fragments.
 *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
 *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
 *	Kazunori MIYAZAWA @USAGI:	change output process to use ip6_append_data
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/icmpv6.h>

#include <net/ip6_checksum.h>
#include <net/protocol.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>
/*
 *	The ICMP socket(s). This is the most convenient way to flow control
 *	our ICMP output as well as maintain a clean interface throughout
 *	all layers. All Socketless IP sends will soon be gone.
 *
 *	On SMP we have one ICMP socket per-cpu.
 */
static inline struct sock *icmpv6_sk(struct net *net)
{
	return net->ipv6.icmp_sk[smp_processor_id()];
}
static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);

	if (type == ICMPV6_PKT_TOOBIG)
		ip6_update_pmtu(skb, net, info, 0, 0);
	else if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, 0, 0);
}
static int icmpv6_rcv(struct sk_buff *skb);

static const struct inet6_protocol icmpv6_protocol = {
	.handler	=	icmpv6_rcv,
	.err_handler	=	icmpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
	struct sock *sk;

	local_bh_disable();

	sk = icmpv6_sk(net);
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path (f.e. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		local_bh_enable();
		return NULL;
	}
	return sk;
}

static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
	spin_unlock_bh(&sk->sk_lock.slock);
}
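/*
 * Note: icmpv6_sk() relies on smp_processor_id(), so the per-cpu socket
 * must be picked with bottom halves disabled, as icmpv6_xmit_lock() does.
 * The trylock (rather than a plain lock) avoids a deadlock when the
 * transmit path invoked while the lock is held generates another ICMPv6
 * error (e.g. a tunnel signalling dst_link_failure()) on the same CPU.
 */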
/*
 *	Figure out whether we may reply to this packet with an ICMP error.
 *
 *	We do not reply if:
 *	- it was an ICMP error message.
 *	- it is truncated, so that it is not known whether the protocol is
 *	  ICMPV6 (i.e. it ends in the middle of some exthdr).
 */
static bool is_ineligible(const struct sk_buff *skb)
{
	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
	int len = skb->len - ptr;
	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	if (len < 0)
		return true;

	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
	if (ptr < 0)
		return false;
	if (nexthdr == IPPROTO_ICMPV6) {
		u8 _type, *tp;

		tp = skb_header_pointer(skb,
			ptr+offsetof(struct icmp6hdr, icmp6_type),
			sizeof(_type), &_type);
		if (tp == NULL ||
		    !(*tp & ICMPV6_INFOMSG_MASK))
			return true;
	}
	return false;
}
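/*
 * Example: a quoted ICMPV6_DEST_UNREACH (type 1, ICMPV6_INFOMSG_MASK clear)
 * makes the packet ineligible, while a quoted ICMPV6_ECHO_REQUEST (type 128,
 * informational bit set) does not -- this is the RFC 4443 rule that an
 * ICMPv6 error must never be generated in response to another ICMPv6 error.
 */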
/*
 * Check the ICMP output rate limit
 */
static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
				      struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct net *net = sock_net(sk);
	bool res = false;

	/* Informational messages are not limited. */
	if (type & ICMPV6_INFOMSG_MASK)
		return true;

	/* Do not limit pmtu discovery, it would break it. */
	if (type == ICMPV6_PKT_TOOBIG)
		return true;

	/*
	 * Look up the output route.
	 * XXX: perhaps the expire for routing entries cloned by
	 * this lookup should be more aggressive (not longer than timeout).
	 */
	dst = ip6_route_output(net, sk, fl6);
	if (dst->error) {
		IP6_INC_STATS(net, ip6_dst_idev(dst),
			      IPSTATS_MIB_OUTNOROUTES);
	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
		res = true;
	} else {
		struct rt6_info *rt = (struct rt6_info *)dst;
		int tmo = net->ipv6.sysctl.icmpv6_time;
		struct inet_peer *peer;

		/* Give more bandwidth to wider prefixes. */
		if (rt->rt6i_dst.plen < 128)
			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);

		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
		res = inet_peer_xrlim_allow(peer, tmo);
		if (peer)
			inet_putpeer(peer);
	}
	dst_release(dst);
	return res;
}
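/*
 * Worked example of the prefix scaling above: icmpv6_time (the
 * net.ipv6.icmp.ratelimit sysctl, nominally 1000 ms by default) is the
 * per-peer token interval. For a /128 host route tmo is unchanged; for a
 * /64 it is shifted right by (128 - 64) >> 5 = 2, i.e. a quarter of the
 * interval; for a /0 default route by 4, i.e. one sixteenth -- so errors
 * towards wider (shorter) prefixes are allowed proportionally more often.
 */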
/*
 * An inline helper for the "simple" if statement below:
 * checks whether a parameter problem report is caused by an
 * unrecognized IPv6 option whose Option Type has the
 * highest-order two bits set to 10.
 */
static bool opt_unrec(struct sk_buff *skb, __u32 offset)
{
	u8 _optval, *op;

	offset += skb_network_offset(skb);
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	if (op == NULL)
		return true;
	return (*op & 0xC0) == 0x80;
}
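/*
 * Per RFC 2460 sect. 4.2, an Option Type whose two high-order bits are 10
 * (values 0x80-0xBF) means "discard the packet and send a Parameter
 * Problem, Code 2, message even if the destination was multicast".
 * That is exactly the multicast exception icmp6_send() implements below.
 */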
static int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len)
{
	struct sk_buff *skb;
	struct icmp6hdr *icmp6h;
	int err = 0;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	icmp6h = icmp6_hdr(skb);
	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
	icmp6h->icmp6_cksum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		skb->csum = csum_partial(icmp6h,
					sizeof(struct icmp6hdr), skb->csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
						      &fl6->daddr,
						      len, fl6->flowi6_proto,
						      skb->csum);
	} else {
		__wsum tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);
		}

		tmp_csum = csum_partial(icmp6h,
					sizeof(struct icmp6hdr), tmp_csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
						      &fl6->daddr,
						      len, fl6->flowi6_proto,
						      tmp_csum);
	}
	ip6_push_pending_frames(sk);
out:
	return err;
}
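/*
 * Checksum assembly note: ip6_append_data() left a partial checksum of each
 * queued fragment's payload in skb->csum. The ICMPv6 header is summed on
 * top of that (csum_partial), the per-skb sums are combined with csum_add()
 * when the message spans several buffers, and csum_ipv6_magic() finally
 * folds in the IPv6 pseudo-header to produce icmp6_cksum.
 */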
struct icmpv6_msg {
	struct sk_buff	*skb;
	int		offset;
	uint8_t		type;
};

static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
	struct sk_buff *org_skb = msg->skb;
	__wsum csum = 0;

	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
				      to, len, csum);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (!(msg->type & ICMPV6_INFOMSG_MASK))
		nf_ct_attach(skb, org_skb);
	return 0;
}
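/*
 * This is the getfrag callback handed to ip6_append_data(): it copies len
 * bytes of the offending packet (starting msg->offset into org_skb, plus
 * the per-fragment offset) into the reply while accumulating the checksum.
 * For error messages it also attaches the conntrack entry of the offending
 * packet to the reply, so connection tracking / NAT can associate the error
 * with the original flow.
 */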
#if IS_ENABLED(CONFIG_IPV6_MIP6)
static void mip6_addr_swap(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6_destopt_hao *hao;
	struct in6_addr tmp;
	int off;

	if (opt->dsthao) {
		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
		if (likely(off >= 0)) {
			hao = (struct ipv6_destopt_hao *)
					(skb_network_header(skb) + off);
			tmp = iph->saddr;
			iph->saddr = hao->addr;
			hao->addr = tmp;
		}
	}
}
#else
static inline void mip6_addr_swap(struct sk_buff *skb) {}
#endif
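/*
 * Mobile IPv6 note: a packet carrying a Home Address destination option has
 * the mobile node's care-of address in the IPv6 source field and its home
 * address in the HAO. Swapping them before icmp6_send() builds the reply
 * means the error (whose destination is taken from hdr->saddr) is routed
 * back to the home address rather than to the care-of address.
 */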
static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
					     struct sock *sk, struct flowi6 *fl6)
{
	struct dst_entry *dst, *dst2;
	struct flowi6 fl2;
	int err;

	err = ip6_dst_lookup(sk, &dst, fl6);

	/*
	 * We won't send icmp if the destination is known
	 * anycast.
	 */
	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: acast source\n");
		dst_release(dst);
		return ERR_PTR(-EINVAL);
	}

	/* No need to clone since we're just using its address. */
	dst2 = dst;

	dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);

	if (PTR_ERR(dst) == -EPERM)
		dst = NULL;

	err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
	if (err)
		goto relookup_failed;

	err = ip6_dst_lookup(sk, &dst2, &fl2);
	if (err)
		goto relookup_failed;

	dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
		goto relookup_failed;
/*
 *	Send an ICMP message in response to a packet in error
 */
static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
{
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *idev = NULL;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct sock *sk;
	struct ipv6_pinfo *np;
	const struct in6_addr *saddr = NULL;
	struct dst_entry *dst;
	struct icmp6hdr tmp_hdr;
	struct flowi6 fl6;
	struct icmpv6_msg msg;
	int iif = 0;
	int addr_type = 0;
	int len;
	int hlimit;
	int err = 0;

	if ((u8 *)hdr < skb->head ||
	    (skb->network_header + sizeof(*hdr)) > skb->tail)
		return;

	/*
	 *	Make sure we respect the rules
	 *	i.e. RFC 1885 2.4(e)
	 *	Rule (e.1) is enforced by not using icmp6_send
	 *	in any code that processes icmp errors.
	 */
	addr_type = ipv6_addr_type(&hdr->daddr);

	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
		saddr = &hdr->daddr;

	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))
			return;

		saddr = NULL;
	}

	addr_type = ipv6_addr_type(&hdr->saddr);

	if (__ipv6_addr_needs_scope_id(addr_type))
		iif = skb->dev->ifindex;

	/*
	 *	Must not send error if the source does not uniquely
	 *	identify a single node (RFC2463 Section 2.4).
	 *	We check unspecified / multicast addresses here,
	 *	and anycast addresses will be checked later.
	 */
	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: addr_any/mcast source\n");
		return;
	}

	/*
	 *	Never answer to an ICMP packet.
	 */
	if (is_ineligible(skb)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: no reply to icmp error\n");
		return;
	}

	mip6_addr_swap(skb);
	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_ICMPV6;
	fl6.daddr = hdr->saddr;
	if (saddr)
		fl6.saddr = *saddr;
	fl6.flowi6_oif = iif;
	fl6.fl6_icmp_type = type;
	fl6.fl6_icmp_code = code;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	sk = icmpv6_xmit_lock(net);
	if (sk == NULL)
		return;
	np = inet6_sk(sk);

	if (!icmpv6_xrlim_allow(sk, type, &fl6))
		goto out;

	tmp_hdr.icmp6_type = type;
	tmp_hdr.icmp6_code = code;
	tmp_hdr.icmp6_cksum = 0;
	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
	if (IS_ERR(dst))
		goto out;

	if (ipv6_addr_is_multicast(&fl6.daddr))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	msg.skb = skb;
	msg.offset = skb_network_offset(skb);
	msg.type = type;

	len = skb->len - msg.offset;
	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
	if (len < 0) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
		goto out_dst_release;
	}
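	/*
	 * Size note: the clamp above keeps the whole reply within IPV6_MIN_MTU
	 * (1280 bytes): 40 bytes of IPv6 header + 8 bytes of ICMPv6 header +
	 * at most 1232 bytes of the offending packet, matching RFC 4443's rule
	 * that an error message must not exceed the minimum IPv6 MTU.
	 */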
	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);

	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
			      len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr), hlimit,
			      np->tclass, NULL, &fl6, (struct rt6_info *)dst,
			      MSG_DONTWAIT, np->dontfrag);
	if (err) {
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
	} else {
		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
						 len + sizeof(struct icmp6hdr));
	}
	rcu_read_unlock();
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock(sk);
}
/* Slightly more convenient version of icmp6_send.
 */
void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
	icmp6_send(skb, ICMPV6_PARAMPROB, code, pos);
}
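/*
 * Typical caller: an extension-header parser that hits an option it cannot
 * process reports the offending octet with something like
 *	icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
 * where optoff is the offset of the option type byte from the start of the
 * IPv6 header; it ends up in tmp_hdr.icmp6_pointer in icmp6_send() above.
 */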
static void icmpv6_echo_reply(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	struct inet6_dev *idev;
	struct ipv6_pinfo *np;
	const struct in6_addr *saddr = NULL;
	struct icmp6hdr *icmph = icmp6_hdr(skb);
	struct icmp6hdr tmp_hdr;
	struct flowi6 fl6;
	struct icmpv6_msg msg;
	struct dst_entry *dst;
	int err = 0;
	int hlimit;

	saddr = &ipv6_hdr(skb)->daddr;

	if (!ipv6_unicast_destination(skb))
		saddr = NULL;

	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_ICMPV6;
	fl6.daddr = ipv6_hdr(skb)->saddr;
	if (saddr)
		fl6.saddr = *saddr;
	fl6.flowi6_oif = skb->dev->ifindex;
	fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
	sk = icmpv6_xmit_lock(net);
	if (sk == NULL)
		return;
	np = inet6_sk(sk);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl6);
	if (err)
		goto out;
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
	if (IS_ERR(dst))
		goto out;

	if (ipv6_addr_is_multicast(&fl6.daddr))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	idev = __in6_dev_get(skb->dev);

	msg.skb = skb;
	msg.offset = 0;
	msg.type = ICMPV6_ECHO_REPLY;

	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
				sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
				(struct rt6_info *)dst, MSG_DONTWAIT,
				np->dontfrag);

	if (err) {
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
	} else {
		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
						 skb->len + sizeof(struct icmp6hdr));
	}
	dst_release(dst);
out:
	icmpv6_xmit_unlock(sk);
}
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
	const struct inet6_protocol *ipprot;
	int inner_offset;
	__be16 frag_off;
	u8 nexthdr;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return;

	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
	if (ipv6_ext_hdr(nexthdr)) {
		/* now skip over extension headers */
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
						&nexthdr, &frag_off);
		if (inner_offset < 0)
			return;
	} else {
		inner_offset = sizeof(struct ipv6hdr);
	}

	/* Check the header, including 8 bytes of the inner protocol header. */
	if (!pskb_may_pull(skb, inner_offset + 8))
		return;

	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
	 * Without this we will not be able, e.g., to make source routed
	 * pmtu discovery work.
	 * The corresponding argument (opt) to the notifiers is already added.
	 */

	rcu_read_lock();
	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
	rcu_read_unlock();

	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
}
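/*
 * The inner_offset + 8 pull above guarantees the first 8 bytes of the
 * quoted transport header are linear; for TCP and UDP these carry the port
 * numbers, which is what the protocol err_handler and raw6_icmp_error()
 * need in order to match the error against a socket.
 */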
/*
 *	Handle icmp messages
 */
static int icmpv6_rcv(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct inet6_dev *idev = __in6_dev_get(dev);
	const struct in6_addr *saddr, *daddr;
	struct icmp6hdr *hdr;
	u8 type;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		struct sec_path *sp = skb_sec_path(skb);
		int nh;

		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
				 XFRM_STATE_ICMP))
			goto drop_no_count;

		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
			goto drop_no_count;

		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*hdr));

		if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
			goto drop_no_count;

		skb_set_network_header(skb, nh);
	}

	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	/* Perform checksum. */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
				     skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
					     IPPROTO_ICMPV6, 0));
		if (__skb_checksum_complete(skb)) {
			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n",
				       saddr, daddr);
			goto csum_error;
		}
	}

	if (!pskb_pull(skb, sizeof(*hdr)))
		goto discard_it;

	hdr = icmp6_hdr(skb);

	type = hdr->icmp6_type;

	ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type);
	switch (type) {
	case ICMPV6_ECHO_REQUEST:
		icmpv6_echo_reply(skb);
		break;

	case ICMPV6_ECHO_REPLY:
		/* we couldn't care less */
		break;

	case ICMPV6_PKT_TOOBIG:
		/* BUGGG_FUTURE: if the packet contains a rthdr, we cannot update
		 * the standard destination cache. It seems only an "advanced"
		 * destination cache would allow us to solve this problem.
		 */
		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto discard_it;
		hdr = icmp6_hdr(skb);

		/*
		 *	Drop through to notify
		 */

	case ICMPV6_DEST_UNREACH:
	case ICMPV6_TIME_EXCEED:
	case ICMPV6_PARAMPROB:
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
		break;

	case NDISC_ROUTER_SOLICITATION:
	case NDISC_ROUTER_ADVERTISEMENT:
	case NDISC_NEIGHBOUR_SOLICITATION:
	case NDISC_NEIGHBOUR_ADVERTISEMENT:
	case NDISC_REDIRECT:
		ndisc_rcv(skb);
		break;
	case ICMPV6_MGM_QUERY:
		igmp6_event_query(skb);
		break;

	case ICMPV6_MGM_REPORT:
		igmp6_event_report(skb);
		break;

	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_NI_QUERY:
	case ICMPV6_NI_REPLY:
	case ICMPV6_MLD2_REPORT:
	case ICMPV6_DHAAD_REQUEST:
	case ICMPV6_DHAAD_REPLY:
	case ICMPV6_MOBILE_PREFIX_SOL:
	case ICMPV6_MOBILE_PREFIX_ADV:
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");

		if (type & ICMPV6_INFOMSG_MASK)
			break;

		/*
		 * error of unknown type,
		 * must pass to upper level
		 */
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
	}

	kfree_skb(skb);
	return 0;

csum_error:
	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
discard_it:
	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS);
drop_no_count:
	kfree_skb(skb);
	return 0;
}
void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
		      u8 type,
		      const struct in6_addr *saddr,
		      const struct in6_addr *daddr,
		      int oif)
{
	memset(fl6, 0, sizeof(*fl6));
	fl6->saddr = *saddr;
	fl6->daddr = *daddr;
	fl6->flowi6_proto	= IPPROTO_ICMPV6;
	fl6->fl6_icmp_type	= type;
	fl6->fl6_icmp_code	= 0;
	fl6->flowi6_oif		= oif;
	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
}
/*
 * Special lock-class for __icmpv6_sk:
 */
static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
static int __net_init icmpv6_sk_init(struct net *net)
{
	struct sock *sk;
	int err, i, j;

	net->ipv6.icmp_sk =
		kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
	if (net->ipv6.icmp_sk == NULL)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		err = inet_ctl_sock_create(&sk, PF_INET6,
					   SOCK_RAW, IPPROTO_ICMPV6, net);
		if (err < 0) {
			pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
			       err);
			goto fail;
		}

		net->ipv6.icmp_sk[i] = sk;

		/* Split off their lock-class, because sk->sk_dst_lock
		 * gets used from softirqs, which is safe for
		 * __icmpv6_sk (because those never get directly used
		 * via userspace syscalls), but unsafe for normal sockets.
		 */
		lockdep_set_class(&sk->sk_dst_lock,
				  &icmpv6_socket_sk_dst_lock_key);

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff struct overhead.
		 */
		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
	}
	return 0;

fail:
	for (j = 0; j < i; j++)
		inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
	kfree(net->ipv6.icmp_sk);
	return err;
}
static void __net_exit icmpv6_sk_exit(struct net *net)
{
	int i;

	for_each_possible_cpu(i) {
		inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
	}
	kfree(net->ipv6.icmp_sk);
}

static struct pernet_operations icmpv6_sk_ops = {
	.init = icmpv6_sk_init,
	.exit = icmpv6_sk_exit,
};
int __init icmpv6_init(void)
	err = register_pernet_subsys(&icmpv6_sk_ops);
	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
	err = inet6_register_icmp_sender(icmp6_send);
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
	pr_err("Failed to register ICMP6 protocol\n");
	unregister_pernet_subsys(&icmpv6_sk_ops);
void icmpv6_cleanup(void)
{
	inet6_unregister_icmp_sender(icmp6_send);
	unregister_pernet_subsys(&icmpv6_sk_ops);
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}
static const struct icmp6_err {
	int err;
	int fatal;
} tab_unreach[] = {
	{	/* ADM_PROHIBITED */
	{	/* Was NOT_NEIGHBOUR, now reserved */
int icmpv6_err_convert(u8 type, u8 code, int *err)
	case ICMPV6_DEST_UNREACH:
		if (code <= ICMPV6_PORT_UNREACH) {
			*err  = tab_unreach[code].err;
			fatal = tab_unreach[code].fatal;

	case ICMPV6_PKT_TOOBIG:

	case ICMPV6_PARAMPROB:

	case ICMPV6_TIME_EXCEED:

EXPORT_SYMBOL(icmpv6_err_convert);
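/*
 * icmpv6_err_convert() maps an ICMPv6 error (type/code) to an errno for the
 * transport layer and returns whether the error is fatal for the socket;
 * e.g. ICMPV6_PKT_TOOBIG is reported as EMSGSIZE but is not fatal (PMTU
 * discovery carries on), while an administratively prohibited destination
 * unreachable is a hard error.
 */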
ctl_table ipv6_icmp_table_template[] = {
	{
		.procname	= "ratelimit",
		.data		= &init_net.ipv6.sysctl.icmpv6_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{ },
};
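/*
 * This is exposed as net.ipv6.icmp.ratelimit, in milliseconds between
 * error replies towards one peer (see icmpv6_xrlim_allow() above);
 * e.g. "sysctl -w net.ipv6.icmp.ratelimit=1000" allows one error per
 * second per destination. proc_dointvec_ms_jiffies converts the value
 * to jiffies for icmpv6_time.
 */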
struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_icmp_table_template,
			sizeof(ipv6_icmp_table_template),
			GFP_KERNEL);

	if (table)
		table[0].data = &net->ipv6.sysctl.icmpv6_time;

	return table;
}