// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	common UDP/RAW code
 *	Linux INET6 implementation
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/icmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/tcp_states.h>
#include <net/dsfield.h>
#include <net/sock_reuseport.h>

#include <linux/errqueue.h>
#include <linux/uaccess.h>

static bool ipv6_mapped_addr_any(const struct in6_addr *a)
{
	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
}

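/* Fill a flow key (struct flowi6) from the state of a connected datagram
 * socket: protocol, addresses, ports, flow label and the outgoing interface
 * chosen from the bound device, the sticky IPV6_PKTINFO index, or the
 * multicast/unicast interface options.
 */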
static void ip6_datagram_flow_key_init(struct flowi6 *fl6,
				       const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	int oif = sk->sk_bound_dev_if;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = sk->sk_protocol;
	fl6->daddr = sk->sk_v6_daddr;
	fl6->saddr = np->saddr;
	fl6->flowi6_mark = sk->sk_mark;
	fl6->fl6_dport = inet->inet_dport;
	fl6->fl6_sport = inet->inet_sport;
	fl6->flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
	fl6->flowi6_uid = sk->sk_uid;

	if (!oif)
		oif = np->sticky_pktinfo.ipi6_ifindex;

	if (!oif) {
		if (ipv6_addr_is_multicast(&fl6->daddr))
			oif = READ_ONCE(np->mcast_oif);
		else
			oif = READ_ONCE(np->ucast_oif);
	}

	fl6->flowi6_oif = oif;
	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
}

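/* (Re)build and cache the route for a connected datagram socket from its
 * current flow key.  When fix_sk_saddr is true, also fill in the source and
 * receive addresses chosen by the route lookup and rehash the socket if its
 * bound address changed.
 */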
int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
{
	struct ip6_flowlabel *flowlabel = NULL;
	struct in6_addr *final_p, final;
	struct ipv6_txoptions *opt;
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (inet6_test_bit(SNDFLOW, sk) &&
	    (np->flow_label & IPV6_FLOWLABEL_MASK)) {
		flowlabel = fl6_sock_lookup(sk, np->flow_label);
		if (IS_ERR(flowlabel))
			return -EINVAL;
	}
	ip6_datagram_flow_key_init(&fl6, sk);

	rcu_read_lock();
	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
	final_p = fl6_update_dst(&fl6, opt, &final);
	rcu_read_unlock();

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto out;
	}

	if (fix_sk_saddr) {
		if (ipv6_addr_any(&np->saddr))
			np->saddr = fl6.saddr;

		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
			sk->sk_v6_rcv_saddr = fl6.saddr;
			inet->inet_rcv_saddr = LOOPBACK4_IPV6;
			if (sk->sk_prot->rehash)
				sk->sk_prot->rehash(sk);
		}
	}

	ip6_sk_dst_store_flow(sk, dst, &fl6);

out:
	fl6_sock_release(flowlabel);
	return err;
}

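/* release_cb for datagram sockets: if the cached route was invalidated while
 * the socket was owned by the user, refresh it.  v4-mapped destinations are
 * handled by the IPv4 path and are skipped here.
 */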
void ip6_datagram_release_cb(struct sock *sk)
{
	struct dst_entry *dst;

	if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return;

	rcu_read_lock();
	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	ip6_datagram_dst_update(sk, false);
}
EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);

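/* connect() for datagram (UDP/raw) sockets.  Handles AF_INET and v4-mapped
 * destinations by falling back to the IPv4 path, validates scope ids for
 * link-local destinations, and installs the new peer only if a route to it
 * can be obtained.  The caller must hold the socket lock.
 */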
int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *daddr, old_daddr;
	__be32 fl6_flowlabel = 0;
	__be32 old_fl6_flowlabel;
	__be16 old_dport;
	int addr_type;
	int err;

	if (usin->sin6_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		err = __ip4_datagram_connect(sk, uaddr, addr_len);
		goto ipv4_connected;
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	if (inet6_test_bit(SNDFLOW, sk))
		fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;

	if (ipv6_addr_any(&usin->sin6_addr)) {
		/* connect to self */
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	daddr = &usin->sin6_addr;

	if (addr_type & IPV6_ADDR_MAPPED) {
		struct sockaddr_in sin;

		if (ipv6_only_sock(sk)) {
			err = -ENETUNREACH;
			goto out;
		}
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = daddr->s6_addr32[3];
		sin.sin_port = usin->sin6_port;

		err = __ip4_datagram_connect(sk,
					     (struct sockaddr *) &sin,
					     sizeof(sin));

ipv4_connected:
		if (err)
			goto out;

		ipv6_addr_set_v4mapped(inet->inet_daddr, &sk->sk_v6_daddr);

		if (ipv6_addr_any(&np->saddr) ||
		    ipv6_mapped_addr_any(&np->saddr))
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);

		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) ||
		    ipv6_mapped_addr_any(&sk->sk_v6_rcv_saddr)) {
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &sk->sk_v6_rcv_saddr);
			if (sk->sk_prot->rehash)
				sk->sk_prot->rehash(sk);
		}

		goto out;
	}

	if (__ipv6_addr_needs_scope_id(addr_type)) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id)) {
				err = -EINVAL;
				goto out;
			}
			WRITE_ONCE(sk->sk_bound_dev_if, usin->sin6_scope_id);
		}

		if (!sk->sk_bound_dev_if && (addr_type & IPV6_ADDR_MULTICAST))
			WRITE_ONCE(sk->sk_bound_dev_if, READ_ONCE(np->mcast_oif));

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if) {
			err = -EINVAL;
			goto out;
		}
	}

	/* save the current peer information before updating it */
	old_daddr = sk->sk_v6_daddr;
	old_fl6_flowlabel = np->flow_label;
	old_dport = inet->inet_dport;

	sk->sk_v6_daddr = *daddr;
	np->flow_label = fl6_flowlabel;
	inet->inet_dport = usin->sin6_port;

	/*
	 *	Check for a route to destination and obtain the
	 *	destination cache for it.
	 */
	err = ip6_datagram_dst_update(sk, true);
	if (err) {
		/* Restore the socket peer info, to keep it consistent with
		 * the old socket state
		 */
		sk->sk_v6_daddr = old_daddr;
		np->flow_label = old_fl6_flowlabel;
		inet->inet_dport = old_dport;
		goto out;
	}

	reuseport_has_conns_set(sk);
	sk->sk_state = TCP_ESTABLISHED;
	sk_set_txhash(sk);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__ip6_datagram_connect);

int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	int res;

	lock_sock(sk);
	res = __ip6_datagram_connect(sk, uaddr, addr_len);
	release_sock(sk);
	return res;
}
EXPORT_SYMBOL_GPL(ip6_datagram_connect);

int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
				 int addr_len)
{
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, uaddr);
	if (sin6->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;
	return ip6_datagram_connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL_GPL(ip6_datagram_connect_v6_only);

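/* RFC 4884 allows ICMPv6 Time Exceeded and Destination Unreachable messages
 * to carry extension objects after the original datagram; locate them using
 * the length field in the ICMPv6 header and report them through the
 * extended-error data handed to userspace.
 */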
static void ipv6_icmp_error_rfc4884(const struct sk_buff *skb,
				    struct sock_ee_data_rfc4884 *out)
{
	switch (icmp6_hdr(skb)->icmp6_type) {
	case ICMPV6_TIME_EXCEED:
	case ICMPV6_DEST_UNREACH:
		ip_icmp_error_rfc4884(skb, out, sizeof(struct icmp6hdr),
				      icmp6_hdr(skb)->icmp6_datagram_len * 8);
	}
}

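/* Queue a received ICMPv6 error on the socket error queue so that userspace
 * can read it with recvmsg(MSG_ERRQUEUE).  Errors are only queued for
 * sockets that enabled the IPV6_RECVERR option.
 */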
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
		     __be16 port, u32 info, u8 *payload)
{
	struct icmp6hdr *icmph = icmp6_hdr(skb);
	struct sock_exterr_skb *serr;

	if (!inet6_test_bit(RECVERR6, sk))
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_IPV6);

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6;
	serr->ee.ee_type = icmph->icmp6_type;
	serr->ee.ee_code = icmph->icmp6_code;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&(((struct ipv6hdr *)(icmph + 1))->daddr) -
			    skb_network_header(skb);
	serr->port = port;

	__skb_pull(skb, payload - skb->data);

	if (inet6_test_bit(RECVERR6_RFC4884, sk))
		ipv6_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);

	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ipv6_icmp_error);

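/* Queue a locally generated error (no ICMPv6 packet was received) on the
 * error queue, e.g. a path MTU notification produced by the local stack.
 * A minimal IPv6 header is built so that the reporting code can find the
 * destination address.
 */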
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
{
	struct sock_exterr_skb *serr;
	struct ipv6hdr *iph;
	struct sk_buff *skb;

	if (!inet6_test_bit(RECVERR6, sk))
		return;

	skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_IPV6);

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);
	iph->daddr = fl6->daddr;
	ip6_flow_hdr(iph, 0, 0);

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = 0;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = fl6->fl6_dport;

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}

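/* Record a path MTU notification for sockets using IPV6_RECVPATHMTU: stash a
 * dummy skb carrying the MTU and destination in np->rxpmtu, replacing any
 * previously stored notification, for later retrieval by ipv6_recv_rxpmtu().
 */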
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *iph;
	struct sk_buff *skb;
	struct ip6_mtuinfo *mtu_info;

	if (!np->rxopt.bits.rxpmtu)
		return;

	skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);
	iph->daddr = fl6->daddr;

	mtu_info = IP6CBMTU(skb);

	mtu_info->ip6m_mtu = mtu;
	mtu_info->ip6m_addr.sin6_family = AF_INET6;
	mtu_info->ip6m_addr.sin6_port = 0;
	mtu_info->ip6m_addr.sin6_flowinfo = 0;
	mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif;
	mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr;

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	skb = xchg(&np->rxpmtu, skb);
	kfree_skb(skb);
}

/* For some errors we have valid addr_offset even with zero payload and
 * zero port. Also, addr_offset should be supported if port is set.
 */
static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
{
	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
	       serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
}

/* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
 *
 * At one point, excluding local errors was a quick test to identify icmp/icmp6
 * errors. This is no longer true, but the test remained, so the v6 stack,
 * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
 */
static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
				      struct sock_exterr_skb *serr)
{
	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
	    serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6)
		return true;

	if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
		return false;

	if (!IP6CB(skb)->iif)
		return false;

	return true;
}

/*
 *	Handle MSG_ERRQUEUE
 */
int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name);
	struct {
		struct sock_extended_err ee;
		struct sockaddr_in6	 offender;
	} errhdr;
	int err;
	int copied;

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}
	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);

	if (sin && ipv6_datagram_support_addr(serr)) {
		const unsigned char *nh = skb_network_header(skb);
		sin->sin6_family = AF_INET6;
		sin->sin6_flowinfo = 0;
		sin->sin6_port = serr->port;
		if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct ipv6hdr *ip6h = container_of((struct in6_addr *)(nh + serr->addr_offset),
								  struct ipv6hdr, daddr);
			sin->sin6_addr = ip6h->daddr;
			if (inet6_test_bit(SNDFLOW, sk))
				sin->sin6_flowinfo = ip6_flowinfo(ip6h);
			sin->sin6_scope_id =
				ipv6_iface_scope_id(&sin->sin6_addr,
						    IP6CB(skb)->iif);
		} else {
			ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset),
					       &sin->sin6_addr);
			sin->sin6_scope_id = 0;
		}
		*addr_len = sizeof(*sin);
	}

	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
	sin = &errhdr.offender;
	memset(sin, 0, sizeof(*sin));

	if (ip6_datagram_support_cmsg(skb, serr)) {
		sin->sin6_family = AF_INET6;
		if (np->rxopt.all)
			ip6_datagram_recv_common_ctl(sk, msg, skb);
		if (skb->protocol == htons(ETH_P_IPV6)) {
			sin->sin6_addr = ipv6_hdr(skb)->saddr;
			if (np->rxopt.all)
				ip6_datagram_recv_specific_ctl(sk, msg, skb);
			sin->sin6_scope_id =
				ipv6_iface_scope_id(&sin->sin6_addr,
						    IP6CB(skb)->iif);
		} else {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin->sin6_addr);
			if (inet_cmsg_flags(inet_sk(sk)))
				ip_cmsg_recv(msg, skb);
		}
	}

	put_cmsg(msg, SOL_IPV6, IPV6_RECVERR, sizeof(errhdr), &errhdr);

	/* Now we could try to dump offended packet options */

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	consume_skb(skb);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ipv6_recv_error);

/*
 *	Handle IPV6_RECVPATHMTU
 */
int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
		     int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ip6_mtuinfo mtu_info;
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name);
	int err;
	int copied;

	err = -EAGAIN;
	skb = xchg(&np->rxpmtu, NULL);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	memcpy(&mtu_info, IP6CBMTU(skb), sizeof(mtu_info));

	if (sin) {
		sin->sin6_family = AF_INET6;
		sin->sin6_flowinfo = 0;
		sin->sin6_port = 0;
		sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
		sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
		*addr_len = sizeof(*sin);
	}

	put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);

	err = copied;

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}

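/* Emit the IPV6_PKTINFO control message for a received packet.  Works for
 * both native IPv6 packets and IPv4 packets on dual-stack sockets, where the
 * IPv4 destination is reported as a v4-mapped address.
 */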
void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg,
				  struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	bool is_ipv6 = skb->protocol == htons(ETH_P_IPV6);

	if (np->rxopt.bits.rxinfo) {
		struct in6_pktinfo src_info;

		if (is_ipv6) {
			src_info.ipi6_ifindex = IP6CB(skb)->iif;
			src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
		} else {
			src_info.ipi6_ifindex =
				PKTINFO_SKB_CB(skb)->ipi_ifindex;
			ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
					       &src_info.ipi6_addr);
		}

		if (src_info.ipi6_ifindex >= 0)
			put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO,
				 sizeof(src_info), &src_info);
	}
}

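/* Emit the remaining ancillary data requested via the IPV6_RECV* and legacy
 * RFC 2292 socket options: hop limit, traffic class, flow label, extension
 * headers, original destination address and fragment size.
 */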
void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
				    struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet6_skb_parm *opt = IP6CB(skb);
	unsigned char *nh = skb_network_header(skb);

	if (np->rxopt.bits.rxhlim) {
		int hlim = ipv6_hdr(skb)->hop_limit;
		put_cmsg(msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim);
	}

	if (np->rxopt.bits.rxtclass) {
		int tclass = ipv6_get_dsfield(ipv6_hdr(skb));
		put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
	}

	if (np->rxopt.bits.rxflow) {
		__be32 flowinfo = ip6_flowinfo((struct ipv6hdr *)nh);
		if (flowinfo)
			put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo);
	}

	/* HbH is allowed only once */
	if (np->rxopt.bits.hopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
		u8 *ptr = nh + sizeof(struct ipv6hdr);
		put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr);
	}

	if (opt->lastopt &&
	    (np->rxopt.bits.dstopts || np->rxopt.bits.srcrt)) {
		/*
		 * Silly enough, but we need to reparse in order to
		 * report extension headers (except for HbH)
		 * that appear after the destination options header.
		 *
		 * Also note that IPV6_RECVRTHDRDSTOPTS is NOT
		 * (and WILL NOT be) defined because
		 * IPV6_RECVDSTOPTS is more generic. --yoshfuji
		 */
		unsigned int off = sizeof(struct ipv6hdr);
		u8 nexthdr = ipv6_hdr(skb)->nexthdr;

		while (off <= opt->lastopt) {
			unsigned int len;
			u8 *ptr = nh + off;

			switch (nexthdr) {
			case IPPROTO_DSTOPTS:
				nexthdr = ptr[0];
				len = (ptr[1] + 1) << 3;
				if (np->rxopt.bits.dstopts)
					put_cmsg(msg, SOL_IPV6, IPV6_DSTOPTS, len, ptr);
				break;
			case IPPROTO_ROUTING:
				nexthdr = ptr[0];
				len = (ptr[1] + 1) << 3;
				if (np->rxopt.bits.srcrt)
					put_cmsg(msg, SOL_IPV6, IPV6_RTHDR, len, ptr);
				break;
			case IPPROTO_AH:
				nexthdr = ptr[0];
				len = (ptr[1] + 2) << 2;
				break;
			default:
				nexthdr = ptr[0];
				len = (ptr[1] + 1) << 3;
				break;
			}

			off += len;
		}
	}

	/* socket options in old style */
	if (np->rxopt.bits.rxoinfo) {
		struct in6_pktinfo src_info;
		src_info.ipi6_ifindex = opt->iif;
		src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
		put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
	}
	if (np->rxopt.bits.rxohlim) {
		int hlim = ipv6_hdr(skb)->hop_limit;
		put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
	}
	if (np->rxopt.bits.ohopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
		u8 *ptr = nh + sizeof(struct ipv6hdr);
		put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr);
	}
	if (np->rxopt.bits.odstopts && opt->dst0) {
		u8 *ptr = nh + opt->dst0;
		put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr);
	}
	if (np->rxopt.bits.osrcrt && opt->srcrt) {
		struct ipv6_rt_hdr *rthdr = (struct ipv6_rt_hdr *)(nh + opt->srcrt);
		put_cmsg(msg, SOL_IPV6, IPV6_2292RTHDR, (rthdr->hdrlen+1) << 3, rthdr);
	}
	if (np->rxopt.bits.odstopts && opt->dst1) {
		u8 *ptr = nh + opt->dst1;
		put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr);
	}
	if (np->rxopt.bits.rxorigdstaddr) {
		struct sockaddr_in6 sin6;
		__be16 _ports[2], *ports;

		ports = skb_header_pointer(skb, skb_transport_offset(skb),
					   sizeof(_ports), &_ports);
		if (ports) {
			/* All current transport protocols have the port numbers in the
			 * first four bytes of the transport header and this function is
			 * written with this assumption in mind.
			 */
			sin6.sin6_family = AF_INET6;
			sin6.sin6_addr = ipv6_hdr(skb)->daddr;
			sin6.sin6_port = ports[1];
			sin6.sin6_flowinfo = 0;
			sin6.sin6_scope_id =
				ipv6_iface_scope_id(&ipv6_hdr(skb)->daddr,
						    opt->iif);

			put_cmsg(msg, SOL_IPV6, IPV6_ORIGDSTADDR, sizeof(sin6), &sin6);
		}
	}
	if (np->rxopt.bits.recvfragsize && opt->frag_max_size) {
		int val = opt->frag_max_size;

		put_cmsg(msg, SOL_IPV6, IPV6_RECVFRAGSIZE, sizeof(val), &val);
	}
}

void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
			   struct sk_buff *skb)
{
	ip6_datagram_recv_common_ctl(sk, msg, skb);
	ip6_datagram_recv_specific_ctl(sk, msg, skb);
}
EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);

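/* Parse the ancillary data supplied with sendmsg(): per-packet source
 * address/interface, flow label, hop limit, traffic class, fragmentation
 * policy and extension headers.  The results are written into the flow key,
 * the ipcm6 cookie and the ipv6_txoptions supplied by the caller.  Returns 0
 * or a negative errno.
 */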
int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
			  struct msghdr *msg, struct flowi6 *fl6,
			  struct ipcm6_cookie *ipc6)
{
	struct in6_pktinfo *src_info;
	struct cmsghdr *cmsg;
	struct ipv6_rt_hdr *rthdr;
	struct ipv6_opt_hdr *hdr;
	struct ipv6_txoptions *opt = ipc6->opt;
	int len;
	int err = 0;

	for_each_cmsghdr(cmsg, msg) {
		int addr_type;

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto exit_f;
		}

		if (cmsg->cmsg_level == SOL_SOCKET) {
			err = __sock_cmsg_send(sk, cmsg, &ipc6->sockc);
			if (err)
				return err;
			continue;
		}

		if (cmsg->cmsg_level != SOL_IPV6)
			continue;

		switch (cmsg->cmsg_type) {
		case IPV6_PKTINFO:
		case IPV6_2292PKTINFO:
		    {
			struct net_device *dev = NULL;
			int src_idx;

			if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) {
				err = -EINVAL;
				goto exit_f;
			}

			src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			src_idx = src_info->ipi6_ifindex;

			if (src_idx) {
				if (fl6->flowi6_oif &&
				    src_idx != fl6->flowi6_oif &&
				    (READ_ONCE(sk->sk_bound_dev_if) != fl6->flowi6_oif ||
				     !sk_dev_equal_l3scope(sk, src_idx)))
					return -EINVAL;
				fl6->flowi6_oif = src_idx;
			}

			addr_type = __ipv6_addr_type(&src_info->ipi6_addr);

			rcu_read_lock();
			if (fl6->flowi6_oif) {
				dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
				if (!dev) {
					rcu_read_unlock();
					return -ENODEV;
				}
			} else if (addr_type & IPV6_ADDR_LINKLOCAL) {
				rcu_read_unlock();
				return -EINVAL;
			}

			if (addr_type != IPV6_ADDR_ANY) {
				int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
				if (!ipv6_can_nonlocal_bind(net, inet_sk(sk)) &&
				    !ipv6_chk_addr_and_flags(net, &src_info->ipi6_addr,
							     dev, !strict, 0,
							     IFA_F_TENTATIVE) &&
				    !ipv6_chk_acast_addr_src(net, dev,
							     &src_info->ipi6_addr))
					err = -EINVAL;
				else
					fl6->saddr = src_info->ipi6_addr;
			}

			rcu_read_unlock();

			if (err)
				goto exit_f;

			break;
		    }

		case IPV6_FLOWINFO:
			if (cmsg->cmsg_len < CMSG_LEN(4)) {
				err = -EINVAL;
				goto exit_f;
			}

			if (fl6->flowlabel&IPV6_FLOWINFO_MASK) {
				if ((fl6->flowlabel^*(__be32 *)CMSG_DATA(cmsg))&~IPV6_FLOWINFO_MASK) {
					err = -EINVAL;
					goto exit_f;
				}
			}
			fl6->flowlabel = IPV6_FLOWINFO_MASK & *(__be32 *)CMSG_DATA(cmsg);
			break;

		case IPV6_2292HOPOPTS:
		case IPV6_HOPOPTS:
			if (opt->hopopt || cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) {
				err = -EINVAL;
				goto exit_f;
			}

			hdr = (struct ipv6_opt_hdr *)CMSG_DATA(cmsg);
			len = ((hdr->hdrlen + 1) << 3);
			if (cmsg->cmsg_len < CMSG_LEN(len)) {
				err = -EINVAL;
				goto exit_f;
			}
			if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
				err = -EPERM;
				goto exit_f;
			}
			opt->opt_nflen += len;
			opt->hopopt = hdr;
			break;

		case IPV6_2292DSTOPTS:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) {
				err = -EINVAL;
				goto exit_f;
			}

			hdr = (struct ipv6_opt_hdr *)CMSG_DATA(cmsg);
			len = ((hdr->hdrlen + 1) << 3);
			if (cmsg->cmsg_len < CMSG_LEN(len)) {
				err = -EINVAL;
				goto exit_f;
			}
			if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
				err = -EPERM;
				goto exit_f;
			}
			if (opt->dst1opt) {
				err = -EINVAL;
				goto exit_f;
			}
			opt->opt_flen += len;
			opt->dst1opt = hdr;
			break;

		case IPV6_DSTOPTS:
		case IPV6_RTHDRDSTOPTS:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) {
				err = -EINVAL;
				goto exit_f;
			}

			hdr = (struct ipv6_opt_hdr *)CMSG_DATA(cmsg);
			len = ((hdr->hdrlen + 1) << 3);
			if (cmsg->cmsg_len < CMSG_LEN(len)) {
				err = -EINVAL;
				goto exit_f;
			}
			if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
				err = -EPERM;
				goto exit_f;
			}
			if (cmsg->cmsg_type == IPV6_DSTOPTS) {
				opt->opt_flen += len;
				opt->dst1opt = hdr;
			} else {
				opt->opt_nflen += len;
				opt->dst0opt = hdr;
			}
			break;

		case IPV6_2292RTHDR:
		case IPV6_RTHDR:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_rt_hdr))) {
				err = -EINVAL;
				goto exit_f;
			}

			rthdr = (struct ipv6_rt_hdr *)CMSG_DATA(cmsg);

			switch (rthdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			case IPV6_SRCRT_TYPE_2:
				if (rthdr->hdrlen != 2 ||
				    rthdr->segments_left != 1) {
					err = -EINVAL;
					goto exit_f;
				}
				break;
#endif
			default:
				err = -EINVAL;
				goto exit_f;
			}

			len = ((rthdr->hdrlen + 1) << 3);
			if (cmsg->cmsg_len < CMSG_LEN(len)) {
				err = -EINVAL;
				goto exit_f;
			}

			/* segments left must also match */
			if ((rthdr->hdrlen >> 1) != rthdr->segments_left) {
				err = -EINVAL;
				goto exit_f;
			}

			opt->opt_nflen += len;
			opt->srcrt = rthdr;

			if (cmsg->cmsg_type == IPV6_2292RTHDR && opt->dst1opt) {
				int dsthdrlen = ((opt->dst1opt->hdrlen+1)<<3);

				opt->opt_nflen += dsthdrlen;
				opt->dst0opt = opt->dst1opt;
				opt->dst1opt = NULL;
				opt->opt_flen -= dsthdrlen;
			}
			break;

		case IPV6_2292HOPLIMIT:
		case IPV6_HOPLIMIT:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
				err = -EINVAL;
				goto exit_f;
			}

			ipc6->hlimit = *(int *)CMSG_DATA(cmsg);
			if (ipc6->hlimit < -1 || ipc6->hlimit > 0xff) {
				err = -EINVAL;
				goto exit_f;
			}
			break;

		case IPV6_TCLASS:
		    {
			int tc;

			err = -EINVAL;
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
				goto exit_f;

			tc = *(int *)CMSG_DATA(cmsg);
			if (tc < -1 || tc > 0xff)
				goto exit_f;

			err = 0;
			ipc6->tclass = tc;
			break;
		    }

		case IPV6_DONTFRAG:
		    {
			int df;

			err = -EINVAL;
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
				goto exit_f;

			df = *(int *)CMSG_DATA(cmsg);
			if (df < 0 || df > 1)
				goto exit_f;

			err = 0;
			ipc6->dontfrag = df;
			break;
		    }

		default:
			net_dbg_ratelimited("invalid cmsg type: %d\n",
					    cmsg->cmsg_type);
			err = -EINVAL;
			goto exit_f;
		}
	}

exit_f:
	return err;
}
EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);

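/* Print one socket in /proc/net/udp6-style format: bucket, addresses, ports,
 * state, queue sizes, uid, inode, refcount and drop counter.
 */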
void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
			       __u16 srcp, __u16 destp, int rqueue, int bucket)
{
	const struct in6_addr *dest, *src;

	dest = &sp->sk_v6_daddr;
	src = &sp->sk_v6_rcv_saddr;
	seq_printf(seq,
		   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n",
		   bucket,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   sk_wmem_alloc_get(sp),
		   rqueue,
		   0, 0L, 0,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   0,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   atomic_read(&sp->sk_drops));
}