/*
 *  An implementation of the DCCP protocol
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;
#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
	[DCCP_OPEN]		= "OPEN",
	[DCCP_REQUESTING]	= "REQUESTING",
	[DCCP_PARTOPEN]		= "PARTOPEN",
	[DCCP_LISTEN]		= "LISTEN",
	[DCCP_RESPOND]		= "RESPOND",
	[DCCP_CLOSING]		= "CLOSING",
	[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
	[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
	[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
	[DCCP_TIME_WAIT]	= "TIME_WAIT",
	[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}
#endif
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	inet_sk_set_state(sk, state);
}

EXPORT_SYMBOL_GPL(dccp_set_state);
static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "UNKNOWN";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);
static void dccp_sk_destruct(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_tx_ccid = NULL;
	inet_sock_destruct(sk);
}
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	sk->sk_destruct		= dccp_sk_destruct;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);
void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);
static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}
static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);
/*
 *	Wait for a DCCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t dccp_poll(struct file *file, struct socket *sock,
		   poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sock, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = EPOLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}
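
/*
 * Usage sketch (hypothetical userspace caller, not part of this file): the
 * first 32-bit word of optval becomes dccps_service, any further words form
 * dccps_service_list, checked against incoming Requests:
 *
 *	__be32 codes[2] = { htonl(42), htonl(43) };
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, codes, sizeof(codes));
 */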
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov + i;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);
	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}
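
/*
 * Worked example (illustrative only): a call such as
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV, &(int){ 13 }, sizeof(int))
 * ends up in dccp_setsockopt_cscov(sk, 13, false); len = 16 - 13 = 3, so the
 * advertised preference list is {13, 14, 15} and negotiation settles on the
 * smallest value both endpoints share.
 */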
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				char __user *optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = memdup_user(optval, optlen);
	if (IS_ERR(val))
		return PTR_ERR(val);

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}
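
/*
 * Usage sketch (hypothetical userspace caller): request CCID-2 with CCID-3
 * as fallback for the TX half-connection; the array is a preference list,
 * most preferred value first:
 *
 *	__u8 prefs[] = { 2, 3 };
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_TX_CCID, prefs, sizeof(prefs));
 */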
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}
int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);
#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif
static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);
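
/*
 * Usage sketch (illustrative): query the current maximum packet size once
 * the connection is established:
 *
 *	int mps;
 *	socklen_t len = sizeof(mps);
 *
 *	getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_GET_CUR_MPS, &mps, &len);
 */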
#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif
static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
	struct cmsghdr *cmsg;

	/*
	 * Assign an (opaque) qpolicy priority value to skb->priority.
	 *
	 * We are overloading this skb field for use with the qpolicy subsystem.
	 * The skb->priority is normally used for the SO_PRIORITY option, which
	 * is initialised from sk_priority. Since the assignment of sk_priority
	 * to skb->priority happens later (on layer 3), we overload this field
	 * for use with queueing priorities as long as the skb is on layer 4.
	 * The default priority value (if nothing is set) is 0.
	 */
	skb->priority = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_DCCP)
			continue;

		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
			return -EINVAL;

		switch (cmsg->cmsg_type) {
		case DCCP_SCM_PRIORITY:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
				return -EINVAL;
			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
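
/*
 * Usage sketch (hypothetical userspace caller): attach a queueing priority
 * to a single datagram via a control message; this only has an effect when
 * the socket's queuing policy accepts it (see dccp_qpolicy_param_ok()):
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32))];
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_DCCP;
 *	cm->cmsg_type  = DCCP_SCM_PRIORITY;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(cm) = prio;
 *	sendmsg(fd, &msg, 0);
 */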
int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	trace_dccp_probe(sk, len);

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (dccp_qpolicy_full(sk)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process() works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	if (sk->sk_state == DCCP_CLOSED) {
		rc = -ENOTCONN;
		goto out_discard;
	}

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_msghdr_parse(msg, skb);
	if (rc != 0)
		goto out_discard;

	dccp_qpolicy_push(sk, skb);
	/*
	 * The xmit_timer is set if the TX CCID is rate-based and will expire
	 * when congestion control permits to release further packets into the
	 * network. Window-based CCIDs do not use this timer.
	 */
	if (!timer_pending(&dp->dccps_xmit_timer))
		dccp_write_xmit(sk);
out_release:
	release_sock(sk);
	return rc == 0 ? len : rc;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);
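
/*
 * Note: DCCP preserves datagram boundaries. Each successful dccp_sendmsg()
 * call above maps onto exactly one DCCP-Data/DataAck packet, which is why a
 * write larger than the current MPS (cf. DCCP_SOCKOPT_GET_CUR_MPS) fails
 * with -EMSGSIZE instead of being fragmented across packets.
 */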
int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		 int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo, NULL);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_msg(skb, 0, msg, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	sk->sk_max_ack_backlog = backlog;
	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);
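
/*
 * Usage sketch (hypothetical userspace server, for illustration): a DCCP
 * listener typically sets a service code before listen() so that incoming
 * Requests can be matched against it:
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	__be32 service = htonl(42);
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &service, sizeof(service));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 5);	// ends up in inet_dccp_listen() above
 */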
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}
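
/*
 * Note: with the default dccps_server_timewait == 0 the server sends
 * CloseReq (entering DCCP_ACTIVE_CLOSEREQ), so that the client ends up
 * holding the TIMEWAIT state, as suggested by RFC 4340, 8.3;
 * DCCP_SOCKOPT_SERVER_TIMEWAIT reverses this and makes the server close
 * via DCCP_CLOSING instead.
 */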
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	/* If socket has been already reset kill it. */
	if (sk->sk_state == DCCP_CLOSED)
		goto adjudge_to_death;

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time),
	 * - normal termination but queue could not be flushed within time limit
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);
void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);
static inline int __init dccp_mib_init(void)
{
	dccp_statistics = alloc_percpu(struct dccp_mib);
	if (!dccp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics);
}
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif
static int __init dccp_init(void)
{
	unsigned long goal;
	unsigned long nr_pages = totalram_pages();
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
	if (rc)
		goto out_fail;
	inet_hashinfo_init(&dccp_hashinfo);
	rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
	if (rc)
		goto out_free_percpu;
	rc = -ENOBUFS;
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_percpu;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (nr_pages >= (128 * 1024))
		goal = nr_pages >> (21 - PAGE_SHIFT);
	else
		goal = nr_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
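
	/*
	 * Worked example (illustrative): with 4 KiB pages (PAGE_SHIFT = 12)
	 * and at least 512 MiB of RAM, goal = nr_pages >> 9, i.e. one page
	 * worth of ehash buckets per 2 MiB of memory; smaller machines get
	 * one page per 8 MiB. A non-zero thash_entries module parameter
	 * overrides both heuristics.
	 */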
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
	percpu_counter_destroy(&dccp_orphan_count);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	return rc;
}
static void __exit dccp_fini(void)
{
	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	percpu_counter_destroy(&dccp_orphan_count);
}
module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");