/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;
#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
	[DCCP_OPEN]		= "OPEN",
	[DCCP_REQUESTING]	= "REQUESTING",
	[DCCP_PARTOPEN]		= "PARTOPEN",
	[DCCP_LISTEN]		= "LISTEN",
	[DCCP_RESPOND]		= "RESPOND",
	[DCCP_CLOSING]		= "CLOSING",
	[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
	[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
	[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
	[DCCP_TIME_WAIT]	= "TIME_WAIT",
	[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}
#endif
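
/*
 * dccp_set_state  -  move a socket to a new DCCP state.
 * Besides recording the new state, this updates the CURRESTAB/ESTABRESETS
 * MIB counters and, when entering DCCP_CLOSED, unhashes the socket and
 * releases its bound port (unless the user holds a bind-port lock).
 */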
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p)  %s  -->  %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);
static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}
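
/*
 * dccp_done  -  the connection is finished: move to CLOSED, stop all timers
 * and, if no user process still holds the socket, destroy it right away.
 */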
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "UNKNOWN";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);
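
/*
 * dccp_init_sock  -  set the per-socket defaults (RTO, MSS cache, role,
 * service code, TX queue length) and initialise the transmit timers.
 * Feature negotiation state is only set up for non-control sockets.
 */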
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);
void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);
static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;
	dp->dccps_hc_tx_ccid = NULL;

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);
/*
 *	Wait for a DCCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else { /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);
	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		unsigned long amount = 0;
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb != NULL)
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		rc = put_user(amount, (int __user *)arg);
		break;
	}
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}
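
/*
 * dccp_setsockopt_cscov  -  handle DCCP_SOCKOPT_{SEND,RECV}_CSCOV: register
 * the requested minimum checksum coverage (0..15) for feature negotiation.
 * @rx selects whether the RX (true) or TX (false) direction is configured.
 */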
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov + i;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				char __user *optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = memdup_user(optval, optlen);
	if (IS_ERR(val))
		return PTR_ERR(val);

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}
int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif
static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif
static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);

	/*
	 * Assign an (opaque) qpolicy priority value to skb->priority.
	 *
	 * We are overloading this skb field for use with the qpolicy subsystem.
	 * The skb->priority is normally used for the SO_PRIORITY option, which
	 * is initialised from sk_priority. Since the assignment of sk_priority
	 * to skb->priority happens later (on layer 3), we overload this field
	 * for use with queueing priorities as long as the skb is on layer 4.
	 * The default priority value (if nothing is set) is 0.
	 */
	skb->priority = 0;

	for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_DCCP)
			continue;

		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
			return -EINVAL;

		switch (cmsg->cmsg_type) {
		case DCCP_SCM_PRIORITY:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
				return -EINVAL;
			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
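
/*
 * Illustrative userspace sketch (not part of the kernel sources): with a
 * priority-aware qpolicy selected (DCCP_SOCKOPT_QPOLICY_ID set to
 * DCCPQ_POLICY_PRIO), an application can attach a per-packet priority as
 * ancillary data. buf, buflen and fd are assumed to be set up by the caller:
 *
 *	__u32 prio = 3;
 *	char cbuf[CMSG_SPACE(sizeof(prio))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *	cm->cmsg_level = SOL_DCCP;
 *	cm->cmsg_type  = DCCP_SCM_PRIORITY;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(prio));
 *	memcpy(CMSG_DATA(cm), &prio, sizeof(prio));
 *	sendmsg(fd, &mh, 0);
 */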
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (dccp_qpolicy_full(sk)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_msghdr_parse(msg, skb);
	if (rc != 0)
		goto out_discard;

	dccp_qpolicy_push(sk, skb);
	/*
	 * The xmit_timer is set if the TX CCID is rate-based and will expire
	 * when congestion control permits to release further packets into the
	 * network. Window-based CCIDs do not use this timer.
	 */
	if (!timer_pending(&dp->dccps_xmit_timer))
		dccp_write_xmit(sk);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);
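
/*
 * dccp_recvmsg  -  datagram-style receive: at most one DCCP packet is
 * returned per call; if the buffer is smaller than the packet, the excess
 * is discarded and MSG_TRUNC is set. Close/CloseReq/Reset packets end the
 * receive loop with a return value of 0.
 */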
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb, 0);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);
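
/*
 * dccp_terminate_connection  -  perform the active-close action appropriate
 * to the current socket state: finish a passive close, or send a Close /
 * CloseReq and pick the next state (CLOSING, or ACTIVE_CLOSEREQ for servers
 * that leave the TIMEWAIT state to the client).
 */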
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	/* If socket has been already reset kill it. */
	if (sk->sk_state == DCCP_CLOSED)
		goto adjudge_to_death;

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time),
	 * - normal termination but queue could not be flushed within time limit
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);
void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int dccp_mib_init(void)
{
	return snmp_mib_init((void __percpu **)dccp_statistics,
			     sizeof(struct dccp_mib),
			     __alignof__(struct dccp_mib));
}

static inline void dccp_mib_exit(void)
{
	snmp_mib_free((void __percpu **)dccp_statistics);
}
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif
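
/*
 * Module initialisation: allocate the orphan counter and the established /
 * bind hash tables (sized from available memory or the thash_entries
 * parameter), then bring up the MIBs, ack-vector code, sysctls and the
 * built-in CCIDs, unwinding in reverse order on failure.
 */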
static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	rc = percpu_counter_init(&dccp_orphan_count, 0);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	inet_hashinfo_init(&dccp_hashinfo);
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_percpu;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (totalram_pages >= (128 * 1024))
		goal = totalram_pages >> (21 - PAGE_SHIFT);
	else
		goal = totalram_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
	}

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
	percpu_counter_destroy(&dccp_orphan_count);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	return rc;
}
static void __exit dccp_fini(void)
{
	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	percpu_counter_destroy(&dccp_orphan_count);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");