4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/config.h>
13 #include <linux/dccp.h>
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/sched.h>
17 #include <linux/kernel.h>
18 #include <linux/skbuff.h>
19 #include <linux/netdevice.h>
21 #include <linux/if_arp.h>
22 #include <linux/init.h>
23 #include <linux/random.h>
24 #include <net/checksum.h>
26 #include <net/inet_common.h>
28 #include <net/protocol.h>
32 #include <asm/semaphore.h>
33 #include <linux/spinlock.h>
34 #include <linux/timer.h>
35 #include <linux/delay.h>
36 #include <linux/poll.h>
37 #include <linux/dccp.h>
/* Per-CPU SNMP-style MIB counters for the DCCP protocol. */
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
/* Sockets closed by userspace but not yet destroyed by the protocol. */
atomic_t dccp_orphan_count = ATOMIC_INIT(0);
/* Hooks DCCP receive and ICMP-error handling into the IPv4 demux. */
static struct net_protocol dccp_protocol = {
.handler = dccp_v4_rcv,
.err_handler = dccp_v4_err,
51 const char *dccp_packet_name(const int type)
53 static const char *dccp_packet_names[] = {
54 [DCCP_PKT_REQUEST] = "REQUEST",
55 [DCCP_PKT_RESPONSE] = "RESPONSE",
56 [DCCP_PKT_DATA] = "DATA",
57 [DCCP_PKT_ACK] = "ACK",
58 [DCCP_PKT_DATAACK] = "DATAACK",
59 [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
60 [DCCP_PKT_CLOSE] = "CLOSE",
61 [DCCP_PKT_RESET] = "RESET",
62 [DCCP_PKT_SYNC] = "SYNC",
63 [DCCP_PKT_SYNCACK] = "SYNCACK",
66 if (type >= DCCP_NR_PKT_TYPES)
69 return dccp_packet_names[type];
72 EXPORT_SYMBOL_GPL(dccp_packet_name);
74 const char *dccp_state_name(const int state)
76 static char *dccp_state_names[] = {
78 [DCCP_REQUESTING] = "REQUESTING",
79 [DCCP_PARTOPEN] = "PARTOPEN",
80 [DCCP_LISTEN] = "LISTEN",
81 [DCCP_RESPOND] = "RESPOND",
82 [DCCP_CLOSING] = "CLOSING",
83 [DCCP_TIME_WAIT] = "TIME_WAIT",
84 [DCCP_CLOSED] = "CLOSED",
87 if (state >= DCCP_MAX_STATES)
88 return "INVALID STATE!";
90 return dccp_state_names[state];
93 EXPORT_SYMBOL_GPL(dccp_state_name);
/*
 * Put the socket into the LISTEN role and start the connection-request
 * (accept) queue via the generic inet_connection_sock helper.
 */
static inline int dccp_listen_start(struct sock *sk)
struct dccp_sock *dp = dccp_sk(sk);
dp->dccps_role = DCCP_ROLE_LISTEN;
/*
 * Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE)
 * before calling listen()
 */
if (dccp_service_not_initialized(sk))
return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
/*
 * dccp_disconnect - abort the connection and reset the socket to CLOSED.
 * DCCP counterpart of tcp_disconnect(): stops timers, purges queues,
 * clears per-connection state and wakes up waiters via sk_error_report.
 */
int dccp_disconnect(struct sock *sk, int flags)
struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet = inet_sk(sk);
const int old_state = sk->sk_state;
if (old_state != DCCP_CLOSED)
dccp_set_state(sk, DCCP_CLOSED);
/* ABORT function of RFC793 */
if (old_state == DCCP_LISTEN) {
inet_csk_listen_stop(sk);
/* FIXME: do the active reset thing */
} else if (old_state == DCCP_REQUESTING)
sk->sk_err = ECONNRESET;
/* Stop retransmit/delack timers and drop all queued data. */
dccp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
if (sk->sk_send_head != NULL) {
__kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
/* Forget the source address unless the user explicitly bound it. */
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
sock_reset_flag(sk, SOCK_DONE);
icsk->icsk_backoff = 0;
inet_csk_delack_init(sk);
/* A socket with a local port must still own its bind-hash bucket. */
BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
/* Wake anyone sleeping in poll()/select() with the new error state. */
sk->sk_error_report(sk);
/*
 * dccp_poll - poll/select/epoll support for DCCP sockets.
 *
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
static unsigned int dccp_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
poll_wait(file, sk->sk_sleep, wait);
if (sk->sk_state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);
/* Socket is not locked. We are protected from async events
 * by poll logic and correct handling of state changes
 * made by other threads is impossible in any case.
 */
if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLIN | POLLRDNORM;
/* Connected (not handshaking): report readability/writability. */
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
if (atomic_read(&sk->sk_rmem_alloc) > 0)
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
&sk->sk_socket->flags);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
/* Race breaker. If space is freed after
 * wspace test but before the flags are set,
 * IO signal will be lost.
 */
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
mask |= POLLOUT | POLLWRNORM;
/* ioctl() handler: currently a stub that only logs entry; no DCCP
 * ioctls are implemented yet. */
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
dccp_pr_debug("entry\n");
/*
 * Install the socket's service code.  'service' is the primary (first)
 * code; if optlen exceeds one u32, the remaining codes are copied from
 * userspace into a freshly allocated dccp_service_list that replaces
 * the previous one.
 */
static int dccp_setsockopt_service(struct sock *sk, const u32 service,
char __user *optval, int optlen)
struct dccp_sock *dp = dccp_sk(sk);
struct dccp_service_list *sl = NULL;
/* Reject the reserved invalid code and over-long lists. */
if (service == DCCP_SERVICE_INVALID_VALUE ||
optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
/* More than one code supplied: copy the tail of the list. */
if (optlen > sizeof(service)) {
sl = kmalloc(optlen, GFP_KERNEL);
/* NOTE(review): no NULL check on sl is visible here — confirm
 * allocation failure is handled before the dereference below. */
sl->dccpsl_nr = optlen / sizeof(u32) - 1;
if (copy_from_user(sl->dccpsl_list,
optval + sizeof(service),
optlen - sizeof(service)) ||
dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
/* Commit: new primary code and list; free any previous list. */
dp->dccps_service = service;
if (dp->dccps_service_list != NULL)
kfree(dp->dccps_service_list);
dp->dccps_service_list = sl;
/*
 * setsockopt() entry point for SOL_DCCP; other levels are delegated to
 * the generic IP implementation.  DCCP_SOCKOPT_SERVICE is dispatched
 * separately because it carries a variable-length payload.
 */
int dccp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, int optlen)
struct dccp_sock *dp;
if (level != SOL_DCCP)
return ip_setsockopt(sk, level, optname, optval, optlen);
if (optlen < sizeof(int))
if (get_user(val, (int __user *)optval))
if (optname == DCCP_SOCKOPT_SERVICE)
return dccp_setsockopt_service(sk, val, optval, optlen);
case DCCP_SOCKOPT_PACKET_SIZE:
dp->dccps_packet_size = val;
/*
 * Copy the socket's service code(s) to userspace.  Output layout:
 * total length via *optlen, then the primary code followed by the
 * optional service list at optval.
 */
static int dccp_getsockopt_service(struct sock *sk, int len,
const struct dccp_sock *dp = dccp_sk(sk);
const struct dccp_service_list *sl;
int err = -ENOENT, slen = 0, total_len = sizeof(u32);
/* Nothing to report until a service code has been configured. */
if (dccp_service_not_initialized(sk))
if ((sl = dp->dccps_service_list) != NULL) {
slen = sl->dccpsl_nr * sizeof(u32);
/* NOTE(review): 'len' is presumably validated against total_len
 * before this point — confirm against the full function. */
if (put_user(total_len, optlen) ||
put_user(dp->dccps_service, optval) ||
(sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
/*
 * getsockopt() entry point for SOL_DCCP; other levels are delegated to
 * the generic IP implementation.
 */
int dccp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
struct dccp_sock *dp;
if (level != SOL_DCCP)
return ip_getsockopt(sk, level, optname, optval, optlen);
if (get_user(len, optlen))
/* Variable-length option handled by its own helper. */
if (optname == DCCP_SOCKOPT_SERVICE)
return dccp_getsockopt_service(sk, len,
(u32 __user *)optval, optlen);
/* All remaining options are plain ints; clamp the copy length. */
len = min_t(unsigned int, len, sizeof(int));
case DCCP_SOCKOPT_PACKET_SIZE:
val = dp->dccps_packet_size;
if (put_user(len, optlen) || copy_to_user(optval, &val, len))
/*
 * dccp_sendmsg - queue one datagram for transmission.
 * Each sendmsg() call maps to exactly one DCCP packet (packet
 * boundaries are preserved), so 'len' may not exceed the cached MSS.
 */
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
const struct dccp_sock *dp = dccp_sk(sk);
const int flags = msg->msg_flags;
const int noblock = flags & MSG_DONTWAIT;
/* One message == one packet: refuse anything larger than the MSS. */
if (len > dp->dccps_mss_cache)
timeo = sock_sndtimeo(sk, noblock);
/*
 * We have to use sk_stream_wait_connect here to set sk_write_pending,
 * so that the trick in dccp_rcv_request_sent_state_process works.
 */
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
/* Allocate an skb big enough for headers plus payload. */
size = sk->sk_prot->max_header + len;
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
skb_reserve(skb, sk->sk_prot->max_header);
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
rc = dccp_write_xmit(sk, skb, &timeo);
/*
 * XXX we don't use sk_write_queue, so just discard the packet.
 * Current plan however is to _use_ sk_write_queue with
 * an algorithm similar to tcp_sendmsg, where the main difference
 * is that in DCCP we have to respect packet boundaries, so
 * no coalescing of skbs.
 *
 * This bug was _quickly_ found & fixed by just looking at an OSTRA
 * generated callgraph 8) -acme
 */
/*
 * dccp_recvmsg - receive one datagram from the socket.
 * Peeks at the head of the receive queue, treats connection-teardown
 * packet types (RESET/CLOSE) like an EOF marker, and otherwise blocks
 * (subject to the receive timeout) until data or an error is available.
 */
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int nonblock, int flags, int *addr_len)
const struct dccp_hdr *dh;
/* Reading from a listening socket is not meaningful. */
if (sk->sk_state == DCCP_LISTEN) {
timeo = sock_rcvtimeo(sk, nonblock);
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
goto verify_sock_status;
/* Only DATA and DATAACK packets carry application payload. */
if (dh->dccph_type == DCCP_PKT_DATA ||
dh->dccph_type == DCCP_PKT_DATAACK)
if (dh->dccph_type == DCCP_PKT_RESET ||
dh->dccph_type == DCCP_PKT_CLOSE) {
dccp_pr_debug("found fin ok!\n");
dccp_pr_debug("packet_type=%s\n",
dccp_packet_name(dh->dccph_type));
if (sock_flag(sk, SOCK_DONE)) {
len = sock_error(sk);
if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (sk->sk_state == DCCP_CLOSED) {
if (!sock_flag(sk, SOCK_DONE)) {
/* This occurs when user tries to read
 * from never connected socket.
 */
if (signal_pending(current)) {
len = sock_intr_errno(timeo);
/* No data yet: sleep until the queue changes or we time out. */
sk_wait_data(sk, &timeo);
else if (len < skb->len)
msg->msg_flags |= MSG_TRUNC;
if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
/* Exception. Bailout! */
if (!(flags & MSG_PEEK))
/*
 * inet_dccp_listen - listen() entry point for DCCP sockets.
 * Validates the socket type/state, starts listening if the socket is
 * not already in LISTEN, and (re)sets the accept backlog.
 */
static int inet_dccp_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
unsigned char old_state;
if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
old_state = sk->sk_state;
if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
/* Really, if the socket is already in listen state
 * we can only allow the backlog to be adjusted.
 */
if (old_state != DCCP_LISTEN) {
/*
 * FIXME: here it probably should be sk->sk_prot->listen_start
 * see tcp_listen_start
 */
err = dccp_listen_start(sk);
sk->sk_max_ack_backlog = backlog;
/*
 * Close-transition table: maps the current socket state to the next
 * state, optionally OR'ed with DCCP_ACTION_FIN when a closing packet
 * (CLOSE/CLOSEREQ) must be transmitted.
 */
static const unsigned char dccp_new_state[] = {
/* current state: new state: action: */
[DCCP_OPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
[DCCP_REQUESTING] = DCCP_CLOSED,
[DCCP_PARTOPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
[DCCP_LISTEN] = DCCP_CLOSED,
[DCCP_RESPOND] = DCCP_CLOSED,
[DCCP_CLOSING] = DCCP_CLOSED,
[DCCP_TIME_WAIT] = DCCP_CLOSED,
[DCCP_CLOSED] = DCCP_CLOSED,
/*
 * Apply the close transition for the socket's current state.
 * Returns non-zero when the caller must send a closing packet.
 */
static int dccp_close_state(struct sock *sk)
const int next = dccp_new_state[sk->sk_state];
const int ns = next & DCCP_STATE_MASK;
if (ns != sk->sk_state)
dccp_set_state(sk, ns);
return next & DCCP_ACTION_FIN;
/*
 * dccp_close - close() entry point: graceful or immediate teardown.
 * Purges unread data, sends CLOSE/CLOSEREQ where the state machine
 * requires it, then hands the orphaned socket over to the kernel for
 * final destruction.
 */
void dccp_close(struct sock *sk, long timeout)
sk->sk_shutdown = SHUTDOWN_MASK;
if (sk->sk_state == DCCP_LISTEN) {
dccp_set_state(sk, DCCP_CLOSED);
inet_csk_listen_stop(sk);
goto adjudge_to_death;
/*
 * We need to flush the recv. buffs. We do this only on the
 * descriptor close, not protocol-sourced closes, because the
 * reader process may not have drained the data yet!
 */
/* FIXME: check for unread data */
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
} else if (dccp_close_state(sk)) {
dccp_send_close(sk, 1);
sk_stream_wait_close(sk, timeout);
/*
 * It is the last release_sock in its life. It will remove backlog.
 */
/*
 * Now socket is owned by kernel and we acquire BH lock
 * to finish close. No need to check for user refs.
 */
BUG_TRAP(!sock_owned_by_user(sk));
/*
 * The last release_sock may have processed the CLOSE or RESET
 * packet moving sock to CLOSED state, if not we have to fire
 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
 * in draft-ietf-dccp-spec-11. -acme
 */
if (sk->sk_state == DCCP_CLOSING) {
/* FIXME: should start at 2 * RTT */
/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
/* Yeah, we should use sk->sk_prot->orphan_count, etc */
dccp_set_state(sk, DCCP_CLOSED);
atomic_inc(sk->sk_prot->orphan_count);
if (sk->sk_state == DCCP_CLOSED)
inet_csk_destroy_sock(sk);
/* Otherwise, socket is reprieved until protocol close. */
/* shutdown() handler: currently a debug-only stub (no per-direction
 * shutdown is implemented yet). */
void dccp_shutdown(struct sock *sk, int how)
dccp_pr_debug("entry\n");
/* Socket-level operations for AF_INET/SOCK_DCCP; mostly the generic
 * inet helpers, with DCCP-specific listen(). */
static struct proto_ops inet_dccp_ops = {
.owner = THIS_MODULE,
.release = inet_release,
.connect = inet_stream_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname,
/* FIXME: work on tcp_poll to rename it to inet_csk_poll */
/* FIXME: work on inet_listen to rename it to sock_common_listen */
.listen = inet_dccp_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
extern struct net_proto_family inet_family_ops;
/* Registers DCCP with the inet socket-creation switch (socket(2)). */
static struct inet_protosw dccp_v4_protosw = {
.protocol = IPPROTO_DCCP,
.prot = &dccp_v4_prot,
.ops = &inet_dccp_ops,
/*
 * This is the global socket data structure used for responding to
 * the Out-of-the-blue (OOTB) packets. A control sock will be created
 * for this socket at the initialization time.
 */
struct socket *dccp_ctl_socket;
static char dccp_ctl_socket_err_msg[] __initdata =
KERN_ERR "DCCP: Failed to create the control socket.\n";
/* Create the kernel-internal control socket used for OOTB responses. */
static int __init dccp_ctl_sock_init(void)
int rc = sock_create_kern(PF_INET, SOCK_DCCP, IPPROTO_DCCP,
printk(dccp_ctl_socket_err_msg);
/* Responses may be built in softirq context: use atomic allocation. */
dccp_ctl_socket->sk->sk_allocation = GFP_ATOMIC;
inet_sk(dccp_ctl_socket->sk)->uc_ttl = -1;
/* Unhash it so that IP input processing does not even
 * see it, we do not wish this socket to see incoming
 * packets.
 */
dccp_ctl_socket->sk->sk_prot->unhash(dccp_ctl_socket->sk);
#ifdef CONFIG_IP_DCCP_UNLOAD_HACK
/* Release the control socket on module unload (unload hack only). */
void dccp_ctl_sock_exit(void)
if (dccp_ctl_socket != NULL) {
sock_release(dccp_ctl_socket);
dccp_ctl_socket = NULL;
EXPORT_SYMBOL_GPL(dccp_ctl_sock_exit);
/* Allocate the two per-CPU MIB counter arrays; on failure of the
 * second, the first is freed again so no memory leaks. */
static int __init init_dccp_v4_mibs(void)
dccp_statistics[0] = alloc_percpu(struct dccp_mib);
if (dccp_statistics[0] == NULL)
dccp_statistics[1] = alloc_percpu(struct dccp_mib);
if (dccp_statistics[1] == NULL)
free_percpu(dccp_statistics[0]);
dccp_statistics[0] = NULL;
/* Requested number of established-hash buckets; 0 means auto-size
 * from available memory in dccp_init(). Read-only module parameter. */
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
#ifdef CONFIG_IP_DCCP_DEBUG
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
/*
 * dccp_init - module init: register the proto, size and allocate the
 * established/bind hash tables, set up MIBs, hook into IPv4 and create
 * the control socket.  Unwinds everything in reverse order on failure
 * via the goto-cleanup chain at the bottom.
 */
static int __init dccp_init(void)
int ehash_order, bhash_order, i;
int rc = proto_register(&dccp_v4_prot, 1);
dccp_hashinfo.bind_bucket_cachep =
kmem_cache_create("dccp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!dccp_hashinfo.bind_bucket_cachep)
goto out_proto_unregister;
/*
 * Size and allocate the main established and bind bucket
 * hash tables.
 *
 * The methodology is similar to that of the buffer cache.
 */
if (num_physpages >= (128 * 1024))
goal = num_physpages >> (21 - PAGE_SHIFT);
goal = num_physpages >> (23 - PAGE_SHIFT);
/* An explicit thash_entries module parameter overrides the goal. */
goal = (thash_entries *
sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
sizeof(struct inet_ehash_bucket);
dccp_hashinfo.ehash_size >>= 1;
/* Round the table size down to a power of two. */
while (dccp_hashinfo.ehash_size &
(dccp_hashinfo.ehash_size - 1))
dccp_hashinfo.ehash_size--;
dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
__get_free_pages(GFP_ATOMIC, ehash_order);
/* Retry with a smaller table if the allocation fails. */
} while (!dccp_hashinfo.ehash && --ehash_order > 0);
if (!dccp_hashinfo.ehash) {
printk(KERN_CRIT "Failed to allocate DCCP "
"established hash table\n");
goto out_free_bind_bucket_cachep;
for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
rwlock_init(&dccp_hashinfo.ehash[i].lock);
INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
/* The bind hash starts from the same order as the ehash. */
bhash_order = ehash_order;
dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
sizeof(struct inet_bind_hashbucket);
if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
__get_free_pages(GFP_ATOMIC, bhash_order);
} while (!dccp_hashinfo.bhash && --bhash_order >= 0);
if (!dccp_hashinfo.bhash) {
printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
goto out_free_dccp_ehash;
for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
spin_lock_init(&dccp_hashinfo.bhash[i].lock);
INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
if (init_dccp_v4_mibs())
goto out_free_dccp_bhash;
if (inet_add_protocol(&dccp_protocol, IPPROTO_DCCP))
goto out_free_dccp_v4_mibs;
inet_register_protosw(&dccp_v4_protosw);
rc = dccp_ctl_sock_init();
goto out_unregister_protosw;
/* Error unwinding: undo each step in the reverse order of setup. */
out_unregister_protosw:
inet_unregister_protosw(&dccp_v4_protosw);
inet_del_protocol(&dccp_protocol, IPPROTO_DCCP);
out_free_dccp_v4_mibs:
free_percpu(dccp_statistics[0]);
free_percpu(dccp_statistics[1]);
dccp_statistics[0] = dccp_statistics[1] = NULL;
free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
dccp_hashinfo.bhash = NULL;
free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
dccp_hashinfo.bind_bucket_cachep = NULL;
out_proto_unregister:
proto_unregister(&dccp_v4_prot);
static const char dccp_del_proto_err_msg[] __exitdata =
KERN_ERR "can't remove dccp net_protocol\n";
/* Module unload: tear down everything dccp_init() set up, in reverse. */
static void __exit dccp_fini(void)
inet_unregister_protosw(&dccp_v4_protosw);
if (inet_del_protocol(&dccp_protocol, IPPROTO_DCCP) < 0)
printk(dccp_del_proto_err_msg);
free_percpu(dccp_statistics[0]);
free_percpu(dccp_statistics[1]);
/* Hash tables were page allocations; recompute their orders to free. */
free_pages((unsigned long)dccp_hashinfo.bhash,
get_order(dccp_hashinfo.bhash_size *
sizeof(struct inet_bind_hashbucket)));
free_pages((unsigned long)dccp_hashinfo.ehash,
get_order(dccp_hashinfo.ehash_size *
sizeof(struct inet_ehash_bucket)));
kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
proto_unregister(&dccp_v4_prot);
module_init(dccp_init);
module_exit(dccp_fini);
/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly.  Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6");
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");