/*
 *  net/dccp/ipv4.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>

#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#include "ccid.h"
#include "dccp.h"
struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
	.portalloc_lock	= SPIN_LOCK_UNLOCKED,
	.port_rover	= 1024 - 1,
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);
static int dccp_v4_get_port(struct sock *sk, const unsigned short snum)
{
	return inet_csk_get_port(&dccp_hashinfo, sk, snum);
}

static void dccp_v4_hash(struct sock *sk)
{
	inet_hash(&dccp_hashinfo, sk);
}

static void dccp_v4_unhash(struct sock *sk)
{
	inet_unhash(&dccp_hashinfo, sk);
}
/* called with local bh disabled */
static int __dccp_v4_check_established(struct sock *sk, const __u16 lport,
				       struct inet_timewait_sock **twp)
{
	struct inet_sock *inet = inet_sk(sk);
	const u32 daddr = inet->rcv_saddr;
	const u32 saddr = inet->daddr;
	const int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
	const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport,
				      dccp_hashinfo.ehash_size);
	struct inet_ehash_bucket *head = &dccp_hashinfo.ehash[hash];
	const struct sock *sk2;
	const struct hlist_node *node;
	struct inet_timewait_sock *tw;

	write_lock(&head->lock);

	/* Check TIME-WAIT sockets first. */
	sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) {
		tw = inet_twsk(sk2);

		if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif))
			goto not_unique;
	}
	tw = NULL;

	/* And established part... */
	sk_for_each(sk2, node, &head->chain) {
		if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif))
			goto not_unique;
	}

	/* Must record num and sport now. Otherwise we will see
	 * in hash table socket with a funny identity. */
	inet->num = lport;
	inet->sport = htons(lport);
	sk->sk_hashent = hash;
	BUG_TRAP(sk_unhashed(sk));
	__sk_add_node(sk, &head->chain);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(&head->lock);

	if (twp != NULL) {
		*twp = tw;
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
	} else if (tw != NULL) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, &dccp_death_row);
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

		inet_twsk_put(tw);
	}

	return 0;

not_unique:
	write_unlock(&head->lock);
	return -EADDRNOTAVAIL;
}
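/*
 * Note on the helper above: it returns 0 when the (saddr, sport, daddr,
 * dport) tuple is unique, handing back any matching TIME-WAIT socket via
 * *twp so the caller can recycle it, and -EADDRNOTAVAIL when the identity
 * is already in use.  This mirrors TCP's __tcp_v4_check_established().
 */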
/*
 * Bind a port for a connect operation and hash it.
 */
static int dccp_v4_hash_connect(struct sock *sk)
{
	const unsigned short snum = inet_sk(sk)->num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;

	if (snum == 0) {
		int rover;
		int low = sysctl_local_port_range[0];
		int high = sysctl_local_port_range[1];
		int remaining = (high - low) + 1;
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		local_bh_disable();

		/* TODO. Actually it is not so bad idea to remove
		 * dccp_hashinfo.portalloc_lock before next submission to
		 * Linus.
		 * As soon as we touch this place at all it is time to think.
		 *
		 * Now it protects single _advisory_ variable
		 * dccp_hashinfo.port_rover, hence it is mostly useless.
		 * Code will work nicely if we just delete it, but
		 * I am afraid in contented case it will work not better or
		 * even worse: another cpu just will hit the same bucket
		 * and spin there.
		 * So some cpu salt could remove both contention and
		 * memory pingpong. Any ideas how to do this in a nice way?
		 */
		spin_lock(&dccp_hashinfo.portalloc_lock);
		rover = dccp_hashinfo.port_rover;

		do {
			rover++;
			if ((rover < low) || (rover > high))
				rover = low;
			head = &dccp_hashinfo.bhash[inet_bhashfn(rover,
						    dccp_hashinfo.bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (tb->port == rover) {
					BUG_TRAP(!hlist_empty(&tb->owners));
					if (tb->fastreuse >= 0)
						goto next_port;
					if (!__dccp_v4_check_established(sk,
									 rover,
									 &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(dccp_hashinfo.bind_bucket_cachep,
						     head, rover);
			if (tb == NULL) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		} while (--remaining > 0);
		dccp_hashinfo.port_rover = rover;
		spin_unlock(&dccp_hashinfo.portalloc_lock);

		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		/* All locks still held and bhs disabled */
		dccp_hashinfo.port_rover = rover;
		spin_unlock(&dccp_hashinfo.portalloc_lock);

		inet_bind_hash(sk, tb, rover);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->sport = htons(rover);
			__inet_hash(&dccp_hashinfo, sk, 0);
		}
		spin_unlock(&head->lock);

		if (tw != NULL) {
			inet_twsk_deschedule(tw, &dccp_death_row);
			inet_twsk_put(tw);
		}

		ret = 0;
		goto out;
	}

	head = &dccp_hashinfo.bhash[inet_bhashfn(snum,
						 dccp_hashinfo.bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && sk->sk_bind_node.next == NULL) {
		__inet_hash(&dccp_hashinfo, sk, 0);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = __dccp_v4_check_established(sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
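/*
 * Active open (connect()) path: dccp_v4_connect() below resolves a route,
 * moves the socket to DCCP_REQUESTING, binds and hashes a local port via
 * dccp_v4_hash_connect(), derives the initial sequence number with
 * secure_dccp_sequence_number() and finally calls dccp_connect() to emit
 * the DCCP-Request.
 */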
static int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	u32 daddr, nexthop;
	int tmp;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt != NULL && inet->opt->srr) {
		if (daddr == 0)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_DCCP,
			       inet->sport, usin->sin_port, sk);
	if (tmp < 0)
		return tmp;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (inet->opt == NULL || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (inet->saddr == 0)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	dp->dccps_ext_header_len = 0;
	if (inet->opt != NULL)
		dp->dccps_ext_header_len = inet->opt->optlen;
	/*
	 * Socket identity is still unknown (sport may be zero).
	 * However we set state to DCCP_REQUESTING and, without releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
	dccp_set_state(sk, DCCP_REQUESTING);
	err = dccp_v4_hash_connect(sk);
	if (err != 0)
		goto failure;

	err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
	if (err != 0)
		goto failure;

	/* OK, now commit destination to socket. */
	sk_setup_caps(sk, &rt->u.dst);

	dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, inet->daddr,
						    inet->sport,
						    usin->sin_port);
	dccp_update_gss(sk, dp->dccps_iss);

	inet->id = dp->dccps_iss ^ jiffies;

	err = dccp_connect(sk);
	rt = NULL;
	if (err != 0)
		goto failure;
out:
	return err;
failure:
	/*
	 * This unhashes the socket and releases the local port, if necessary.
	 */
	dccp_set_state(sk, DCCP_CLOSED);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	goto out;
}
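/*
 * For reference, the path above is what a userspace active open exercises.
 * A minimal sketch (assuming the toolchain headers expose SOCK_DCCP and
 * IPPROTO_DCCP; the address and port below are only examples):
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5001),
 *		.sin_addr   = { .s_addr = inet_addr("192.0.2.1") },
 *	};
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * connect() ends up in dccp_v4_connect(), which sends the DCCP-Request.
 */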
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static inline void dccp_do_pmtu_discovery(struct sock *sk,
					  const struct iphdr *iph,
					  u32 mtu)
{
	struct dst_entry *dst;
	const struct inet_sock *inet = inet_sk(sk);
	const struct dccp_sock *dp = dccp_sk(sk);

	/* We are not interested in DCCP_LISTEN and request sockets (RESPONSEs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to go wrong... Remember the soft error
	 * for the case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    dp->dccps_pmtu_cookie > mtu) {
		dccp_sync_mss(sk, mtu);

		/*
		 * From: draft-ietf-dccp-spec-11.txt
		 *
		 *	DCCP-Sync packets are the best choice for upward
		 *	probing, since DCCP-Sync probes do not risk application
		 *	data loss.
		 */
		dccp_send_sync(sk, dp->dccps_gsr);
	} /* else let the usual retransmit timer handle it */
}
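/*
 * dccp_v4_ctl_send_ack() below answers on behalf of the unconnected control
 * socket (dccp_ctl_socket): it builds a bare DCCP-Ack with the source and
 * destination ports swapped and the peer's sequence number echoed in the
 * acknowledgement field, then transmits it with ip_build_and_send_pkt().
 */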
static void dccp_v4_ctl_send_ack(struct sk_buff *rxskb)
{
	int err;
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_ack_bits);
	struct sk_buff *skb;

	if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
		return;

	skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC);
	if (skb == NULL)
		return;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_DCCP_HEADER);

	skb->dst = dst_clone(rxskb->dst);

	skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_hdr_ack_len);

	/* Build DCCP header and checksum it. */
	dh->dccph_type	= DCCP_PKT_ACK;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_ack_len / 4;
	dh->dccph_x	= 1;

	dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	bh_lock_sock(dccp_ctl_socket->sk);
	err = ip_build_and_send_pkt(skb, dccp_ctl_socket->sk,
				    rxskb->nh.iph->daddr,
				    rxskb->nh.iph->saddr, NULL);
	bh_unlock_sock(dccp_ctl_socket->sk);

	if (err == NET_XMIT_CN || err == 0) {
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
	}
}
static void dccp_v4_reqsk_send_ack(struct sk_buff *skb,
				   struct request_sock *req)
{
	dccp_v4_ctl_send_ack(skb);
}

static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
				 struct dst_entry *dst)
{
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto out;

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr, ireq->opt);
		if (err == NET_XMIT_CN)
			err = 0;
	}

out:
	dst_release(dst);
	return err;
}
/*
 * This routine is called by the ICMP module when it gets some sort of error
 * condition. If err < 0 then the socket should be closed and the error
 * returned to the user. If err > 0 it's just the icmp type << 8 | icmp code.
 * After adjustment the header points to the first 8 bytes of the DCCP header.
 * We need to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When someone else
 * accesses the socket the ICMP is just dropped and for some paths there is no
 * check at all. A more general error queue to queue errors for later handling
 * is probably better.
 */
void dccp_v4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (struct iphdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data +
							(iph->ihl << 2));
	struct dccp_sock *dp;
	struct inet_sock *inet;
	const int type = skb->h.icmph->type;
	const int code = skb->h.icmph->code;
	struct sock *sk;
	__u64 seq;
	int err;

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(&dccp_hashinfo, iph->daddr, dh->dccph_dport,
			 iph->saddr, dh->dccph_sport, inet_iif(skb));
	if (sk == NULL) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	seq = dccp_hdr_seq(skb);
	if (sk->sk_state != DCCP_LISTEN &&
	    !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
		NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				dccp_do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;
		req = inet_csk_search_req(sk, &prev, dh->dccph_dport,
					  iph->daddr, iph->saddr);
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != dccp_rsk(req)->dreq_iss) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}
		/*
		 * Still in RESPOND, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND:
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;

			sk->sk_error_report(sk);

			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages have
	 * lost their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */
	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else /* Only an error on timeout */
		sk->sk_err_soft = err;
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
extern struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
				       enum dccp_reset_codes code);

int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_sk_rebuild_header(sk);

	if (err != 0)
		return err;

	skb = dccp_make_reset(sk, sk->sk_dst_cache, code);
	if (skb != NULL) {
		const struct dccp_sock *dp = dccp_sk(sk);
		const struct inet_sock *inet = inet_sk(sk);

		err = ip_build_and_send_pkt(skb, sk,
					    inet->saddr, inet->daddr, NULL);
		if (err == NET_XMIT_CN)
			err = 0;

		ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
		ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
	}

	return err;
}
static inline u64 dccp_v4_init_sequence(const struct sock *sk,
					const struct sk_buff *skb)
{
	return secure_dccp_sequence_number(skb->nh.iph->daddr,
					   skb->nh.iph->saddr,
					   dccp_hdr(skb)->dccph_dport,
					   dccp_hdr(skb)->dccph_sport);
}
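/*
 * Passive open: dccp_v4_conn_request() below handles an incoming
 * DCCP-Request on a listening socket.  It drops requests arriving via
 * broadcast/multicast routes, enforces the request and accept queue limits,
 * allocates a request_sock, records the peer's sequence number as ISR,
 * chooses ISS via dccp_v4_init_sequence() and answers with a DCCP-Response
 * before hashing the request with a DCCP_TIMEOUT_INIT timeout.
 */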
int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct dccp_sock dp;
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	const __u32 saddr = skb->nh.iph->saddr;
	const __u32 daddr = skb->nh.iph->daddr;
	struct dst_entry *dst = NULL;

	/* Never answer DCCP_PKT_REQUESTs sent to broadcast or multicast */
	if (((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/*
	 * TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	/*
	 * Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc(sk->sk_prot->rsk_prot);
	if (req == NULL)
		goto drop;

	/* FIXME: process options */

	dccp_openreq_init(req, &dp, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	/* FIXME: Merge Aristeu's option parsing code when ready */
	req->rcv_wnd	= 100; /* Fake, option parsing will get the
				  right value */
	ireq->opt	= NULL;

	/*
	 * Step 3: Process LISTEN state
	 *
	 *	Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
	 * dccp_create_openreq_child.
	 */
	dreq = dccp_rsk(req);
	dreq->dreq_isr	   = DCCP_SKB_CB(skb)->dccpd_seq;
	dreq->dreq_iss	   = dccp_v4_init_sequence(sk, skb);
	dreq->dreq_service = dccp_hdr_request(skb)->dccph_req_service;

	if (dccp_v4_send_response(sk, req, dst))
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	/*
	 * FIXME: should be reqsk_free after implementing req->rsk_ops
	 */
	__reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}
/*
 * The three way handshake has completed - we got a valid ACK or DATAACK -
 * now create the new socket.
 *
 * This is the equivalent of TCP's tcp_v4_syn_recv_sock
 */
struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
				       struct request_sock *req,
				       struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct dccp_sock *newdp;
	struct sock *newsk;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto exit;

	sk_setup_caps(newsk, dst);

	newdp		   = dccp_sk(newsk);
	newinet		   = inet_sk(newsk);
	ireq		   = inet_rsk(req);
	newinet->daddr	   = ireq->rmt_addr;
	newinet->rcv_saddr = ireq->loc_addr;
	newinet->saddr	   = ireq->loc_addr;
	newinet->opt	   = ireq->opt;
	ireq->opt	   = NULL;
	newinet->mc_index  = inet_iif(skb);
	newinet->mc_ttl	   = skb->nh.iph->ttl;
	newinet->id	   = jiffies;

	dccp_sync_mss(newsk, dst_mtu(dst));

	__inet_hash(&dccp_hashinfo, newsk, 0);
	__inet_inherit_port(&dccp_hashinfo, sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct iphdr *iph = skb->nh.iph;
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev,
						       dh->dccph_sport,
						       iph->saddr, iph->daddr);
	if (req != NULL)
		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet_lookup_established(&dccp_hashinfo,
					iph->saddr, dh->dccph_sport,
					iph->daddr, ntohs(dh->dccph_dport),
					inet_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

	return sk;
}
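/*
 * The checksum helpers below follow the packet's Checksum Coverage (CsCov)
 * field: CsCov == 0 means the checksum covers the whole packet, otherwise
 * coverage is (CsCov + X) 32-bit words, capped at the packet length.  For
 * example, CsCov == 3 on a packet with extended (48-bit) sequence numbers
 * (X == 1) gives a coverage of (3 + 1) * 4 = 16 bytes.
 */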
int dccp_v4_checksum(const struct sk_buff *skb, const u32 saddr,
		     const u32 daddr)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	int checksum_len;
	u32 tmp;

	if (dh->dccph_cscov == 0)
		checksum_len = skb->len;
	else {
		checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
		checksum_len = checksum_len < skb->len ? checksum_len :
							 skb->len;
	}

	tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
	return csum_tcpudp_magic(saddr, daddr, checksum_len,
				 IPPROTO_DCCP, tmp);
}

static int dccp_v4_verify_checksum(struct sk_buff *skb,
				   const u32 saddr, const u32 daddr)
{
	struct dccp_hdr *dh = dccp_hdr(skb);
	int checksum_len;
	u32 tmp;

	if (dh->dccph_cscov == 0)
		checksum_len = skb->len;
	else {
		checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
		checksum_len = checksum_len < skb->len ? checksum_len :
							 skb->len;
	}
	tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
	return csum_tcpudp_magic(saddr, daddr, checksum_len,
				 IPPROTO_DCCP, tmp) == 0 ? 0 : -1;
}
static struct dst_entry *dccp_v4_route_skb(struct sock *sk,
					   struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi fl = { .oif = ((struct rtable *)skb->dst)->rt_iif,
			    .nl_u = { .ip4_u =
				      { .daddr = skb->nh.iph->saddr,
					.saddr = skb->nh.iph->daddr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,
			    .uli_u = { .ports =
				       { .sport = dccp_hdr(skb)->dccph_dport,
					 .dport = dccp_hdr(skb)->dccph_sport }
				     }
			  };

	if (ip_route_output_flow(&rt, &fl, sk, 0)) {
		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}

	return &rt->u.dst;
}
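/*
 * dccp_v4_ctl_send_reset() below is the no-socket Reset path: it refuses to
 * answer a Reset with a Reset, routes the reply with dccp_v4_route_skb(),
 * and builds a DCCP_PKT_RESET carrying the reset code stashed in
 * DCCP_SKB_CB(rxskb)->dccpd_reset_code, with ports swapped and the peer's
 * sequence number echoed in the acknowledgement field.
 */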
void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
{
	int err;
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb;
	struct dst_entry *dst;

	/* Never send a reset in response to a reset. */
	if (rxdh->dccph_type == DCCP_PKT_RESET)
		return;

	if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
		return;

	dst = dccp_v4_route_skb(dccp_ctl_socket->sk, rxskb);
	if (dst == NULL)
		return;

	skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC);
	if (skb == NULL)
		goto out;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_DCCP_HEADER);
	skb->dst = dst_clone(dst);

	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_hdr_reset_len);

	/* Build DCCP header and checksum it. */
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;
	dh->dccph_x	= 1;
	dccp_hdr_reset(skb)->dccph_reset_code =
				DCCP_SKB_CB(rxskb)->dccpd_reset_code;

	dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	dh->dccph_checksum = dccp_v4_checksum(skb, rxskb->nh.iph->saddr,
					      rxskb->nh.iph->daddr);

	bh_lock_sock(dccp_ctl_socket->sk);
	err = ip_build_and_send_pkt(skb, dccp_ctl_socket->sk,
				    rxskb->nh.iph->daddr,
				    rxskb->nh.iph->saddr, NULL);
	bh_unlock_sock(dccp_ctl_socket->sk);

	if (err == NET_XMIT_CN || err == 0) {
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
	}
out:
	dst_release(dst);
}
int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_hdr *dh = dccp_hdr(skb);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dh, skb->len))
			goto reset;
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *	If S.state == LISTEN,
	 *	   If P.type == Request or P contains a valid Init Cookie
	 *		option,
	 *	      * Must scan the packet's options to check for an Init
	 *		Cookie.  Only the Init Cookie is processed here,
	 *		however; other options are processed in Step 8.  This
	 *		scan need only be performed if the endpoint uses Init
	 *		Cookies *
	 *	      * Generate a new socket and switch to that socket *
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookie
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *	      Continue with S.state == RESPOND
	 *	      * A Response packet will be generated in Step 11 *
	 *	   Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 *  NOTE: the check for the packet types is done in
	 *	  dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v4_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;

		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dh, skb->len))
		goto reset;
	return 0;

reset:
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
	dccp_v4_ctl_send_reset(skb);
discard:
	kfree_skb(skb);
	return 0;
}
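/*
 * dccp_invalid_packet() below implements the "Step 1: Check header basics"
 * sanity checks run before any socket lookup: the packet must be addressed
 * to this host, the generic header must be pullable, the type must be known,
 * the Data Offset must fit both the packet type and the packet, short
 * sequence numbers (X == 0) are only allowed on Data/Ack/DataAck, and the
 * header checksum must verify.
 */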
static inline int dccp_invalid_packet(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;

	if (skb->pkt_type != PACKET_HOST)
		return 1;

	if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
		dccp_pr_debug("pskb_may_pull failed\n");
		return 1;
	}

	dh = dccp_hdr(skb);

	/* If the packet type is not understood, drop packet and return */
	if (dh->dccph_type >= DCCP_PKT_INVALID) {
		dccp_pr_debug("invalid packet type\n");
		return 1;
	}

	/*
	 * If P.Data Offset is too small for packet type, or too large for
	 * packet, drop packet and return
	 */
	if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
		dccp_pr_debug("P.Data Offset(%u) too small 1\n",
			      dh->dccph_doff);
		return 1;
	}

	if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
		dccp_pr_debug("P.Data Offset(%u) too small 2\n",
			      dh->dccph_doff);
		return 1;
	}

	dh = dccp_hdr(skb);

	/*
	 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
	 * has short sequence numbers), drop packet and return
	 */
	if (dh->dccph_x == 0 &&
	    dh->dccph_type != DCCP_PKT_DATA &&
	    dh->dccph_type != DCCP_PKT_ACK &&
	    dh->dccph_type != DCCP_PKT_DATAACK) {
		dccp_pr_debug("P.type (%s) not Data, Ack nor DataAck and "
			      "P.X == 0\n", dccp_packet_name(dh->dccph_type));
		return 1;
	}

	/* If the header checksum is incorrect, drop packet and return */
	if (dccp_v4_verify_checksum(skb, skb->nh.iph->saddr,
				    skb->nh.iph->daddr) < 0) {
		dccp_pr_debug("header checksum is incorrect\n");
		return 1;
	}

	return 0;
}
/* this is called when real data arrives */
int dccp_v4_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;
	int rc;

	/* Step 1: Check header basics: */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	dh = dccp_hdr(skb);
#if 0
	/*
	 * Use something like this to simulate some DATA/DATAACK loss to test
	 * dccp_ackpkts_add, you'll get something like this on a session that
	 * sends 10 DATA/DATAACK packets:
	 *
	 * ackpkts_print: 281473596467422 |0,0|3,0|0,0|3,0|0,0|3,0|0,0|3,0|0,1|
	 *
	 * 0, 0 means: DCCP_ACKPKTS_STATE_RECEIVED, RLE == just this packet
	 * 0, 1 means: DCCP_ACKPKTS_STATE_RECEIVED, RLE == two adjacent packets
	 *	       with the same state
	 * 3, 0 means: DCCP_ACKPKTS_STATE_NOT_RECEIVED, RLE == just this packet
	 *
	 * So...
	 *
	 * 281473596467422 was received
	 * 281473596467421 was not received
	 * 281473596467420 was received
	 * 281473596467419 was not received
	 * 281473596467418 was received
	 * 281473596467417 was not received
	 * 281473596467416 was received
	 * 281473596467415 was not received
	 * 281473596467414 was received
	 * 281473596467413 was received (this one was the 3way handshake
	 *				 RESPONSE)
	 */
	if (dh->dccph_type == DCCP_PKT_DATA ||
	    dh->dccph_type == DCCP_PKT_DATAACK) {
		static int discard = 0;

		if (discard) {
			discard = 0;
			goto discard_it;
		}
		discard = 1;
	}
#endif
	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(skb);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	dccp_pr_debug("%8.8s "
		      "src=%u.%u.%u.%u@%-5d "
		      "dst=%u.%u.%u.%u@%-5d seq=%llu",
		      dccp_packet_name(dh->dccph_type),
		      NIPQUAD(skb->nh.iph->saddr), ntohs(dh->dccph_sport),
		      NIPQUAD(skb->nh.iph->daddr), ntohs(dh->dccph_dport),
		      (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);

	if (dccp_packet_without_ack(skb)) {
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
		dccp_pr_debug_cat("\n");
	} else {
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
		dccp_pr_debug_cat(", ack=%llu\n",
				  (unsigned long long)
				  DCCP_SKB_CB(skb)->dccpd_ack_seq);
	}

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet_lookup(&dccp_hashinfo,
			   skb->nh.iph->saddr, dh->dccph_sport,
			   skb->nh.iph->daddr, ntohs(dh->dccph_dport),
			   inet_iif(skb));

	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: "
			      "do_time_wait\n");
		goto do_time_wait;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
		dccp_pr_debug("xfrm4_policy_check failed\n");
		goto discard_and_relse;
	}

	if (sk_filter(sk, skb, 0)) {
		dccp_pr_debug("sk_filter failed\n");
		goto discard_and_relse;
	}

	bh_lock_sock(sk);
	rc = 0;
	if (!sock_owned_by_user(sk))
		rc = dccp_v4_do_rcv(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return rc;

no_dccp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	Generate Reset(No Connection) unless P.type == Reset
	 *	Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v4_ctl_send_reset(skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	inet_twsk_put((struct inet_timewait_sock *)sk);
	goto no_dccp_socket;
}
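/*
 * dccp_v4_init_sock() below sets up a freshly created DCCP socket: default
 * options, an ack vector buffer when dccpo_send_ack_vector is enabled, rx/tx
 * CCID blocks (currently hardcoded, see the FIXME), the transmit timers, an
 * initial RTO of DCCP_TIMEOUT_INIT and a conservative 536-byte MSS cache.
 */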
static int dccp_v4_init_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	static int dccp_ctl_socket_init = 1;

	dccp_options_init(&dp->dccps_options);

	if (dp->dccps_options.dccpo_send_ack_vector) {
		dp->dccps_hc_rx_ackpkts =
			dccp_ackpkts_alloc(DCCP_MAX_ACK_VECTOR_LEN,
					   GFP_KERNEL);

		if (dp->dccps_hc_rx_ackpkts == NULL)
			return -ENOMEM;
	}

	/*
	 * FIXME: We're hardcoding the CCID, and doing this at this point makes
	 * the listening (master) sock get CCID control blocks, which is not
	 * necessary, but for now, to not mess with the test userspace apps,
	 * lets leave it here, later the real solution is to do this in a
	 * setsockopt(CCIDs-I-want/accept). -acme
	 */
	if (likely(!dccp_ctl_socket_init)) {
		dp->dccps_hc_rx_ccid = ccid_init(dp->dccps_options.dccpo_ccid,
						 sk);
		dp->dccps_hc_tx_ccid = ccid_init(dp->dccps_options.dccpo_ccid,
						 sk);
		if (dp->dccps_hc_rx_ccid == NULL ||
		    dp->dccps_hc_tx_ccid == NULL) {
			ccid_exit(dp->dccps_hc_rx_ccid, sk);
			ccid_exit(dp->dccps_hc_tx_ccid, sk);
			dccp_ackpkts_free(dp->dccps_hc_rx_ackpkts);
			dp->dccps_hc_rx_ackpkts = NULL;
			dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
			return -ENOMEM;
		}
	} else
		dccp_ctl_socket_init = 0;

	dccp_init_xmit_timers(sk);
	inet_csk(sk)->icsk_rto = DCCP_TIMEOUT_INIT;
	sk->sk_state = DCCP_CLOSED;
	dp->dccps_mss_cache = 536;
	dp->dccps_role = DCCP_ROLE_UNDEFINED;

	return 0;
}
int dccp_v4_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(&dccp_hashinfo, sk);

	dccp_ackpkts_free(dp->dccps_hc_rx_ackpkts);
	dp->dccps_hc_rx_ackpkts = NULL;
	ccid_exit(dp->dccps_hc_rx_ccid, sk);
	ccid_exit(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	return 0;
}
static void dccp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

static struct request_sock_ops dccp_request_sock_ops = {
	.family		= PF_INET,
	.obj_size	= sizeof(struct dccp_request_sock),
	.rtx_syn_ack	= dccp_v4_send_response,
	.send_ack	= dccp_v4_reqsk_send_ack,
	.destructor	= dccp_v4_reqsk_destructor,
	.send_reset	= dccp_v4_ctl_send_reset,
};
struct proto dccp_v4_prot = {
	.name		= "DCCP",
	.owner		= THIS_MODULE,
	.close		= dccp_close,
	.connect	= dccp_v4_connect,
	.disconnect	= dccp_disconnect,
	.ioctl		= dccp_ioctl,
	.init		= dccp_v4_init_sock,
	.setsockopt	= dccp_setsockopt,
	.getsockopt	= dccp_getsockopt,
	.sendmsg	= dccp_sendmsg,
	.recvmsg	= dccp_recvmsg,
	.backlog_rcv	= dccp_v4_do_rcv,
	.hash		= dccp_v4_hash,
	.unhash		= dccp_v4_unhash,
	.accept		= inet_csk_accept,
	.get_port	= dccp_v4_get_port,
	.shutdown	= dccp_shutdown,
	.destroy	= dccp_v4_destroy_sock,
	.orphan_count	= &dccp_orphan_count,
	.max_header	= MAX_DCCP_HEADER,
	.obj_size	= sizeof(struct dccp_sock),
	.rsk_prot	= &dccp_request_sock_ops,
	.twsk_obj_size	= sizeof(struct inet_timewait_sock),
};