/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

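/*
 * __ip_local_out() finalizes tot_len and the header checksum and then
 * runs the NF_INET_LOCAL_OUT netfilter hook.  A return value of 1 means
 * the hook let the packet pass, so ip_local_out() below continues with
 * dst_output(); anything else means the packet was dropped, queued or
 * otherwise consumed by netfilter.
 */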
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));
	skb_dst_force(newskb);
	netif_rx_ni(newskb);
	return 0;
}

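/*
 * Pick the TTL for an outgoing unicast packet: the per-socket value
 * configured via IP_TTL if one was set, otherwise the route's default
 * hop limit.
 */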
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr    = saddr;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(skb, sk);

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen>>2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

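/*
 * Final step before the packet reaches the device: make sure there is
 * enough headroom for the link-layer header, then resolve the
 * neighbour bound to the dst and transmit through neigh_output(),
 * which uses the cached hard header when one is available.
 */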
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	rcu_read_lock();
	neigh = dst_get_neighbour(dst);
	if (neigh) {
		int res = neigh_output(neigh, skb);

		rcu_read_unlock();
		return res;
	}
	rcu_read_unlock();

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

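/*
 * Runs as the NF_INET_POST_ROUTING continuation: if netfilter rewrote
 * the packet so that it now matches an xfrm policy, loop it back
 * through dst_output() for transformation; otherwise fragment
 * over-MTU packets (GSO packets are segmented further down the stack)
 * and hand the result to ip_finish_output2().
 */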
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames,
		   i.e. frames that came back after forwarding; ip_mr_input
		   will drop them in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

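/*
 * Standard unicast output path: account the packet, then run the
 * NF_INET_POST_ROUTING hook with ip_finish_output() as continuation,
 * skipping the hook when the skb was already rerouted through it
 * (IPSKB_REROUTED).
 */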
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

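/*
 * Queue a packet from a connected socket for transmission; TCP is the
 * main user.  The route cached on the socket is used when still valid,
 * otherwise a fresh lookup is done, after which the IP header is built
 * around the transport payload and the skb sent via ip_local_out().
 */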
int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, the transport layer's retransmit machinery
		 * will keep trying until a route appears or the connection
		 * times out.
		 */
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = fl4->saddr;
	iph->daddr    = fl4->daddr;
	/* The transport layer has already filled in its own header. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);

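/*
 * Copy the per-packet metadata that every fragment must inherit from
 * the skb being fragmented: dst, device, priority, mark, and the
 * netfilter and traffic-control state.
 */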
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each equal in size to the IP header plus a
 *	block of the original datagram's data) that will still fit in a
 *	single device frame, and queue such frames for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When a frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited.  In this case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one is sent. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside e.g. a VLAN header,
	 * we need to make room for the encapsulating header
	 */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's
		 *		   not the last fragment then keep MF set on
		 *		   each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_fragment);

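/*
 * Generic getfrag callback for ip_append_data(): copies 'len' bytes of
 * iovec data at 'offset' into the skb, folding the copied data into
 * skb->csum unless the device will compute the checksum itself
 * (CHECKSUM_PARTIAL).
 */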
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int maxfraglen, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP fragmentation offload, so create
	 * one single skb containing the complete UDP datagram.
	 */
	if ((skb = skb_peek_tail(queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

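/*
 * Worker behind ip_append_data() and ip_make_skb(): appends 'length'
 * bytes obtained through 'getfrag' to the given queue, splitting the
 * data into MTU-sized chunks as it goes.  transhdrlen is non-zero only
 * on the first call for a datagram (it accounts for the transport
 * header); exthdrlen covers any dst-supplied header space.
 */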
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->fragsize;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (cork->length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	cork->length += length;
	if (((length > mtu) || (skb && skb_has_frags(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
	    (sk->sk_type == SOCK_DGRAM)) {
		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
					 hh_len, fragheaderlen, transhdrlen,
					 maxfraglen, flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each segment is an IP fragment ready for sending to the network
	 * once the appropriate IP header has been added.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					cork->tx_flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			skb_shinfo(skb)->tx_flags = cork->tx_flags;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = cork->page;
			int off = cork->off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != skb_frag_page(frag)) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					skb_fill_page_desc(skb, i, page, off, 0);
					skb_frag_ref(skb, i);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL)  {
					err = -ENOMEM;
					goto error;
				}
				cork->page = page;
				cork->off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, skb_frag_address(frag)+skb_frag_size(frag),
				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			cork->off += copy;
			skb_frag_size_add(frag, copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (cork->opt == NULL) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(cork->opt == NULL))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal a reference to this route; the caller must not release it
	 */
	*rtp = NULL;
	cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
			 rt->dst.dev->mtu : dst_mtu(&rt->dst);
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->tx_flags = ipc->tx_flags;
	cork->page = NULL;
	cork->off = 0;

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP but also other transport protocols, e.g. raw sockets,
 *	can potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
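/*
 * A sketch of the usual corked-send pattern, modelled on how UDP
 * drives this API (msg, ipc, rt and fl4 below are illustrative, not
 * taken verbatim from any caller):
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg->msg_iov,
 *			     len, sizeof(struct udphdr), &ipc, &rt,
 *			     msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, &fl4);
 *	release_sock(sk);
 */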
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
				from, length, transhdrlen, flags);
}

ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (cork->length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	cork->length += size;
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push it out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	if ((skb = __skb_dequeue(queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * fragmenting the frame generated here.  No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;
	ip_select_ident(skb, sk);

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

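/*
 * Transmit a datagram assembled by __ip_make_skb().  Positive return
 * codes from the lower layers (NET_XMIT_*) are converted to errnos,
 * and any resulting failure is counted as an output discard.
 */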
int ip_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	int err;

	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, unfragmented skb. */
	return ip_send_skb(skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

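/*
 * Variant of ip_append_data() that builds the datagram on a private
 * queue with an on-stack cork, so a single skb can be produced and
 * returned to the caller without touching the socket's pending-frames
 * state.
 */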
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags)
{
	struct inet_cork cork;
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.flags = 0;
	cork.addr = 0;
	cork.opt = NULL;
	err = ip_setup_cork(sk, &cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, &cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, &cork);
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as a reply to another packet.
 *	Used to send TCP resets so far; ICMP should use this function too.
 *
 *	Should run single-threaded per socket because it uses the sock
 *	structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
		   const struct ip_reply_arg *arg, unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	int err;

	if (ip_options_echo(&replyopts.opt.opt, skb))
		return;

	ipc.addr = daddr;
	ipc.opt = NULL;
	ipc.tx_flags = 0;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, rt->rt_spec_dst,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(sock_net(sk), &fl4);
	if (IS_ERR(rt))
		return;

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.  Note that it
	   relies on the fact that this function is called with BHs locally
	   disabled and that sk cannot already be spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = arg->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}