[Bluetooth] Initiate authentication during connection establishment
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e1afa76..8a28fc9 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -115,10 +115,10 @@ out_noerr:
 }
 
 /**
- *     skb_recv_datagram - Receive a datagram skbuff
+ *     __skb_recv_datagram - Receive a datagram skbuff
  *     @sk: socket
  *     @flags: MSG_ flags
- *     @noblock: blocking operation?
+ *     @peeked: returns non-zero if this packet has been seen before
  *     @err: error code returned
  *
  *     Get a datagram skbuff, understands the peeking, nonblocking wakeups
@@ -143,8 +143,8 @@ out_noerr:
  *     quite explicitly by POSIX 1003.1g, don't change them without having
  *     the standard around please.
  */
-struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
-                                 int noblock, int *err)
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+                                   int *peeked, int *err)
 {
        struct sk_buff *skb;
        long timeo;
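
For context, a minimal sketch of how a receive path might call the new entry point: the peeked out-parameter reports whether an earlier MSG_PEEK already saw this datagram, and non-blocking behaviour now comes from MSG_DONTWAIT in flags rather than a separate noblock argument. The caller and the accounting helper below are hypothetical, not part of this patch:

/* Hypothetical caller sketch (not part of this patch). */
#include <linux/skbuff.h>
#include <net/sock.h>

static void example_account_rx(struct sock *sk, struct sk_buff *skb)
{
        /* once-only per-datagram accounting would go here */
}

static int example_recv_one(struct sock *sk, unsigned flags)
{
        struct sk_buff *skb;
        int peeked, err;

        skb = __skb_recv_datagram(sk, flags | MSG_DONTWAIT, &peeked, &err);
        if (!skb)
                return err;

        if (!peeked)
                example_account_rx(sk, skb);    /* first time we see it */

        skb_free_datagram(sk, skb);
        return 0;
}
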
@@ -156,7 +156,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
        if (error)
                goto no_packet;
 
-       timeo = sock_rcvtimeo(sk, noblock);
+       timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
        do {
                /* Again only user level code calls this function, so nothing
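
The noblock parameter is gone, so the receive timeout is now derived directly from MSG_DONTWAIT. For reference, sock_rcvtimeo() is roughly the following inline from include/net/sock.h, so passing flags & MSG_DONTWAIT yields a zero timeout for non-blocking callers, exactly what the old noblock argument produced:

/* Paraphrased for context; see include/net/sock.h. */
static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
        return noblock ? 0 : sk->sk_rcvtimeo;
}
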
@@ -165,18 +165,19 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
                 * Look at current nfs client by the way...
                 * However, this function was correct in any case. 8)
                 */
-               if (flags & MSG_PEEK) {
-                       unsigned long cpu_flags;
-
-                       spin_lock_irqsave(&sk->sk_receive_queue.lock,
-                                         cpu_flags);
-                       skb = skb_peek(&sk->sk_receive_queue);
-                       if (skb)
+               unsigned long cpu_flags;
+
+               spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
+               skb = skb_peek(&sk->sk_receive_queue);
+               if (skb) {
+                       *peeked = skb->peeked;
+                       if (flags & MSG_PEEK) {
+                               skb->peeked = 1;
                                atomic_inc(&skb->users);
-                       spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
-                                              cpu_flags);
-               } else
-                       skb = skb_dequeue(&sk->sk_receive_queue);
+                       } else
+                               __skb_unlink(skb, &sk->sk_receive_queue);
+               }
+               spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 
                if (skb)
                        return skb;
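
With the peek and dequeue paths merged under a single sk_receive_queue.lock section, MSG_PEEK leaves the skb on the queue with an extra reference and skb->peeked set, while a normal receive unlinks it; in both cases *peeked tells the caller whether an earlier peek already saw the datagram. A sketch of the resulting sequence for a single queued datagram (hypothetical, not part of this patch; headers as in the sketch above):

/* Hypothetical peek-then-receive sequence. */
static void example_peek_then_read(struct sock *sk)
{
        struct sk_buff *skb;
        int peeked, err;

        /* First call: never seen before, so *peeked == 0.  MSG_PEEK keeps
         * the skb queued, marks skb->peeked = 1 and takes a reference. */
        skb = __skb_recv_datagram(sk, MSG_PEEK | MSG_DONTWAIT, &peeked, &err);
        if (skb)
                skb_free_datagram(sk, skb);     /* drops only the peek reference */

        /* Second call: the same datagram is dequeued for real, and
         * *peeked == 1 reports that it was already peeked at. */
        skb = __skb_recv_datagram(sk, MSG_DONTWAIT, &peeked, &err);
        if (skb)
                skb_free_datagram(sk, skb);
}
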
@@ -194,10 +195,21 @@ no_packet:
        *err = error;
        return NULL;
 }
+EXPORT_SYMBOL(__skb_recv_datagram);
+
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+                                 int noblock, int *err)
+{
+       int peeked;
+
+       return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+                                  &peeked, err);
+}
 
 void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
 {
        kfree_skb(skb);
+       sk_mem_reclaim(sk);
 }
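
Existing callers are unaffected: the skb_recv_datagram() wrapper keeps the old (flags, noblock) convention and simply folds noblock into MSG_DONTWAIT, and skb_free_datagram() now also returns forward-allocated receive memory to the socket via sk_mem_reclaim(). A sketch of an unchanged legacy-style caller (simplified and hypothetical; headers as in the sketches above):

/* Hypothetical legacy-style caller, unchanged by this patch. */
static int example_legacy_recvmsg(struct sock *sk, struct msghdr *msg,
                                  int len, int noblock, int flags)
{
        struct sk_buff *skb;
        int copied, err;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (copied > len)
                copied = len;

        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        skb_free_datagram(sk, skb);     /* kfree_skb() + sk_mem_reclaim() */
        return err ? err : copied;
}
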
 
 /**
@@ -217,20 +229,28 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
  *     This function currently only disables BH when acquiring the
  *     sk_receive_queue lock.  Therefore it must not be used in a
  *     context where that lock is acquired in an IRQ context.
+ *
+ *     It returns 0 if the packet was removed by us.
  */
 
-void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 {
+       int err = 0;
+
        if (flags & MSG_PEEK) {
+               err = -ENOENT;
                spin_lock_bh(&sk->sk_receive_queue.lock);
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        atomic_dec(&skb->users);
+                       err = 0;
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
        }
 
        kfree_skb(skb);
+       sk_mem_reclaim(sk);
+       return err;
 }
 
 EXPORT_SYMBOL(skb_kill_datagram);
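
The new return value lets a caller tell "we removed the bad datagram ourselves" (0) apart from "somebody else already took it off the queue" (-ENOENT), so per-datagram error counters can be bumped exactly once even when the datagram was only peeked at. A hypothetical error path (headers as in the sketches above):

/* Hypothetical error path. */
static void example_count_rx_error(struct sock *sk)
{
        /* per-socket or per-protocol error accounting would go here */
}

static void example_drop_bad_datagram(struct sock *sk, struct sk_buff *skb,
                                      unsigned int flags)
{
        if (!skb_kill_datagram(sk, skb, flags))
                example_count_rx_error(sk);     /* we removed it, count it once */
}
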
@@ -247,8 +267,8 @@ EXPORT_SYMBOL(skb_kill_datagram);
 int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                            struct iovec *to, int len)
 {
-       int end = skb_headlen(skb);
-       int i, copy = end - offset;
+       int start = skb_headlen(skb);
+       int i, copy = start - offset;
 
        /* Copy header. */
        if (copy > 0) {
@@ -263,9 +283,11 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 
        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               BUG_TRAP(len >= 0);
+               int end;
 
-               end = offset + skb_shinfo(skb)->frags[i].size;
+               BUG_TRAP(start <= offset + len);
+
+               end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8  *vaddr;
@@ -275,8 +297,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
-                       err = memcpy_toiovec(to, vaddr + frag->page_offset,
-                                            copy);
+                       err = memcpy_toiovec(to, vaddr + frag->page_offset +
+                                            offset - start, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
@@ -284,24 +306,30 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                                return 0;
                        offset += copy;
                }
+               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                for (; list; list = list->next) {
-                       BUG_TRAP(len >= 0);
+                       int end;
+
+                       BUG_TRAP(start <= offset + len);
 
-                       end = offset + list->len;
+                       end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
-                               if (skb_copy_datagram_iovec(list, 0, to, copy))
+                               if (skb_copy_datagram_iovec(list,
+                                                           offset - start,
+                                                           to, copy))
                                        goto fault;
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                        }
+                       start = end;
                }
        }
        if (!len)
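
The rewritten walk keeps start/end as absolute positions of each area within the skb (linear head, then each page fragment, then each frag_list skb), so a copy that begins in the middle of a fragment is now read from frag->page_offset + (offset - start) rather than implicitly from the fragment's first byte; the checksumming copy below receives the identical treatment. A small self-contained userspace analogue of the offset arithmetic (illustrative only, not kernel code):

/* Illustrative userspace analogue of the start/end walk. */
#include <stdio.h>

/* Find which segment a byte offset lands in and how far into it. */
static void locate(const int *seg_len, int nseg, int offset)
{
        int i, start = 0;

        for (i = 0; i < nseg; i++) {
                int end = start + seg_len[i];

                if (offset < end) {
                        printf("offset %d -> segment %d, intra-offset %d\n",
                               offset, i, offset - start);
                        return;
                }
                start = end;
        }
        printf("offset %d is past the end\n", offset);
}

int main(void)
{
        /* e.g. a 100-byte linear head followed by two 50-byte page frags */
        int seg_len[] = { 100, 50, 50 };

        locate(seg_len, 3, 120);        /* segment 1, intra-offset 20 */
        return 0;
}
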
@@ -315,9 +343,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      u8 __user *to, int len,
                                      __wsum *csump)
 {
-       int end = skb_headlen(skb);
+       int start = skb_headlen(skb);
        int pos = 0;
-       int i, copy = end - offset;
+       int i, copy = start - offset;
 
        /* Copy header. */
        if (copy > 0) {
@@ -336,9 +364,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               BUG_TRAP(len >= 0);
+               int end;
 
-               end = offset + skb_shinfo(skb)->frags[i].size;
+               BUG_TRAP(start <= offset + len);
+
+               end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        __wsum csum2;
                        int err = 0;
@@ -350,7 +380,8 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                copy = len;
                        vaddr = kmap(page);
                        csum2 = csum_and_copy_to_user(vaddr +
-                                                       frag->page_offset,
+                                                       frag->page_offset +
+                                                       offset - start,
                                                      to, copy, 0, &err);
                        kunmap(page);
                        if (err)
@@ -362,20 +393,24 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                        to += copy;
                        pos += copy;
                }
+               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                for (; list; list=list->next) {
-                       BUG_TRAP(len >= 0);
+                       int end;
+
+                       BUG_TRAP(start <= offset + len);
 
-                       end = offset + list->len;
+                       end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                __wsum csum2 = 0;
                                if (copy > len)
                                        copy = len;
-                               if (skb_copy_and_csum_datagram(list, 0,
+                               if (skb_copy_and_csum_datagram(list,
+                                                              offset - start,
                                                               to, copy,
                                                               &csum2))
                                        goto fault;
@@ -386,6 +421,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                to += copy;
                                pos += copy;
                        }
+                       start = end;
                }
        }
        if (!len)
@@ -434,6 +470,9 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
        __wsum csum;
        int chunk = skb->len - hlen;
 
+       if (!chunk)
+               return 0;
+
        /* Skip filled elements.
         * Pretty silly, look at memcpy_toiovec, though 8)
         */
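
The early return handles a datagram whose payload after hlen is empty: chunk == 0 means there is nothing to copy and nothing further to verify, so the function reports success without touching the iovec. For context, a simplified sketch of how a receive path typically picks between the plain and the checksumming copy; the details vary by protocol and the wrapper below is hypothetical (headers as in the sketches above):

/* Simplified, hypothetical dispatch. */
static int example_copy_to_user(struct sk_buff *skb, struct msghdr *msg,
                                int copied, int hlen)
{
        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                /* checksum already vouched for; a plain copy is enough */
                return skb_copy_datagram_iovec(skb, hlen, msg->msg_iov, copied);

        /* copy and verify in one pass; note that this variant copies the
         * whole remaining datagram and returns -EINVAL on a checksum mismatch */
        return skb_copy_and_csum_datagram_iovec(skb, hlen, msg->msg_iov);
}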