X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=blobdiff_plain;f=include%2Fnet%2Fsock.h;h=324b3ea233d6000f04e4d5894b799535bb397e6e;hp=c9fad6fb629b652d4c27e3965859efe12c3d7895;hb=51bece910d2b0aca64cd3dee9fa2a8aa7feeadd9;hpb=35a5d9ed9fedb74c22cb19ff7d749289473144e0

diff --git a/include/net/sock.h b/include/net/sock.h
index c9fad6fb629b..324b3ea233d6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -40,11 +40,11 @@
 #ifndef _SOCK_H
 #define _SOCK_H
 
-#include <linux/config.h>
 #include <linux/list.h>
 #include <linux/timer.h>
 #include <linux/cache.h>
 #include <linux/module.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>	/* struct sk_buff */
 #include <linux/security.h>
@@ -79,14 +79,17 @@
 typedef struct {
 	spinlock_t		slock;
 	struct sock_iocb	*owner;
 	wait_queue_head_t	wq;
+	/*
+	 * We express the mutex-alike socket_lock semantics
+	 * to the lock validator by explicitly managing
+	 * the slock as a lock variant (in addition to
+	 * the slock itself):
+	 */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } socket_lock_t;
 
-#define sock_lock_init(__sk) \
-do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
-	(__sk)->sk_lock.owner = NULL; \
-	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
-} while(0)
-
 struct sock;
 struct proto;
@@ -132,6 +135,7 @@ struct sock_common {
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_write_queue: Packet sending queue
+ *	@sk_async_wait_queue: DMA copied packets
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
@@ -140,6 +144,7 @@ struct sock_common {
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
  *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
+ *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
@@ -205,11 +210,13 @@ struct sock {
 	atomic_t		sk_omem_alloc;
 	struct sk_buff_head	sk_receive_queue;
 	struct sk_buff_head	sk_write_queue;
+	struct sk_buff_head	sk_async_wait_queue;
 	int			sk_wmem_queued;
 	int			sk_forward_alloc;
 	gfp_t			sk_allocation;
 	int			sk_sndbuf;
 	int			sk_route_caps;
+	int			sk_gso_type;
 	int			sk_rcvlowat;
 	unsigned long		sk_flags;
 	unsigned long		sk_lingertime;
@@ -382,7 +389,6 @@
 	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
 	SOCK_DBG, /* %SO_DEBUG setting */
 	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
-	SOCK_NO_LARGESEND, /* whether to send large segments or not */
 	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
 	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
 };
@@ -745,6 +751,9 @@ extern void FASTCALL(release_sock(struct sock *sk));
 
 /* BH context may only use the following locking interface. */
 #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
+#define bh_lock_sock_nested(__sk) \
+				spin_lock_nested(&((__sk)->sk_lock.slock), \
+				SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
 
 extern struct sock		*sk_alloc(int family,
@@ -871,10 +880,7 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
 	if (filter) {
 		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
				filter->len);
-		if (!pkt_len)
-			err = -EPERM;
-		else
-			skb_trim(skb, pkt_len);
+		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
 	}
 
 	if (needlock)
@@ -1028,13 +1034,22 @@
 extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
 extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
+static inline int sk_can_gso(const struct sock *sk)
+{
+	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
+}
+
 static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
 	__sk_dst_set(sk, dst);
 	sk->sk_route_caps = dst->dev->features;
-	if (sk->sk_route_caps & NETIF_F_TSO) {
-		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
-			sk->sk_route_caps &= ~NETIF_F_TSO;
+	if (sk->sk_route_caps & NETIF_F_GSO)
+		sk->sk_route_caps |= NETIF_F_GSO_MASK;
+	if (sk_can_gso(sk)) {
+		if (dst->header_len)
+			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+		else
+			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 	}
 }
@@ -1267,15 +1282,27 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
  *	sk_eat_skb - Release a skb if it is no longer needed
  *	@sk: socket to eat this skb from
  *	@skb: socket buffer to eat
+ *	@copied_early: flag indicating whether DMA operations copied this data early
  *
  *	This routine must be called with interrupts disabled or with the socket
  *	locked so that the sk_buff queue operation is ok.
  */
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
+#ifdef CONFIG_NET_DMA
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+{
+	__skb_unlink(skb, &sk->sk_receive_queue);
+	if (!copied_early)
+		__kfree_skb(skb);
+	else
+		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
+}
+#else
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
 	__kfree_skb(skb);
 }
+#endif
 
 extern void sock_enable_timestamp(struct sock *sk);
 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
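
The sk_setup_caps() hunk above replaces the single NETIF_F_TSO check with the generic GSO feature mask: a device that advertises software GSO (NETIF_F_GSO) is treated as capable of every GSO type, GSO is disabled over encapsulating routes (dst->header_len != 0), and a usable GSO path implies scatter-gather and hardware checksumming. The following is a minimal userspace sketch of that capability logic; the structure mirrors the kernel code, but the feature-bit values and the fake_sock/fake_dst types are illustrative assumptions, not the kernel's definitions.

#include <stdio.h>

/* Stand-in feature bits; the values are illustrative, not the
 * kernel's real NETIF_F_* definitions. */
#define NETIF_F_SG		(1u << 0)
#define NETIF_F_HW_CSUM		(1u << 1)
#define NETIF_F_GSO		(1u << 2)
#define NETIF_F_GSO_SHIFT	3
#define NETIF_F_GSO_MASK	(0xffu << NETIF_F_GSO_SHIFT)
#define SKB_GSO_TCPV4		(1u << 0)

struct fake_sock {
	unsigned int sk_route_caps;
	unsigned int sk_gso_type;
};

struct fake_dst {
	unsigned int dev_features;	/* models dst->dev->features */
	unsigned int header_len;	/* extra encapsulation header */
};

/* Mirrors net_gso_ok(): the route caps must contain the GSO
 * sub-feature matching the socket's gso type. */
static int net_gso_ok(unsigned int features, unsigned int gso_type)
{
	unsigned int feature = gso_type << NETIF_F_GSO_SHIFT;

	return (features & feature) == feature;
}

static int sk_can_gso(const struct fake_sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

static void sk_setup_caps(struct fake_sock *sk, const struct fake_dst *dst)
{
	sk->sk_route_caps = dst->dev_features;
	/* Software GSO can emulate every GSO type. */
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_MASK;
	if (sk_can_gso(sk)) {
		/* Encapsulation defeats GSO; otherwise GSO implies
		 * scatter-gather and checksum offload. */
		if (dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		else
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
	}
}

int main(void)
{
	struct fake_sock sk = { 0, SKB_GSO_TCPV4 };
	struct fake_dst plain = { NETIF_F_GSO, 0 };
	struct fake_dst tunnel = { NETIF_F_GSO, 8 };

	sk_setup_caps(&sk, &plain);
	printf("plain route:  caps=%#x can_gso=%d\n",
	       sk.sk_route_caps, sk_can_gso(&sk));
	sk_setup_caps(&sk, &tunnel);
	printf("tunnel route: caps=%#x can_gso=%d\n",
	       sk.sk_route_caps, sk_can_gso(&sk));
	return 0;
}

Running the sketch shows the two outcomes the hunk encodes: on the plain route the socket ends up with the full GSO mask plus SG and checksum offload, while the encapsulated route strips the GSO mask and sk_can_gso() returns 0.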
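
The three-argument sk_eat_skb() and the new sk_async_wait_queue support the NET_DMA early-copy receive path: when a DMA engine has been asked to copy a buffer's payload to user space, the skb cannot be freed at the point TCP consumes it, so it is parked on sk_async_wait_queue until the copy completes. Below is a toy model of that decision, with a hand-rolled singly linked queue standing in for struct sk_buff_head; all types and helpers here are simplified stand-ins, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for struct sk_buff / sk_buff_head. */
struct skb {
	struct skb *next;
	int id;
};

struct skb_head {
	struct skb *first;
};

struct fake_sock {
	struct skb_head receive_queue;    /* models sk_receive_queue */
	struct skb_head async_wait_queue; /* models sk_async_wait_queue */
};

/* Remove skb from a queue (models __skb_unlink). */
static void skb_unlink(struct skb *skb, struct skb_head *h)
{
	struct skb **pp;

	for (pp = &h->first; *pp; pp = &(*pp)->next) {
		if (*pp == skb) {
			*pp = skb->next;
			skb->next = NULL;
			return;
		}
	}
}

/* Append skb to a queue (models __skb_queue_tail). */
static void skb_queue_tail(struct skb_head *h, struct skb *skb)
{
	struct skb **pp;

	for (pp = &h->first; *pp; pp = &(*pp)->next)
		;
	skb->next = NULL;
	*pp = skb;
}

/* Models the CONFIG_NET_DMA flavour of sk_eat_skb(): an skb whose
 * payload a DMA engine copied "early" may still be in flight, so it
 * is parked on the async wait queue instead of being freed. */
static void sk_eat_skb(struct fake_sock *sk, struct skb *skb, int copied_early)
{
	skb_unlink(skb, &sk->receive_queue);
	if (!copied_early)
		free(skb);
	else
		skb_queue_tail(&sk->async_wait_queue, skb);
}

int main(void)
{
	struct fake_sock sk = { { NULL }, { NULL } };
	struct skb *a = calloc(1, sizeof(*a));
	struct skb *b = calloc(1, sizeof(*b));

	a->id = 1;
	b->id = 2;
	skb_queue_tail(&sk.receive_queue, a);
	skb_queue_tail(&sk.receive_queue, b);

	sk_eat_skb(&sk, a, 0);	/* not DMA-copied: freed immediately */
	sk_eat_skb(&sk, b, 1);	/* copy still in flight: parked */

	printf("async_wait_queue head: id=%d\n",
	       sk.async_wait_queue.first ? sk.async_wait_queue.first->id : -1);
	free(b);
	return 0;
}

The #else variant in the diff keeps the same three-argument signature so that callers need no #ifdef of their own; when NET_DMA is compiled out, copied_early is simply ignored and the skb is always freed.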
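
Finally, the sk_filter() change is more than folding an if/else into a ternary: the old code called skb_trim(), which returns void and cannot shrink non-linear data, while pskb_trim() can fail and returns an error code, which the new expression propagates through err. A compact model of that pattern follows; the buf type and its failure condition are invented for illustration.

#include <errno.h>
#include <stdio.h>

/* Toy buffer; min_len models data that cannot be trimmed away
 * (roughly: a buffer pskb_trim() would fail to reallocate). */
struct buf {
	unsigned int len;
	unsigned int min_len;
};

/* Stand-in for pskb_trim(): unlike the old void skb_trim(), it can
 * fail, so it returns 0 or a negative errno that must be checked. */
static int pbuf_trim(struct buf *b, unsigned int len)
{
	if (len < b->min_len)
		return -ENOMEM;
	if (len < b->len)
		b->len = len;
	return 0;
}

/* Mirrors the new sk_filter() tail: a zero verdict drops the packet
 * with -EPERM, a nonzero verdict trims and propagates trim errors. */
static int filter_verdict(struct buf *b, unsigned int pkt_len)
{
	return pkt_len ? pbuf_trim(b, pkt_len) : -EPERM;
}

int main(void)
{
	struct buf b = { 1500, 100 };
	int err;

	err = filter_verdict(&b, 500);
	printf("accept at 500: err=%d len=%u\n", err, b.len);
	err = filter_verdict(&b, 0);
	printf("drop:          err=%d\n", err);
	return 0;
}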