#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
-#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* struct sk_buff */
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
-static inline void __attribute__ ((format (printf, 2, 3)))
-SOCK_DEBUG(struct sock *sk, const char *msg, ...)
+static inline __printf(2, 3)
+void SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
#endif
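/*
 * Illustrative sketch, not part of this header: thanks to the __printf(2, 3)
 * annotation above, the compiler still checks SOCK_DEBUG() format strings in
 * the !SOCK_DEBUGGING build, where the call compiles away. A hypothetical
 * caller could look like this:
 */
static inline void example_debug_backlog(struct sock *sk, int backlog_len)
{
	SOCK_DEBUG(sk, "backlog length is now %d\n", backlog_len);
}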
sk_userlocks : 4,
sk_protocol : 8,
sk_type : 16;
+#define SK_PROTOCOL_MAX ((u8)~0U)
kmemcheck_bitfield_end(flags);
int sk_wmem_queued;
gfp_t sk_allocation;
SOCK_ZEROCOPY, /* buffers from userspace */
};
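/*
 * Illustrative sketch, not part of this header: SK_PROTOCOL_MAX (defined
 * inside struct sock above) lets an address family's ->create() handler
 * reject protocol numbers that would not fit into the 8-bit sk_protocol
 * bitfield. The helper name below is hypothetical.
 */
static inline int example_validate_protocol(int protocol)
{
	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
		return -EINVAL;
	return 0;
}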
+#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+
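/*
 * Illustrative sketch, not part of this header: SK_FLAGS_TIMESTAMP groups the
 * two timestamping flag bits so they can be tested with a single mask instead
 * of two sock_flag() calls. The helper below is hypothetical.
 */
static inline bool example_sk_wants_timestamps(const struct sock *sk)
{
	return sk->sk_flags & SK_FLAGS_TIMESTAMP;
}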
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
nsk->sk_flags = osk->sk_flags;
/*
* Take into account size of receive queue and backlog queue
+ * Do not take this skb's truesize into account,
+ * so that even a single big packet can still be accepted.
*/
static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
{
unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
- return qsize + skb->truesize > sk->sk_rcvbuf;
+ return qsize > sk->sk_rcvbuf;
}
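/*
 * Illustrative sketch, not part of this header: a protocol's receive path is
 * expected to drop early once receive-queue plus backlog memory exceeds
 * sk_rcvbuf; since the incoming skb's truesize is no longer counted, one big
 * packet can still be accepted on an otherwise empty socket. Hypothetical
 * caller:
 */
static inline int example_queue_or_drop(struct sock *sk, struct sk_buff *skb)
{
	if (sk_rcvqueues_full(sk, skb)) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	return sock_queue_rcv_skb(sk, skb);
}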
/* The per-socket spinlock must be held here. */
#endif
}
-static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
+static inline void sock_rps_save_rxhash(struct sock *sk,
+ const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (unlikely(sk->sk_rxhash != rxhash)) {
+ if (unlikely(sk->sk_rxhash != skb->rxhash)) {
sock_rps_reset_flow(sk);
- sk->sk_rxhash = rxhash;
+ sk->sk_rxhash = skb->rxhash;
}
#endif
}
+static inline void sock_rps_reset_rxhash(struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ sock_rps_reset_flow(sk);
+ sk->sk_rxhash = 0;
+#endif
+}
+
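/*
 * Illustrative sketch, not part of this header: receive paths record the flow
 * hash of each accepted skb so RFS can steer later packets of that flow to
 * the right CPU, while paths that redirect the socket's traffic (for example
 * a datagram socket re-connecting) reset the saved hash. Both helpers below
 * are hypothetical callers.
 */
static inline void example_note_rx(struct sock *sk, const struct sk_buff *skb)
{
	sock_rps_save_rxhash(sk, skb);
}

static inline void example_note_reconnect(struct sock *sk)
{
	sock_rps_reset_rxhash(sk);
}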
#define sk_wait_event(__sk, __timeo, __condition) \
({ int __rc; \
release_sock(__sk); \
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
+struct module;
+
+/*
+ * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of the
+ * nulls node unmodified, so special care is taken when initializing the
+ * object to zero.
+ */
+static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+{
+ if (offsetof(struct sock, sk_node.next) != 0)
+ memset(sk, 0, offsetof(struct sock, sk_node.next));
+ memset(&sk->sk_node.pprev, 0,
+ size - offsetof(struct sock, sk_node.pprev));
+}
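/*
 * Illustrative sketch, not part of this header: in a socket slab created with
 * SLAB_DESTROY_BY_RCU, a lockless reader may still be traversing the nulls
 * hash chain through an object while it is recycled, so zeroing it for reuse
 * must skip sk_node.next. Hypothetical allocation-path helper:
 */
static inline void example_clear_for_reuse(struct sock *sk, int obj_size)
{
	/* not memset(sk, 0, obj_size): that could wipe a chain still in use */
	sk_prot_clear_nulls(sk, obj_size);
}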
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
sk_free(sk);
}
-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
- const int nested);
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+ unsigned int trim_cap);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested)
+{
+ return __sk_receive_skb(sk, skb, nested, 1);
+}
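/*
 * Illustrative sketch, not part of this header: trim_cap bounds how far an
 * attached socket filter may trim the skb. sk_receive_skb() keeps the
 * historical floor of one byte, while a protocol that still needs its own
 * header beyond this point could pass that header's length instead. The
 * caller and hdr_len below are hypothetical.
 */
static inline int example_receive_keep_header(struct sock *sk,
					      struct sk_buff *skb,
					      unsigned int hdr_len)
{
	return __sk_receive_skb(sk, skb, 1, hdr_len);
}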
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
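/*
 * Illustrative sketch, not part of this header: sock_queue_rcv_skb() runs the
 * attached socket filter before charging and queueing the skb, so the
 * __sock_queue_rcv_skb() variant is meant for callers that have already run
 * the filter (for instance before modifying the skb) and only need the
 * enqueue step. Hypothetical caller:
 */
static inline int example_queue_prefiltered(struct sock *sk,
					    struct sk_buff *skb)
{
	return __sock_queue_rcv_skb(sk, skb);
}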