extern struct inet_hashinfo tcp_hashinfo;
-extern atomic_t tcp_orphan_count;
+extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER (128 + MAX_HEADER)
extern int sysctl_tcp_max_ssthresh;
extern atomic_t tcp_memory_allocated;
-extern atomic_t tcp_sockets_allocated;
+extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
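Both counters move from atomic_t to struct percpu_counter, so their users switch from atomic_inc()/atomic_read() to the percpu_counter helpers. A rough sketch of a reader after the conversion (the real call sites are outside this hunk, so the variable names below are illustration only):

        /* Sketch: reading a percpu_counter means folding the per-CPU deltas. */
        int orphans = percpu_counter_sum_positive(&tcp_orphan_count);
        int sockets = percpu_counter_read_positive(&tcp_sockets_allocated);

        /* Writers use percpu_counter_inc()/percpu_counter_dec() rather than
         * atomic_inc()/atomic_dec().
         */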
/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);
-extern void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
- struct sk_buff *skb);
/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
- __u16 urg_ptr; /* Valid w/URG flags is set. */
__u32 ack_seq; /* Sequence number ACK'd */
};
return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
-extern int tcp_limit_reno_sacked(struct tcp_sock *tp);
-
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
* The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
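In the tree this comment sits directly above tcp_current_ssthresh(), which is not part of the hunk; a sketch of the logic it describes:

        static inline __u32 tcp_current_ssthresh(const struct sock *sk)
        {
                const struct tcp_sock *tp = tcp_sk(sk);

                /* In CWR/Recovery (rate halving), cwnd is already decreasing
                 * towards ssthresh, so report ssthresh unchanged.
                 */
                if ((1 << inet_csk(sk)->icsk_ca_state) &
                    (TCPF_CA_CWR | TCPF_CA_Recovery))
                        return tp->snd_ssthresh;

                /* Otherwise ssthresh may rise to 3/4 of cwnd, i.e. half-way
                 * between cwnd/2 and cwnd.
                 */
                return max(tp->snd_ssthresh,
                           (tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2));
        }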
BUG_ON(sock_owned_by_user(sk));
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
- sk->sk_backlog_rcv(sk, skb1);
+ sk_backlog_rcv(sk, skb1);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
}
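sk_backlog_rcv() wraps the sk->sk_backlog_rcv callback behind a helper in include/net/sock.h, roughly:

        static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
        {
                return sk->sk_backlog_rcv(sk, skb);
        }

Routing every backlog receive through one wrapper presumably leaves a single place to hook accounting or tracing later, instead of each caller invoking the function pointer directly.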
return skb_queue_next(&sk->sk_write_queue, skb);
}
+/* Caller must ensure @skb is not the first skb on the write queue. */
+static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
+{
+	return skb_queue_prev(&sk->sk_write_queue, skb);
+}
+
#define tcp_for_write_queue(skb, sk) \
skb_queue_walk(&(sk)->sk_write_queue, skb)
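tcp_write_queue_prev() mirrors tcp_write_queue_next() above and, like skb_queue_prev(), performs no bounds checking. A hypothetical caller (tcp_prev_skb_or_null() is illustration only, not a kernel function):

        static inline struct sk_buff *tcp_prev_skb_or_null(struct sock *sk,
                                                           struct sk_buff *skb)
        {
                if (skb == tcp_write_queue_head(sk))
                        return NULL;    /* nothing before the first skb */
                return tcp_write_queue_prev(sk, skb);
        }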