extern void tcp_twsk_destructor(struct sock *sk);
+extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len, unsigned int flags);
+
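/* Editorial sketch (not part of the patch): what this export enables from
 * userspace. splice(2) on a connected TCP socket lands in tcp_splice_read()
 * on the kernel side. Rough, error handling trimmed; 'sock_fd' is assumed
 * to be a connected TCP socket.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t splice_sock_to_stdout(int sock_fd)
{
	int pipefd[2];
	ssize_t n, total = 0;

	if (pipe(pipefd) < 0)
		return -1;

	/* Socket -> pipe: the path served by tcp_splice_read(). */
	while ((n = splice(sock_fd, NULL, pipefd[1], NULL, 65536,
			   SPLICE_F_MOVE | SPLICE_F_MORE)) > 0) {
		/* Pipe -> stdout: drain what was just spliced in. */
		if (splice(pipefd[0], NULL, STDOUT_FILENO, NULL, n,
			   SPLICE_F_MOVE) < 0)
			break;
		total += n;
	}

	close(pipefd[0]);
	close(pipefd[1]);
	return total;
}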
static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	...
}

...

static inline void tcp_dec_pcount_approx(__u32 *count,
					 const struct sk_buff *skb)
{
	tcp_dec_pcount_approx_int(count, tcp_skb_pcount(skb));
}
-static inline void tcp_packets_out_inc(struct sock *sk,
- const struct sk_buff *skb)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- int orig = tp->packets_out;
-
- tp->packets_out += tcp_skb_pcount(skb);
- if (!orig)
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
-}
-
-static inline void tcp_packets_out_dec(struct tcp_sock *tp,
- const struct sk_buff *skb)
-{
- tp->packets_out -= tcp_skb_pcount(skb);
-}
-
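/* Editorial sketch (hypothetical, not in-tree code): with the two helpers
 * above removed, callers presumably open-code the equivalent accounting.
 * The increment side amounts to roughly this:
 */
static inline void tcp_account_new_data_sent(struct sock *sk,
					     const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tp->packets_out += tcp_skb_pcount(skb);

	/* First packet in flight: arm the retransmit timer. */
	if (!prior_packets)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}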
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	...
};

...

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}
+/* These functions determine how the current flow behaves with respect to SACK
+ * handling. SACK is negotiated with the peer, and therefore it can vary
+ * between different flows.
+ *
+ * tcp_is_sack - SACK enabled
+ * tcp_is_reno - No SACK
+ * tcp_is_fack - FACK enabled, implies SACK enabled
+ */
+static inline int tcp_is_sack(const struct tcp_sock *tp)
+{
+ return tp->rx_opt.sack_ok;
+}
+
+static inline int tcp_is_reno(const struct tcp_sock *tp)
+{
+ return !tcp_is_sack(tp);
+}
+
+static inline int tcp_is_fack(const struct tcp_sock *tp)
+{
+ return tp->rx_opt.sack_ok & 2;
+}
+
+static inline void tcp_enable_fack(struct tcp_sock *tp)
+{
+ tp->rx_opt.sack_ok |= 2;
+}
+
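/* Editorial sketch (hypothetical helper, not from this patch): how loss
 * accounting might branch on these predicates. With SACK the scoreboard
 * says exactly which segments were SACKed; a Reno flow can only infer one
 * newly SACKed segment per duplicate ACK.
 */
static inline void tcp_sketch_add_sacked(struct tcp_sock *tp, int pcount)
{
	if (tcp_is_sack(tp))
		tp->sacked_out += pcount;	/* exact, from SACK blocks */
	else
		tp->sacked_out++;		/* Reno: one per dupack */
}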
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

...

static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
-/* Use define here intentionally to get BUG_ON location shown at the caller */
-#define tcp_verify_left_out(tp) BUG_ON(tcp_left_out(tp) > tp->packets_out)
+/* Use define here intentionally to get WARN_ON location shown at the caller */
+#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
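/* Editorial note: WARN_ON() captures __FILE__/__LINE__ at its expansion
 * site, so as a macro the splat points at the caller. Wrapped in an inline
 * function (hypothetical variant below), it would always blame this line:
 */
static inline void tcp_verify_left_out_fn(const struct tcp_sock *tp)
{
	WARN_ON(tcp_left_out(tp) > tp->packets_out);	/* always this line */
}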
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

...

static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	...
	return left <= tcp_max_burst(tp);
}
-static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
+static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

...

static inline void tcp_mib_init(void)
{
	...
	TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
}
-/*from STCP */
-static inline void clear_all_retrans_hints(struct tcp_sock *tp){
+/* from STCP */
+static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
+{
tp->lost_skb_hint = NULL;
tp->scoreboard_skb_hint = NULL;
tp->retransmit_skb_hint = NULL;
tp->forward_skb_hint = NULL;
+}
+
+static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
+{
+ tcp_clear_retrans_hints_partial(tp);
tp->fastpath_skb_hint = NULL;
}
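/* Editorial sketch (hypothetical callers, not from this patch): the split
 * lets code that only invalidates retransmission state keep the fastpath
 * hint, while a full write-queue rewrite must drop every cached hint.
 */
static inline void tcp_sketch_retrans_state_changed(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);	/* fastpath_skb_hint survives */
}

static inline void tcp_sketch_queue_rewritten(struct tcp_sock *tp)
{
	tcp_clear_all_retrans_hints(tp);	/* drop everything */
}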
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						  struct sk_buff *skb,
						  struct sock *sk)
{
__skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
+
+ if (sk->sk_send_head == skb)
+ sk->sk_send_head = new;
}
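/* Editorial sketch (hypothetical caller): why the send-head fixup above
 * matters. If 'probe' is linked in front of the current send head, it is
 * now the next segment to transmit, so sk_send_head must move to it or
 * the new skb would never be sent.
 */
static inline void tcp_sketch_insert_probe(struct sock *sk,
					   struct sk_buff *probe)
{
	tcp_insert_write_queue_before(probe, sk->sk_send_head, sk);
	/* After this call, sk->sk_send_head == probe. */
}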
static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

...

static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}
+/* Start sequence of the highest skb with SACKed bit, valid only if
+ * sacked_out > 0 or when the caller has otherwise ensured validity.
+ */
+static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
+{
+ if (!tp->sacked_out)
+ return tp->snd_una;
+ return TCP_SKB_CB(tp->highest_sack)->seq;
+}
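/* Editorial sketch (hypothetical caller): per the comment above, the
 * helper is safe to call unconditionally because it falls back to snd_una
 * when no segments are currently SACKed.
 */
static inline int tcp_sketch_sack_passed(struct tcp_sock *tp, u32 seq)
{
	return after(tcp_highest_sack_seq(tp), seq);
}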
+
/* /proc */
enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING,