	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
-static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
-{
-	writel(value, ring->tail);
-}
-
#define IXGBE_RX_DESC(R, i) \
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
-{
-	rx_ring->next_to_use = val;
-
-	/* update next to alloc since we have filled the ring */
-	rx_ring->next_to_alloc = val;
-	/*
-	 * Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch. (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-	ixgbe_write_tail(rx_ring, val);
-}
-
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	i += rx_ring->count;

-	if (rx_ring->next_to_use != i)
-		ixgbe_release_rx_desc(rx_ring, i);
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
+
+		/* update next to alloc since we have filled the ring */
+		rx_ring->next_to_alloc = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch. (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, rx_ring->tail);
+	}
}
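
For context only: a minimal user-space sketch of the ordering the rx refill hunk above relies on. The mock_rx_ring type, the atomic fence standing in for wmb(), and the plain pointer store standing in for writel() are illustrative assumptions, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the rx refill path: the ring bookkeeping (and, in the real
 * driver, the descriptor writes) must become visible before the tail write
 * that tells hardware new buffers are ready, which is what wmb() enforces
 * on weakly ordered architectures.
 */
struct mock_rx_ring {
	uint16_t count;
	uint16_t next_to_use;
	uint16_t next_to_alloc;
	volatile uint32_t *tail;	/* stands in for the MMIO tail register */
};

static void mock_release_rx_desc(struct mock_rx_ring *ring, uint16_t val)
{
	ring->next_to_use = val;
	ring->next_to_alloc = val;	/* ring is filled up to val */

	/* stands in for wmb(): order the index/descriptor stores before
	 * the doorbell write below
	 */
	__atomic_thread_fence(__ATOMIC_RELEASE);

	*ring->tail = val;		/* stands in for writel(val, ring->tail) */
}

int main(void)
{
	uint32_t tail_reg = 0;
	struct mock_rx_ring ring = { .count = 512, .tail = &tail_reg };

	mock_release_rx_desc(&ring, 256);
	printf("tail=%u next_to_use=%u next_to_alloc=%u\n",
	       (unsigned int)tail_reg, (unsigned int)ring.next_to_use,
	       (unsigned int)ring.next_to_alloc);
	return 0;
}
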
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
-		/* notify HW of packet */
-		ixgbe_write_tail(tx_ring, i);
+		writel(i, tx_ring->tail);
+
+		/* we need this if more than one processor can write to our tail
+		 * at a time, it synchronizes IO on IA64/Altix systems
+		 */
+		mmiowb();
	}
	return;
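
And a minimal sketch of the skb->xmit_more doorbell batching the tx hunk depends on, assuming a toy ring type; the stopped-queue condition and the mmiowb() ordering for IA64/Altix systems are deliberately not modeled here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of tx doorbell batching: the tail write is skipped while the
 * stack signals more frames are coming (xmit_more) and issued once at the
 * end of the burst. All names are illustrative stand-ins, not driver code.
 */
struct mock_tx_ring {
	uint16_t next_to_use;
	unsigned int doorbells;		/* number of simulated MMIO tail writes */
	volatile uint32_t *tail;
};

static void mock_xmit(struct mock_tx_ring *ring, bool xmit_more)
{
	ring->next_to_use++;		/* descriptor would be written here */

	if (!xmit_more) {
		/* stands in for wmb() + writel(i, tx_ring->tail) */
		__atomic_thread_fence(__ATOMIC_RELEASE);
		*ring->tail = ring->next_to_use;
		ring->doorbells++;
	}
}

int main(void)
{
	uint32_t tail_reg = 0;
	struct mock_tx_ring ring = { .tail = &tail_reg };

	mock_xmit(&ring, true);		/* batched, no doorbell */
	mock_xmit(&ring, true);		/* batched, no doorbell */
	mock_xmit(&ring, false);	/* burst ends, one doorbell for three frames */

	printf("descriptors=%u doorbells=%u tail=%u\n",
	       (unsigned int)ring.next_to_use, ring.doorbells,
	       (unsigned int)tail_reg);
	return 0;
}
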