ixgbe: Remove tail write abstraction and add missing barrier
author	Alexander Duyck <alexander.h.duyck@redhat.com>
	Fri, 14 Nov 2014 00:56:35 +0000 (00:56 +0000)
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
	Fri, 5 Dec 2014 17:13:05 +0000 (09:13 -0800)
This change cleans up the tail writes for the ixgbe descriptor queues.  The
current implementation had me confused, as I wasn't sure whether it was
still making use of the surprise remove logic or not.

It also adds the mmiowb() that is needed on ia64, mips, and a couple of
other architectures in order to synchronize the MMIO writes with the Tx
queue's _xmit_lock spinlock.
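
For reference, the hazard mmiowb() closes: on weakly ordered I/O
architectures such as ia64, an MMIO write issued inside a spinlock-protected
section can reach the device after an MMIO write done by the next holder of
that lock on another CPU.  A minimal illustrative sketch of the pattern (not
part of this patch; the lock shown mirrors the netdev queue's _xmit_lock):

	spin_lock(&txq->_xmit_lock);
	writel(tail, ring->tail);	/* posted doorbell write to the NIC */
	mmiowb();			/* order it before the unlock below */
	spin_unlock(&txq->_xmit_lock);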

Cc: Don Skidmore <donald.c.skidmore@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5032a60..86fa607 100644
@@ -553,11 +553,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
        return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 }
 
-static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
-{
-       writel(value, ring->tail);
-}
-
 #define IXGBE_RX_DESC(R, i)        \
        (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
 #define IXGBE_TX_DESC(R, i)        \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3c1d4ea..73cd0fe 100644
@@ -1416,22 +1416,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
-{
-       rx_ring->next_to_use = val;
-
-       /* update next to alloc since we have filled the ring */
-       rx_ring->next_to_alloc = val;
-       /*
-        * Force memory writes to complete before letting h/w
-        * know there are new descriptors to fetch.  (Only
-        * applicable for weak-ordered memory model archs,
-        * such as IA-64).
-        */
-       wmb();
-       ixgbe_write_tail(rx_ring, val);
-}
-
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
                                    struct ixgbe_rx_buffer *bi)
 {
@@ -1517,8 +1501,20 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 
        i += rx_ring->count;
 
-       if (rx_ring->next_to_use != i)
-               ixgbe_release_rx_desc(rx_ring, i);
+       if (rx_ring->next_to_use != i) {
+               rx_ring->next_to_use = i;
+
+               /* update next to alloc since we have filled the ring */
+               rx_ring->next_to_alloc = i;
+
+               /* Force memory writes to complete before letting h/w
+                * know there are new descriptors to fetch.  (Only
+                * applicable for weak-ordered memory model archs,
+                * such as IA-64).
+                */
+               wmb();
+               writel(i, rx_ring->tail);
+       }
 }
 
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
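
The block above open-codes the standard descriptor-ring producer ordering; a
condensed sketch of the contract (illustrative only, field names borrowed
from the adv Rx descriptor layout):

	desc->read.pkt_addr = cpu_to_le64(dma);	/* publish buffer address */
	wmb();					/* descriptor stores before doorbell */
	writel(next_to_use, ring->tail);	/* NIC may now DMA-read the ring */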
@@ -6954,8 +6950,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
        ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
-               /* notify HW of packet */
-               ixgbe_write_tail(tx_ring, i);
+               writel(i, tx_ring->tail);
+
+               /* we need this if more than one processor can write to our tail
+                * at a time, it synchronizes IO on IA64/Altix systems
+                */
+               mmiowb();
        }
 
        return;
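
For context, the driver never takes _xmit_lock itself; the core holds it
around ndo_start_xmit(), so the mmiowb() pairs with a spin_unlock() in the
caller.  A lightly abridged sketch of that call context from the core Tx
path of this era:

	HARD_TX_LOCK(dev, txq, smp_processor_id());	/* takes txq->_xmit_lock */
	skb = dev_hard_start_xmit(skb, dev, txq, &rc);	/* -> ixgbe_xmit_frame() */
	HARD_TX_UNLOCK(dev, txq);	/* the doorbell write must not pass this */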