sfc: Minor formatting cleanup
[pandora-kernel.git] / drivers / net / ethernet / sfc / tx.c
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index df88c54..94d0365 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -31,7 +31,9 @@
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-                               struct efx_tx_buffer *buffer)
+                               struct efx_tx_buffer *buffer,
+                               unsigned int *pkts_compl,
+                               unsigned int *bytes_compl)
 {
         if (buffer->unmap_len) {
                 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
@@ -48,6 +50,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
         }
 
         if (buffer->skb) {
+                (*pkts_compl)++;
+                (*bytes_compl) += buffer->skb->len;
                 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
                 buffer->skb = NULL;
                 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
@@ -106,7 +110,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
          * little benefit from using descriptors that cross those
          * boundaries and we keep things simple by not doing so.
          */
-        unsigned len = (~dma_addr & 0xfff) + 1;
+        unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
 
         /* Work around hardware bug for unaligned buffers. */
         if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
@@ -250,6 +254,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
         buffer->skb = skb;
         buffer->continuation = false;
 
+        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
         /* Pass off to hardware */
         efx_nic_push_buffers(tx_queue);
 
@@ -267,10 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
  unwind:
         /* Work backwards until we hit the original insert pointer value */
         while (tx_queue->insert_count != tx_queue->write_count) {
+                unsigned int pkts_compl = 0, bytes_compl = 0;
                 --tx_queue->insert_count;
                 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                 buffer = &tx_queue->buffer[insert_ptr];
-                efx_dequeue_buffer(tx_queue, buffer);
+                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                 buffer->len = 0;
         }
 
@@ -293,7 +300,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
  * specified index.
  */
 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
-                                unsigned int index)
+                                unsigned int index,
+                                unsigned int *pkts_compl,
+                                unsigned int *bytes_compl)
 {
         struct efx_nic *efx = tx_queue->efx;
         unsigned int stop_index, read_ptr;
@@ -311,7 +320,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                         return;
                 }
 
-                efx_dequeue_buffer(tx_queue, buffer);
+                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
                 buffer->continuation = true;
                 buffer->len = 0;
 
@@ -330,7 +339,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
  * OS to free the skb.
  */
 netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
-                                      struct net_device *net_dev)
+                                struct net_device *net_dev)
 {
         struct efx_nic *efx = netdev_priv(net_dev);
         struct efx_tx_queue *tx_queue;
@@ -422,10 +431,12 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
         unsigned fill_level;
         struct efx_nic *efx = tx_queue->efx;
+        unsigned int pkts_compl = 0, bytes_compl = 0;
 
         EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
-        efx_dequeue_buffers(tx_queue, index);
+        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+        netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
         /* See if we need to restart the netif queue. This barrier
          * separates the update of read_count from the test of the
@@ -435,10 +446,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
             likely(efx->port_enabled) &&
             likely(netif_device_present(efx->net_dev))) {
                 fill_level = tx_queue->insert_count - tx_queue->read_count;
-                if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
-                        EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
+                if (fill_level < EFX_TXQ_THRESHOLD(efx))
                         netif_tx_wake_queue(tx_queue->core_txq);
-                }
         }
 
         /* Check whether the hardware queue is now empty */
@@ -468,7 +477,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
                   tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
 
         /* Allocate software ring */
-        tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                    GFP_KERNEL);
         if (!tx_queue->buffer)
                 return -ENOMEM;
@@ -515,13 +524,15 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
         /* Free any buffers left in the ring */
         while (tx_queue->read_count != tx_queue->write_count) {
+                unsigned int pkts_compl = 0, bytes_compl = 0;
                 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
-                efx_dequeue_buffer(tx_queue, buffer);
+                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                 buffer->continuation = true;
                 buffer->len = 0;
 
                 ++tx_queue->read_count;
         }
+        netdev_tx_reset_queue(tx_queue->core_txq);
 }
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1160,6 +1171,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                         goto mem_err;
         }
 
+        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
         /* Pass off to hardware */
         efx_nic_push_buffers(tx_queue);
 
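
Most of this diff wires the sfc TX path into the kernel's byte queue limits (BQL) accounting: bytes handed to the hardware are reported with netdev_tx_sent_queue(), completions with netdev_tx_completed_queue(), and a full discard of pending buffers with netdev_tx_reset_queue(). The following is a minimal, driver-agnostic sketch of that pattern for orientation only; the names prefixed with my_ (my_tx_ring, my_start_xmit, my_tx_complete, my_tx_flush) are hypothetical and are not part of the sfc driver.

/* Hedged sketch of the BQL accounting pattern used in the diff above.
 * All my_* identifiers are illustrative, not sfc code.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_tx_ring {
        struct netdev_queue *txq;       /* e.g. obtained via netdev_get_tx_queue() */
};

static netdev_tx_t my_start_xmit(struct my_tx_ring *ring, struct sk_buff *skb)
{
        /* ...map the skb and post descriptors to the hardware... */

        /* Account the bytes handed to the hardware; BQL may stop the queue
         * once too many bytes are in flight.
         */
        netdev_tx_sent_queue(ring->txq, skb->len);
        return NETDEV_TX_OK;
}

static void my_tx_complete(struct my_tx_ring *ring, unsigned int pkts_compl,
                           unsigned int bytes_compl)
{
        /* Report completed packets/bytes so BQL can adjust its in-flight
         * limit and re-wake the queue if it had been stopped.
         */
        netdev_tx_completed_queue(ring->txq, pkts_compl, bytes_compl);
}

static void my_tx_flush(struct my_tx_ring *ring)
{
        /* When pending buffers are discarded (queue teardown or reset),
         * the BQL counters must be reset too, or the sent and completed
         * totals would permanently diverge.
         */
        netdev_tx_reset_queue(ring->txq);
}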