X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=blobdiff_plain;f=drivers%2Fnet%2Fethernet%2Fbroadcom%2Fbnx2x%2Fbnx2x_cmn.c;h=2c1a5c01a2d07869467c9e235203ac49c57eca03;hp=580b44edb066eafe779b8d55953a5d70cf8546f8;hb=dcf42d8ca45ca2009ead5cfae84c1c0de0a0af72;hpb=1eb63378354ac37b7e27d256bbf84684751bac32

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 580b44edb066..2c1a5c01a2d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -220,7 +220,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 
 	if ((netif_tx_queue_stopped(txq)) &&
 	    (bp->state == BNX2X_STATE_OPEN) &&
-	    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+	    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
 		netif_tx_wake_queue(txq);
 
 	__netif_tx_unlock(txq);
@@ -551,6 +551,26 @@ static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
 			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
 }
 
+static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+				struct bnx2x_fastpath *fp)
+{
+	/* Do nothing if no IP/L4 csum validation was done */
+
+	if (cqe->fast_path_cqe.status_flags &
+	    (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+	     ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+		return;
+
+	/* If both IP/L4 validation were done, check if an error was found. */
+
+	if (cqe->fast_path_cqe.type_error_flags &
+	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+		fp->eth_q_stats.hw_csum_err++;
+	else
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
 	struct bnx2x *bp = fp->bp;
@@ -746,13 +766,9 @@ reuse_rx:
 
 			skb_checksum_none_assert(skb);
 
-			if (bp->dev->features & NETIF_F_RXCSUM) {
+			if (bp->dev->features & NETIF_F_RXCSUM)
+				bnx2x_csum_validate(skb, cqe, fp);
 
-				if (likely(BNX2X_RX_CSUM_OK(cqe)))
-					skb->ip_summed = CHECKSUM_UNNECESSARY;
-				else
-					fp->eth_q_stats.hw_csum_err++;
-			}
 		}
 
 		skb_record_rx_queue(skb, fp->index);
@@ -2238,8 +2254,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
  */
 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
 				   struct bnx2x_fp_txdata *txdata,
@@ -2890,7 +2904,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	txdata->tx_bd_prod += nbd;
 
-	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
 		netif_tx_stop_queue(txq);
 
 		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -2899,7 +2913,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		smp_mb();
 
 		fp->eth_q_stats.driver_xoff++;
-		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
 			netif_tx_wake_queue(txq);
 	}
 	txdata->tx_pkt++;