X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_fcoe.c;h=824edae7786542ab7cc5f951d8eeb69c5f185106;hb=2c9e88a1085b3183e5f92170a74980e5654f817b;hp=05920726e8249a36618a87be3504c9e1836ea4a4;hpb=361932bf84657b5dc0779046c751f06998c0d81f;p=pandora-kernel.git

diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 05920726e824..824edae77865 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -26,9 +26,6 @@
  *******************************************************************************/
 
 #include "ixgbe.h"
-#ifdef CONFIG_IXGBE_DCB
-#include "ixgbe_dcb_82599.h"
-#endif /* CONFIG_IXGBE_DCB */
 #include 
 #include 
 #include 
@@ -39,25 +36,6 @@
 #include 
 #include 
 
-/**
- * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @rx_desc: advanced rx descriptor
- *
- * Returns : true if it is FCoE pkt
- */
-static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
-{
-	u16 p;
-
-	p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
-	if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
-		p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
-		p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
-		return p == IXGBE_ETQF_FILTER_FCOE;
-	}
-	return false;
-}
-
 /**
  * ixgbe_fcoe_clear_ddp - clear the given ddp context
  * @ddp - ptr to the ixgbe_fcoe_ddp
@@ -128,14 +106,17 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
 	if (ddp->sgl)
 		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
 			     DMA_FROM_DEVICE);
-	pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
+	if (ddp->pool) {
+		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+		ddp->pool = NULL;
+	}
+
 	ixgbe_fcoe_clear_ddp(ddp);
 
 out_ddp_put:
 	return len;
 }
 
-
 /**
  * ixgbe_fcoe_ddp_setup - called to set up ddp context
  * @netdev: the corresponding net_device
@@ -163,6 +144,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	unsigned int thislen = 0;
 	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
 	dma_addr_t addr = 0;
+	struct pci_pool *pool;
 
 	if (!netdev || !sgl)
 		return 0;
@@ -199,12 +181,14 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 		return 0;
 	}
 
-	/* alloc the udl from our ddp pool */
-	ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp);
+	/* alloc the udl from per cpu ddp pool */
+	pool = *per_cpu_ptr(fcoe->pool, get_cpu());
+	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
 	if (!ddp->udl) {
 		e_err(drv, "failed allocated ddp context\n");
 		goto out_noddp_unmap;
 	}
+	ddp->pool = pool;
 	ddp->sgl = sgl;
 	ddp->sgc = sgc;
 
@@ -268,6 +252,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 		j++;
 		lastsize = 1;
 	}
+	put_cpu();
 
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
 	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
@@ -311,11 +296,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	return 1;
 
 out_noddp_free:
-	pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
+	pci_pool_free(pool, ddp->udl, ddp->udp);
 	ixgbe_fcoe_clear_ddp(ddp);
 
 out_noddp_unmap:
 	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+	put_cpu();
 	return 0;
 }
 
@@ -374,23 +360,20 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
  */
 int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 		   union ixgbe_adv_rx_desc *rx_desc,
-		   struct sk_buff *skb)
+		   struct sk_buff *skb,
+		   u32 staterr)
 {
 	u16 xid;
 	u32 fctl;
-	u32 sterr, fceofe, fcerr, fcstat;
+	u32 fceofe, fcerr, fcstat;
 	int rc = -EINVAL;
 	struct ixgbe_fcoe *fcoe;
 	struct ixgbe_fcoe_ddp *ddp;
 	struct fc_frame_header *fh;
 	struct fcoe_crc_eof *crc;
 
-	if (!ixgbe_rx_is_fcoe(rx_desc))
-		goto ddp_out;
-
-	sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-	fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
-	fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
+	fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
+	fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
 	if (fcerr == IXGBE_FCERR_BADCRC)
 		skb_checksum_none_assert(skb);
 	else
@@ -419,7 +402,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 	if (fcerr | fceofe)
 		goto ddp_out;
 
-	fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
+	fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
 	if (fcstat) {
 		/* update length of DDPed data */
 		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
@@ -465,24 +448,18 @@ ddp_out:
  *
  * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
  */
-int ixgbe_fso(struct ixgbe_adapter *adapter,
-	      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 	      u32 tx_flags, u8 *hdr_len)
 {
-	u8 sof, eof;
+	struct fc_frame_header *fh;
 	u32 vlan_macip_lens;
-	u32 fcoe_sof_eof;
-	u32 type_tucmd;
+	u32 fcoe_sof_eof = 0;
 	u32 mss_l4len_idx;
-	int mss = 0;
-	unsigned int i;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	struct fc_frame_header *fh;
+	u8 sof, eof;
 
 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
-		e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
-		      skb_shinfo(skb)->gso_type);
+		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+			skb_shinfo(skb)->gso_type);
 		return -EINVAL;
 	}
 
@@ -492,23 +469,22 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 				 sizeof(struct fcoe_hdr));
 
 	/* sets up SOF and ORIS */
-	fcoe_sof_eof = 0;
 	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
 	switch (sof) {
 	case FC_SOF_I2:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
 		break;
 	case FC_SOF_I3:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
+			       IXGBE_ADVTXD_FCOEF_ORIS;
 		break;
 	case FC_SOF_N2:
 		break;
 	case FC_SOF_N3:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
 		break;
 	default:
-		e_warn(drv, "unknown sof = 0x%x\n", sof);
+		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
 		return -EINVAL;
 	}
 
@@ -521,12 +497,11 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		break;
 	case FC_EOF_T:
 		/* lso needs ORIE */
-		if (skb_is_gso(skb)) {
-			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
-			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
-		} else {
+		if (skb_is_gso(skb))
+			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
+					IXGBE_ADVTXD_FCOEF_ORIE;
+		else
 			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
-		}
 		break;
 	case FC_EOF_NI:
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
@@ -535,7 +510,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
 		break;
 	default:
-		e_warn(drv, "unknown eof = 0x%x\n", eof);
+		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
 		return -EINVAL;
 	}
 
@@ -544,47 +519,72 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
 
-	/* hdr_len includes fc_hdr if FCoE lso is enabled */
+	/* include trailer in headlen as it is replicated per frame */
 	*hdr_len = sizeof(struct fcoe_crc_eof);
+
+	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
 	if (skb_is_gso(skb))
 		*hdr_len += (skb_transport_offset(skb) +
 			     sizeof(struct fc_frame_header));
-	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-	vlan_macip_lens = (skb_transport_offset(skb) +
-			  sizeof(struct fc_frame_header));
-	vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
-			   << IXGBE_ADVTXD_MACLEN_SHIFT);
-	vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-
-	/* type_tycmd and mss: set TUCMD.FCoE to enable offload */
-	type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
-		     IXGBE_ADVTXT_TUCMD_FCOE;
-	if (skb_is_gso(skb))
-		mss = skb_shinfo(skb)->gso_size;
+
 	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
-	mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
-			(1 << IXGBE_ADVTXD_IDX_SHIFT);
+	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_transport_offset(skb) +
+			  sizeof(struct fc_frame_header);
+	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
+			   << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	/* write context desc */
-	i = tx_ring->next_to_use;
-	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
-	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
-	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-	tx_buffer_info = &tx_ring->tx_buffer_info[i];
-	tx_buffer_info->time_stamp = jiffies;
-	tx_buffer_info->next_to_watch = i;
-
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
-	tx_ring->next_to_use = i;
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
+			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
 
 	return skb_is_gso(skb);
 }
 
+static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
+{
+	unsigned int cpu;
+	struct pci_pool **pool;
+
+	for_each_possible_cpu(cpu) {
+		pool = per_cpu_ptr(fcoe->pool, cpu);
+		if (*pool)
+			pci_pool_destroy(*pool);
+	}
+	free_percpu(fcoe->pool);
+	fcoe->pool = NULL;
+}
+
+static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	unsigned int cpu;
+	struct pci_pool **pool;
+	char pool_name[32];
+
+	fcoe->pool = alloc_percpu(struct pci_pool *);
+	if (!fcoe->pool)
+		return;
+
+	/* allocate pci pool for each cpu */
+	for_each_possible_cpu(cpu) {
+		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+		pool = per_cpu_ptr(fcoe->pool, cpu);
+		*pool = pci_pool_create(pool_name,
+					adapter->pdev, IXGBE_FCPTR_MAX,
+					IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+		if (!*pool) {
+			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+			ixgbe_fcoe_ddp_pools_free(fcoe);
+			return;
+		}
+	}
+}
+
 /**
  * ixgbe_configure_fcoe - configures registers for fcoe at start
  * @adapter: ptr to ixgbe adapter
@@ -599,27 +599,21 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-#ifdef CONFIG_IXGBE_DCB
-	u8 tc;
-	u32 up2tc;
-#endif
-
 	/* create the pool for ddp if not created yet */
 	if (!fcoe->pool) {
-		/* allocate ddp pool */
-		fcoe->pool = pci_pool_create("ixgbe_fcoe_ddp",
-					     adapter->pdev, IXGBE_FCPTR_MAX,
-					     IXGBE_FCPTR_ALIGN, PAGE_SIZE);
-		if (!fcoe->pool)
-			e_err(drv, "failed to allocated FCoE DDP pool\n");
-
 		spin_lock_init(&fcoe->lock);
 
+		ixgbe_fcoe_ddp_pools_alloc(adapter);
+		if (!fcoe->pool) {
+			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
+			return;
+		}
+
 		/* Extra buffer to be shared by all DDPs for HW work around */
 		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
 		if (fcoe->extra_ddp_buffer == NULL) {
 			e_err(drv, "failed to allocated extra DDP buffer\n");
-			goto out_extra_ddp_buffer_alloc;
+			goto out_ddp_pools;
 		}
 
 		fcoe->extra_ddp_buffer_dma =
@@ -630,7 +624,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		if (dma_mapping_error(&adapter->pdev->dev,
 				      fcoe->extra_ddp_buffer_dma)) {
 			e_err(drv, "failed to map extra DDP buffer\n");
-			goto out_extra_ddp_buffer_dma;
+			goto out_extra_ddp_buffer;
 		}
 	}
 
@@ -670,25 +664,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			IXGBE_FCRXCTRL_FCOELLI |
 			IXGBE_FCRXCTRL_FCCRCBO |
 			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-#ifdef CONFIG_IXGBE_DCB
-	up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
-	for (i = 0; i < MAX_USER_PRIORITY; i++) {
-		tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
-		tc &= (MAX_TRAFFIC_CLASS - 1);
-		if (fcoe->tc == tc) {
-			fcoe->up = i;
-			break;
-		}
-	}
-#endif
-
 	return;
 
-out_extra_ddp_buffer_dma:
+out_extra_ddp_buffer:
 	kfree(fcoe->extra_ddp_buffer);
-out_extra_ddp_buffer_alloc:
-	pci_pool_destroy(fcoe->pool);
-	fcoe->pool = NULL;
+out_ddp_pools:
+	ixgbe_fcoe_ddp_pools_free(fcoe);
 }
 
 /**
@@ -704,18 +685,17 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
 	int i;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
-	/* release ddp resource */
-	if (fcoe->pool) {
-		for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
-			ixgbe_fcoe_ddp_put(adapter->netdev, i);
-		dma_unmap_single(&adapter->pdev->dev,
-				 fcoe->extra_ddp_buffer_dma,
-				 IXGBE_FCBUFF_MIN,
-				 DMA_FROM_DEVICE);
-		kfree(fcoe->extra_ddp_buffer);
-		pci_pool_destroy(fcoe->pool);
-		fcoe->pool = NULL;
-	}
+	if (!fcoe->pool)
+		return;
+
+	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
+		ixgbe_fcoe_ddp_put(adapter->netdev, i);
+	dma_unmap_single(&adapter->pdev->dev,
+			 fcoe->extra_ddp_buffer_dma,
+			 IXGBE_FCBUFF_MIN,
+			 DMA_FROM_DEVICE);
+	kfree(fcoe->extra_ddp_buffer);
+	ixgbe_fcoe_ddp_pools_free(fcoe);
 }
 
 /**
@@ -811,41 +791,6 @@ out_disable:
 	return rc;
 }
 
-#ifdef CONFIG_IXGBE_DCB
-/**
- * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
- * @adapter : ixgbe adapter
- * @up : 802.1p user priority bitmap
- *
- * Finds out the traffic class from the input user priority
- * bitmap for FCoE.
- *
- * Returns : 0 on success otherwise returns 1 on error
- */
-u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
-{
-	int i;
-	u32 up2tc;
-
-	/* valid user priority bitmap must not be 0 */
-	if (up) {
-		/* from user priority to the corresponding traffic class */
-		up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
-		for (i = 0; i < MAX_USER_PRIORITY; i++) {
-			if (up & (1 << i)) {
-				up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
-				up2tc &= (MAX_TRAFFIC_CLASS - 1);
-				adapter->fcoe.tc = (u8)up2tc;
-				adapter->fcoe.up = i;
-				return 0;
-			}
-		}
-	}
-
-	return 1;
-}
-#endif /* CONFIG_IXGBE_DCB */
-
 /**
  * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
  * @netdev : ixgbe adapter