static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
struct ath5k_buf *bf)
{
+ struct ath5k_hw *ah = sc->ah;
+ struct ath_common *common = ath5k_hw_common(ah);
+
BUG_ON(!bf);
if (!bf->skb)
return;
- pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
+ pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(bf->skb);
bf->skb = NULL;
}
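The new `common` pointer used throughout these hunks comes from ath5k's accessor into the shared Atheros state. For reference, a minimal sketch of that accessor as defined in ath5k.h, assuming struct ath5k_hw embeds its struct ath_common as a member named `common`:

static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
{
	/* The shared state is embedded in the hw struct, so this is a
	 * plain address-of: no allocation, locking or failure path. */
	return &ah->common;
}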
/*
* Allocate buffer with headroom_needed space for the
* fake physical layer header at the start.
*/
skb = ath_rxbuf_alloc(common,
- sc->rxbufsize + common->cachelsz - 1,
+ common->rx_bufsize,
GFP_ATOMIC);
if (!skb) {
ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
- sc->rxbufsize + common->cachelsz - 1);
+ common->rx_bufsize);
return NULL;
}
*skb_addr = pci_map_single(sc->pdev,
- skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
+ skb->data, common->rx_bufsize,
+ PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
dev_kfree_skb(skb);
return NULL;
}
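Note that the caller no longer adds `common->cachelsz - 1` to the requested length: the shared allocator pads and cache-line-aligns the buffer internally, so passing the bare rx_bufsize is sufficient. A simplified sketch of that helper from drivers/net/wireless/ath/main.c, with error reporting trimmed; treat the exact shape as illustrative:

struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, u32 len,
				gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 off;

	/* Over-allocate by one cache line so the payload can be
	 * aligned below; misaligned RX buffers produce bogus data
	 * on at least the AR5210. */
	skb = alloc_skb(len + common->cachelsz - 1, gfp_mask);
	if (!skb)
		return NULL;

	/* Advance skb->data to the next cache-line boundary. */
	off = ((unsigned long)skb->data) % common->cachelsz;
	if (off != 0)
		skb_reserve(skb, common->cachelsz - off);

	return skb;
}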
struct ath5k_buf *bf;
int ret;
- sc->rxbufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);
+ common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n",
- common->cachelsz, sc->rxbufsize);
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
+ common->cachelsz, common->rx_bufsize);
spin_lock_bh(&sc->rxbuflock);
sc->rxlink = NULL;
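roundup() here sizes the RX buffer to a whole number of cache lines, so the largest 802.11 frame still fits and a DMA transfer never ends mid-line. The kernel macro, with an illustrative worked example (the 2352-byte frame length and 32-byte cache line are example values, not taken from this patch):

/* From include/linux/kernel.h: */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

/* Illustrative: with IEEE80211_MAX_LEN == 2352 and a 32-byte cache
 * line, roundup(2352, 32) == ((2352 + 31) / 32) * 32 == 74 * 32
 * == 2368, the smallest multiple of 32 that holds a full frame. */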
struct sk_buff *skb, *next_skb;
dma_addr_t next_skb_addr;
struct ath5k_softc *sc = (void *)data;
+ struct ath5k_hw *ah = sc->ah;
+ struct ath_common *common = ath5k_hw_common(ah);
struct ath5k_buf *bf;
struct ath5k_desc *ds;
int ret;
if (!next_skb)
goto next;
- pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
+ pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
PCI_DMA_FROMDEVICE);
skb_put(skb, rs.rs_datalen);
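All of the hunks above lean on the matching change on the shared side: the patch moves the field itself into struct ath_common in drivers/net/wireless/ath/ath.h, next to the cachelsz it is rounded to. A sketch of the relevant slice of that struct; surrounding fields are elided and the exact integer widths are an assumption:

struct ath_common {
	/* ... */
	u16 cachelsz;	/* CPU cache line size, probed from PCI config */
	u32 rx_bufsize;	/* RX buffer size, formerly per-driver (sc->rxbufsize) */
	/* ... */
};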