#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes once the transport layer's tx_free lives here */
#include "iwl-agn.h"
-#include "iwl-core.h"
#include "iwl-shared.h"
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
}
}
-static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
+static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
struct iwl_rx_queue *rxq)
{
u32 rb_size;
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
/* Stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
/* Reset driver's Rx queue write index */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
/* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
(u32)(rxq->bd_dma >> 8));
/* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
rxq->rb_stts_dma >> 4);
/* Enable Rx DMA
* RB timeout 0x10
* 256 RBDs
*/
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
/* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+ iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
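/*
 * NOTE: rb_size is hard-coded to 4K above. In the full driver the receive
 * buffer size usually follows the 8K-A-MSDU module parameter; a minimal
 * sketch of that selection (use_8k_buffers is an illustrative flag, not
 * part of this patch):
 *
 *	rb_size = use_8k_buffers ? FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K
 *				 : FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
 */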
static int iwl_rx_init(struct iwl_trans *trans)
iwlagn_rx_replenish(trans);
- iwl_trans_rx_hw_init(priv(trans), rxq);
+ iwl_trans_rx_hw_init(trans, rxq);
spin_lock_irqsave(&trans->shrd->lock, flags);
rxq->need_update = 1;
{
/* stop Rx DMA */
- iwl_write_direct32(priv(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- return iwl_poll_direct_bit(priv(trans), FH_MEM_RSSR_RX_STATUS_REG,
+ iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
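/*
 * The stop path leans on iwl_poll_direct_bit(): it spins on a direct
 * register read until the mask bits are set or the timeout (in usecs)
 * expires. A hedged sketch of the assumed contract, with illustrative
 * names:
 *
 *	int t = 0;
 *	do {
 *		if ((read_reg() & mask) == mask)
 *			return t;
 *		udelay(POLL_INTERVAL_US);
 *		t += POLL_INTERVAL_US;
 *	} while (t < timeout);
 *	return -ETIMEDOUT;
 *
 * returning the elapsed usecs on success and -ETIMEDOUT if the channel
 * fails to go idle within the budget (1000 usecs here).
 */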
struct iwl_tx_queue *txq, int slots_num,
u32 txq_id)
{
- size_t tfd_sz = hw_params(trans).tfd_size * TFD_QUEUE_SIZE_MAX;
+ size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
int i;
- if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
+ if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
return -EINVAL;
txq->q.n_window = slots_num;
/* Driver private data, only for Tx (not command) queues,
* not shared with device. */
if (txq_id != trans->shrd->cmd_queue) {
- txq->txb = kzalloc(sizeof(txq->txb[0]) *
+ txq->skbs = kzalloc(sizeof(txq->skbs[0]) *
TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
- if (!txq->txb) {
+ if (!txq->skbs) {
IWL_ERR(trans, "kmalloc for auxiliary BD "
"structures failed\n");
goto error;
}
} else {
- txq->txb = NULL;
+ txq->skbs = NULL;
}
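/*
 * The per-TFD driver data shrank to just the skb pointer (the rxon
 * context no longer needs to be carried per frame, see the tx path
 * below), so the queue keeps a bare array instead of the old
 * struct iwl_tx_info wrapper. Sketch of the assumed field in
 * struct iwl_tx_queue:
 *
 *	struct sk_buff **skbs;	(one entry per TFD slot, Tx queues only)
 */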
/* Circular buffer of transmit frame descriptors (TFDs),
return 0;
error:
- kfree(txq->txb);
- txq->txb = NULL;
+ kfree(txq->skbs);
+ txq->skbs = NULL;
/* since txq->cmd has been zeroed,
 * all non-allocated cmd[i] will be NULL */
if (txq->cmd)
* Tell nic where to find circular buffer of Tx Frame Descriptors for
* given Tx queue, and enable the DMA channel used for that queue.
* Circular buffer (TFD queue in DRAM) physical base address */
- iwl_write_direct32(priv(trans), FH_MEM_CBBC_QUEUE(txq_id),
+ iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
txq->q.dma_addr >> 8);
return 0;
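/*
 * The ">> 8" above matters: FH_MEM_CBBC_QUEUE() holds only the upper bits
 * of the TFD ring's DMA address, i.e. the ring base is assumed to be at
 * least 256-byte aligned. dma_alloc_coherent() returns (at least)
 * page-aligned memory, so that assumption holds for the ring allocated
 * earlier.
 */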
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd) {
- dma_free_coherent(dev, hw_params(trans).tfd_size *
+ dma_free_coherent(dev, sizeof(struct iwl_tfd) *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
}
/* De-alloc array of per-TFD driver data */
- kfree(txq->txb);
- txq->txb = NULL;
+ kfree(txq->skbs);
+ txq->skbs = NULL;
/* deallocate arrays */
kfree(txq->cmd);
kfree(priv->txq);
priv->txq = NULL;
- iwlagn_free_dma_ptr(trans, &priv->kw);
+ iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
+ sizeof(struct iwlagn_scd_bc_tbl);
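+/*
+ * The byte-count tables are per Tx queue, so the allocation now scales
+ * with max_txq_num instead of a precomputed total; e.g. for N queues the
+ * DMA region is N * sizeof(struct iwlagn_scd_bc_tbl) bytes.
+ */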
+
/* It is not allowed to alloc twice, so warn when this happens.
 * We cannot rely on the previous allocation, so free and fail */
if (WARN_ON(priv->txq)) {
}
ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
- hw_params(trans).scd_bc_tbls_size);
+ scd_bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
goto error;
}
/* Alloc keep-warm buffer */
- ret = iwlagn_alloc_dma_ptr(trans, &priv->kw, IWL_KW_SIZE);
+ ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
if (ret) {
IWL_ERR(trans, "Keep Warm allocation failed\n");
goto error;
}
priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
- priv->cfg->base_params->num_of_queues, GFP_KERNEL);
+ hw_params(trans).max_txq_num, GFP_KERNEL);
if (!priv->txq) {
IWL_ERR(trans, "Not enough memory for txq\n");
ret = -ENOMEM;
return 0;
error:
- iwl_trans_tx_free(trans);
+ iwl_trans_pcie_tx_free(trans);
return ret;
}
unsigned long flags;
bool alloc = false;
struct iwl_priv *priv = priv(trans);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
if (!priv->txq) {
ret = iwl_trans_tx_alloc(trans);
spin_lock_irqsave(&trans->shrd->lock, flags);
/* Turn off all Tx DMA fifos */
- iwl_write_prph(priv, SCD_TXFACT, 0);
+ iwl_write_prph(bus(trans), SCD_TXFACT, 0);
/* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+ iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
spin_unlock_irqrestore(&trans->shrd->lock, flags);
error:
/* Upon error, free only if we allocated something */
if (alloc)
- iwl_trans_tx_free(trans);
+ iwl_trans_pcie_tx_free(trans);
return ret;
}
static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
+ struct iwl_trans *trans = trans(priv);
/*
* (for documentation purposes)
* to set power to V_AUX, do:
if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
~APMG_PS_CTRL_MSK_PWR_SRC);
*/
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
~APMG_PS_CTRL_MSK_PWR_SRC);
}
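/*
 * iwl_set_bits_mask_prph() is assumed to be a read-modify-write on a
 * periphery register, roughly:
 *
 *	val = iwl_read_prph(bus, reg);
 *	iwl_write_prph(bus, reg, (val & mask) | bits);
 *
 * so ~APMG_PS_CTRL_MSK_PWR_SRC clears the power-source field before the
 * VMAIN (or, in the commented-out path, the VAUX) value is OR'd in.
 */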
iwl_apm_init(priv);
/* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+ iwl_write8(bus(trans), CSR_INT_COALESCING,
+ IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&trans->shrd->lock, flags);
if (iwl_tx_init(trans))
return -ENOMEM;
- if (priv->cfg->base_params->shadow_reg_enable) {
+ if (hw_params(trans).shadow_reg_enable) {
/* enable shadow regs in HW */
- iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
+ iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
0x800FFFFF);
}
{
int ret;
- iwl_set_bit(priv(trans), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
/* See if we got it */
- ret = iwl_poll_bit(priv(trans), CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
HW_READY_TIMEOUT);
return 0;
/* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(priv(trans), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE);
- ret = iwl_poll_bit(priv(trans), CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
int ret;
struct iwl_priv *priv = priv(trans);
- priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
+ priv->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
- if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
+ if ((hw_params(priv).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
iwl_trans_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
return -EIO;
}
/* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
+ if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
else
return -ERFKILL;
}
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+ iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
ret = iwl_nic_init(trans);
if (ret) {
}
/* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
/* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+ iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
iwl_enable_interrupts(trans);
/* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
return 0;
}
*/
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
- iwl_write_prph(priv(trans), SCD_TXFACT, mask);
+ iwl_write_prph(bus(trans), SCD_TXFACT, mask);
}
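/*
 * Usage sketch: tx_start hands this a bitmask covering the active FIFO
 * channels (built with IWL_MASK(), assumed to produce a contiguous run of
 * set bits), while the stop path writes 0 to quiesce the scheduler:
 *
 *	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
 *	...
 *	iwl_trans_txq_set_sched(trans, 0);
 */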
#define IWL_AC_UNSET -1
spin_lock_irqsave(&trans->shrd->lock, flags);
- trans_pcie->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
+ trans_pcie->scd_base_addr =
+ iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
/* reset context data memory */
for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
a += 4)
- iwl_write_targ_mem(priv, a, 0);
+ iwl_write_targ_mem(bus(trans), a, 0);
/* reset tx status memory */
for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
a += 4)
- iwl_write_targ_mem(priv, a, 0);
+ iwl_write_targ_mem(bus(trans), a, 0);
for (; a < trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
a += 4)
- iwl_write_targ_mem(priv, a, 0);
+ iwl_write_targ_mem(bus(trans), a, 0);
- iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
+ iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
/* Enable DMA channel */
for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
/* Update FH chicken bits */
- reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
+ reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
- iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
+ iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
SCD_QUEUECHAIN_SEL_ALL(priv));
- iwl_write_prph(priv, SCD_AGGR_SEL, 0);
+ iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);
/* initiate the queues */
for (i = 0; i < hw_params(priv).max_txq_num; i++) {
- iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
+ iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
+ iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
+ iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
+ iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(i) +
sizeof(u32),
((SCD_WIN_SIZE <<
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
}
- iwl_write_prph(priv, SCD_INTERRUPT_MASK,
+ iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
IWL_MASK(0, hw_params(trans).max_txq_num));
/* Activate all Tx DMA/FIFO channels */
spin_unlock_irqrestore(&trans->shrd->lock, flags);
/* Enable L1-Active */
- iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
+ iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
/* Stop each Tx DMA channel, and wait for it to be idle */
for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
- iwl_write_direct32(priv(trans),
+ iwl_write_direct32(bus(trans),
FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(priv(trans), FH_TSSR_TX_STATUS_REG,
+ if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
1000))
IWL_ERR(trans, "Failing on timeout while stopping"
" DMA channel %d [0x%08x]", ch,
- iwl_read_direct32(priv(trans),
+ iwl_read_direct32(bus(trans),
FH_TSSR_TX_STATUS_REG));
}
spin_unlock_irqrestore(&trans->shrd->lock, flags);
return 0;
}
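/*
 * Each channel gets up to 1000 usecs to drain before the error above is
 * logged; the raw FH_TSSR_TX_STATUS_REG value is included so a stuck
 * channel can be diagnosed from the register dump alone.
 */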
+static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
+{
+ unsigned long flags;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwl_disable_interrupts(trans);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+	/* wait to make sure we flush the pending tasklet */
+ synchronize_irq(bus(trans)->irq);
+ tasklet_kill(&trans_pcie->irq_tasklet);
+}
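+/*
+ * Ordering note: interrupts are masked under the lock first, then
+ * synchronize_irq() waits out any handler already in flight (which may
+ * still schedule the tasklet), and only then is the tasklet killed.
+ * Reversing the last two steps could let a late handler re-schedule the
+ * tasklet after it was killed.
+ */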
+
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
/* stop and reset the on-board processor */
- iwl_write32(priv(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+ iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
/* tell the device to stop sending interrupts */
- iwl_trans_disable_sync_irq(trans);
+ iwl_trans_pcie_disable_sync_irq(trans);
/* device going down, Stop using ICT table */
iwl_disable_ict(trans);
iwl_trans_rx_stop(trans);
/* Power-down device's busmaster DMA clocks */
- iwl_write_prph(priv(trans), APMG_CLK_DIS_REG,
+ iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
}
/* Make sure (redundant) we've released our request to stay awake */
- iwl_clear_bit(priv(trans), CSR_GP_CNTRL,
+ iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
}
static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
- struct iwl_rxon_context *ctx)
+ struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
u8 hdr_len = ieee80211_hdrlen(fc);
/* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = ctx;
+ txq->skbs[q->write_ptr] = skb;
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_meta = &txq->meta[q->write_ptr];
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
+ iwl_txq_update_write_ptr(trans(priv), txq);
/*
* At this point the frame is "transmitted" successfully
if (iwl_queue_space(q) < q->high_mark) {
if (wait_write_ptr) {
txq->need_update = 1;
- iwl_txq_update_write_ptr(priv, txq);
+ iwl_txq_update_write_ptr(trans(priv), txq);
} else {
iwl_stop_queue(priv, txq);
}
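/*
 * iwl_queue_inc_wrap() advances an index modulo the ring size; with n_bd
 * a power of two this is assumed to reduce to a masked increment,
 * roughly:
 *
 *	return ++index & (n_bd - 1);
 *
 * keeping write_ptr inside the TFD ring without a divide.
 */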
static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
{
/* Remove all resets to allow NIC to operate */
- iwl_write32(priv(trans), CSR_RESET, 0);
+ iwl_write32(bus(trans), CSR_RESET, 0);
}
static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
if (txq->sched_retry) {
agg_state =
- priv->stations[txq->sta_id].tid[txq->tid].agg.state;
+ priv->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
} else {
cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
}
}
-static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
-{
- unsigned long flags;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_irqsave(&trans->shrd->lock, flags);
- iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
-
- /* wait to make sure we flush pending tasklet*/
- synchronize_irq(bus(trans)->irq);
- tasklet_kill(&trans_pcie->irq_tasklet);
-}
-
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
+ iwl_trans_pcie_tx_free(trans);
+ iwl_trans_pcie_rx_free(trans);
free_irq(bus(trans)->irq, trans);
iwl_free_isr_ict(trans);
trans->shrd->trans = NULL;
iwl_enable_interrupts(trans);
- if (!(iwl_read32(priv(trans), CSR_GP_CNTRL) &
+ if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
hw_rfkill = true;
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
char *buf;
int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
- (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
+ (hw_params(trans).max_txq_num * 32 * 8) + 400;
const u8 *ptr;
ssize_t ret;
int pos = 0;
int cnt;
int ret;
- const size_t bufsz = sizeof(char) * 64 *
- priv->cfg->base_params->num_of_queues;
+ const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
if (!priv->txq) {
IWL_ERR(priv, "txq not ready\n");
int pos = 0;
ssize_t ret = -ENOMEM;
- ret = pos = iwl_dump_nic_event_log(priv(trans), true, &buf, true);
+ ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
if (buf) {
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
if (sscanf(buf, "%d", &event_log_flag) != 1)
return -EFAULT;
if (event_log_flag == 1)
- iwl_dump_nic_event_log(priv(trans), true, NULL, false);
+ iwl_dump_nic_event_log(trans, true, NULL, false);
return count;
}
for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
IWL_ERR(trans, " %25s: 0X%08x\n",
get_csr_string(csr_tbl[i]),
- iwl_read32(priv(trans), csr_tbl[i]));
+ iwl_read32(bus(trans), csr_tbl[i]));
}
}
pos += scnprintf(*buf + pos, bufsz - pos,
" %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
- iwl_read_direct32(priv(trans), fh_tbl[i]));
+ iwl_read_direct32(bus(trans), fh_tbl[i]));
}
return pos;
}
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
IWL_ERR(trans, " %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
- iwl_read_direct32(priv(trans), fh_tbl[i]));
+ iwl_read_direct32(bus(trans), fh_tbl[i]));
}
return 0;
}
.tx_start = iwl_trans_pcie_tx_start,
- .rx_free = iwl_trans_pcie_rx_free,
- .tx_free = iwl_trans_pcie_tx_free,
-
.send_cmd = iwl_trans_pcie_send_cmd,
.send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,
.kick_nic = iwl_trans_pcie_kick_nic,
- .disable_sync_irq = iwl_trans_pcie_disable_sync_irq,
.free = iwl_trans_pcie_free,
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
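/*
 * With rx_free, tx_free and disable_sync_irq dropped from the ops table,
 * users of the transport only see the top-level teardown:
 * iwl_trans_pcie_free() now calls iwl_trans_pcie_tx_free() and
 * iwl_trans_pcie_rx_free() itself before releasing the IRQ and the ICT
 * area.
 */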