iwlwifi: remove TX hex debug
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 6090e98..14b0361 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
 
 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
 
+#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)      \
+       (((1<<cfg(trans)->base_params->num_of_queues) - 1) &\
+       (~(1<<(trans_pcie)->cmd_queue)))
+
 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie =
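
The two mask macros above do the scheduler bookkeeping with plain bit arithmetic: IWL_MASK(lo, hi) sets bits lo through hi inclusive, and the new SCD_QUEUECHAIN_SEL_ALL() selects every TX queue except the command queue, whose index now lives in the transport-private struct (set through the new .configure hook further down). A stand-alone sketch of the arithmetic; num_of_queues = 20 and cmd_queue = 9 are example values only, not taken from any real device configuration:

#include <stdio.h>

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

int main(void)
{
	unsigned int num_of_queues = 20;	/* example value */
	unsigned int cmd_queue = 9;		/* example value */
	/* all queues selected, then the command queue masked back out */
	unsigned int chain_sel = ((1U << num_of_queues) - 1) & ~(1U << cmd_queue);

	printf("IWL_MASK(0, 7)         = 0x%08x\n", IWL_MASK(0, 7)); /* 0x000000ff */
	printf("SCD_QUEUECHAIN_SEL_ALL = 0x%08x\n", chain_sel);      /* 0x000ffdff */
	return 0;
}
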
@@ -301,6 +305,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
 {
        size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
        int i;
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
        if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
                return -EINVAL;
@@ -313,7 +318,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
        if (!txq->meta || !txq->cmd)
                goto error;
 
-       if (txq_id == trans->shrd->cmd_queue)
+       if (txq_id == trans_pcie->cmd_queue)
                for (i = 0; i < slots_num; i++) {
                        txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
                                                GFP_KERNEL);
@@ -324,7 +329,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
        /* Alloc driver data array and TFD circular buffer */
        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
-       if (txq_id != trans->shrd->cmd_queue) {
+       if (txq_id != trans_pcie->cmd_queue) {
                txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
                                    GFP_KERNEL);
                if (!txq->skbs) {
@@ -352,7 +357,7 @@ error:
        txq->skbs = NULL;
        /* since txq->cmd has been zeroed,
         * all non allocated cmd[i] will be NULL */
-       if (txq->cmd && txq_id == trans->shrd->cmd_queue)
+       if (txq->cmd && txq_id == trans_pcie->cmd_queue)
                for (i = 0; i < slots_num; i++)
                        kfree(txq->cmd[i]);
        kfree(txq->meta);
@@ -418,7 +423,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
        /* In the command queue, all the TBs are mapped as BIDI
         * so unmap them as such.
         */
-       if (txq_id == trans->shrd->cmd_queue)
+       if (txq_id == trans_pcie->cmd_queue)
                dma_dir = DMA_BIDIRECTIONAL;
        else
                dma_dir = DMA_TO_DEVICE;
@@ -454,7 +459,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
 
        /* De-alloc array of command/tx buffers */
 
-       if (txq_id == trans->shrd->cmd_queue)
+       if (txq_id == trans_pcie->cmd_queue)
                for (i = 0; i < txq->q.n_window; i++)
                        kfree(txq->cmd[i]);
 
@@ -492,7 +497,7 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
        /* Tx queues */
        if (trans_pcie->txq) {
                for (txq_id = 0;
-                    txq_id < hw_params(trans).max_txq_num; txq_id++)
+                    txq_id < cfg(trans)->base_params->num_of_queues; txq_id++)
                        iwl_tx_queue_free(trans, txq_id);
        }
 
@@ -517,7 +522,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
        int txq_id, slots_num;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
+       u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues *
                        sizeof(struct iwlagn_scd_bc_tbl);
 
        /*It is not allowed to alloc twice, so warn when this happens.
@@ -541,7 +546,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
                goto error;
        }
 
-       trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
+       trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues,
                                  sizeof(struct iwl_tx_queue), GFP_KERNEL);
        if (!trans_pcie->txq) {
                IWL_ERR(trans, "Not enough memory for txq\n");
@@ -550,8 +555,9 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
        }
 
        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
-       for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
-               slots_num = (txq_id == trans->shrd->cmd_queue) ?
+       for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+            txq_id++) {
+               slots_num = (txq_id == trans_pcie->cmd_queue) ?
                                        TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
                                          slots_num, txq_id);
@@ -595,8 +601,9 @@ static int iwl_tx_init(struct iwl_trans *trans)
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
-       for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
-               slots_num = (txq_id == trans->shrd->cmd_queue) ?
+       for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+            txq_id++) {
+               slots_num = (txq_id == trans_pcie->cmd_queue) ?
                                        TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
                                         slots_num, txq_id);
@@ -684,6 +691,7 @@ static void iwl_apm_config(struct iwl_trans *trans)
  */
 static int iwl_apm_init(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret = 0;
        IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
 
@@ -753,7 +761,7 @@ static int iwl_apm_init(struct iwl_trans *trans)
        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                          APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 
-       set_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);
+       set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
 
 out:
        return ret;
@@ -779,9 +787,10 @@ static int iwl_apm_stop_master(struct iwl_trans *trans)
 
 static void iwl_apm_stop(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
 
-       clear_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);
+       clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
 
        /* Stop device's DMA activity */
        iwl_apm_stop_master(trans);
@@ -816,7 +825,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
 
        iwl_set_pwr_vmain(trans);
 
-       iwl_nic_config(priv(trans));
+       iwl_op_mode_nic_config(trans->op_mode);
 
 #ifndef CONFIG_IWLWIFI_IDI
        /* Allocate the RX queue, or reset if it is already allocated */
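
The transport no longer calls iwl_nic_config(priv(trans)) directly; NIC configuration is dispatched through the op_mode ops table. Presumed shape of the wrapper (it lives in iwl-op-mode.h, not in this file), shown only to make the new call direction explicit:

static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
	op_mode->ops->nic_config(op_mode);
}
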
@@ -833,8 +842,6 @@ static int iwl_nic_init(struct iwl_trans *trans)
                        0x800FFFFF);
        }
 
-       set_bit(STATUS_INIT, &trans->shrd->status);
-
        return 0;
 }
 
@@ -947,11 +954,12 @@ static const u8 iwlagn_pan_ac_to_queue[] = {
 static int iwl_load_section(struct iwl_trans *trans, const char *name,
                            const struct fw_desc *image, u32 dst_addr)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        dma_addr_t phy_addr = image->p_addr;
        u32 byte_cnt = image->len;
        int ret;
 
-       trans->ucode_write_complete = 0;
+       trans_pcie->ucode_write_complete = false;
 
        iwl_write_direct32(trans,
                FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
@@ -982,8 +990,8 @@ static int iwl_load_section(struct iwl_trans *trans, const char *name,
                FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
 
        IWL_DEBUG_FW(trans, "%s uCode section being loaded...\n", name);
-       ret = wait_event_timeout(trans->shrd->wait_command_queue,
-                                trans->ucode_write_complete, 5 * HZ);
+       ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+                                trans_pcie->ucode_write_complete, 5 * HZ);
        if (!ret) {
                IWL_ERR(trans, "Could not load the %s uCode section\n",
                        name);
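
The firmware-load wait no longer sleeps on the shared wait_command_queue; it uses a transport-private waitqueue plus a bool flag. The wake-up side is not part of this hunk; as a sketch, the FH_SRVC_CHNL TX-complete path in the PCIe interrupt handler is expected to do the counterpart:

	/* sketch: service-channel TX-complete handling in the ISR tasklet */
	trans_pcie->ucode_write_complete = true;
	wake_up(&trans_pcie->ucode_write_waitq);
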
@@ -1022,7 +1030,6 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
 
-       trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
        trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
        trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
 
@@ -1044,7 +1051,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
        if (hw_rfkill) {
-               iwl_enable_interrupts(trans);
+               iwl_enable_rfkill_int(trans);
                return -ERFKILL;
        }
 
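
While the RF kill switch is asserted, only the RF-kill change interrupt is left unmasked now, instead of the full interrupt set. Judging by the open-coded lines that iwl_enable_rfkill_int() replaces in iwl_trans_pcie_stop_hw() further down, the helper presumably amounts to:

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
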
@@ -1070,9 +1077,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
 
        /* Load the given image to the HW */
-       iwl_load_given_ucode(trans, fw);
-
-       return 0;
+       return iwl_load_given_ucode(trans, fw);
 }
 
 /*
@@ -1113,7 +1118,8 @@ static void iwl_tx_start(struct iwl_trans *trans)
                a += 4)
                iwl_write_targ_mem(trans, a, 0);
        for (; a < trans_pcie->scd_base_addr +
-              SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
+              SCD_TRANS_TBL_OFFSET_QUEUE(
+                               cfg(trans)->base_params->num_of_queues);
               a += 4)
                iwl_write_targ_mem(trans, a, 0);
 
@@ -1132,11 +1138,11 @@ static void iwl_tx_start(struct iwl_trans *trans)
                           reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
 
        iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
-               SCD_QUEUECHAIN_SEL_ALL(trans));
+               SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
        iwl_write_prph(trans, SCD_AGGR_SEL, 0);
 
        /* initiate the queues */
-       for (i = 0; i < hw_params(trans).max_txq_num; i++) {
+       for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) {
                iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
                iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
                iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
@@ -1153,7 +1159,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
        }
 
        iwl_write_prph(trans, SCD_INTERRUPT_MASK,
-                       IWL_MASK(0, hw_params(trans).max_txq_num));
+                       IWL_MASK(0, cfg(trans)->base_params->num_of_queues));
 
        /* Activate all Tx DMA/FIFO channels */
        iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
@@ -1164,7 +1170,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
        else
                queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
 
-       iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
+       iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
 
        /* make sure all queue are not stopped */
        memset(&trans_pcie->queue_stopped[0], 0,
@@ -1213,7 +1219,7 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
  */
 static int iwl_trans_tx_stop(struct iwl_trans *trans)
 {
-       int ch, txq_id;
+       int ch, txq_id, ret;
        unsigned long flags;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -1226,9 +1232,10 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
        for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
                iwl_write_direct32(trans,
                                   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
-               if (iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
+               ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
                                    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
-                                   1000))
+                                   1000);
+               if (ret < 0)
                        IWL_ERR(trans, "Failing on timeout while stopping"
                            " DMA channel %d [0x%08x]", ch,
                            iwl_read_direct32(trans,
@@ -1242,7 +1249,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
        }
 
        /* Unmap DMA from host system and free skb's */
-       for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+       for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+            txq_id++)
                iwl_tx_queue_unmap(trans, txq_id);
 
        return 0;
@@ -1268,7 +1276,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
         * restart. So don't process again if the device is
         * already dead.
         */
-       if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
+       if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
                iwl_trans_tx_stop(trans);
 #ifndef CONFIG_IWLWIFI_IDI
                iwl_trans_rx_stop(trans);
@@ -1294,7 +1302,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
        /* wait to make sure we flush pending tasklet*/
-       synchronize_irq(trans->irq);
+       synchronize_irq(trans_pcie->irq);
        tasklet_kill(&trans_pcie->irq_tasklet);
 
        cancel_work_sync(&trans_pcie->rx_replenish);
@@ -1458,8 +1466,6 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
                     le16_to_cpu(dev_cmd->hdr.sequence));
        IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-       iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
-       iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
        /* Set up entry for this TFD in Tx byte-count array */
        iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
@@ -1488,7 +1494,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(trans, txq);
                } else {
-                       iwl_stop_queue(trans, txq, "Queue is full");
+                       iwl_stop_queue(trans, txq);
                }
        }
        spin_unlock(&txq->lock);
@@ -1513,11 +1519,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 
                iwl_alloc_isr_ict(trans);
 
-               err = request_irq(trans->irq, iwl_isr_ict, IRQF_SHARED,
+               err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
                        DRV_NAME, trans);
                if (err) {
                        IWL_ERR(trans, "Error allocating IRQ %d\n",
-                               trans->irq);
+                               trans_pcie->irq);
                        goto error;
                }
 
@@ -1540,7 +1546,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
        return err;
 
 err_free_irq:
-       free_irq(trans->irq, trans);
+       free_irq(trans_pcie->irq, trans);
 error:
        iwl_free_isr_ict(trans);
        tasklet_kill(&trans_pcie->irq_tasklet);
@@ -1554,13 +1560,11 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
        /* Even if we stop the HW, we still want the RF kill interrupt */
-       IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
-       iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+       iwl_enable_rfkill_int(trans);
 }
 
 static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
-                     int txq_id, int ssn, u32 status,
-                     struct sk_buff_head *skbs)
+                     int txq_id, int ssn, struct sk_buff_head *skbs)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -1593,10 +1597,8 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
                                txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
                                tfd_num, ssn);
                freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
-               if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
-                  (!txq->sched_retry ||
-                  status != TX_STATUS_FAIL_PASSIVE_NO_RX))
-                       iwl_wake_queue(trans, txq, "Packets reclaimed");
+               if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+                       iwl_wake_queue(trans, txq);
        }
 
        spin_unlock(&txq->lock);
@@ -1605,18 +1607,25 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
 
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
 {
-       iowrite8(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+       writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
 }
 
 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
 {
-       iowrite32(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+       writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
 }
 
 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
 {
-       u32 val = ioread32(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
-       return val;
+       return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+                             const struct iwl_trans_config *trans_cfg)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       trans_pcie->cmd_queue = trans_cfg->cmd_queue;
 }
 
 static void iwl_trans_pcie_free(struct iwl_trans *trans)
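
Two changes meet in this hunk: the MMIO accessors switch from iowrite8/iowrite32/ioread32 to writeb/writel/readl, matching the move from pci_iomap() to pci_ioremap_bar() later in the patch, and the new .configure op lets the op mode push settings such as the command-queue index into the transport instead of the transport reading them from the shared struct. Hypothetical caller-side sketch; the cmd_queue field comes from this patch, while the wrapper name iwl_trans_configure() and the constant are assumptions:

	struct iwl_trans_config trans_cfg = {
		.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM,	/* assumed constant */
	};

	iwl_trans_configure(trans, &trans_cfg);	/* ends up in ops->configure */
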
@@ -1629,12 +1638,12 @@ static void iwl_trans_pcie_free(struct iwl_trans *trans)
        iwl_trans_pcie_rx_free(trans);
 #endif
        if (trans_pcie->irq_requested == true) {
-               free_irq(trans->irq, trans);
+               free_irq(trans_pcie->irq, trans);
                iwl_free_isr_ict(trans);
        }
 
        pci_disable_msi(trans_pcie->pci_dev);
-       pci_iounmap(trans_pcie->pci_dev, trans_pcie->hw_base);
+       iounmap(trans_pcie->hw_base);
        pci_release_regions(trans_pcie->pci_dev);
        pci_disable_device(trans_pcie->pci_dev);
 
@@ -1652,42 +1661,20 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
 {
        bool hw_rfkill;
 
-       iwl_enable_interrupts(trans);
-
        hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
                                CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+       if (hw_rfkill)
+               iwl_enable_rfkill_int(trans);
+       else
+               iwl_enable_interrupts(trans);
+
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
        return 0;
 }
 #endif /* CONFIG_PM_SLEEP */
 
-static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
-                                         enum iwl_rxon_context_id ctx,
-                                         const char *msg)
-{
-       u8 ac, txq_id;
-       struct iwl_trans_pcie *trans_pcie =
-               IWL_TRANS_GET_PCIE_TRANS(trans);
-
-       for (ac = 0; ac < AC_NUM; ac++) {
-               txq_id = trans_pcie->ac_to_queue[ctx][ac];
-               IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
-                       ac,
-                       (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
-                             ? "stopped" : "awake");
-               iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
-       }
-}
-
-static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
-                                     const char *msg)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-       iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
-}
-
 #define IWL_FLUSH_WAIT_MS      2000
 
 static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -1700,8 +1687,8 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
        int ret = 0;
 
        /* waiting for all the tx frames complete might take a while */
-       for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
-               if (cnt == trans->shrd->cmd_queue)
+       for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
+               if (cnt == trans_pcie->cmd_queue)
                        continue;
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
@@ -1946,7 +1933,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
        int pos = 0;
        int cnt;
        int ret;
-       const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
+       size_t bufsz;
+
+       bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues;
 
        if (!trans_pcie->txq) {
                IWL_ERR(trans, "txq not ready\n");
@@ -1956,7 +1945,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
        if (!buf)
                return -ENOMEM;
 
-       for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
+       for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
@@ -2207,8 +2196,6 @@ const struct iwl_trans_ops trans_ops_pcie = {
 
        .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
 
-       .wake_any_queue = iwl_trans_pcie_wake_any_queue,
-
        .send_cmd = iwl_trans_pcie_send_cmd,
 
        .tx = iwl_trans_pcie_tx,
@@ -2219,7 +2206,6 @@ const struct iwl_trans_ops trans_ops_pcie = {
        .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
 
        .free = iwl_trans_pcie_free,
-       .stop_queue = iwl_trans_pcie_stop_queue,
 
        .dbgfs_register = iwl_trans_pcie_dbgfs_register,
 
@@ -2233,6 +2219,7 @@ const struct iwl_trans_ops trans_ops_pcie = {
        .write8 = iwl_trans_pcie_write8,
        .write32 = iwl_trans_pcie_write32,
        .read32 = iwl_trans_pcie_read32,
+       .configure = iwl_trans_pcie_configure,
 };
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
@@ -2256,6 +2243,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
        trans->shrd = shrd;
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
+       init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
        /* W/A - seems to solve weird behavior. We need to remove this if we
         * don't want to stay in L1 all the time. This wastes a lot of power */
@@ -2291,9 +2279,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
                goto out_pci_disable_device;
        }
 
-       trans_pcie->hw_base = pci_iomap(pdev, 0, 0);
+       trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
        if (!trans_pcie->hw_base) {
-               dev_printk(KERN_ERR, &pdev->dev, "pci_iomap failed");
+               dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
                err = -ENODEV;
                goto out_pci_release_regions;
        }
@@ -2317,7 +2305,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
                        "pci_enable_msi failed(0X%x)", err);
 
        trans->dev = &pdev->dev;
-       trans->irq = pdev->irq;
+       trans_pcie->irq = pdev->irq;
        trans_pcie->pci_dev = pdev;
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;