else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
- &tx_power_cmd);
+ return priv->trans.ops->send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
+ sizeof(tx_power_cmd), &tx_power_cmd);
}
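
The call site above suggests the transport's send_cmd_pdu hook now takes an explicit flags argument (CMD_SYNC here) ahead of the payload length. A minimal sketch of the assumed operation signature, inferred from the call sites in this patch rather than quoted from it:

	/* Hedged sketch: signature inferred from call sites only; the real
	 * transport ops member may differ in type or argument order. */
	int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags,
			    u16 len, const void *data);
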
void iwlagn_temperature(struct iwl_priv *priv)
/* the rest are 0 by default */
};
-void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- dma_unmap_page(priv->bus.dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- DMA_FROM_DEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
u32 rb_size;
{
unsigned long flags;
struct iwl_rx_queue *rxq = &priv->rxq;
- int ret;
/* nic_init */
spin_lock_irqsave(&priv->lock, flags);
priv->cfg->ops->lib->apm_ops.config(priv);
/* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- ret = iwl_rx_queue_alloc(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwlagn_rx_queue_reset(priv, rxq);
+ priv->trans.ops->rx_init(priv);
iwlagn_rx_replenish(priv);
spin_unlock_irqrestore(&priv->lock, flags);
/* Allocate or reset and init all Tx and Command queues */
- if (!priv->txq) {
- ret = iwlagn_txq_ctx_alloc(priv);
- if (ret)
- return ret;
- } else
- iwlagn_txq_ctx_reset(priv);
+ if (priv->trans.ops->tx_init(priv))
+ return -ENOMEM;
if (priv->cfg->base_params->shadow_reg_enable) {
/* enable shadow regs in HW */
iwlagn_rx_queue_restock(priv);
}
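
The caller no longer distinguishes first-time allocation from reset, so the transport's rx_init presumably keeps that decision internally. A hedged sketch reusing the alloc-or-reset logic removed above (the trans-layer function name is hypothetical):

	static int iwl_trans_rx_init(struct iwl_priv *priv)
	{
		struct iwl_rx_queue *rxq = &priv->rxq;
		int ret;

		/* Allocate the RX queue on first use, or reset it if it
		 * already exists (same decision the caller used to make). */
		if (!rxq->bd) {
			ret = iwl_rx_queue_alloc(priv);
			if (ret) {
				IWL_ERR(priv, "Unable to initialize Rx queue\n");
				return -ENOMEM;
			}
		} else
			iwlagn_rx_queue_reset(priv, rxq);

		return 0;
	}
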
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL
- * This free routine walks the list of POOL entries and if SKB is set to
- * non NULL it is unmapped and freed
- */
-void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- dma_unmap_page(priv->bus.dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- DMA_FROM_DEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(priv->bus.dev, 4 * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- dma_free_coherent(priv->bus.dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-int iwlagn_rxq_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
- return 0;
-}
-
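
The removed iwlagn_rx_queue_free and iwlagn_rxq_stop presumably reappear behind the transport's rx_free/rx_stop hooks. A hedged sketch of the rx_stop side, reusing the body deleted just above (the trans-layer function name is hypothetical):

	static int iwl_trans_rx_stop(struct iwl_priv *priv)
	{
		/* stop Rx DMA and wait for the channel to go idle */
		iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

		return 0;
	}
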
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
int idx = 0;
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_CMD,
.len = { sizeof(struct iwl_scan_cmd), },
+ .flags = CMD_SYNC,
};
struct iwl_scan_cmd *scan;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
/* set scan bit here for PAN params */
set_bit(STATUS_SCAN_HW, &priv->status);
- if (priv->cfg->ops->hcmd->set_pan_params) {
- ret = priv->cfg->ops->hcmd->set_pan_params(priv);
- if (ret)
- return ret;
- }
+ ret = iwlagn_set_pan_params(priv);
+ if (ret)
+ return ret;
- ret = iwl_send_cmd_sync(priv, &cmd);
+ ret = priv->trans.ops->send_cmd(priv, &cmd);
if (ret) {
clear_bit(STATUS_SCAN_HW, &priv->status);
- if (priv->cfg->ops->hcmd->set_pan_params)
- priv->cfg->ops->hcmd->set_pan_params(priv);
+ iwlagn_set_pan_params(priv);
}
return ret;
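
With the dedicated *_sync wrapper gone, synchronous behaviour appears to be requested through the command's flags rather than a separate entry point. A brief usage sketch under that assumption, mirroring the scan path above:

	/* Hedged usage sketch: mark the host command synchronous via its
	 * flags field and hand it to the transport's generic send hook. */
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = { sizeof(struct iwl_scan_cmd), },
		.flags = CMD_SYNC,
	};

	ret = priv->trans.ops->send_cmd(priv, &cmd);
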
flush_cmd.fifo_control);
flush_cmd.flush_control = cpu_to_le16(flush_control);
- return iwl_send_cmd(priv, &cmd);
+ return priv->trans.ops->send_cmd(priv, &cmd);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
if (priv->cfg->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
- ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(bt_cmd_2000), &bt_cmd_2000);
+ ret = priv->trans.ops->send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
} else {
memcpy(&bt_cmd_6000.basic, &basic,
sizeof(basic));
- ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(bt_cmd_6000), &bt_cmd_6000);
+ ret = priv->trans.ops->send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
}
if (ret)
IWL_ERR(priv, "failed to send BT Coex Config\n");
* already dead.
*/
if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
- iwlagn_txq_ctx_stop(priv);
- iwlagn_rxq_stop(priv);
+ priv->trans.ops->tx_stop(priv);
+ priv->trans.ops->rx_stop(priv);
- /* Power-down device's busmaster DMA clocks */
- iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
- }
+ /* Power-down device's busmaster DMA clocks */
+ iwl_write_prph(priv, APMG_CLK_DIS_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+ }
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
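
Taken together, the hooks exercised by this patch imply a transport vtable roughly like the following. This is a hedged reconstruction from the call sites only, not the definition introduced by the patch; field order, return types of the hooks whose results are ignored, and any additional members are unknown:

	/* Hedged reconstruction from call sites in this patch; the real
	 * struct iwl_trans_ops may contain further members. */
	struct iwl_trans_ops {
		int (*rx_init)(struct iwl_priv *priv);
		int (*rx_stop)(struct iwl_priv *priv);
		int (*tx_init)(struct iwl_priv *priv);
		int (*tx_stop)(struct iwl_priv *priv);
		int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
		int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags,
				    u16 len, const void *data);
	};
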