size_t size;
};
+/*
+ * This queue number is required for proper operation:
+ * the uCode stops/starts the scheduler for this queue
+ * as needed.
+ */
+#define IWL_IPAN_MCAST_QUEUE 8
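+
+/*
+ * Illustrative sketch, not part of this change: the PAN context is
+ * expected to steer its multicast traffic to this fixed queue when
+ * the per-context queues are set up, along these lines:
+ *
+ *	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
+ *	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
+ */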
+
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
 * @scd_base_addr: scheduler SRAM base address
* @scd_bc_tbls: pointer to the byte count table of the scheduler
* @kw: keep warm address
+ * @ac_to_fifo: mapping from an AC (access category) to its TX fifo, per context
+ * @ac_to_queue: mapping from an AC to its TX queue, per context
+ * @mcast_queue: the multicast queue used by each RXON context
*/
struct iwl_trans_pcie {
struct iwl_rx_queue rxq;
u32 scd_base_addr;
struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
+
+ const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
+ const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
+ u8 mcast_queue[NUM_IWL_RXON_CTX];
};
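
/*
 * A minimal sketch of how a TX path would consult the new per-context
 * maps, assuming "trans", "ctx" and "skb" are in scope at the call
 * site (the call site itself is not part of this hunk):
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	u8 ac = skb_get_queue_mapping(skb);
 *	u8 fifo = trans_pcie->ac_to_fifo[ctx][ac];
 *	u8 txq_id = trans_pcie->ac_to_queue[ctx][ac];
 */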
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
/*****************************************************
* TX / HCMD
******************************************************/
-void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+void iwl_txq_update_write_ptr(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq);
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len, u8 reset);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
u16 byte_cnt);
-int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo);
+void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid);
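+/*
+ * Hedged note on the split above (the bodies are outside this hunk):
+ * txq_agg_disable() is expected to deactivate a single HW queue in
+ * the scheduler, while tx_agg_disable() resolves (ctx, sta_id, tid)
+ * to that queue first and may fail, hence its int return.
+ */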
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
-void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
+void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
int tx_fifo_id, int scd_retry);
-void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
- int frame_limit);
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid, u16 *ssn);
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx,
+ int sta_id, int tid, int frame_limit);
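+/*
+ * Assumed aggregation bring-up order, inferred from the prototypes
+ * above: tx_agg_alloc() reserves a queue for (ctx, sta_id, tid) and
+ * reports the starting sequence number through *ssn; once the block
+ * ack session is established with the peer, tx_agg_setup() activates
+ * the queue with the negotiated frame_limit.
+ */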
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
int index);
-void iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
- struct sk_buff_head *skbs);
+int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+ struct sk_buff_head *skbs);
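+/*
+ * The int return of iwl_tx_queue_reclaim() presumably reports how
+ * many frames were freed (an assumption; the body is not part of
+ * this hunk), letting callers credit flow control accordingly.
+ */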
/*****************************************************
* Error handling
clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
/* disable interrupts from uCode/NIC to host */
- iwl_write32(priv(trans), CSR_INT_MASK, 0x00000000);
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
/* acknowledge/clear/reset any interrupts still pending
* from uCode or flow handler (Rx/Tx DMA) */
- iwl_write32(priv(trans), CSR_INT, 0xffffffff);
- iwl_write32(priv(trans), CSR_FH_INT_STATUS, 0xffffffff);
+ iwl_write32(bus(trans), CSR_INT, 0xffffffff);
+ iwl_write32(bus(trans), CSR_FH_INT_STATUS, 0xffffffff);
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
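
/*
 * Sketch of the expected pairing, which is an assumption since the
 * callers are outside this hunk: the ISR runs the disable path above
 * before scheduling the interrupt tasklet, and the tasklet re-enables
 * interrupts below once all pending work has been handled.
 */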
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
- iwl_write32(priv(trans), CSR_INT_MASK, trans_pcie->inta_mask);
+ iwl_write32(bus(trans), CSR_INT_MASK, trans_pcie->inta_mask);
}
#endif /* __iwl_trans_int_pcie_h__ */