/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

	/* account for the security overhead appended by the device */
	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	/* mirror the first entries past the end of the table so the
	 * scheduler can read ahead without wrapping */
	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		/* shift in two 16-bit steps: a single 32-bit shift would
		 * be undefined when dma_addr_t is only 32 bits wide */
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 int index)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
			 DMA_TO_DEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[index] = NULL;
		}
	}
}
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
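/*
 * For reference, a minimal sketch of the index-wrapping helpers this file
 * relies on (the real definitions live in iwl-helpers.h). Because n_bd is
 * required to be a power of two, the modulo reduces to a bit mask:
 *
 *	static inline int iwl_queue_inc_wrap(int index, int n_bd)
 *	{
 *		return ++index & (n_bd - 1);
 *	}
 *
 *	static inline int iwl_queue_dec_wrap(int index, int n_bd)
 *	{
 *		return --index & (n_bd - 1);
 *	}
 *
 * This mask trick is why iwl_queue_init() below refuses non-power-of-two
 * queue sizes.
 */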
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
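/*
 * Worked example for iwl_queue_space() (hypothetical numbers): with
 * n_bd = 256, n_window = 64, read_ptr = 10 and write_ptr = 70,
 * s = 10 - 70 = -60; read_ptr < write_ptr, so n_bd is not subtracted;
 * s <= 0, so s += 64 gives 4; subtracting the reserve of 2 leaves
 * 2 free slots before the queue must be treated as full.
 */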
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
				       u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	/* each 32-bit word in the translation table holds two queues */
	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
			   int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &priv(trans)->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];

	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv(trans)->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv(trans)->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &priv(trans)->txq[txq_id],
				      tx_fifo, 1);

	priv(trans)->txq[txq_id].sta_id = sta_id;
	priv(trans)->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
				      &priv(trans)->txq_ctx_active_msk))
			return txq_id;
	return -1;
}
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				enum iwl_rxon_context_id ctx, int sta_id,
				int tid, u16 *ssn)
{
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int txq_id;
	struct iwl_priv *priv = priv(trans);

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(trans, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
	} else {
		IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
			     " queue\n", tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	return 0;
}
void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
{
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));

	priv(trans)->txq[txq_id].q.read_ptr = 0;
	priv(trans)->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv(trans), txq_id);
	iwl_trans_tx_queue_set_status(trans, &priv(trans)->txq[txq_id], 0, 0);
}
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid)
{
	unsigned long flags;
	int read_ptr, write_ptr;
	struct iwl_tid_data *tid_data;
	int txq_id;

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
	     hw_params(trans).num_ampdu_queues <= txq_id)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return -EINVAL;
	}

	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(trans, "Stopping AGG while state not ON "
			 "or starting\n");
	}

	write_ptr = priv(trans)->txq[txq_id].q.write_ptr;
	read_ptr = priv(trans)->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
		trans->shrd->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(trans, "HW queue is empty\n");
turn_off:
	trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&trans->shrd->sta_lock);
	spin_lock(&trans->shrd->lock);

	iwl_trans_pcie_txq_agg_disable(trans, txq_id);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);

	return 0;
}
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a negative value to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
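/*
 * Illustration (not from the original source): a command with one copied
 * chunk and one IWL_HCMD_DFL_NOCOPY chunk ends up laid out as
 *
 *	TB 0 -> | out_cmd->hdr | data[0], copied |   (one mapping, copy_size)
 *	TB 1 -> | data[1], mapped in place       |   (cmd->len[1])
 *
 * which is why a NOCOPY chunk must never be followed by a normal one:
 * everything before the first NOCOPY chunk shares TB 0's single mapping.
 */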
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm owns the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter: queue is full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
				     phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}

	}
}
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans *trans = trans(priv);
	struct iwl_tx_queue *txq = &priv->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->shrd->cmd_queue, sequence,
		 priv->txq[trans->shrd->cmd_queue].q.read_ptr,
		 priv->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}
#define IWL_CMD(x) case x: return #x
const char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_TXFIFO_FLUSH);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
		IWL_CMD(COEX_EVENT_CMD);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
		IWL_CMD(CALIBRATION_CFG_CMD);
		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
		IWL_CMD(TEMPERATURE_NOTIFICATION);
		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
		IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
		IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
		IWL_CMD(REPLY_BT_COEX_PROT_ENV);
		IWL_CMD(REPLY_WIPAN_PARAMS);
		IWL_CMD(REPLY_WIPAN_RXON);
		IWL_CMD(REPLY_WIPAN_RXON_TIMING);
		IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
		IWL_CMD(REPLY_WIPAN_QOS_PARAM);
		IWL_CMD(REPLY_WIPAN_WEPKEY);
		IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
		IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
		IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
		IWL_CMD(REPLY_WOWLAN_PATTERNS);
		IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
		IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
		IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
		IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
		IWL_CMD(REPLY_WOWLAN_GET_STATUS);
	default:
		return "UNKNOWN";
	}
}
/* how long a synchronous command may take before we give up: two seconds */
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
static void iwl_generic_cmd_callback(struct iwl_priv *priv,
				     struct iwl_device_cmd *cmd,
				     struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
				  get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
			     get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	}
#endif
}
static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = iwl_generic_cmd_callback;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	/* A synchronous command can not have a callback set. */
	if (WARN_ON(cmd->callback))
		return -EINVAL;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_interruptible_timeout(priv(trans)->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command "
				       "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv(trans)->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}
int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
				u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_trans_pcie_send_cmd(trans, &cmd);
}
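/*
 * Example caller (hypothetical wrapper, for illustration only): sending a
 * fixed-size command synchronously through the PDU helper, using command
 * IDs and flags that appear elsewhere in this file:
 *
 *	static int send_bt_config(struct iwl_trans *trans,
 *				  struct iwl_basic_bt_cmd *bt)
 *	{
 *		return iwl_trans_pcie_send_cmd_pdu(trans, REPLY_BT_CONFIG,
 *						   CMD_SYNC, sizeof(*bt), bt);
 *	}
 */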
/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	    (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
			   q->read_ptr, index);

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
		freed++;
	}
	return freed;
}