/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/* Map each 802.11e TID (0-7) to an EDCA AC Tx FIFO; higher TIDs are unused */
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};

static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
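
/*
 * Note for readers (illustrative, not from the original sources): the
 * HBUS_TARG_WRPTR write above packs the queue number and the new write
 * index into one word, e.g. queue 4 advancing to index 7 writes
 * (7 | (4 << 8)) == 0x0407.
 */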

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_device_cmd) * q->n_window;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	len = sizeof(struct iwl_device_cmd) * q->n_window;
	len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits.  If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on 'tx done IRQ'), if free space becomes > high
 * mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
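
/*
 * Worked example (illustrative): with n_bd == 256, write_ptr == 250 and
 * read_ptr == 10, 240 TFDs are in flight, so iwl_queue_space() below
 * reports 256 - 240 - 2 (reserve) = 14 usable slots; all index
 * arithmetic wraps modulo 256.
 */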
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
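
/*
 * For illustration (assuming TFD_TX_CMD_SLOTS == 256 and TFD_CMD_SLOTS
 * == 32, as in this driver's headers): a data queue gets low_mark = 64
 * and high_mark = 32, while the command queue gets low_mark = 8 and
 * high_mark = 4.
 */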

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len += IWL_MAX_SCAN_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/* aggregation TX queues will get their ID when aggregation begins */
	if (txq_id <= IWL_TX_FIFO_AC3)
		txq->swq_id = txq_id;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);

/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == IWL_CMD_QUEUE_NUM)
			iwl_cmd_queue_free(priv);
		else
			iwl_tx_queue_free(priv, txq_id);

	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initialize them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}
	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
	iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * handle build REPLY_TX command notification.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int sta_id,
				  int is_hcca)
{
	u32 rate_flags = 0;
	int rate_idx;
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u8 rate_plcp;

	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
			IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	if (ieee80211_is_probe_resp(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;

	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
		rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
	/* 0 - mgmt, 1 - cnt, 2 - data */
	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	priv->tx_stats[idx].cnt++;
	priv->tx_stats[idx].bytes += len;
}
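
/*
 * Example (for clarity): management frames have frame-control type bits
 * 0x00, control frames 0x04 and data frames 0x08, so
 * (fc & IEEE80211_FCTL_FTYPE) >> 2 yields the indexes 0, 1 and 2 used
 * in the comment above.
 */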

/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
	     IWL_INVALID_RATE) {
		IWL_ERR(priv, "ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	/* drop all data frame if we are not associated */
	if (ieee80211_is_data(fc) &&
	    (!iwl_is_monitor_mode(priv) ||
	    !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	txq_id = skb_get_queue_mapping(skb);
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		/* bounds fix: a 4-bit QoS TID can exceed the tid[] array */
		if (unlikely(tid >= MAX_TID_COUNT))
			goto drop;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
	}

	txq = &priv->txq[txq_id];
	swq_id = txq->swq_id;
	q = &txq->q;

	spin_lock_irqsave(&priv->lock, flags);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
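
	/*
	 * Example (for clarity, assuming both structs above are dword
	 * multiples): a 26-byte QoS MAC header leaves the concatenation 2
	 * bytes short of a dword boundary; rounding len up adds 2 pad
	 * bytes, and TX_CMD_FLG_MH_PAD_MSK tells the device to skip them.
	 */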

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
	pci_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						     le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
drop:
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate failure. On success, it
 * returns the index (> 0) of the command in the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, phys_addr);
	pci_unmap_len_set(out_meta, len, fix_size);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}
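
/*
 * Usage sketch (illustrative only, not a caller that exists in this
 * file): a host command is typically built and queued as
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_SCAN_CMD,
 *		.len = sizeof(*scan),
 *		.data = scan,
 *	};
 *	idx = iwl_enqueue_hcmd(priv, &cmd);
 *
 * with a negative return reporting failure and a positive one giving
 * the command's slot in queue #4.
 */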

int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	pci_unmap_single(priv->pci_dev,
		pci_unmap_addr(&txq->meta[cmd_idx], mapping),
		pci_unmap_len(&txq->meta[cmd_idx], len),
		PCI_DMA_BIDIRECTIONAL);

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}
	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		  txq_id, sequence,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_error(priv, rxb, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_skb = rxb->skb;
		rxb->skb = NULL;
	} else if (meta->callback && !meta->callback(priv, cmd, rxb->skb))
		rxb->skb = NULL;

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);

int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERR(priv, "ra = NULL\n");
		return -EINVAL;
	}

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);

int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);

/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count ; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags = IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}
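
/*
 * Worked example (illustrative): if the BA window starts at Tx-queue
 * index 5 (agg->start_idx) and the BA's seq_ctl maps to index 3, then
 * sh == 2 and bit 0 of the shifted bitmap lines up with the first frame
 * of our Tx window; frame i counts as ACK'd iff bit i survives the mask
 * with agg->bitmap.
 */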

/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */