/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"

/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
 *
 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 *
 * Mac80211 uses the following numbers, which we get from it
 * by way of skb_get_queue_mapping(skb):
 *
 *	VO	0
 *	VI	1
 *	BE	2
 *	BK	3
 *
 * Regular (not A-MPDU) frames are put into hardware queues corresponding
 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
 * own queue per aggregation session (RA/TID combination), such queues are
 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
 * order to map frames to the right queue, we also need an AC->hw queue
 * mapping. This is implemented here.
 *
 * Due to the way hw queues are set up (by the hw specific modules like
 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
 * mapping.
 */

static const u8 tid_to_ac[] = {
	/* this matches the mac80211 numbers */
	2, 3, 3, 2, 1, 1, 0, 0
};

static inline int get_ac_from_tid(u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return tid_to_ac[tid];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ctx->ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

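/*
 * Worked example of the two helpers above (the FIFO number itself is
 * illustrative, since it depends on the ac_to_fifo table the context was
 * configured with): a voice frame carries TID 6, tid_to_ac[6] == 0 (VO),
 * so get_fifo_from_tid() returns ctx->ac_to_fifo[0], i.e. whatever FIFO
 * the hw-specific setup assigned to the VO access category.
 */
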
/**
 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
				    struct iwl_tx_queue *txq,
				    u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != priv->cmd_queue) {
		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

		switch (sec_ctl & TX_CMD_SEC_MSK) {
		case TX_CMD_SEC_CCM:
			len += CCMP_MIC_LEN;
			break;
		case TX_CMD_SEC_TKIP:
			len += TKIP_ICV_LEN;
			break;
		case TX_CMD_SEC_WEP:
			len += WEP_IV_LEN + WEP_ICV_LEN;
			break;
		}
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

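/*
 * Example of the bc_ent packing above: a frame whose adjusted length is
 * 200 bytes (0x0C8) for station id 3 yields (0x0C8 & 0xFFF) | (3 << 12)
 * == 0x30C8, stored little-endian.  The duplicate write into the
 * TFD_QUEUE_SIZE_MAX..+BC_DUP region keeps the scheduler's wrap-around
 * reads of the circular byte-count table consistent.
 */
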
void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
				   struct iwl_tx_queue *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != priv->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = priv->scd_base_addr +
			IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

	return 0;
}

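/*
 * Each dword of the scheduler's translate table holds the RA/TID map for
 * a pair of queues: an odd-numbered queue occupies the upper 16 bits and
 * an even-numbered queue the lower 16 bits, which is why the entry is
 * read, masked and rewritten rather than written outright.
 */
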
static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(priv,
		IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
			int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
}

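/*
 * HBUS_TARG_WRPTR packs queue number and write index into one register:
 * bits 0-7 hold the index within the 256-entry circular buffer and bits
 * 8 and up hold the queue id, so e.g. index 0x2A on queue 10 is written
 * as (0x2A & 0xff) | (10 << 8) == 0x0A2A.
 */
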
void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
				struct iwl_tx_queue *txq,
				int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
			(active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
			IWLAGN_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
				 int tid)
{
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	/* Modify device's station table to Tx this TID */
	return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
}

void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv,
				struct ieee80211_sta *sta,
				int tid, int frame_limit)
{
	int sta_id, tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	sta_id = iwl_sta_id(sta);
	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= MAX_TID_COUNT))
		return;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	tx_fifo = tid_data->agg.tx_fifo;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv, priv->scd_base_addr +
			IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				  u16 ssn_idx, u8 tx_fifo)
{
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_ERR(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}

/**
 * iwlagn_txq_set_sched - Set up Tx queue scheduler
 *
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
 * must be called under priv->lock and mac access
 */
void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
}

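/*
 * Each bit in the IWLAGN_SCD_TXFACT mask gates one Tx FIFO/DMA channel,
 * so txq_set_sched(priv, 0) -- as done in the init and stop paths below --
 * disables scheduling on all channels, while a mask with the relevant
 * low bits set re-enables them.
 */
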
/*
 * Build the basic fields of the REPLY_TX command.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr,
				      u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->cfg->bt_params &&
		 priv->cfg->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		 ieee80211_is_reassoc_req(fc) ||
		 skb->protocol == cpu_to_be16(ETH_P_PAE)))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_DFAULT_RETRY_LIMIT		60

static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
			      struct iwl_tx_cmd *tx_cmd,
			      struct ieee80211_tx_info *info,
			      __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate.  Thus, we use the lowest supported rate for
	 * this band.  Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
				info->control.sta);
	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->hw_params.valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
						      priv->hw_params.valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

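/*
 * The resulting rate_n_flags is simply the PLCP rate value OR'd with the
 * modulation and antenna flags, e.g. a CCK probe response on antenna A
 * would carry (rate_plcp | RATE_MCS_CCK_MSK | <antenna-A flag>) -- the
 * exact antenna flag value comes from iwl_ant_idx_to_flags().  Data
 * frames never get here; they returned early above with
 * TX_CMD_FLG_STA_RATE_MSK so the uCode station table picks their rate.
 */
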
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}

/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	bool is_agg = false;

	/*
	 * If the frame needs to go out off-channel, then
	 * we'll have put the PAN context to that channel,
	 * so make the frame go out there.
	 */
	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		ctx = &priv->contexts[IWL_RXON_CTX_PAN];
	else if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* Find index into station table for destination station */
	sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock_priv;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = ctx->mcast_queue;
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	/* irqs already disabled/saved above when locking priv->lock */
	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
			spin_unlock(&priv->sta_lock);
			goto drop_unlock_priv;
		}
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
			is_agg = true;
		}
	}

	txq = &priv->txq[txq_id];
	q = &txq->q;

	if (unlikely(iwl_queue_space(q) < q->high_mark)) {
		spin_unlock(&priv->sta_lock);
		goto drop_unlock_priv;
	}

	if (ieee80211_is_data_qos(fc)) {
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&priv->sta_lock);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

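	/*
	 * For example (assuming the QUEUE_TO_SEQ/INDEX_TO_SEQ layout from
	 * iwl-commands.h, queue number in bits 8-12 and index in bits 0-7):
	 * queue 2, write index 5 gives sequence = (2 << 8) | 5 = 0x0205,
	 * which the Tx response echoes back so the frame can be found again.
	 */
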
	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

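	/*
	 * The rounding above is plain dword alignment: e.g. a QoS data
	 * header might make len odd-sized at, say, 138 bytes, so firstlen
	 * becomes (138 + 3) & ~3 = 140 and the 2-byte gap is flagged via
	 * TX_CMD_FLG_MH_PAD_MSK (the numbers are illustrative; only the
	 * len != firstlen case matters).
	 */
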
	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, firstlen,
				    PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, firstlen, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   secondlen, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, secondlen,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    firstlen, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						     le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       firstlen, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &out_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq);
		}
	}

	return 0;

drop_unlock_priv:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
				       GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/**
 * iwlagn_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
			if (txq_id == priv->cmd_queue)
				iwl_cmd_queue_free(priv);
			else
				iwl_tx_queue_free(priv, txq_id);
	}
	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_free_txq_mem(priv);
}

/**
 * iwlagn_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwlagn_hw_txq_ctx_free(priv);

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
				       txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwlagn_hw_txq_ctx_free(priv);
	iwlagn_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}

void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
{
	int txq_id, slots_num;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = txq_id == priv->cmd_queue ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
	}
}

/**
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!priv->txq)
		return;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == priv->cmd_queue)
			iwl_cmd_queue_unmap(priv);
		else
			iwl_tx_queue_unmap(priv, txq_id);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	tid_data->agg.tx_fifo = tx_fifo;
	iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_HT(priv,
			"HW queue is NOT empty: %d packets in HW queue\n",
			tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}

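/*
 * Note the two-phase ADDBA handling above: if frames for this RA/TID are
 * still queued on the old (non-aggregation) queue, the session parks in
 * IWL_EMPTYING_HW_QUEUE_ADDBA and mac80211 is only notified via
 * ieee80211_start_tx_ba_cb_irqsafe() once iwlagn_txq_check_empty() sees
 * the queue drain; otherwise the state goes straight to IWL_AGG_ON.
 */
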
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
 turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * The only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information is lost. If it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up its own data.
	 */
	iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}

int iwlagn_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
	struct iwl_rxon_context *ctx;

	ctx = &priv->contexts[priv->stations[sta_id].ctxid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = get_fifo_from_tid(ctx, tid);
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue DELBA flow\n");
			iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}

static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     const u8 *addr1)
{
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(ctx->vif, addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();
}

static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
			     bool is_agg)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;

	if (!is_agg)
		iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);

	ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
}

int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		if (WARN_ON_ONCE(tx_info->skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		iwlagn_tx_status(priv, tx_info,
				 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
		tx_info->skb = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}

/**
 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)
{
	int sh;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0)
		sh += 0x100;

	/*
	 * Check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap
	 */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
	sent_bitmap = bitmap & agg->bitmap;

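	/*
	 * Example of the alignment above: if our Tx window started at
	 * index 10 (agg->start_idx) and the BA's starting sequence maps to
	 * index 8, then sh = 2 and the BA bitmap is shifted right by two
	 * so that bit 0 of both bitmaps refers to the same frame; the
	 * negative case only occurs when the index wrapped around 256,
	 * hence the += 0x100 correction.
	 */
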
	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
			"bogus sent(%d) and ack(%d) count\n",
			ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed = txed_2_done,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}
	IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
			ba_resp->txed, ba_resp->txed_2_done);

	/* Find the first ACKed frame to store the TX status */
	while (sent_bitmap && !(sent_bitmap & 1)) {
		agg->start_idx = (agg->start_idx + 1) & 0xff;
		sent_bitmap >>= 1;
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = ba_resp->txed_2_done;
	info->status.ampdu_len = ba_resp->txed;
	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	return 0;
}

/**
 * translate ucode response to mac80211 tx status control values
 */
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				  struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}

/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
					   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can happen very often, and in order not to fill
		 * the syslog, don't enable the logging by default.
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq);

		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

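/*
 * The helper macros above expand each table entry into a switch case,
 * e.g. TX_STATUS_FAIL(DRAIN_FLOW) becomes
 *	case TX_STATUS_FAIL_DRAIN_FLOW: return "DRAIN_FLOW";
 */
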
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */