1 /******************************************************************************
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *****************************************************************************/
29 #include <linux/etherdevice.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/sched.h>
38 #include "iwl-helpers.h"
39 #include "iwl-agn-hw.h"
43 static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
45 return le32_to_cpup((__le32 *)&tx_resp->status +
46 tx_resp->frame_count) & MAX_SN;
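/*
 * Illustration (layout inferred from the pointer arithmetic above, not
 * spelled out in this file): the uCode places one 4-byte agg_tx_status entry
 * per attempted frame starting at "status", and the scheduler SSN word
 * follows the last entry. With frame_count == 3 the response tail looks like:
 *
 *	status[0] status[1] status[2] <scd ssn word>
 *
 * so advancing a __le32 pointer by frame_count lands on the SSN word, and
 * masking with MAX_SN keeps only the sequence-number bits.
 */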
49 static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
51 status &= TX_STATUS_MSK;
54 case TX_STATUS_POSTPONE_DELAY:
55 priv->_agn.reply_tx_stats.pp_delay++;
57 case TX_STATUS_POSTPONE_FEW_BYTES:
58 priv->_agn.reply_tx_stats.pp_few_bytes++;
60 case TX_STATUS_POSTPONE_BT_PRIO:
61 priv->_agn.reply_tx_stats.pp_bt_prio++;
63 case TX_STATUS_POSTPONE_QUIET_PERIOD:
64 priv->_agn.reply_tx_stats.pp_quiet_period++;
66 case TX_STATUS_POSTPONE_CALC_TTAK:
67 priv->_agn.reply_tx_stats.pp_calc_ttak++;
69 case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
70 priv->_agn.reply_tx_stats.int_crossed_retry++;
72 case TX_STATUS_FAIL_SHORT_LIMIT:
73 priv->_agn.reply_tx_stats.short_limit++;
75 case TX_STATUS_FAIL_LONG_LIMIT:
76 priv->_agn.reply_tx_stats.long_limit++;
78 case TX_STATUS_FAIL_FIFO_UNDERRUN:
79 priv->_agn.reply_tx_stats.fifo_underrun++;
81 case TX_STATUS_FAIL_DRAIN_FLOW:
82 priv->_agn.reply_tx_stats.drain_flow++;
84 case TX_STATUS_FAIL_RFKILL_FLUSH:
85 priv->_agn.reply_tx_stats.rfkill_flush++;
87 case TX_STATUS_FAIL_LIFE_EXPIRE:
88 priv->_agn.reply_tx_stats.life_expire++;
90 case TX_STATUS_FAIL_DEST_PS:
91 priv->_agn.reply_tx_stats.dest_ps++;
93 case TX_STATUS_FAIL_HOST_ABORTED:
94 priv->_agn.reply_tx_stats.host_abort++;
96 case TX_STATUS_FAIL_BT_RETRY:
97 priv->_agn.reply_tx_stats.bt_retry++;
99 case TX_STATUS_FAIL_STA_INVALID:
100 priv->_agn.reply_tx_stats.sta_invalid++;
102 case TX_STATUS_FAIL_FRAG_DROPPED:
103 priv->_agn.reply_tx_stats.frag_drop++;
105 case TX_STATUS_FAIL_TID_DISABLE:
106 priv->_agn.reply_tx_stats.tid_disable++;
108 case TX_STATUS_FAIL_FIFO_FLUSHED:
109 priv->_agn.reply_tx_stats.fifo_flush++;
111 case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
112 priv->_agn.reply_tx_stats.insuff_cf_poll++;
114 case TX_STATUS_FAIL_PASSIVE_NO_RX:
115 priv->_agn.reply_tx_stats.fail_hw_drop++;
117 case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
118 priv->_agn.reply_tx_stats.sta_color_mismatch++;
121 priv->_agn.reply_tx_stats.unknown++;
126 static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
128 status &= AGG_TX_STATUS_MSK;
131 case AGG_TX_STATE_UNDERRUN_MSK:
132 priv->_agn.reply_agg_tx_stats.underrun++;
134 case AGG_TX_STATE_BT_PRIO_MSK:
135 priv->_agn.reply_agg_tx_stats.bt_prio++;
137 case AGG_TX_STATE_FEW_BYTES_MSK:
138 priv->_agn.reply_agg_tx_stats.few_bytes++;
140 case AGG_TX_STATE_ABORT_MSK:
141 priv->_agn.reply_agg_tx_stats.abort++;
143 case AGG_TX_STATE_LAST_SENT_TTL_MSK:
144 priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
146 case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
147 priv->_agn.reply_agg_tx_stats.last_sent_try++;
149 case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
150 priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
152 case AGG_TX_STATE_SCD_QUERY_MSK:
153 priv->_agn.reply_agg_tx_stats.scd_query++;
155 case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
156 priv->_agn.reply_agg_tx_stats.bad_crc32++;
158 case AGG_TX_STATE_RESPONSE_MSK:
159 priv->_agn.reply_agg_tx_stats.response++;
161 case AGG_TX_STATE_DUMP_TX_MSK:
162 priv->_agn.reply_agg_tx_stats.dump_tx++;
164 case AGG_TX_STATE_DELAY_TX_MSK:
165 priv->_agn.reply_agg_tx_stats.delay_tx++;
168 priv->_agn.reply_agg_tx_stats.unknown++;
173 static void iwlagn_set_tx_status(struct iwl_priv *priv,
174 struct ieee80211_tx_info *info,
175 struct iwlagn_tx_resp *tx_resp,
176 int txq_id, bool is_agg)
178 u16 status = le16_to_cpu(tx_resp->status.status);
180 info->status.rates[0].count = tx_resp->failure_frame + 1;
182 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
183 info->flags |= iwl_tx_status_to_mac80211(status);
184 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
186 if (!iwl_is_tx_success(status))
187 iwlagn_count_tx_err_status(priv, status);
	IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
			   "0x%x retries %d\n",
			   txq_id,
192 iwl_get_tx_fail_reason(status), status,
193 le32_to_cpu(tx_resp->rate_n_flags),
194 tx_resp->failure_frame);
197 #ifdef CONFIG_IWLWIFI_DEBUG
198 #define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
200 const char *iwl_get_agg_tx_fail_reason(u16 status)
202 status &= AGG_TX_STATUS_MSK;
204 case AGG_TX_STATE_TRANSMITTED:
206 AGG_TX_STATE_FAIL(UNDERRUN_MSK);
207 AGG_TX_STATE_FAIL(BT_PRIO_MSK);
208 AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
209 AGG_TX_STATE_FAIL(ABORT_MSK);
210 AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
211 AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
212 AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
213 AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
214 AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
215 AGG_TX_STATE_FAIL(RESPONSE_MSK);
216 AGG_TX_STATE_FAIL(DUMP_TX_MSK);
217 AGG_TX_STATE_FAIL(DELAY_TX_MSK);
222 #endif /* CONFIG_IWLWIFI_DEBUG */
224 static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
225 struct iwl_ht_agg *agg,
226 struct iwlagn_tx_resp *tx_resp,
227 int txq_id, u16 start_idx)
230 struct agg_tx_status *frame_status = &tx_resp->status;
231 struct ieee80211_hdr *hdr = NULL;
235 if (agg->wait_for_ba)
236 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
238 agg->frame_count = tx_resp->frame_count;
239 agg->start_idx = start_idx;
240 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
243 /* # frames attempted by Tx command */
244 if (agg->frame_count == 1) {
245 /* Only one frame was attempted; no block-ack will arrive */
248 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
249 agg->frame_count, agg->start_idx, idx);
		iwlagn_set_tx_status(priv,
				     IEEE80211_SKB_CB(
252 priv->txq[txq_id].txb[idx].skb),
253 tx_resp, txq_id, true);
254 agg->wait_for_ba = 0;
256 /* Two or more frames were attempted; expect block-ack */
260 * Start is the lowest frame sent. It may not be the first
261 * frame in the batch; we figure this out dynamically during
262 * the following loop.
264 int start = agg->start_idx;
266 /* Construct bit-map of pending frames within Tx window */
267 for (i = 0; i < agg->frame_count; i++) {
269 status = le16_to_cpu(frame_status[i].status);
270 seq = le16_to_cpu(frame_status[i].sequence);
271 idx = SEQ_TO_INDEX(seq);
272 txq_id = SEQ_TO_QUEUE(seq);
274 if (status & AGG_TX_STATUS_MSK)
275 iwlagn_count_agg_tx_err_status(priv, status);
277 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
278 AGG_TX_STATE_ABORT_MSK))
281 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
282 agg->frame_count, txq_id, idx);
283 IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
284 "try-count (0x%08x)\n",
285 iwl_get_agg_tx_fail_reason(status),
286 status & AGG_TX_STATUS_MSK,
287 status & AGG_TX_TRY_MSK);
289 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
292 "BUG_ON idx doesn't point to valid skb"
293 " idx=%d, txq_id=%d\n", idx, txq_id);
297 sc = le16_to_cpu(hdr->seq_ctrl);
298 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
300 "BUG_ON idx doesn't match seq control"
301 " idx=%d, seq_idx=%d, seq=%d\n",
307 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
308 i, idx, SEQ_TO_SN(sc));
			 * sh -> how many frames ahead of the starting frame is
			 * the current one?
			 *
314 * Note that all frames sent in the batch must be in a
315 * 64-frame window, so this number should be in [0,63].
316 * If outside of this window, then we've found a new
317 * "first" frame in the batch and need to change start.
322 * If >= 64, out of window. start must be at the front
323 * of the circular buffer, idx must be near the end of
324 * the buffer, and idx is the new "first" frame. Shift
325 * the indices around.
328 /* Shift bitmap by start - idx, wrapped */
329 sh = 0x100 - idx + start;
330 bitmap = bitmap << sh;
331 /* Now idx is the new start so sh = 0 */
335 * If <= -64 then wraps the 256-pkt circular buffer
336 * (e.g., start = 255 and idx = 0, sh should be 1)
338 } else if (sh <= -64) {
339 sh = 0x100 - start + idx;
341 * If < 0 but > -64, out of window. idx is before start
342 * but not wrapped. Shift the indices around.
345 /* Shift by how far start is ahead of idx */
347 bitmap = bitmap << sh;
348 /* Now idx is the new start so sh = 0 */
352 /* Sequence number start + sh was sent in this batch */
353 bitmap |= 1ULL << sh;
354 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
355 start, (unsigned long long)bitmap);
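			/*
			 * Worked example of the window arithmetic above
			 * (illustration only; assumes sh starts out as
			 * idx - start, as the comment above describes, with
			 * the 256-entry queue and 64-frame BA window):
			 *
			 *   start = 1,  idx = 255: sh = 254 (>= 64), so the
			 *     buffer wrapped and idx is the new "first" frame;
			 *     bitmap <<= 0x100 - 255 + 1 = 2, start = 255,
			 *     and idx itself lands in bit 0.
			 *
			 *   start = 255, idx = 0: sh = -255 (<= -64), so
			 *     sh = 0x100 - 255 + 0 = 1; frame 0 is one slot
			 *     ahead of start 255 and sets bit 1.
			 *
			 *   start = 10, idx = 7: sh = -3 (< 0 but > -64), so
			 *     bitmap <<= 3, start = 7, and bit 0 marks idx.
			 */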
359 * Store the bitmap and possibly the new start, if we wrapped
362 agg->bitmap = bitmap;
363 agg->start_idx = start;
364 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
365 agg->frame_count, agg->start_idx,
366 (unsigned long long)agg->bitmap);
369 agg->wait_for_ba = 1;
374 void iwl_check_abort_status(struct iwl_priv *priv,
375 u8 frame_count, u32 status)
377 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
378 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
379 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
380 queue_work(priv->workqueue, &priv->tx_flush);
384 static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
385 struct iwl_rx_mem_buffer *rxb)
387 struct iwl_rx_packet *pkt = rxb_addr(rxb);
388 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
389 int txq_id = SEQ_TO_QUEUE(sequence);
390 int index = SEQ_TO_INDEX(sequence);
391 struct iwl_tx_queue *txq = &priv->txq[txq_id];
392 struct ieee80211_tx_info *info;
393 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
394 u32 status = le16_to_cpu(tx_resp->status.status);
400 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
401 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
402 "is out of range [0-%d] %d %d\n", txq_id,
			index, txq->q.n_bd, txq->q.write_ptr,
			txq->q.read_ptr);
		return;
	}
408 txq->time_stamp = jiffies;
409 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
410 memset(&info->status, 0, sizeof(info->status));
412 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
413 IWLAGN_TX_RES_TID_POS;
414 sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
415 IWLAGN_TX_RES_RA_POS;
417 spin_lock_irqsave(&priv->sta_lock, flags);
418 if (txq->sched_retry) {
419 const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
420 struct iwl_ht_agg *agg;
422 agg = &priv->stations[sta_id].tid[tid].agg;
424 * If the BT kill count is non-zero, we'll get this
425 * notification again.
427 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
428 priv->cfg->bt_params &&
429 priv->cfg->bt_params->advanced_bt_coexist) {
430 IWL_WARN(priv, "receive reply tx with bt_kill\n");
432 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
434 /* check if BAR is needed */
435 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
436 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
438 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
439 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
440 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
441 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
					   scd_ssn, index, txq_id, txq->swq_id);
444 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
445 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
447 if (priv->mac80211_registered &&
448 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
449 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
450 iwl_wake_queue(priv, txq);
453 iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false);
454 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
455 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
457 if (priv->mac80211_registered &&
458 (iwl_queue_space(&txq->q) > txq->q.low_mark))
459 iwl_wake_queue(priv, txq);
462 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
464 iwl_check_abort_status(priv, tx_resp->frame_count, status);
465 spin_unlock_irqrestore(&priv->sta_lock, flags);
468 void iwlagn_rx_handler_setup(struct iwl_priv *priv)
470 /* init calibration handlers */
471 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
472 iwlagn_rx_calib_result;
473 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
474 iwlagn_rx_calib_complete;
475 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
477 /* set up notification wait support */
478 spin_lock_init(&priv->_agn.notif_wait_lock);
479 INIT_LIST_HEAD(&priv->_agn.notif_waits);
480 init_waitqueue_head(&priv->_agn.notif_waitq);
483 void iwlagn_setup_deferred_work(struct iwl_priv *priv)
485 /* in agn, the tx power calibration is done in uCode */
486 priv->disable_tx_power_cal = 1;
489 int iwlagn_hw_valid_rtc_data_addr(u32 addr)
491 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
492 (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
495 int iwlagn_send_tx_power(struct iwl_priv *priv)
497 struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
500 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
501 "TX Power requested while scanning!\n"))
	/* the user limit is in dBm; the command wants half-dBm, so multiply by 2 */
505 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
507 if (priv->tx_power_lmt_in_half_dbm &&
508 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
		/*
		 * For newer devices that use the enhanced/extended tx power
		 * table in EEPROM, the limit is stored in half-dBm units. The
		 * driver has to convert it to dBm before reporting to
		 * mac80211, which can lose 1/2 dBm of resolution. The driver
		 * rounds up when reporting, so the tx power could end up
		 * 1/2 dBm above the regulatory limit. Check for that here:
		 * if "tx_power_user_lmt" (converted to half-dBm above) is
		 * higher than the EEPROM value, lower the tx power to the
		 * EEPROM limit.
		 */
520 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
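	/*
	 * Worked example of the clamping above (illustrative numbers only):
	 * with a user limit of 16 dBm, global_lmt is first set to
	 * 2 * 16 = 32 half-dBm. If the EEPROM enhanced tx power limit is
	 * 31 half-dBm (15.5 dBm), it is lower than the requested 32, so
	 * global_lmt is reduced to 31 and the device never exceeds the
	 * EEPROM limit.
	 */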
522 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
523 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
525 if (IWL_UCODE_API(priv->ucode_ver) == 1)
526 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
528 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
530 return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
534 void iwlagn_temperature(struct iwl_priv *priv)
536 /* store temperature from statistics (in Celsius) */
538 le32_to_cpu(priv->_agn.statistics.general.common.temperature);
539 iwl_tt_handler(priv);
542 u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
544 struct iwl_eeprom_calib_hdr {
550 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
559 static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
563 if ((address & INDIRECT_ADDRESS) == 0)
566 switch (address & INDIRECT_TYPE_MSK) {
568 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
570 case INDIRECT_GENERAL:
571 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
573 case INDIRECT_REGULATORY:
574 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
576 case INDIRECT_TXP_LIMIT:
577 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
579 case INDIRECT_TXP_LIMIT_SIZE:
580 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
582 case INDIRECT_CALIBRATION:
583 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
585 case INDIRECT_PROCESS_ADJST:
586 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
588 case INDIRECT_OTHERS:
589 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
592 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
593 address & INDIRECT_TYPE_MSK);
	/* translate the offset from words to bytes */
598 return (address & ADDRESS_MSK) + (offset << 1);
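/*
 * Worked example of the translation above (illustrative values; assumes the
 * link word is a word offset, as the comment above says). Suppose "address"
 * has INDIRECT_ADDRESS set, its type bits select INDIRECT_REGULATORY, and its
 * ADDRESS_MSK part is 0x10. If the 16-bit word at EEPROM_LINK_REGULATORY
 * reads 0x80, the returned byte offset is 0x10 + (0x80 << 1) = 0x110: the
 * link offset is doubled to convert words to bytes before being added to the
 * byte offset within the block.
 */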
601 const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
604 u32 address = eeprom_indirect_address(priv, offset);
605 BUG_ON(address >= priv->cfg->base_params->eeprom_size);
606 return &priv->eeprom[address];
609 struct iwl_mod_params iwlagn_mod_params = {
612 /* the rest are 0 by default */
615 void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
619 spin_lock_irqsave(&rxq->lock, flags);
620 INIT_LIST_HEAD(&rxq->rx_free);
621 INIT_LIST_HEAD(&rxq->rx_used);
622 /* Fill the rx_used queue with _all_ of the Rx buffers */
623 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
624 /* In the reset function, these buffers may have been allocated
625 * to an SKB, so we need to unmap and free potential storage */
626 if (rxq->pool[i].page != NULL) {
627 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
628 PAGE_SIZE << priv->hw_params.rx_page_order,
630 __iwl_free_pages(priv, rxq->pool[i].page);
631 rxq->pool[i].page = NULL;
633 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
636 for (i = 0; i < RX_QUEUE_SIZE; i++)
637 rxq->queue[i] = NULL;
639 /* Set us so that we have processed and used all buffers, but have
640 * not restocked the Rx queue with fresh buffers */
641 rxq->read = rxq->write = 0;
642 rxq->write_actual = 0;
644 spin_unlock_irqrestore(&rxq->lock, flags);
647 int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
650 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
651 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
653 if (!priv->cfg->base_params->use_isr_legacy)
654 rb_timeout = RX_RB_TIMEOUT;
656 if (priv->cfg->mod_params->amsdu_size_8K)
657 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
659 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
662 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
664 /* Reset driver's Rx queue write index */
665 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
667 /* Tell device where to find RBD circular buffer in DRAM */
668 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
669 (u32)(rxq->bd_dma >> 8));
671 /* Tell device where in DRAM to update its Rx status */
672 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
673 rxq->rb_stts_dma >> 4);
676 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
677 * the credit mechanism in 5000 HW RX FIFO
678 * Direct rx interrupts to hosts
679 * Rx buffer size 4 or 8k
683 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
684 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
685 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
686 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
687 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
689 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
690 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
692 /* Set interrupt coalescing timer to default (2048 usecs) */
693 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
715 int iwlagn_hw_nic_init(struct iwl_priv *priv)
718 struct iwl_rx_queue *rxq = &priv->rxq;
722 spin_lock_irqsave(&priv->lock, flags);
723 priv->cfg->ops->lib->apm_ops.init(priv);
725 /* Set interrupt coalescing calibration timer to default (512 usecs) */
726 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
728 spin_unlock_irqrestore(&priv->lock, flags);
730 iwlagn_set_pwr_vmain(priv);
732 priv->cfg->ops->lib->apm_ops.config(priv);
734 /* Allocate the RX queue, or reset if it is already allocated */
736 ret = iwl_rx_queue_alloc(priv);
738 IWL_ERR(priv, "Unable to initialize Rx queue\n");
742 iwlagn_rx_queue_reset(priv, rxq);
744 iwlagn_rx_replenish(priv);
746 iwlagn_rx_init(priv, rxq);
748 spin_lock_irqsave(&priv->lock, flags);
750 rxq->need_update = 1;
751 iwl_rx_queue_update_write_ptr(priv, rxq);
753 spin_unlock_irqrestore(&priv->lock, flags);
755 /* Allocate or reset and init all Tx and Command queues */
757 ret = iwlagn_txq_ctx_alloc(priv);
761 iwlagn_txq_ctx_reset(priv);
763 if (priv->cfg->base_params->shadow_reg_enable) {
764 /* enable shadow regs in HW */
765 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
769 set_bit(STATUS_INIT, &priv->status);
775 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
777 static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
780 return cpu_to_le32((u32)(dma_addr >> 8));
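/*
 * Illustration (address made up for the example): the device stores each
 * receive buffer descriptor as the DMA address shifted right by 8, which only
 * works because every RB is 256-byte aligned (see the BUG_ON checks in
 * iwlagn_rx_allocate below). A buffer at DMA address 0x1fe45300 is recorded
 * as 0x001fe453; shifting left by 8 again recovers the original address
 * exactly.
 */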
784 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
786 * If there are slots in the RX queue that need to be restocked,
787 * and we have free pre-allocated buffers, fill the ranks as much
788 * as we can, pulling from rx_free.
790 * This moves the 'write' index forward to catch up with 'processed', and
791 * also updates the memory address in the firmware to reference the new
794 void iwlagn_rx_queue_restock(struct iwl_priv *priv)
796 struct iwl_rx_queue *rxq = &priv->rxq;
797 struct list_head *element;
798 struct iwl_rx_mem_buffer *rxb;
801 spin_lock_irqsave(&rxq->lock, flags);
802 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
803 /* The overwritten rxb must be a used one */
804 rxb = rxq->queue[rxq->write];
805 BUG_ON(rxb && rxb->page);
807 /* Get next free Rx buffer, remove from free list */
808 element = rxq->rx_free.next;
809 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
812 /* Point to Rx buffer via next RBD in circular buffer */
813 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
815 rxq->queue[rxq->write] = rxb;
816 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
819 spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
822 if (rxq->free_count <= RX_LOW_WATERMARK)
823 queue_work(priv->workqueue, &priv->rx_replenish);
826 /* If we've added more space for the firmware to place data, tell it.
827 * Increment device's write pointer in multiples of 8. */
828 if (rxq->write_actual != (rxq->write & ~0x7)) {
829 spin_lock_irqsave(&rxq->lock, flags);
830 rxq->need_update = 1;
831 spin_unlock_irqrestore(&rxq->lock, flags);
832 iwl_rx_queue_update_write_ptr(priv, rxq);
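	/*
	 * Worked example of the "multiples of 8" rule above (illustrative
	 * values). If write has advanced to 13 while write_actual is still 8,
	 * then (write & ~0x7) == 8 == write_actual and the device is not told
	 * yet; once write reaches 16, (write & ~0x7) == 16 != 8, so
	 * need_update is set and the write pointer update is issued.
	 */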
/**
 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except during initialization).
 */
844 void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
846 struct iwl_rx_queue *rxq = &priv->rxq;
847 struct list_head *element;
848 struct iwl_rx_mem_buffer *rxb;
851 gfp_t gfp_mask = priority;
854 spin_lock_irqsave(&rxq->lock, flags);
855 if (list_empty(&rxq->rx_used)) {
856 spin_unlock_irqrestore(&rxq->lock, flags);
859 spin_unlock_irqrestore(&rxq->lock, flags);
861 if (rxq->free_count > RX_LOW_WATERMARK)
862 gfp_mask |= __GFP_NOWARN;
864 if (priv->hw_params.rx_page_order > 0)
865 gfp_mask |= __GFP_COMP;
867 /* Alloc a new receive buffer */
868 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
			IWL_DEBUG_INFO(priv, "alloc_pages failed, "
				       "order: %d\n",
873 priv->hw_params.rx_page_order);
875 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
877 IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
878 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
880 /* We don't reschedule replenish work here -- we will
881 * call the restock method and if it still needs
882 * more buffers it will schedule replenish */
886 spin_lock_irqsave(&rxq->lock, flags);
888 if (list_empty(&rxq->rx_used)) {
889 spin_unlock_irqrestore(&rxq->lock, flags);
890 __free_pages(page, priv->hw_params.rx_page_order);
893 element = rxq->rx_used.next;
894 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
897 spin_unlock_irqrestore(&rxq->lock, flags);
901 /* Get physical address of the RB */
902 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
903 PAGE_SIZE << priv->hw_params.rx_page_order,
905 /* dma address must be no more than 36 bits */
906 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
907 /* and also 256 byte aligned! */
908 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
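	/*
	 * Illustration of the two checks above: DMA_BIT_MASK(36) is
	 * 0xFFFFFFFFF (36 one bits), so "page_dma & ~DMA_BIT_MASK(36)" is
	 * non-zero only if the address needs more than 36 bits, and
	 * DMA_BIT_MASK(8) is 0xFF, so "page_dma & DMA_BIT_MASK(8)" is
	 * non-zero unless the address is 256-byte aligned -- exactly the two
	 * hardware constraints named in the comments above.
	 */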
910 spin_lock_irqsave(&rxq->lock, flags);
912 list_add_tail(&rxb->list, &rxq->rx_free);
914 priv->alloc_rxb_page++;
916 spin_unlock_irqrestore(&rxq->lock, flags);
920 void iwlagn_rx_replenish(struct iwl_priv *priv)
924 iwlagn_rx_allocate(priv, GFP_KERNEL);
926 spin_lock_irqsave(&priv->lock, flags);
927 iwlagn_rx_queue_restock(priv);
928 spin_unlock_irqrestore(&priv->lock, flags);
931 void iwlagn_rx_replenish_now(struct iwl_priv *priv)
933 iwlagn_rx_allocate(priv, GFP_ATOMIC);
935 iwlagn_rx_queue_restock(priv);
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 * This free routine walks the list of POOL entries and if SKB is set to
 * non-NULL it is unmapped and freed.
 */
943 void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
946 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
947 if (rxq->pool[i].page != NULL) {
948 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
949 PAGE_SIZE << priv->hw_params.rx_page_order,
951 __iwl_free_pages(priv, rxq->pool[i].page);
952 rxq->pool[i].page = NULL;
956 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
958 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
959 rxq->rb_stts, rxq->rb_stts_dma);
964 int iwlagn_rxq_stop(struct iwl_priv *priv)
968 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
969 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
970 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
975 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
980 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
981 if (rate_n_flags & RATE_MCS_HT_MSK) {
982 idx = (rate_n_flags & 0xff);
984 /* Legacy rate format, search for match in table */
986 if (band == IEEE80211_BAND_5GHZ)
987 band_offset = IWL_FIRST_OFDM_RATE;
988 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
989 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
990 return idx - band_offset;
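/*
 * Illustration of the mapping above (example values only): for an HT rate the
 * low byte of rate_n_flags is the MCS index itself, so a value with
 * RATE_MCS_HT_MSK set and 0x07 in the low byte maps to MCS 7. For a legacy
 * rate on the 5 GHz band the search starts at IWL_FIRST_OFDM_RATE, so the
 * index returned to mac80211 is relative to the OFDM part of the rate table
 * (CCK rates do not exist on 5 GHz).
 */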
996 /* Calc max signal level (dBm) among 3 possible receivers */
997 static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
998 struct iwl_rx_phy_res *rx_resp)
1000 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
1003 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
1005 u32 decrypt_out = 0;
1007 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
1008 RX_RES_STATUS_STATION_FOUND)
1009 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
1010 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
1012 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
1014 /* packet was not encrypted */
1015 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
1016 RX_RES_STATUS_SEC_TYPE_NONE)
1019 /* packet was encrypted with unknown alg */
1020 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
1021 RX_RES_STATUS_SEC_TYPE_ERR)
1024 /* decryption was not done in HW */
1025 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
1026 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
1029 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
1031 case RX_RES_STATUS_SEC_TYPE_CCMP:
1032 /* alg is CCM: check MIC only */
1033 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
1035 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
1037 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
1041 case RX_RES_STATUS_SEC_TYPE_TKIP:
1042 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
1044 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
1047 /* fall through if TTAK OK */
1049 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
1050 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
1052 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
1056 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
1057 decrypt_in, decrypt_out);
1062 static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
1063 struct ieee80211_hdr *hdr,
1066 struct iwl_rx_mem_buffer *rxb,
1067 struct ieee80211_rx_status *stats)
1069 struct sk_buff *skb;
1070 __le16 fc = hdr->frame_control;
1072 /* We only process data packets if the interface is open */
1073 if (unlikely(!priv->is_open)) {
1074 IWL_DEBUG_DROP_LIMIT(priv,
1075 "Dropping packet while interface is not open.\n");
1079 /* In case of HW accelerated crypto and bad decryption, drop */
1080 if (!priv->cfg->mod_params->sw_crypto &&
1081 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
1084 skb = dev_alloc_skb(128);
1086 IWL_ERR(priv, "dev_alloc_skb failed\n");
1090 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
1092 iwl_update_stats(priv, false, fc, len);
1093 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
1095 ieee80211_rx(priv->hw, skb);
1096 priv->alloc_rxb_page--;
1100 /* Called for REPLY_RX (legacy ABG frames), or
1101 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
1102 void iwlagn_rx_reply_rx(struct iwl_priv *priv,
1103 struct iwl_rx_mem_buffer *rxb)
1105 struct ieee80211_hdr *header;
1106 struct ieee80211_rx_status rx_status;
1107 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1108 struct iwl_rx_phy_res *phy_res;
1109 __le32 rx_pkt_status;
1110 struct iwl_rx_mpdu_res_start *amsdu;
1116 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
1117 * REPLY_RX: physical layer info is in this buffer
1118 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
1119 * command and cached in priv->last_phy_res
1121 * Here we set up local variables depending on which command is
1124 if (pkt->hdr.cmd == REPLY_RX) {
1125 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
1126 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
1127 + phy_res->cfg_phy_cnt);
1129 len = le16_to_cpu(phy_res->byte_count);
1130 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
1131 phy_res->cfg_phy_cnt + len);
1132 ampdu_status = le32_to_cpu(rx_pkt_status);
1134 if (!priv->_agn.last_phy_res_valid) {
1135 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
1138 phy_res = &priv->_agn.last_phy_res;
1139 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
1140 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
1141 len = le16_to_cpu(amsdu->byte_count);
1142 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
1143 ampdu_status = iwlagn_translate_rx_status(priv,
1144 le32_to_cpu(rx_pkt_status));
1147 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
1149 phy_res->cfg_phy_cnt);
1153 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
1154 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1155 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1156 le32_to_cpu(rx_pkt_status));
1160 /* This will be used in several places later */
1161 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
1163 /* rx_status carries information about the packet to mac80211 */
1164 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1165 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1166 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1168 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
1170 rx_status.rate_idx =
1171 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	/* The TSF isn't reliable. In order to allow a smooth user experience,
	 * this workaround doesn't propagate it to mac80211 */
	/*rx_status.flag |= RX_FLAG_TSFT;*/
1178 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1180 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1181 rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
1183 iwl_dbg_log_rx_data_frame(priv, len, header);
1184 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1185 rx_status.signal, (unsigned long long)rx_status.mactime);
1190 * It seems that the antenna field in the phy flags value
1191 * is actually a bit field. This is undefined by radiotap,
1192 * it wants an actual antenna number but I always get "7"
1193 * for most legacy frames I receive indicating that the
1194 * same frame was received on all three RX chains.
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
		>> RX_RES_PHY_FLAGS_ANTENNA_POS;
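	/*
	 * Illustration, consistent with the observation in the comment above:
	 * the extracted value behaves like a per-chain bit field rather than
	 * an antenna number, e.g. 0x7 (0b111) means the frame was seen on all
	 * three RX chains, while 0x1 would mean only the first chain.
	 */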
1204 /* set the preamble flag if appropriate */
1205 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1206 rx_status.flag |= RX_FLAG_SHORTPRE;
1208 /* Set up the HT phy flags */
1209 if (rate_n_flags & RATE_MCS_HT_MSK)
1210 rx_status.flag |= RX_FLAG_HT;
1211 if (rate_n_flags & RATE_MCS_HT40_MSK)
1212 rx_status.flag |= RX_FLAG_40MHZ;
1213 if (rate_n_flags & RATE_MCS_SGI_MSK)
1214 rx_status.flag |= RX_FLAG_SHORT_GI;
1216 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1220 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1221 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1222 void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
1223 struct iwl_rx_mem_buffer *rxb)
1225 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1226 priv->_agn.last_phy_res_valid = true;
1227 memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
1228 sizeof(struct iwl_rx_phy_res));
1231 static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
1232 struct ieee80211_vif *vif,
1233 enum ieee80211_band band,
1234 struct iwl_scan_channel *scan_ch)
1236 const struct ieee80211_supported_band *sband;
1237 u16 passive_dwell = 0;
1238 u16 active_dwell = 0;
1242 sband = iwl_get_hw_mode(priv, band);
1244 IWL_ERR(priv, "invalid band\n");
1248 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
1249 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
1251 if (passive_dwell <= active_dwell)
1252 passive_dwell = active_dwell + 1;
1254 channel = iwl_get_single_channel_number(priv, band);
1256 scan_ch->channel = cpu_to_le16(channel);
1257 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
1258 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1259 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1260 /* Set txpower levels to defaults */
1261 scan_ch->dsp_atten = 110;
1262 if (band == IEEE80211_BAND_5GHZ)
1263 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1265 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1268 IWL_ERR(priv, "no valid channel found\n");
1272 static int iwl_get_channels_for_scan(struct iwl_priv *priv,
1273 struct ieee80211_vif *vif,
1274 enum ieee80211_band band,
1275 u8 is_active, u8 n_probes,
1276 struct iwl_scan_channel *scan_ch)
1278 struct ieee80211_channel *chan;
1279 const struct ieee80211_supported_band *sband;
1280 const struct iwl_channel_info *ch_info;
1281 u16 passive_dwell = 0;
1282 u16 active_dwell = 0;
1286 sband = iwl_get_hw_mode(priv, band);
1290 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
1291 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
1293 if (passive_dwell <= active_dwell)
1294 passive_dwell = active_dwell + 1;
1296 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
1297 chan = priv->scan_request->channels[i];
1299 if (chan->band != band)
1302 channel = chan->hw_value;
1303 scan_ch->channel = cpu_to_le16(channel);
1305 ch_info = iwl_get_channel_info(priv, band, channel);
1306 if (!is_channel_valid(ch_info)) {
1307 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
1312 if (!is_active || is_channel_passive(ch_info) ||
1313 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
1314 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
1316 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
1319 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
1321 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1322 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1324 /* Set txpower levels to defaults */
1325 scan_ch->dsp_atten = 110;
		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
1331 if (band == IEEE80211_BAND_5GHZ)
1332 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1334 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1336 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
1337 channel, le32_to_cpu(scan_ch->type),
1338 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
1339 "ACTIVE" : "PASSIVE",
1340 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
1341 active_dwell : passive_dwell);
1347 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1351 int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1353 struct iwl_host_cmd cmd = {
1354 .id = REPLY_SCAN_CMD,
1355 .len = sizeof(struct iwl_scan_cmd),
1356 .flags = CMD_SIZE_HUGE,
1358 struct iwl_scan_cmd *scan;
1359 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1363 enum ieee80211_band band;
1365 u8 rx_ant = priv->hw_params.valid_rx_ant;
1367 bool is_active = false;
1370 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
1373 lockdep_assert_held(&priv->mutex);
1376 ctx = iwl_rxon_ctx_from_vif(vif);
1378 if (!priv->scan_cmd) {
1379 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
1380 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
1381 if (!priv->scan_cmd) {
1382 IWL_DEBUG_SCAN(priv,
1383 "fail to allocate memory for scan\n");
1387 scan = priv->scan_cmd;
1388 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
1390 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
1391 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
1393 if (iwl_is_any_associated(priv)) {
1396 u32 suspend_time = 100;
1397 u32 scan_suspend_time = 100;
1399 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
1400 if (priv->is_internal_short_scan)
1403 interval = vif->bss_conf.beacon_int;
1405 scan->suspend_time = 0;
1406 scan->max_out_time = cpu_to_le32(200 * 1024);
1408 interval = suspend_time;
1410 extra = (suspend_time / interval) << 22;
1411 scan_suspend_time = (extra |
1412 ((suspend_time % interval) * 1024));
1413 scan->suspend_time = cpu_to_le32(scan_suspend_time);
1414 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
1415 scan_suspend_time, interval);
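		/*
		 * Worked example of the encoding above (illustrative numbers):
		 * with suspend_time = 100 and a beacon interval of 50,
		 * extra = (100 / 50) << 22 = 2 << 22 and the remainder
		 * (100 % 50) * 1024 = 0, so scan_suspend_time = 0x00800000.
		 * The upper bits carry the number of whole beacon intervals
		 * and the lower bits the remainder in 1024-usec (TU-like)
		 * units.
		 */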
1418 if (priv->is_internal_short_scan) {
1419 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
1420 } else if (priv->scan_request->n_ssids) {
1422 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
1423 for (i = 0; i < priv->scan_request->n_ssids; i++) {
1424 /* always does wildcard anyway */
1425 if (!priv->scan_request->ssids[i].ssid_len)
1427 scan->direct_scan[p].id = WLAN_EID_SSID;
1428 scan->direct_scan[p].len =
1429 priv->scan_request->ssids[i].ssid_len;
1430 memcpy(scan->direct_scan[p].ssid,
1431 priv->scan_request->ssids[i].ssid,
1432 priv->scan_request->ssids[i].ssid_len);
1438 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
1440 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
1441 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
1442 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1444 switch (priv->scan_band) {
1445 case IEEE80211_BAND_2GHZ:
1446 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
1447 chan_mod = le32_to_cpu(
1448 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
1449 RXON_FLG_CHANNEL_MODE_MSK)
1450 >> RXON_FLG_CHANNEL_MODE_POS;
1451 if (chan_mod == CHANNEL_MODE_PURE_40) {
1452 rate = IWL_RATE_6M_PLCP;
1454 rate = IWL_RATE_1M_PLCP;
1455 rate_flags = RATE_MCS_CCK_MSK;
1458 * Internal scans are passive, so we can indiscriminately set
1459 * the BT ignore flag on 2.4 GHz since it applies to TX only.
1461 if (priv->cfg->bt_params &&
1462 priv->cfg->bt_params->advanced_bt_coexist)
1463 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
1465 case IEEE80211_BAND_5GHZ:
1466 rate = IWL_RATE_6M_PLCP;
1469 IWL_WARN(priv, "Invalid scan band\n");
1474 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
1483 * The "good CRC threshold" is the number of frames that we
1484 * need to receive during our dwell time on a channel before
1485 * sending out probes -- setting this to a huge value will
1486 * mean we never reach it, but at the same time work around
1487 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
1488 * here instead of IWL_GOOD_CRC_TH_DISABLED.
1490 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1491 IWL_GOOD_CRC_TH_NEVER;
1493 band = priv->scan_band;
1495 if (priv->cfg->scan_rx_antennas[band])
1496 rx_ant = priv->cfg->scan_rx_antennas[band];
1498 if (band == IEEE80211_BAND_2GHZ &&
1499 priv->cfg->bt_params &&
1500 priv->cfg->bt_params->advanced_bt_coexist) {
1501 /* transmit 2.4 GHz probes only on first antenna */
1502 scan_tx_antennas = first_antenna(scan_tx_antennas);
1505 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
1507 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
1508 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
1510 /* In power save mode use one chain, otherwise use all chains */
1511 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
1512 /* rx_ant has been set to all valid chains previously */
1513 active_chains = rx_ant &
1514 ((u8)(priv->chain_noise_data.active_chains));
1516 active_chains = rx_ant;
1518 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
1519 priv->chain_noise_data.active_chains);
1521 rx_ant = first_antenna(active_chains);
1523 if (priv->cfg->bt_params &&
1524 priv->cfg->bt_params->advanced_bt_coexist &&
1525 priv->bt_full_concurrent) {
1526 /* operated as 1x1 in full concurrency mode */
1527 rx_ant = first_antenna(rx_ant);
1530 /* MIMO is not used here, but value is required */
1531 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
1532 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
1533 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
1534 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
1535 scan->rx_chain = cpu_to_le16(rx_chain);
1536 if (!priv->is_internal_short_scan) {
1537 cmd_len = iwl_fill_probe_req(priv,
1538 (struct ieee80211_mgmt *)scan->data,
1540 priv->scan_request->ie,
1541 priv->scan_request->ie_len,
1542 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1544 /* use bcast addr, will not be transmitted but must be valid */
1545 cmd_len = iwl_fill_probe_req(priv,
1546 (struct ieee80211_mgmt *)scan->data,
1547 iwl_bcast_addr, NULL, 0,
1548 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1551 scan->tx_cmd.len = cpu_to_le16(cmd_len);
1553 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
1554 RXON_FILTER_BCON_AWARE_MSK);
1556 if (priv->is_internal_short_scan) {
1557 scan->channel_count =
1558 iwl_get_single_channel_for_scan(priv, vif, band,
1559 (void *)&scan->data[le16_to_cpu(
1560 scan->tx_cmd.len)]);
1562 scan->channel_count =
1563 iwl_get_channels_for_scan(priv, vif, band,
1564 is_active, n_probes,
1565 (void *)&scan->data[le16_to_cpu(
1566 scan->tx_cmd.len)]);
1568 if (scan->channel_count == 0) {
1569 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
1573 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
1574 scan->channel_count * sizeof(struct iwl_scan_channel);
1576 scan->len = cpu_to_le16(cmd.len);
1578 /* set scan bit here for PAN params */
1579 set_bit(STATUS_SCAN_HW, &priv->status);
1581 if (priv->cfg->ops->hcmd->set_pan_params) {
1582 ret = priv->cfg->ops->hcmd->set_pan_params(priv);
1587 ret = iwl_send_cmd_sync(priv, &cmd);
1589 clear_bit(STATUS_SCAN_HW, &priv->status);
1590 if (priv->cfg->ops->hcmd->set_pan_params)
1591 priv->cfg->ops->hcmd->set_pan_params(priv);
1597 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1598 struct ieee80211_vif *vif, bool add)
1600 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1603 return iwlagn_add_bssid_station(priv, vif_priv->ctx,
1604 vif->bss_conf.bssid,
1605 &vif_priv->ibss_bssid_sta_id);
1606 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1607 vif->bss_conf.bssid);
1610 void iwl_free_tfds_in_queue(struct iwl_priv *priv,
1611 int sta_id, int tid, int freed)
1613 lockdep_assert_held(&priv->sta_lock);
1615 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1616 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1618 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1619 priv->stations[sta_id].tid[tid].tfds_in_queue,
1621 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1625 #define IWL_FLUSH_WAIT_MS 2000
1627 int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
1629 struct iwl_tx_queue *txq;
1630 struct iwl_queue *q;
1632 unsigned long now = jiffies;
1635 /* waiting for all the tx frames complete might take a while */
1636 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1637 if (cnt == priv->cmd_queue)
1639 txq = &priv->txq[cnt];
1641 while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1642 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
1645 if (q->read_ptr != q->write_ptr) {
1646 IWL_ERR(priv, "fail to flush all tx fifo queues\n");
1654 #define IWL_TX_QUEUE_MSK 0xfffff
1657 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
1660 * 1. acquire mutex before calling
1661 * 2. make sure rf is on and not in exit state
1663 int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1665 struct iwl_txfifo_flush_cmd flush_cmd;
1666 struct iwl_host_cmd cmd = {
1667 .id = REPLY_TXFIFO_FLUSH,
1668 .len = sizeof(struct iwl_txfifo_flush_cmd),
1675 memset(&flush_cmd, 0, sizeof(flush_cmd));
1676 flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK |
1677 IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK;
1678 if (priv->cfg->sku & IWL_SKU_N)
1679 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
1681 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
1682 flush_cmd.fifo_control);
1683 flush_cmd.flush_control = cpu_to_le16(flush_control);
1685 return iwl_send_cmd(priv, &cmd);
1688 void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1690 mutex_lock(&priv->mutex);
1691 ieee80211_stop_queues(priv->hw);
1692 if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
1693 IWL_ERR(priv, "flush request fail\n");
1696 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
1697 iwlagn_wait_tx_queue_empty(priv);
1699 ieee80211_wake_queues(priv->hw);
1700 mutex_unlock(&priv->mutex);
1707 * Macros to access the lookup table.
1709 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
1710 * wifi_prio, wifi_txrx and wifi_sh_ant_req.
1712 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
1714 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
1715 * one after another in 32-bit registers, and "registers" 0 through 7 contain
1716 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
1718 * These macros encode that format.
1720 #define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
1721 wifi_txrx, wifi_sh_ant_req) \
1722 (bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
1723 (wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
1725 #define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
1726 lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
1727 #define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1728 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1729 (!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
1730 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1732 #define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1733 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1734 LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
1735 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1737 #define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
1738 wifi_req, wifi_prio, wifi_txrx, \
1740 LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
1741 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1744 #define LUT_WLAN_KILL_OP(lut, op, val) \
1745 lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
1746 #define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1747 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1748 (!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1749 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
1750 #define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1751 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1752 LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1753 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1754 #define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1755 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1756 LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1757 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1759 #define LUT_ANT_SWITCH_OP(lut, op, val) \
1760 lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
1761 #define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1762 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1763 (!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1764 wifi_req, wifi_prio, wifi_txrx, \
1766 #define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1767 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1768 LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1769 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1770 #define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1771 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1772 LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1773 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
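/*
 * Worked example of the macros above (illustration only). LUT_VALUE() packs
 * the seven coex inputs into a 7-bit index, so for bt3_prio = 1, wifi_req = 1
 * and everything else 0 the index is 1 | (1 << 3) = 9. For WLAN_ACTIVE that
 * selects lut[8 + (9 >> 5)] = lut[8], bit (9 & 0x1f) = 9. For WLAN_KILL and
 * ANT_SWITCH, which are interleaved two bits per entry, it selects
 * lut[9 >> 4] = lut[0], bits ((9 << 1) & 0x1e) = 18 and 19 respectively.
 */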
1775 static const __le32 iwlagn_def_3w_lookup[12] = {
1776 cpu_to_le32(0xaaaaaaaa),
1777 cpu_to_le32(0xaaaaaaaa),
1778 cpu_to_le32(0xaeaaaaaa),
1779 cpu_to_le32(0xaaaaaaaa),
1780 cpu_to_le32(0xcc00ff28),
1781 cpu_to_le32(0x0000aaaa),
1782 cpu_to_le32(0xcc00aaaa),
1783 cpu_to_le32(0x0000aaaa),
1784 cpu_to_le32(0xc0004000),
1785 cpu_to_le32(0x00004000),
1786 cpu_to_le32(0xf0005000),
1787 cpu_to_le32(0xf0005000),
1790 static const __le32 iwlagn_concurrent_lookup[12] = {
1791 cpu_to_le32(0xaaaaaaaa),
1792 cpu_to_le32(0xaaaaaaaa),
1793 cpu_to_le32(0xaaaaaaaa),
1794 cpu_to_le32(0xaaaaaaaa),
1795 cpu_to_le32(0xaaaaaaaa),
1796 cpu_to_le32(0xaaaaaaaa),
1797 cpu_to_le32(0xaaaaaaaa),
1798 cpu_to_le32(0xaaaaaaaa),
1799 cpu_to_le32(0x00000000),
1800 cpu_to_le32(0x00000000),
1801 cpu_to_le32(0x00000000),
1802 cpu_to_le32(0x00000000),
1805 void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1807 struct iwlagn_bt_cmd bt_cmd = {
1808 .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
1809 .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
1810 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
1811 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
1814 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
1815 sizeof(bt_cmd.bt3_lookup_table));
1817 if (priv->cfg->bt_params)
1818 bt_cmd.prio_boost = priv->cfg->bt_params->bt_prio_boost;
1820 bt_cmd.prio_boost = 0;
1821 bt_cmd.kill_ack_mask = priv->kill_ack_mask;
1822 bt_cmd.kill_cts_mask = priv->kill_cts_mask;
1824 bt_cmd.valid = priv->bt_valid;
1825 bt_cmd.tx_prio_boost = 0;
1826 bt_cmd.rx_prio_boost = 0;
1829 * Configure BT coex mode to "no coexistence" when the
1830 * user disabled BT coexistence, we have no interface
1831 * (might be in monitor mode), or the interface is in
1832 * IBSS mode (no proper uCode support for coex then).
1834 if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
1835 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
1837 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
1838 IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
1839 if (priv->cfg->bt_params &&
1840 priv->cfg->bt_params->bt_sco_disable)
1841 bt_cmd.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
1843 if (priv->bt_ch_announce)
1844 bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
1845 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
1847 priv->bt_enable_flag = bt_cmd.flags;
1848 if (priv->bt_full_concurrent)
1849 memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup,
1850 sizeof(iwlagn_concurrent_lookup));
1852 memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup,
1853 sizeof(iwlagn_def_3w_lookup));
1855 IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
1856 bt_cmd.flags ? "active" : "disabled",
1857 priv->bt_full_concurrent ?
1858 "full concurrency" : "3-wire");
1860 if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
1861 IWL_ERR(priv, "failed to send BT Coex Config\n");
1865 static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1867 struct iwl_priv *priv =
1868 container_of(work, struct iwl_priv, bt_traffic_change_work);
1869 struct iwl_rxon_context *ctx;
1870 int smps_request = -1;
1872 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
1873 /* bt coex disabled */
	 * Note: bt_traffic_load can be overridden by scan complete and
	 * coex profile notifications. Ignore that here; the only bad
	 * consequence is that the debug print may not match the actual state.
1882 IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
1883 priv->bt_traffic_load);
1885 switch (priv->bt_traffic_load) {
1886 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1887 if (priv->bt_status)
1888 smps_request = IEEE80211_SMPS_DYNAMIC;
1890 smps_request = IEEE80211_SMPS_AUTOMATIC;
1892 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1893 smps_request = IEEE80211_SMPS_DYNAMIC;
1895 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1896 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1897 smps_request = IEEE80211_SMPS_STATIC;
1900 IWL_ERR(priv, "Invalid BT traffic load: %d\n",
1901 priv->bt_traffic_load);
1905 mutex_lock(&priv->mutex);
	 * We can not send a command to the firmware while scanning. When the
	 * scan completes we will schedule this work again. We do the check
	 * with the mutex held to prevent a new scan request from arriving.
	 * We do not check STATUS_SCANNING, to avoid the race where the work
	 * is queued twice from different notifications but then quits without
	 * performing any work at all.
1914 if (test_bit(STATUS_SCAN_HW, &priv->status))
1917 if (priv->cfg->ops->lib->update_chain_flags)
1918 priv->cfg->ops->lib->update_chain_flags(priv);
1920 if (smps_request != -1) {
1921 for_each_context(priv, ctx) {
1922 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
1923 ieee80211_request_smps(ctx->vif, smps_request);
1927 mutex_unlock(&priv->mutex);
static void iwlagn_print_uartmsg(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, "
			"Update Req = 0x%X",
		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1MSGTYPE_POS,
		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1SSN_POS,
		(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1UPDATEREQ_POS);

	IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
			"Chl_SeqN = 0x%X, In band = 0x%X",
		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
		(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2CHLSEQN_POS,
		(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2INBAND_POS);

	IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SCOESCO_POS,
		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SNIFF_POS,
		(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3A2DP_POS,
		(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3ACL_POS,
		(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3MASTER_POS,
		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3OBEX_POS);

	IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X",
		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
			BT_UART_MSG_FRAME4IDLEDURATION_POS);

	IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
			"eSCO Retransmissions = 0x%X",
		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5TXACTIVITY_POS,
		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5RXACTIVITY_POS,
		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

	IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6DISCOVERABLE_POS);

	IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = "
			"0x%X, Connectable = 0x%X",
		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
		(BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS,
		(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7CONNECTABLE_POS);
}
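/*
 * Every field above is decoded with the same (FIELD_MSK & frame) >> FIELD_POS
 * pattern. A hypothetical helper macro (a sketch only, not part of the
 * driver) could make that explicit, relying on the existing _MSK/_POS naming:
 */
#if 0
#define BT_UART_MSG_DECODE(frame, field) \
	((BT_UART_MSG_##field##_MSK & (frame)) >> BT_UART_MSG_##field##_POS)

/* e.g. BT_UART_MSG_DECODE(uart_msg->frame3, FRAME3SCOESCO) */
#endif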
static void iwlagn_set_kill_msk(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	u8 kill_msk;
	static const __le32 bt_kill_ack_msg[2] = {
		IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
	static const __le32 bt_kill_cts_msg[2] = {
		IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };

	/* index 0 = default masks, index 1 = SCO/eSCO link reported in frame3 */
	kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
		? 1 : 0;
	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
	    priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
		priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
		priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];

		/* schedule to send runtime bt_config */
		queue_work(priv->workqueue, &priv->bt_runtime_config);
	}
}
void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
	unsigned long flags;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
	IWL_DEBUG_NOTIF(priv, "    status: %d\n", coex->bt_status);
	IWL_DEBUG_NOTIF(priv, "    traffic load: %d\n", coex->bt_traffic_load);
	IWL_DEBUG_NOTIF(priv, "    CI compliance: %d\n",
			coex->bt_ci_compliance);
	iwlagn_print_uartmsg(priv, uart_msg);

	priv->last_bt_traffic_load = priv->bt_traffic_load;
	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		if (priv->bt_status != coex->bt_status ||
		    priv->last_bt_traffic_load != coex->bt_traffic_load) {
			if (coex->bt_status) {
				/* BT on */
				if (!priv->bt_ch_announce)
					priv->bt_traffic_load =
						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
				else
					priv->bt_traffic_load =
						coex->bt_traffic_load;
			} else {
				/* BT off */
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
			}
			priv->bt_status = coex->bt_status;
			queue_work(priv->workqueue,
				   &priv->bt_traffic_change_work);
		}
	}

	iwlagn_set_kill_msk(priv, uart_msg);

	/* FIXME: based on notification, adjust the prio_boost */

	spin_lock_irqsave(&priv->lock, flags);
	priv->bt_ci_compliance = coex->bt_ci_compliance;
	spin_unlock_irqrestore(&priv->lock, flags);
}
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
	iwlagn_rx_handler_setup(priv);
	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
		iwlagn_bt_coex_profile_notif;
}
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
	iwlagn_setup_deferred_work(priv);

	INIT_WORK(&priv->bt_traffic_change_work,
		  iwlagn_bt_traffic_change_work);
}
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->bt_traffic_change_work);
}
static bool is_single_rx_stream(struct iwl_priv *priv)
{
	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
	       priv->current_ht_config.single_chain_sufficient;
}
#define IWL_NUM_RX_CHAINS_MULTIPLE 3
#define IWL_NUM_RX_CHAINS_SINGLE 2
#define IWL_NUM_IDLE_CHAINS_DUAL 2
#define IWL_NUM_IDLE_CHAINS_SINGLE 1
/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity.  Fewer saves power
 * at the expense of throughput, but only when not in powersave to
 * start with.
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		return IWL_NUM_RX_CHAINS_SINGLE;
	}

	/* # of Rx chains to use when expecting MIMO. */
	if (is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;
	else
		return IWL_NUM_RX_CHAINS_MULTIPLE;
}
/*
 * When we are in power saving mode, unless the device supports spatial
 * multiplexing power save, use the active count for the rx chain count.
 */
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
	/* # Rx chains when idling, depending on SMPS mode */
	switch (priv->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IWL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d",
		     priv->current_ht_config.smps);
		return active_cnt;
	}
}
/* up to 4 chains */
static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
{
	u8 res;
	res = (chain_bitmap & BIT(0)) >> 0;
	res += (chain_bitmap & BIT(1)) >> 1;
	res += (chain_bitmap & BIT(2)) >> 2;
	res += (chain_bitmap & BIT(3)) >> 3;
	return res;
}
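/*
 * Counting the four chain bits one at a time, as above, is equivalent to a
 * population count over the low nibble. A minimal sketch, assuming only
 * bits 0-3 can ever be set (hweight8() comes from <linux/bitops.h>):
 */
#if 0
	u8 res = hweight8(chain_bitmap & 0xf);
#endif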
/**
 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for the scan command ... it puts data in the
 * wrong place.
 */
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl_chain_noise_calibration()
	 *    checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		active_chains = first_antenna(active_chains);
	}

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);

	/* correct rx chain count according to hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
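/*
 * From the code above, the staging rx_chain word packs:
 *   - the valid antenna bitmap at RXON_RX_CHAIN_VALID_POS,
 *   - the (clamped) active chain count at RXON_RX_CHAIN_MIMO_CNT_POS,
 *   - the (clamped) idle chain count at RXON_RX_CHAIN_CNT_POS,
 *   - RXON_RX_CHAIN_MIMO_FORCE_MSK when not single-stream, at least two
 *     active chains are available, and the device is not in power save.
 */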
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
{
	int i;
	u8 ind = ant;

	if (priv->band == IEEE80211_BAND_2GHZ &&
	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
		return 0;

	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
		if (valid & BIT(ind))
			return ind;
	}
	return ant;
}
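/*
 * Typical use is to rotate a stored TX antenna index across the valid
 * antenna mask. A minimal caller sketch; the field names below are
 * illustrative, not taken from this file:
 */
#if 0
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
#endif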
static const char *get_csr_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
}
void iwl_dump_csr(struct iwl_priv *priv)
{
	int i;
	/* same register set that get_csr_string() above knows how to name */
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG, CSR_INT_COALESCING, CSR_INT,
		CSR_INT_MASK, CSR_FH_INT_STATUS, CSR_GPIO_IN,
		CSR_RESET, CSR_GP_CNTRL, CSR_HW_REV,
		CSR_EEPROM_REG, CSR_EEPROM_GP, CSR_OTP_GP_REG,
		CSR_GIO_REG, CSR_GP_UCODE_REG, CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1, CSR_UCODE_DRV_GP2, CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG, CSR_GIO_CHICKEN_BITS, CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG, CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(priv, "CSR values:\n");
	IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(priv, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(priv, csr_tbl[i]));
	}
}
static const char *get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
					 "  %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, "  %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}
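/*
 * The "display" path above formats the registers into a kmalloc'ed buffer
 * (e.g. for a debugfs read) instead of logging them. A minimal caller
 * sketch with illustrative local names; error handling elided:
 */
#if 0
	char *buf = NULL;
	int len = iwl_dump_fh(priv, &buf, true);

	if (len > 0) {
		/* copy "len" bytes from buf to userspace here */
		kfree(buf);
	}
#endif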
/* notification wait support */
void iwlagn_init_notification_wait(struct iwl_priv *priv,
				   struct iwl_notification_wait *wait_entry,
				   void (*fn)(struct iwl_priv *priv,
					      struct iwl_rx_packet *pkt),
				   u8 cmd)
{
	wait_entry->fn = fn;
	wait_entry->cmd = cmd;
	wait_entry->triggered = false;

	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_add(&wait_entry->list, &priv->_agn.notif_waits);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);
}
signed long iwlagn_wait_notification(struct iwl_priv *priv,
				     struct iwl_notification_wait *wait_entry,
				     unsigned long timeout)
{
	signed long ret;

	/* wait on the flag itself, not its address (which is always true) */
	ret = wait_event_timeout(priv->_agn.notif_waitq,
				 wait_entry->triggered,
				 timeout);

	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);

	return ret;
}
void iwlagn_remove_notification(struct iwl_priv *priv,
				struct iwl_notification_wait *wait_entry)
{
	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);
}
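/*
 * Typical notification-wait flow: register the wait entry, send the host
 * command that produces the notification, then block with a timeout.
 * iwlagn_wait_notification() always unlinks the entry itself, so
 * iwlagn_remove_notification() is only needed when bailing out before
 * waiting (e.g. when sending the command fails). A minimal sketch; the
 * notification ID, the NULL callback and the timeout are illustrative
 * assumptions, not definitions from this file:
 */
#if 0
	struct iwl_notification_wait calib_wait;
	signed long remaining;

	iwlagn_init_notification_wait(priv, &calib_wait, NULL,
				      CALIBRATION_COMPLETE_NOTIFICATION);
	/* ... send the host command that triggers the notification ... */
	remaining = iwlagn_wait_notification(priv, &calib_wait, HZ / 2);
	if (!remaining)
		IWL_ERR(priv, "timeout waiting for calibration notification\n");
#endif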