/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-calib.h"
#include "iwl-helpers.h"
/************************** RX-FUNCTIONS ****************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list.  If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index.  If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 *
 */

/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_rx_queue_space);
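
/*
 * Worked example (illustrative, not from the original source): with
 * RX_QUEUE_SIZE = 256, read = 10 and write = 250 gives s = 10 - 250 = -240,
 * which wraps to 16 and loses the 2-slot guard band, leaving 14 free slots.
 * The guard band keeps WRITE from ever catching up with READ, so "full" and
 * "empty" stay distinguishable (see "Rx theory of operation" above).
 */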

/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
		}

		q->write_actual = (q->write & ~0x7);
		iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);

	/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
	}

	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);

/**
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
					  dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
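
/*
 * Note: shifting the DMA address right by 8 bits only preserves the full
 * address because receive buffers are 256-byte aligned and limited to 36
 * bits -- the same constraints enforced by the BUG_ON() checks in
 * iwl_rx_allocate() below.
 */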

/**
 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwl_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	int write;

	spin_lock_irqsave(&rxq->lock, flags);
	write = rxq->write & ~0x7;
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(priv, rxq);
	}
}
EXPORT_SYMBOL(iwl_rx_queue_restock);
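
/*
 * Note: iwl_rx_queue_restock() never allocates memory itself; it only moves
 * buffers already sitting on rx_free into the hardware queue.  When the
 * pre-allocated pool runs low it defers to the rx_replenish work item, which
 * may sleep while allocating pages with GFP_KERNEL.
 */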

/**
 * iwl_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except during initialization).
 */
void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		priv->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

void iwl_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwl_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_replenish);

void iwl_rx_replenish_now(struct iwl_priv *priv)
{
	iwl_rx_allocate(priv, GFP_ATOMIC);

	iwl_rx_queue_restock(priv);
}
EXPORT_SYMBOL(iwl_rx_replenish_now);
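
/*
 * The two replenish entry points differ only in allocation context:
 * iwl_rx_replenish() runs from process context (the rx_replenish work item),
 * can use GFP_KERNEL and takes priv->lock around the restock, while
 * iwl_rx_replenish_now() is intended for atomic (tasklet/IRQ) context and
 * therefore allocates with GFP_ATOMIC.
 */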

/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 * This free routine walks the list of POOL entries and if the SKB is set to
 * non-NULL it is unmapped and freed.
 */
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << priv->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->dma_addr);
	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
EXPORT_SYMBOL(iwl_rx_queue_free);

int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct device *dev = &priv->pci_dev->dev;
	int i;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
				     GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;

err_rb:
	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->dma_addr);
err_bd:
	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);
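
/*
 * After iwl_rx_queue_alloc() every entry of the pool (RX_QUEUE_SIZE +
 * RX_FREE_BUFFERS buffers) sits on rx_used with no page attached; rx_free
 * stays empty until iwl_rx_replenish() allocates pages and moves the buffers
 * over.
 */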

void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << priv->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}

int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (!priv->cfg->use_isr_legacy)
		rb_timeout = RX_RB_TIMEOUT;

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->dma_addr >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}

int iwl_rxq_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}
EXPORT_SYMBOL(iwl_rxq_stop);

void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    priv->missed_beacon_threshold) {
		IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
			le32_to_cpu(missed_beacon->total_missed_becons),
			le32_to_cpu(missed_beacon->num_recvd_beacons),
			le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(STATUS_SCANNING, &priv->status))
			iwl_init_sensitivity(priv);
	}
}
EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);

void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);

	if (!report->state) {
		IWL_DEBUG_11H(priv,
			"Spectrum Measure Notification: Start\n");
		return;
	}

	memcpy(&priv->measure_report, report, sizeof(*report));
	priv->measurement_status |= MEASUREMENT_READY;
}
EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);

/* Calculate noise level, based on measurements during network silence just
 * before arriving beacon.  This measurement can be done only if we know
 * exactly when to expect beacons, therefore only when we're associated. */
static void iwl_rx_calc_noise(struct iwl_priv *priv)
{
	struct statistics_rx_non_phy *rx_info
				= &(priv->statistics.rx.general);
	int num_active_rx = 0;
	int total_silence = 0;
	int bcn_silence_a =
		le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
	int bcn_silence_b =
		le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
	int bcn_silence_c =
		le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;

	if (bcn_silence_a) {
		total_silence += bcn_silence_a;
		num_active_rx++;
	}
	if (bcn_silence_b) {
		total_silence += bcn_silence_b;
		num_active_rx++;
	}
	if (bcn_silence_c) {
		total_silence += bcn_silence_c;
		num_active_rx++;
	}

	/* Average among active antennas */
	if (num_active_rx)
		priv->last_rx_noise = (total_silence / num_active_rx) - 107;
	else
		priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;

	IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
			bcn_silence_a, bcn_silence_b, bcn_silence_c,
			priv->last_rx_noise);
}
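
/*
 * Illustrative numbers (not from the original source): with measured beacon
 * silence of 30 on chain A, 28 on chain B and chain C idle (0), the average
 * over the two active chains is 29, giving last_rx_noise = 29 - 107 = -78 dBm.
 */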

#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * Based on the assumption that all statistics counters are DWORDs.
 * FIXME: this function is for debugging only; it does not handle
 * counter roll-over.
 */
static void iwl_accumulative_statistics(struct iwl_priv *priv,
					__le32 *stats)
{
	int i;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;

	prev_stats = (__le32 *)&priv->statistics;
	accum_stats = (u32 *)&priv->accum_statistics;
	delta = (u32 *)&priv->delta_statistics;
	max_delta = (u32 *)&priv->max_delta;

	for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
	     i += sizeof(__le32), stats++, prev_stats++, delta++,
	     max_delta++, accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta = (le32_to_cpu(*stats) -
				le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative statistics for "no-counter" type statistics */
	priv->accum_statistics.general.temperature =
		priv->statistics.general.temperature;
	priv->accum_statistics.general.temperature_m =
		priv->statistics.general.temperature_m;
	priv->accum_statistics.general.ttl_timestamp =
		priv->statistics.general.ttl_timestamp;
	priv->accum_statistics.tx.tx_power.ant_a =
		priv->statistics.tx.tx_power.ant_a;
	priv->accum_statistics.tx.tx_power.ant_b =
		priv->statistics.tx.tx_power.ant_b;
	priv->accum_statistics.tx.tx_power.ant_c =
		priv->statistics.tx.tx_power.ant_c;
}
#endif

#define REG_RECALIB_PERIOD (60)

/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
#define BA_TIMEOUT_CNT (5)
#define BA_TIMEOUT_MAX (16)
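
/*
 * Example of how these thresholds are applied (illustrative numbers): if a
 * statistics interval shows expected_ack_cnt_delta = 200 but
 * actual_ack_cnt_delta = 80, the ratio is 40%, below ACK_CNT_RATIO (50);
 * combined with more than BA_TIMEOUT_CNT (5) block-ack timeouts this marks
 * the ACK health as bad in iwl_good_ack_health() below.
 */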

#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE)
/**
 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
 *
 * When the ACK count ratio is zero and the aggregated BA timeout retries
 * exceed BA_TIMEOUT_MAX, reload the firmware and bring the system back to
 * normal operation state.
 */
bool iwl_good_ack_health(struct iwl_priv *priv,
		struct iwl_rx_packet *pkt)
{
	bool rc = true;
	int actual_ack_cnt_delta, expected_ack_cnt_delta;
	int ba_timeout_delta;

	actual_ack_cnt_delta =
		le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
		le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
	expected_ack_cnt_delta =
		le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
		le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
	ba_timeout_delta =
		le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
		le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
	if ((priv->_agn.agg_tids_count > 0) &&
	    (expected_ack_cnt_delta > 0) &&
	    (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
		< ACK_CNT_RATIO) &&
	    (ba_timeout_delta > BA_TIMEOUT_CNT)) {
		IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
				" expected_ack_cnt = %d\n",
				actual_ack_cnt_delta, expected_ack_cnt_delta);

#ifdef CONFIG_IWLWIFI_DEBUG
		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
				priv->delta_statistics.tx.rx_detected_cnt);
		IWL_DEBUG_RADIO(priv,
				"ack_or_ba_timeout_collision delta = %d\n",
				priv->delta_statistics.tx.
				ack_or_ba_timeout_collision);
#endif
		IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
				ba_timeout_delta);
		if (!actual_ack_cnt_delta &&
		    (ba_timeout_delta >= BA_TIMEOUT_MAX))
			rc = false;
	}
	return rc;
}
EXPORT_SYMBOL(iwl_good_ack_health);
#endif

/**
 * iwl_good_plcp_health - checks for PLCP error rate.
 *
 * When the PLCP error rate exceeds the threshold, reset the radio
 * to improve throughput.
 */
bool iwl_good_plcp_health(struct iwl_priv *priv,
				struct iwl_rx_packet *pkt)
{
	bool rc = true;
	int combined_plcp_delta;
	unsigned int plcp_msec;
	unsigned long plcp_received_jiffies;

	/*
	 * check for plcp_err and trigger radio reset if it exceeds
	 * the plcp error threshold plcp_delta.
	 */
	plcp_received_jiffies = jiffies;
	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
					(long) priv->plcp_jiffies);
	priv->plcp_jiffies = plcp_received_jiffies;
	/*
	 * check to make sure plcp_msec is not 0 to prevent division
	 * by zero.
	 */
	if (plcp_msec) {
		combined_plcp_delta =
			(le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
			le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
			(le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
			le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));

		if ((combined_plcp_delta > 0) &&
		    ((combined_plcp_delta * 100) / plcp_msec) >
			priv->cfg->plcp_delta_threshold) {
			/*
			 * if plcp_err exceed the threshold,
			 * the following data is printed in csv format:
			 *    Text: plcp_err exceeded %d,
			 *    Received ofdm.plcp_err,
			 *    Current ofdm.plcp_err,
			 *    Received ofdm_ht.plcp_err,
			 *    Current ofdm_ht.plcp_err,
			 *    combined_plcp_delta,
			 *    plcp_msec
			 */
			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
				"%u, %u, %u, %u, %d, %u mSecs\n",
				priv->cfg->plcp_delta_threshold,
				le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
				le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
				le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
				le32_to_cpu(
					priv->statistics.rx.ofdm_ht.plcp_err),
				combined_plcp_delta, plcp_msec);
			rc = false;
		}
	}
	return rc;
}
EXPORT_SYMBOL(iwl_good_plcp_health);
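
/*
 * Illustrative example: if 60 new PLCP errors (OFDM plus OFDM-HT) arrive
 * over a 1200 ms interval, the normalized rate is 60 * 100 / 1200 = 5
 * errors per 100 ms; the radio is reset only when this value exceeds the
 * per-device plcp_delta_threshold.
 */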

static void iwl_recover_from_statistics(struct iwl_priv *priv,
				struct iwl_rx_packet *pkt)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;
	if (iwl_is_associated(priv)) {
		if (priv->cfg->ops->lib->check_ack_health) {
			if (!priv->cfg->ops->lib->check_ack_health(
			    priv, pkt)) {
				/*
				 * low ack count detected
				 * restart Firmware
				 */
				IWL_ERR(priv, "low ack count detected, "
					"restart firmware\n");
				iwl_force_reset(priv, IWL_FW_RESET);
			}
		} else if (priv->cfg->ops->lib->check_plcp_health) {
			if (!priv->cfg->ops->lib->check_plcp_health(
			    priv, pkt)) {
				/*
				 * high plcp error detected
				 * reset Radio
				 */
				iwl_force_reset(priv, IWL_RF_RESET);
			}
		}
	}
}

void iwl_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
		     (int)sizeof(priv->statistics),
		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);

	change = ((priv->statistics.general.temperature !=
		   pkt->u.stats.general.temperature) ||
		  ((priv->statistics.flag &
		    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
		   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

#ifdef CONFIG_IWLWIFI_DEBUG
	iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
	iwl_recover_from_statistics(priv, pkt);

	memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
EXPORT_SYMBOL(iwl_rx_statistics);

void iwl_reply_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUG
		memset(&priv->accum_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->delta_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->max_delta, 0,
			sizeof(struct iwl_notif_statistics));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}
	iwl_rx_statistics(priv, rxb);
}
EXPORT_SYMBOL(iwl_reply_statistics);

/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwl_calc_rssi(struct iwl_priv *priv,
				struct iwl_rx_phy_res *rx_resp)
{
	return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
}

#ifdef CONFIG_IWLWIFI_DEBUG
/**
 * iwl_dbg_report_frame - dump frame to syslog during debug sessions
 *
 * You may hack this function to show different aspects of received frames,
 * including selective frame dumps.
 * group100 parameter selects whether to show 1 out of 100 good data frames.
 *    All beacon and probe response frames are printed.
 */
static void iwl_dbg_report_frame(struct iwl_priv *priv,
		      struct iwl_rx_phy_res *phy_res, u16 length,
		      struct ieee80211_hdr *header, int group100)
{
	u32 to_us;
	u32 print_summary = 0;
	u32 print_dump = 0;	/* set to 1 to dump all frames' contents */
	u32 hundred = 0;
	u32 dataframe = 0;
	__le16 fc;
	u16 seq_ctl;
	u16 channel;
	u16 phy_flags;
	u32 rate_n_flags;
	u32 tsf_low;
	int rssi;

	if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
		return;

	/* MAC header */
	fc = header->frame_control;
	seq_ctl = le16_to_cpu(header->seq_ctrl);

	/* metadata */
	channel = le16_to_cpu(phy_res->channel);
	phy_flags = le16_to_cpu(phy_res->phy_flags);
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* signal statistics */
	rssi = iwl_calc_rssi(priv, phy_res);
	tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;

	to_us = !compare_ether_addr(header->addr1, priv->mac_addr);

	/* if data frame is to us and all is good,
	 *   (optionally) print summary for only 1 out of every 100 */
	if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
	    cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
		dataframe = 1;
		if (!group100)
			print_summary = 1;	/* print each frame */
		else if (priv->framecnt_to_us < 100) {
			priv->framecnt_to_us++;
			print_summary = 0;
		} else {
			priv->framecnt_to_us = 0;
			print_summary = 1;
			hundred = 1;
		}
	} else {
		/* print summary for all other frames */
		print_summary = 1;
	}

	if (print_summary) {
		char *title;
		int rate_idx;
		u32 bitrate;

		if (hundred)
			title = "100Frames";
		else if (ieee80211_has_retry(fc))
			title = "Retry";
		else if (ieee80211_is_assoc_resp(fc))
			title = "AscRsp";
		else if (ieee80211_is_reassoc_resp(fc))
			title = "RasRsp";
		else if (ieee80211_is_probe_resp(fc)) {
			title = "PrbRsp";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_beacon(fc)) {
			title = "Beacon";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_atim(fc))
			title = "ATIM";
		else if (ieee80211_is_auth(fc))
			title = "Auth";
		else if (ieee80211_is_deauth(fc))
			title = "DeAuth";
		else if (ieee80211_is_disassoc(fc))
			title = "DisAssoc";
		else
			title = "Frame";

		rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
		if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
			bitrate = 0;
			WARN_ON_ONCE(1);
		} else {
			bitrate = iwl_rates[rate_idx].ieee / 2;
		}

		/* print frame summary.
		 * MAC addresses show just the last byte (for brevity),
		 *    but you can hack it to show more, if you'd like to. */
		if (dataframe)
			IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
				     "len=%u, rssi=%d, chnl=%d, rate=%u,\n",
				     title, le16_to_cpu(fc), header->addr1[5],
				     length, rssi, channel, bitrate);
		else {
			/* src/dst addresses assume managed mode */
			IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
				     "len=%u, rssi=%d, tim=%lu usec, "
				     "phy=0x%02x, chnl=%d\n",
				     title, le16_to_cpu(fc), header->addr1[5],
				     header->addr3[5], length, rssi,
				     tsf_low - priv->scan_start_tsf,
				     phy_flags, channel);
		}
	}
	if (print_dump)
		iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
}
#endif

/*
 * returns non-zero if packet should be dropped
 */
int iwl_set_decrypted_flag(struct iwl_priv *priv,
			   struct ieee80211_hdr *hdr,
			   u32 decrypt_res,
			   struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_set_decrypted_flag);

static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
					RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
					RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
					decrypt_in, decrypt_out);

	return decrypt_out;
}
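
/*
 * Note: this helper rewrites the RX_MPDU_RES_STATUS_* bits reported with
 * REPLY_RX_MPDU_CMD into the legacy RX_RES_STATUS_* layout, so that
 * iwl_set_decrypted_flag() can treat REPLY_RX and REPLY_RX_MPDU_CMD frames
 * identically (see iwl_rx_reply_rx() below).
 */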

static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	int ret = 0;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(priv, "alloc_skb failed\n");
		return;
	}

	skb_reserve(skb, IWL_LINK_HDR_MAX);
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	/* mac80211 currently doesn't support paged SKBs.  Convert the SKB to
	 * a linear one for management frames and for data frames that require
	 * software decryption or software defragmentation. */
	if (ieee80211_is_mgmt(fc) ||
	    ieee80211_has_protected(fc) ||
	    ieee80211_has_morefrags(fc) ||
	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
	    (ieee80211_is_data_qos(fc) &&
	     *ieee80211_get_qos_ctl(hdr) &
	     IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
		ret = skb_linearize(skb);
	else
		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
			0 : -ENOMEM;

	if (ret) {
		kfree_skb(skb);
		goto out;
	}

	/*
	 * XXX: We cannot touch the page and its virtual memory (hdr) after
	 * here.  It might have already been freed by the above skb change.
	 */

	iwl_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
 out:
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}

/* Called for REPLY_RX (legacy ABG frames), or
 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
void iwl_rx_reply_rx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct iwl4965_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/**
	 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
	 *	REPLY_RX: physical layer info is in this buffer
	 *	REPLY_RX_MPDU_CMD: physical layer info was sent in separate
	 *		command and cached in priv->last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == REPLY_RX) {
		phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
				+ phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
				phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!priv->last_phy_res[0]) {
			IWL_ERR(priv, "MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
		amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
		ampdu_status = iwl_translate_rx_status(priv,
				le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
				phy_res->cfg_phy_cnt);
		return;
	}

	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
				le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
	rx_status.rate_idx =
		iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable.  In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_TSFT;*/

	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = iwl_calc_rssi(priv, phy_res);

	/* Meaningful noise values are available only from beacon statistics,
	 *   which are gathered only when associated, and indicate noise
	 *   only for the associated network channel ...
	 * Ignore these noise values while scanning (other channels) */
	if (iwl_is_associated(priv) &&
	    !test_bit(STATUS_SCANNING, &priv->status)) {
		rx_status.noise = priv->last_rx_noise;
	} else {
		rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
	}

	/* Reset beacon noise level if not associated. */
	if (!iwl_is_associated(priv))
		priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;

#ifdef CONFIG_IWLWIFI_DEBUG
	/* Set "1" to report good data frames in groups of 100 */
	if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
		iwl_dbg_report_frame(priv, phy_res, len, header, 1);
#endif
	iwl_dbg_log_rx_data_frame(priv, len, header);
	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
		rx_status.signal, rx_status.noise,
		(unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field.  This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
		>> RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;

	iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
				    rxb, &rx_status);
}
EXPORT_SYMBOL(iwl_rx_reply_rx);

/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
			    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	priv->last_phy_res[0] = 1;
	memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
	       sizeof(struct iwl_rx_phy_res));
}
EXPORT_SYMBOL(iwl_rx_reply_rx_phy);