2 * Copyright (c) 2008-2011 Atheros Communications Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/dma-mapping.h>
19 #include "ar9003_mac.h"
21 #define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
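/*
 * SKB_CB_ATHBUF() stashes (and recovers) the ath_buf owning an RX skb in the
 * skb's control buffer (cb); the EDMA completion path uses it to map a
 * completed skb back to its buffer.
 */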
23 static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
24 int mindelta, int main_rssi_avg,
25 int alt_rssi_avg, int pkt_count)
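/*
 * The alt chain is judged better only once more than 50 packets have been
 * sampled, and then only if its average RSSI beats the main chain by more
 * than mindelta, or by more than maxdelta when the alt receive ratio has
 * already reached ATH_ANT_DIV_COMB_ALT_ANT_RATIO2.
 */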
27 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
28 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
29 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
32 static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
33 int curr_main_set, int curr_alt_set,
34 int alt_rssi_avg, int main_rssi_avg)
39 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
44 if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
45 (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
46 (alt_rssi_avg >= (main_rssi_avg - 5))) ||
47 ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
48 (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
49 (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
60 static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
62 return sc->ps_enabled &&
63 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
67 * Setup and link descriptors.
69 * 11N: we can no longer afford to self-link the last descriptor.
70 * MAC acknowledges BA status as long as it copies frames to host
71 * buffer (or rx fifo). This can incorrectly acknowledge packets
72 * to a sender if last desc is self-linked.
74 static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
76 struct ath_hw *ah = sc->sc_ah;
77 struct ath_common *common = ath9k_hw_common(ah);
84 ds->ds_link = 0; /* link to null */
85 ds->ds_data = bf->bf_buf_addr;
87 /* virtual addr of the beginning of the buffer. */
90 ds->ds_vdata = skb->data;
93 * setup rx descriptors. The rx_bufsize here tells the hardware
94 * how much data it can DMA to us and that we are prepared to process the incoming frame.
97 ath9k_hw_setuprxdesc(ah, ds,
101 if (sc->rx.rxlink == NULL)
102 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
104 *sc->rx.rxlink = bf->bf_daddr;
106 sc->rx.rxlink = &ds->ds_link;
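/*
 * The first buffer is handed to the hardware via ath9k_hw_putrxbuf(); every
 * later one is chained in by writing its DMA address into the previous
 * descriptor's link word, and rxlink always tracks the link word of the last
 * descriptor queued.
 */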
109 static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
111 /* XXX block beacon interrupts */
112 ath9k_hw_setantenna(sc->sc_ah, antenna);
113 sc->rx.defant = antenna;
114 sc->rx.rxotherant = 0;
117 static void ath_opmode_init(struct ath_softc *sc)
119 struct ath_hw *ah = sc->sc_ah;
120 struct ath_common *common = ath9k_hw_common(ah);
124 /* configure rx filter */
125 rfilt = ath_calcrxfilter(sc);
126 ath9k_hw_setrxfilter(ah, rfilt);
128 /* configure bssid mask */
129 ath_hw_setbssidmask(common);
131 /* configure operational mode */
132 ath9k_hw_setopmode(ah);
134 /* calculate and install multicast filter */
135 mfilt[0] = mfilt[1] = ~0;
136 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
139 static bool ath_rx_edma_buf_link(struct ath_softc *sc,
140 enum ath9k_rx_qtype qtype)
142 struct ath_hw *ah = sc->sc_ah;
143 struct ath_rx_edma *rx_edma;
147 rx_edma = &sc->rx.rx_edma[qtype];
148 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
151 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
152 list_del_init(&bf->list);
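/*
 * EDMA hardware writes the RX status block at the head of the buffer, so
 * clear and flush that area first; stale status words must not be mistaken
 * for a completed frame.
 */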
157 memset(skb->data, 0, ah->caps.rx_status_len);
158 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
159 ah->caps.rx_status_len, DMA_TO_DEVICE);
161 SKB_CB_ATHBUF(skb) = bf;
162 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
163 skb_queue_tail(&rx_edma->rx_fifo, skb);
168 static void ath_rx_addbuffer_edma(struct ath_softc *sc,
169 enum ath9k_rx_qtype qtype, int size)
171 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
174 if (list_empty(&sc->rx.rxbuf)) {
175 ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
179 while (!list_empty(&sc->rx.rxbuf)) {
182 if (!ath_rx_edma_buf_link(sc, qtype))
190 static void ath_rx_remove_buffer(struct ath_softc *sc,
191 enum ath9k_rx_qtype qtype)
194 struct ath_rx_edma *rx_edma;
197 rx_edma = &sc->rx.rx_edma[qtype];
199 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
200 bf = SKB_CB_ATHBUF(skb);
202 list_add_tail(&bf->list, &sc->rx.rxbuf);
206 static void ath_rx_edma_cleanup(struct ath_softc *sc)
208 struct ath_hw *ah = sc->sc_ah;
209 struct ath_common *common = ath9k_hw_common(ah);
212 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
213 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
215 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
217 dma_unmap_single(sc->dev, bf->bf_buf_addr,
220 dev_kfree_skb_any(bf->bf_mpdu);
226 INIT_LIST_HEAD(&sc->rx.rxbuf);
228 kfree(sc->rx.rx_bufptr);
229 sc->rx.rx_bufptr = NULL;
232 static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
234 skb_queue_head_init(&rx_edma->rx_fifo);
235 skb_queue_head_init(&rx_edma->rx_buffers);
236 rx_edma->rx_fifo_hwsize = size;
239 static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
241 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
242 struct ath_hw *ah = sc->sc_ah;
248 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
249 ah->caps.rx_status_len);
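/*
 * common->rx_bufsize includes room for the in-band RX status block that
 * EDMA chips place in front of each frame, so the size programmed into the
 * hardware leaves that area out.
 */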
251 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
252 ah->caps.rx_lp_qdepth);
253 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
254 ah->caps.rx_hp_qdepth);
256 size = sizeof(struct ath_buf) * nbufs;
257 bf = kzalloc(size, GFP_KERNEL);
261 INIT_LIST_HEAD(&sc->rx.rxbuf);
262 sc->rx.rx_bufptr = bf;
264 for (i = 0; i < nbufs; i++, bf++) {
265 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
271 memset(skb->data, 0, common->rx_bufsize);
274 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
277 if (unlikely(dma_mapping_error(sc->dev,
279 dev_kfree_skb_any(skb);
283 "dma_mapping_error() on RX init\n");
288 list_add_tail(&bf->list, &sc->rx.rxbuf);
294 ath_rx_edma_cleanup(sc);
298 static void ath_edma_start_recv(struct ath_softc *sc)
300 spin_lock_bh(&sc->rx.rxbuflock);
302 ath9k_hw_rxena(sc->sc_ah);
304 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
305 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
307 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
308 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
312 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
314 spin_unlock_bh(&sc->rx.rxbuflock);
317 static void ath_edma_stop_recv(struct ath_softc *sc)
319 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
320 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
323 int ath_rx_init(struct ath_softc *sc, int nbufs)
325 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
330 spin_lock_init(&sc->sc_pcu_lock);
331 sc->sc_flags &= ~SC_OP_RXFLUSH;
332 spin_lock_init(&sc->rx.rxbuflock);
334 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
335 sc->sc_ah->caps.rx_status_len;
337 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
338 return ath_rx_edma_init(sc, nbufs);
340 ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
341 common->cachelsz, common->rx_bufsize);
343 /* Initialize rx descriptors */
345 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
349 "failed to allocate rx descriptors: %d\n",
354 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
355 skb = ath_rxbuf_alloc(common, common->rx_bufsize,
363 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
366 if (unlikely(dma_mapping_error(sc->dev,
368 dev_kfree_skb_any(skb);
372 "dma_mapping_error() on RX init\n");
377 sc->rx.rxlink = NULL;
387 void ath_rx_cleanup(struct ath_softc *sc)
389 struct ath_hw *ah = sc->sc_ah;
390 struct ath_common *common = ath9k_hw_common(ah);
394 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
395 ath_rx_edma_cleanup(sc);
398 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
401 dma_unmap_single(sc->dev, bf->bf_buf_addr,
410 if (sc->rx.rxdma.dd_desc_len != 0)
411 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
416 * Calculate the receive filter according to the
417 * operating mode and state:
419 * o always accept unicast, broadcast, and multicast traffic
420 * o maintain current state of phy error reception (the hal
421 * may enable phy error frames for noise immunity work)
422 * o probe request frames are accepted only when operating in
423 * hostap, adhoc, or monitor modes
424 * o enable promiscuous mode according to the interface state
426 * - when operating in adhoc mode so the 802.11 layer creates
427 * node table entries for peers,
428 * - when operating in station mode for collecting rssi data when
429 * the station is otherwise quiet, or
430 * - when operating as a repeater so we see repeater-sta beacons
434 u32 ath_calcrxfilter(struct ath_softc *sc)
436 #define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
440 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
441 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
442 | ATH9K_RX_FILTER_MCAST;
444 if (sc->rx.rxfilter & FIF_PROBE_REQ)
445 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
448 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
449 * mode interface or when in monitor mode. AP mode does not need this
450 * since it receives all in-BSS frames anyway.
452 if (sc->sc_ah->is_monitoring)
453 rfilt |= ATH9K_RX_FILTER_PROM;
455 if (sc->rx.rxfilter & FIF_CONTROL)
456 rfilt |= ATH9K_RX_FILTER_CONTROL;
458 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
460 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
461 rfilt |= ATH9K_RX_FILTER_MYBEACON;
463 rfilt |= ATH9K_RX_FILTER_BEACON;
465 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
466 (sc->rx.rxfilter & FIF_PSPOLL))
467 rfilt |= ATH9K_RX_FILTER_PSPOLL;
469 if (conf_is_ht(&sc->hw->conf))
470 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
472 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
473 /* The following may also be needed for other older chips */
474 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
475 rfilt |= ATH9K_RX_FILTER_PROM;
476 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
481 #undef RX_FILTER_PRESERVE
484 int ath_startrecv(struct ath_softc *sc)
486 struct ath_hw *ah = sc->sc_ah;
487 struct ath_buf *bf, *tbf;
489 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
490 ath_edma_start_recv(sc);
494 spin_lock_bh(&sc->rx.rxbuflock);
495 if (list_empty(&sc->rx.rxbuf))
498 sc->rx.rxlink = NULL;
499 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
500 ath_rx_buf_link(sc, bf);
503 /* We could have deleted elements so the list may be empty now */
504 if (list_empty(&sc->rx.rxbuf))
507 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
508 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
513 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
515 spin_unlock_bh(&sc->rx.rxbuflock);
520 bool ath_stoprecv(struct ath_softc *sc)
522 struct ath_hw *ah = sc->sc_ah;
523 bool stopped, reset = false;
525 spin_lock_bh(&sc->rx.rxbuflock);
526 ath9k_hw_abortpcurecv(ah);
527 ath9k_hw_setrxfilter(ah, 0);
528 stopped = ath9k_hw_stopdmarecv(ah, &reset);
530 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
531 ath_edma_stop_recv(sc);
533 sc->rx.rxlink = NULL;
534 spin_unlock_bh(&sc->rx.rxbuflock);
536 if (!(ah->ah_flags & AH_UNPLUGGED) &&
537 unlikely(!stopped)) {
538 ath_err(ath9k_hw_common(sc->sc_ah),
539 "Could not stop RX, we could be "
540 "confusing the DMA engine when we start RX up\n");
541 ATH_DBG_WARN_ON_ONCE(!stopped);
543 return stopped && !reset;
546 void ath_flushrecv(struct ath_softc *sc)
548 sc->sc_flags |= SC_OP_RXFLUSH;
549 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
550 ath_rx_tasklet(sc, 1, true);
551 ath_rx_tasklet(sc, 1, false);
552 sc->sc_flags &= ~SC_OP_RXFLUSH;
555 static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
557 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
558 struct ieee80211_mgmt *mgmt;
559 u8 *pos, *end, id, elen;
560 struct ieee80211_tim_ie *tim;
562 mgmt = (struct ieee80211_mgmt *)skb->data;
563 pos = mgmt->u.beacon.variable;
564 end = skb->data + skb->len;
566 while (pos + 2 < end) {
569 if (pos + elen > end)
572 if (id == WLAN_EID_TIM) {
573 if (elen < sizeof(*tim))
575 tim = (struct ieee80211_tim_ie *) pos;
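/*
 * Bit 0 of the TIM Bitmap Control field is the traffic indicator for AID 0,
 * i.e. buffered broadcast/multicast frames; it is only meaningful on the
 * beacon whose DTIM count is zero.
 */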
576 if (tim->dtim_count != 0)
578 return tim->bitmap_ctrl & 0x01;
587 static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
589 struct ieee80211_mgmt *mgmt;
590 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
592 if (skb->len < 24 + 8 + 2 + 2)
595 mgmt = (struct ieee80211_mgmt *)skb->data;
596 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
597 /* TODO: This doesn't work well if you have stations
598 * associated with two different APs because curbssid
599 * is just the last AP that any of the stations associated with.
602 return; /* not from our current AP */
605 sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
607 if (sc->ps_flags & PS_BEACON_SYNC) {
608 sc->ps_flags &= ~PS_BEACON_SYNC;
609 ath_dbg(common, ATH_DBG_PS,
610 "Reconfigure Beacon timers based on timestamp from the AP\n");
612 sc->ps_flags &= ~PS_TSFOOR_SYNC;
615 if (ath_beacon_dtim_pending_cab(skb)) {
617 * Remain awake waiting for buffered broadcast/multicast
618 * frames. If the last broadcast/multicast frame is not
619 * received properly, the next beacon frame will work as
620 * a backup trigger for returning into NETWORK SLEEP state,
621 * so we are waiting for it as well.
623 ath_dbg(common, ATH_DBG_PS,
624 "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
625 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
629 if (sc->ps_flags & PS_WAIT_FOR_CAB) {
631 * This can happen if a broadcast frame is dropped or the AP
632 * fails to send a frame indicating that all CAB frames have been delivered.
635 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
636 ath_dbg(common, ATH_DBG_PS,
637 "PS wait for CAB frames timed out\n");
641 static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
643 struct ieee80211_hdr *hdr;
644 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
646 hdr = (struct ieee80211_hdr *)skb->data;
648 /* Process Beacon and CAB receive in PS state */
649 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
650 && ieee80211_is_beacon(hdr->frame_control))
651 ath_rx_ps_beacon(sc, skb);
652 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
653 (ieee80211_is_data(hdr->frame_control) ||
654 ieee80211_is_action(hdr->frame_control)) &&
655 is_multicast_ether_addr(hdr->addr1) &&
656 !ieee80211_has_moredata(hdr->frame_control)) {
658 * No more broadcast/multicast frames to be received at this
661 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
662 ath_dbg(common, ATH_DBG_PS,
663 "All PS CAB frames received, back to sleep\n");
664 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
665 !is_multicast_ether_addr(hdr->addr1) &&
666 !ieee80211_has_morefrags(hdr->frame_control)) {
667 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
668 ath_dbg(common, ATH_DBG_PS,
669 "Going back to sleep after having received PS-Poll data (0x%lx)\n",
670 sc->ps_flags & (PS_WAIT_FOR_BEACON |
672 PS_WAIT_FOR_PSPOLL_DATA |
673 PS_WAIT_FOR_TX_ACK));
677 static bool ath_edma_get_buffers(struct ath_softc *sc,
678 enum ath9k_rx_qtype qtype)
680 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
681 struct ath_hw *ah = sc->sc_ah;
682 struct ath_common *common = ath9k_hw_common(ah);
687 skb = skb_peek(&rx_edma->rx_fifo);
691 bf = SKB_CB_ATHBUF(skb);
694 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
695 common->rx_bufsize, DMA_FROM_DEVICE);
697 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
698 if (ret == -EINPROGRESS) {
699 /* let the device gain the buffer again */
700 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
701 common->rx_bufsize, DMA_FROM_DEVICE);
705 __skb_unlink(skb, &rx_edma->rx_fifo);
706 if (ret == -EINVAL) {
707 /* corrupt descriptor, skip this one and the following one */
708 list_add_tail(&bf->list, &sc->rx.rxbuf);
709 ath_rx_edma_buf_link(sc, qtype);
710 skb = skb_peek(&rx_edma->rx_fifo);
714 bf = SKB_CB_ATHBUF(skb);
717 __skb_unlink(skb, &rx_edma->rx_fifo);
718 list_add_tail(&bf->list, &sc->rx.rxbuf);
719 ath_rx_edma_buf_link(sc, qtype);
722 skb_queue_tail(&rx_edma->rx_buffers, skb);
727 static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
728 struct ath_rx_status *rs,
729 enum ath9k_rx_qtype qtype)
731 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
735 while (ath_edma_get_buffers(sc, qtype));
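/*
 * All buffers whose in-band status is complete now sit on rx_buffers;
 * return the oldest one to the caller.
 */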
736 skb = __skb_dequeue(&rx_edma->rx_buffers);
740 bf = SKB_CB_ATHBUF(skb);
741 ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
745 static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
746 struct ath_rx_status *rs)
748 struct ath_hw *ah = sc->sc_ah;
749 struct ath_common *common = ath9k_hw_common(ah);
754 if (list_empty(&sc->rx.rxbuf)) {
755 sc->rx.rxlink = NULL;
759 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
763 * Must provide the virtual address of the current
764 * descriptor, the physical address, and the virtual
765 * address of the next descriptor in the h/w chain.
766 * This allows the HAL to look ahead to see if the
767 * hardware is done with a descriptor by checking the
768 * done bit in the following descriptor and the address
769 * of the current descriptor the DMA engine is working
770 * on. All this is necessary because of our use of
771 * a self-linked list to avoid rx overruns.
773 ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
774 if (ret == -EINPROGRESS) {
775 struct ath_rx_status trs;
777 struct ath_desc *tds;
779 memset(&trs, 0, sizeof(trs));
780 if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
781 sc->rx.rxlink = NULL;
785 tbf = list_entry(bf->list.next, struct ath_buf, list);
788 * On some hardware the descriptor status words could
789 * get corrupted, including the done bit. Because of
790 * this, check if the next descriptor's done bit is set.
793 * If the next descriptor's done bit is set, the current
794 * descriptor has been corrupted. Force s/w to discard
795 * this descriptor and continue...
799 ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
800 if (ret == -EINPROGRESS)
808 * Synchronize the DMA transfer with CPU before
809 * 1. accessing the frame
810 * 2. requeueing the same buffer to h/w
812 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
819 /* Assumes you've already done the endian to CPU conversion */
820 static bool ath9k_rx_accept(struct ath_common *common,
821 struct ieee80211_hdr *hdr,
822 struct ieee80211_rx_status *rxs,
823 struct ath_rx_status *rx_stats,
826 bool is_mc, is_valid_tkip, strip_mic, mic_error;
827 struct ath_hw *ah = common->ah;
829 u8 rx_status_len = ah->caps.rx_status_len;
831 fc = hdr->frame_control;
833 is_mc = !!is_multicast_ether_addr(hdr->addr1);
834 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
835 test_bit(rx_stats->rs_keyix, common->tkip_keymap);
836 strip_mic = is_valid_tkip && !(rx_stats->rs_status &
837 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC));
839 if (!rx_stats->rs_datalen)
842 * rs_status follows rs_datalen so if rs_datalen is too large
843 * we can take a hint that hardware corrupted it, so ignore those frames.
846 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
849 /* Only use error bits from the last fragment */
850 if (rx_stats->rs_more)
853 mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
854 !ieee80211_has_morefrags(fc) &&
855 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
856 (rx_stats->rs_status & ATH9K_RXERR_MIC);
859 * The rx_stats->rs_status will not be set until the end of the
860 * chained descriptors so it can be ignored if rs_more is set. The
861 * rs_more will be false at the last element of the chained descriptors.
864 if (rx_stats->rs_status != 0) {
865 if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
866 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
869 if (rx_stats->rs_status & ATH9K_RXERR_PHY)
872 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
873 *decrypt_error = true;
878 * Reject error frames with the exception of
879 * decryption and MIC failures. For monitor mode,
880 * we also ignore the CRC error.
882 if (ah->is_monitoring) {
883 if (rx_stats->rs_status &
884 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
888 if (rx_stats->rs_status &
889 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
896 * For unicast frames the MIC error bit can have false positives,
897 * so all MIC error reports need to be validated in software.
898 * False negatives are not common, so skip software verification
899 * if the hardware considers the MIC valid.
902 rxs->flag |= RX_FLAG_MMIC_STRIPPED;
903 else if (is_mc && mic_error)
904 rxs->flag |= RX_FLAG_MMIC_ERROR;
909 static int ath9k_process_rate(struct ath_common *common,
910 struct ieee80211_hw *hw,
911 struct ath_rx_status *rx_stats,
912 struct ieee80211_rx_status *rxs)
914 struct ieee80211_supported_band *sband;
915 enum ieee80211_band band;
918 band = hw->conf.channel->band;
919 sband = hw->wiphy->bands[band];
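/*
 * Bit 7 of rs_rate marks an HT (MCS) rate; the low seven bits then carry
 * the MCS index rather than a legacy hardware rate code.
 */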
921 if (rx_stats->rs_rate & 0x80) {
923 rxs->flag |= RX_FLAG_HT;
924 if (rx_stats->rs_flags & ATH9K_RX_2040)
925 rxs->flag |= RX_FLAG_40MHZ;
926 if (rx_stats->rs_flags & ATH9K_RX_GI)
927 rxs->flag |= RX_FLAG_SHORT_GI;
928 rxs->rate_idx = rx_stats->rs_rate & 0x7f;
932 for (i = 0; i < sband->n_bitrates; i++) {
933 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
937 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
938 rxs->flag |= RX_FLAG_SHORTPRE;
945 * No valid hardware bitrate found -- we should not get here
946 * because hardware has already validated this frame as OK.
948 ath_dbg(common, ATH_DBG_XMIT,
949 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
955 static void ath9k_process_rssi(struct ath_common *common,
956 struct ieee80211_hw *hw,
957 struct ieee80211_hdr *hdr,
958 struct ath_rx_status *rx_stats)
960 struct ath_softc *sc = hw->priv;
961 struct ath_hw *ah = common->ah;
965 if ((ah->opmode != NL80211_IFTYPE_STATION) &&
966 (ah->opmode != NL80211_IFTYPE_ADHOC))
969 fc = hdr->frame_control;
970 if (!ieee80211_is_beacon(fc) ||
971 compare_ether_addr(hdr->addr3, common->curbssid)) {
972 /* TODO: This doesn't work well if you have stations
973 * associated with two different APs because curbssid
974 * is just the last AP that any of the stations associated with.
980 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
981 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
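/*
 * ATH_RSSI_LPF keeps a low-pass filtered beacon RSSI in sc->last_rssi; the
 * rounded average derived from it below is what ANI consumes through
 * ah->stats.avgbrssi.
 */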
983 last_rssi = sc->last_rssi;
984 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
985 rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
986 ATH_RSSI_EP_MULTIPLIER);
987 if (rx_stats->rs_rssi < 0)
988 rx_stats->rs_rssi = 0;
990 /* Update Beacon RSSI, this is used by ANI. */
991 ah->stats.avgbrssi = rx_stats->rs_rssi;
995 * For Decrypt or Demic errors, we only mark packet status here and always push
996 * the frame up to let mac80211 handle the actual error case, be it no
997 * decryption key or a real decryption error. This lets us keep statistics there.
999 static int ath9k_rx_skb_preprocess(struct ath_common *common,
1000 struct ieee80211_hw *hw,
1001 struct ieee80211_hdr *hdr,
1002 struct ath_rx_status *rx_stats,
1003 struct ieee80211_rx_status *rx_status,
1004 bool *decrypt_error)
1006 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
1009 * everything but the rate is checked here; the rate check is done
1010 * separately to avoid doing two rate lookups per frame.
1012 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
1015 /* Only use status info from the last fragment */
1016 if (rx_stats->rs_more)
1019 ath9k_process_rssi(common, hw, hdr, rx_stats);
1021 if (ath9k_process_rate(common, hw, rx_stats, rx_status))
1024 rx_status->band = hw->conf.channel->band;
1025 rx_status->freq = hw->conf.channel->center_freq;
1026 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
1027 rx_status->antenna = rx_stats->rs_antenna;
1028 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
1033 static void ath9k_rx_skb_postprocess(struct ath_common *common,
1034 struct sk_buff *skb,
1035 struct ath_rx_status *rx_stats,
1036 struct ieee80211_rx_status *rxs,
1039 struct ath_hw *ah = common->ah;
1040 struct ieee80211_hdr *hdr;
1041 int hdrlen, padpos, padsize;
1045 /* see if any padding is done by the hw and remove it */
1046 hdr = (struct ieee80211_hdr *) skb->data;
1047 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1048 fc = hdr->frame_control;
1049 padpos = ath9k_cmn_padpos(hdr->frame_control);
1051 /* The MAC header is padded to have 32-bit boundary if the
1052 * packet payload is non-zero. The general calculation for
1053 * padsize would take into account odd header lengths:
1054 * padsize = (4 - padpos % 4) % 4; However, since only
1055 * even-length headers are used, padding can only be 0 or 2
1056 * bytes and we can optimize this a bit. In addition, we must
1057 * not try to remove padding from short control frames that do
1058 * not have payload. */
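/* Example: a 26-byte QoS data header gives padpos = 26 and padsize = 2, so
 * two pad bytes are removed; a plain 24-byte header gives padsize = 0 and
 * the frame is left untouched. */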
1059 padsize = padpos & 3;
1060 if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
1061 memmove(skb->data + padsize, skb->data, padpos);
1062 skb_pull(skb, padsize);
1065 keyix = rx_stats->rs_keyix;
1067 if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
1068 ieee80211_has_protected(fc)) {
1069 rxs->flag |= RX_FLAG_DECRYPTED;
1070 } else if (ieee80211_has_protected(fc)
1071 && !decrypt_error && skb->len >= hdrlen + 4) {
1072 keyix = skb->data[hdrlen + 3] >> 6;
1074 if (test_bit(keyix, common->keymap))
1075 rxs->flag |= RX_FLAG_DECRYPTED;
1077 if (ah->sw_mgmt_crypto &&
1078 (rxs->flag & RX_FLAG_DECRYPTED) &&
1079 ieee80211_is_mgmt(fc))
1080 /* Use software decrypt for management frames. */
1081 rxs->flag &= ~RX_FLAG_DECRYPTED;
1084 static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
1085 struct ath_hw_antcomb_conf ant_conf,
1088 antcomb->quick_scan_cnt = 0;
1090 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
1091 antcomb->rssi_lna2 = main_rssi_avg;
1092 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
1093 antcomb->rssi_lna1 = main_rssi_avg;
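/*
 * The switch key packs main_lna_conf into the high nibble and alt_lna_conf
 * into the low nibble (0 = LNA1-LNA2, 1 = LNA2, 2 = LNA1, 3 = LNA1+LNA2),
 * as the case comments below spell out.
 */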
1095 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
1096 case 0x10: /* LNA2 A-B */
1097 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1098 antcomb->first_quick_scan_conf =
1099 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1100 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1102 case 0x20: /* LNA1 A-B */
1103 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1104 antcomb->first_quick_scan_conf =
1105 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1106 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1108 case 0x21: /* LNA1 LNA2 */
1109 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
1110 antcomb->first_quick_scan_conf =
1111 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1112 antcomb->second_quick_scan_conf =
1113 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1115 case 0x12: /* LNA2 LNA1 */
1116 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
1117 antcomb->first_quick_scan_conf =
1118 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1119 antcomb->second_quick_scan_conf =
1120 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1122 case 0x13: /* LNA2 A+B */
1123 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1124 antcomb->first_quick_scan_conf =
1125 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1126 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1128 case 0x23: /* LNA1 A+B */
1129 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1130 antcomb->first_quick_scan_conf =
1131 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1132 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1139 static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
1140 struct ath_hw_antcomb_conf *div_ant_conf,
1141 int main_rssi_avg, int alt_rssi_avg,
1145 switch (antcomb->quick_scan_cnt) {
1147 /* set main to the stored main conf, and alt to the first quick-scan conf */
1148 div_ant_conf->main_lna_conf = antcomb->main_conf;
1149 div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
1152 /* set main to the stored main conf, and alt to the second quick-scan conf */
1153 div_ant_conf->main_lna_conf = antcomb->main_conf;
1154 div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
1155 antcomb->rssi_first = main_rssi_avg;
1156 antcomb->rssi_second = alt_rssi_avg;
1158 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1160 if (ath_is_alt_ant_ratio_better(alt_ratio,
1161 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1162 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1163 main_rssi_avg, alt_rssi_avg,
1164 antcomb->total_pkt_count))
1165 antcomb->first_ratio = true;
1167 antcomb->first_ratio = false;
1168 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1169 if (ath_is_alt_ant_ratio_better(alt_ratio,
1170 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1171 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1172 main_rssi_avg, alt_rssi_avg,
1173 antcomb->total_pkt_count))
1174 antcomb->first_ratio = true;
1176 antcomb->first_ratio = false;
1178 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1179 (alt_rssi_avg > main_rssi_avg +
1180 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1181 (alt_rssi_avg > main_rssi_avg)) &&
1182 (antcomb->total_pkt_count > 50))
1183 antcomb->first_ratio = true;
1185 antcomb->first_ratio = false;
1189 antcomb->alt_good = false;
1190 antcomb->scan_not_start = false;
1191 antcomb->scan = false;
1192 antcomb->rssi_first = main_rssi_avg;
1193 antcomb->rssi_third = alt_rssi_avg;
1195 if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
1196 antcomb->rssi_lna1 = alt_rssi_avg;
1197 else if (antcomb->second_quick_scan_conf ==
1198 ATH_ANT_DIV_COMB_LNA2)
1199 antcomb->rssi_lna2 = alt_rssi_avg;
1200 else if (antcomb->second_quick_scan_conf ==
1201 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
1202 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
1203 antcomb->rssi_lna2 = main_rssi_avg;
1204 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
1205 antcomb->rssi_lna1 = main_rssi_avg;
1208 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
1209 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
1210 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1212 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
1214 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1215 if (ath_is_alt_ant_ratio_better(alt_ratio,
1216 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1217 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1218 main_rssi_avg, alt_rssi_avg,
1219 antcomb->total_pkt_count))
1220 antcomb->second_ratio = true;
1222 antcomb->second_ratio = false;
1223 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1224 if (ath_is_alt_ant_ratio_better(alt_ratio,
1225 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1226 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1227 main_rssi_avg, alt_rssi_avg,
1228 antcomb->total_pkt_count))
1229 antcomb->second_ratio = true;
1231 antcomb->second_ratio = false;
1233 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1234 (alt_rssi_avg > main_rssi_avg +
1235 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1236 (alt_rssi_avg > main_rssi_avg)) &&
1237 (antcomb->total_pkt_count > 50))
1238 antcomb->second_ratio = true;
1240 antcomb->second_ratio = false;
1243 /* set alt to the conf with maximum ratio */
1244 if (antcomb->first_ratio && antcomb->second_ratio) {
1245 if (antcomb->rssi_second > antcomb->rssi_third) {
1247 if ((antcomb->first_quick_scan_conf ==
1248 ATH_ANT_DIV_COMB_LNA1) ||
1249 (antcomb->first_quick_scan_conf ==
1250 ATH_ANT_DIV_COMB_LNA2))
1251 /* Set alt LNA1 or LNA2*/
1252 if (div_ant_conf->main_lna_conf ==
1253 ATH_ANT_DIV_COMB_LNA2)
1254 div_ant_conf->alt_lna_conf =
1255 ATH_ANT_DIV_COMB_LNA1;
1257 div_ant_conf->alt_lna_conf =
1258 ATH_ANT_DIV_COMB_LNA2;
1260 /* Set alt to A+B or A-B */
1261 div_ant_conf->alt_lna_conf =
1262 antcomb->first_quick_scan_conf;
1263 } else if ((antcomb->second_quick_scan_conf ==
1264 ATH_ANT_DIV_COMB_LNA1) ||
1265 (antcomb->second_quick_scan_conf ==
1266 ATH_ANT_DIV_COMB_LNA2)) {
1267 /* Set alt LNA1 or LNA2 */
1268 if (div_ant_conf->main_lna_conf ==
1269 ATH_ANT_DIV_COMB_LNA2)
1270 div_ant_conf->alt_lna_conf =
1271 ATH_ANT_DIV_COMB_LNA1;
1273 div_ant_conf->alt_lna_conf =
1274 ATH_ANT_DIV_COMB_LNA2;
1276 /* Set alt to A+B or A-B */
1277 div_ant_conf->alt_lna_conf =
1278 antcomb->second_quick_scan_conf;
1280 } else if (antcomb->first_ratio) {
1282 if ((antcomb->first_quick_scan_conf ==
1283 ATH_ANT_DIV_COMB_LNA1) ||
1284 (antcomb->first_quick_scan_conf ==
1285 ATH_ANT_DIV_COMB_LNA2))
1286 /* Set alt LNA1 or LNA2 */
1287 if (div_ant_conf->main_lna_conf ==
1288 ATH_ANT_DIV_COMB_LNA2)
1289 div_ant_conf->alt_lna_conf =
1290 ATH_ANT_DIV_COMB_LNA1;
1292 div_ant_conf->alt_lna_conf =
1293 ATH_ANT_DIV_COMB_LNA2;
1295 /* Set alt to A+B or A-B */
1296 div_ant_conf->alt_lna_conf =
1297 antcomb->first_quick_scan_conf;
1298 } else if (antcomb->second_ratio) {
1300 if ((antcomb->second_quick_scan_conf ==
1301 ATH_ANT_DIV_COMB_LNA1) ||
1302 (antcomb->second_quick_scan_conf ==
1303 ATH_ANT_DIV_COMB_LNA2))
1304 /* Set alt LNA1 or LNA2 */
1305 if (div_ant_conf->main_lna_conf ==
1306 ATH_ANT_DIV_COMB_LNA2)
1307 div_ant_conf->alt_lna_conf =
1308 ATH_ANT_DIV_COMB_LNA1;
1310 div_ant_conf->alt_lna_conf =
1311 ATH_ANT_DIV_COMB_LNA2;
1313 /* Set alt to A+B or A-B */
1314 div_ant_conf->alt_lna_conf =
1315 antcomb->second_quick_scan_conf;
1317 /* main is largest */
1318 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
1319 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
1320 /* Set alt LNA1 or LNA2 */
1321 if (div_ant_conf->main_lna_conf ==
1322 ATH_ANT_DIV_COMB_LNA2)
1323 div_ant_conf->alt_lna_conf =
1324 ATH_ANT_DIV_COMB_LNA1;
1326 div_ant_conf->alt_lna_conf =
1327 ATH_ANT_DIV_COMB_LNA2;
1329 /* Set alt to A+B or A-B */
1330 div_ant_conf->alt_lna_conf = antcomb->main_conf;
1338 static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
1339 struct ath_ant_comb *antcomb, int alt_ratio)
1341 if (ant_conf->div_group == 0) {
1342 /* Adjust the fast_div_bias based on main and alt lna conf */
1343 switch ((ant_conf->main_lna_conf << 4) |
1344 ant_conf->alt_lna_conf) {
1345 case 0x01: /* A-B LNA2 */
1346 ant_conf->fast_div_bias = 0x3b;
1348 case 0x02: /* A-B LNA1 */
1349 ant_conf->fast_div_bias = 0x3d;
1351 case 0x03: /* A-B A+B */
1352 ant_conf->fast_div_bias = 0x1;
1354 case 0x10: /* LNA2 A-B */
1355 ant_conf->fast_div_bias = 0x7;
1357 case 0x12: /* LNA2 LNA1 */
1358 ant_conf->fast_div_bias = 0x2;
1360 case 0x13: /* LNA2 A+B */
1361 ant_conf->fast_div_bias = 0x7;
1363 case 0x20: /* LNA1 A-B */
1364 ant_conf->fast_div_bias = 0x6;
1366 case 0x21: /* LNA1 LNA2 */
1367 ant_conf->fast_div_bias = 0x0;
1369 case 0x23: /* LNA1 A+B */
1370 ant_conf->fast_div_bias = 0x6;
1372 case 0x30: /* A+B A-B */
1373 ant_conf->fast_div_bias = 0x1;
1375 case 0x31: /* A+B LNA2 */
1376 ant_conf->fast_div_bias = 0x3b;
1378 case 0x32: /* A+B LNA1 */
1379 ant_conf->fast_div_bias = 0x3d;
1384 } else if (ant_conf->div_group == 1) {
1385 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1386 switch ((ant_conf->main_lna_conf << 4) |
1387 ant_conf->alt_lna_conf) {
1388 case 0x01: /* A-B LNA2 */
1389 ant_conf->fast_div_bias = 0x1;
1390 ant_conf->main_gaintb = 0;
1391 ant_conf->alt_gaintb = 0;
1393 case 0x02: /* A-B LNA1 */
1394 ant_conf->fast_div_bias = 0x1;
1395 ant_conf->main_gaintb = 0;
1396 ant_conf->alt_gaintb = 0;
1398 case 0x03: /* A-B A+B */
1399 ant_conf->fast_div_bias = 0x1;
1400 ant_conf->main_gaintb = 0;
1401 ant_conf->alt_gaintb = 0;
1403 case 0x10: /* LNA2 A-B */
1404 if (!(antcomb->scan) &&
1405 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1406 ant_conf->fast_div_bias = 0x3f;
1408 ant_conf->fast_div_bias = 0x1;
1409 ant_conf->main_gaintb = 0;
1410 ant_conf->alt_gaintb = 0;
1412 case 0x12: /* LNA2 LNA1 */
1413 ant_conf->fast_div_bias = 0x1;
1414 ant_conf->main_gaintb = 0;
1415 ant_conf->alt_gaintb = 0;
1417 case 0x13: /* LNA2 A+B */
1418 if (!(antcomb->scan) &&
1419 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1420 ant_conf->fast_div_bias = 0x3f;
1422 ant_conf->fast_div_bias = 0x1;
1423 ant_conf->main_gaintb = 0;
1424 ant_conf->alt_gaintb = 0;
1426 case 0x20: /* LNA1 A-B */
1427 if (!(antcomb->scan) &&
1428 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1429 ant_conf->fast_div_bias = 0x3f;
1431 ant_conf->fast_div_bias = 0x1;
1432 ant_conf->main_gaintb = 0;
1433 ant_conf->alt_gaintb = 0;
1435 case 0x21: /* LNA1 LNA2 */
1436 ant_conf->fast_div_bias = 0x1;
1437 ant_conf->main_gaintb = 0;
1438 ant_conf->alt_gaintb = 0;
1440 case 0x23: /* LNA1 A+B */
1441 if (!(antcomb->scan) &&
1442 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1443 ant_conf->fast_div_bias = 0x3f;
1445 ant_conf->fast_div_bias = 0x1;
1446 ant_conf->main_gaintb = 0;
1447 ant_conf->alt_gaintb = 0;
1449 case 0x30: /* A+B A-B */
1450 ant_conf->fast_div_bias = 0x1;
1451 ant_conf->main_gaintb = 0;
1452 ant_conf->alt_gaintb = 0;
1454 case 0x31: /* A+B LNA2 */
1455 ant_conf->fast_div_bias = 0x1;
1456 ant_conf->main_gaintb = 0;
1457 ant_conf->alt_gaintb = 0;
1459 case 0x32: /* A+B LNA1 */
1460 ant_conf->fast_div_bias = 0x1;
1461 ant_conf->main_gaintb = 0;
1462 ant_conf->alt_gaintb = 0;
1467 } else if (ant_conf->div_group == 2) {
1468 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1469 switch ((ant_conf->main_lna_conf << 4) |
1470 ant_conf->alt_lna_conf) {
1471 case 0x01: /* A-B LNA2 */
1472 ant_conf->fast_div_bias = 0x1;
1473 ant_conf->main_gaintb = 0;
1474 ant_conf->alt_gaintb = 0;
1476 case 0x02: /* A-B LNA1 */
1477 ant_conf->fast_div_bias = 0x1;
1478 ant_conf->main_gaintb = 0;
1479 ant_conf->alt_gaintb = 0;
1481 case 0x03: /* A-B A+B */
1482 ant_conf->fast_div_bias = 0x1;
1483 ant_conf->main_gaintb = 0;
1484 ant_conf->alt_gaintb = 0;
1486 case 0x10: /* LNA2 A-B */
1487 if (!(antcomb->scan) &&
1488 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1489 ant_conf->fast_div_bias = 0x1;
1491 ant_conf->fast_div_bias = 0x2;
1492 ant_conf->main_gaintb = 0;
1493 ant_conf->alt_gaintb = 0;
1495 case 0x12: /* LNA2 LNA1 */
1496 ant_conf->fast_div_bias = 0x1;
1497 ant_conf->main_gaintb = 0;
1498 ant_conf->alt_gaintb = 0;
1500 case 0x13: /* LNA2 A+B */
1501 if (!(antcomb->scan) &&
1502 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1503 ant_conf->fast_div_bias = 0x1;
1505 ant_conf->fast_div_bias = 0x2;
1506 ant_conf->main_gaintb = 0;
1507 ant_conf->alt_gaintb = 0;
1509 case 0x20: /* LNA1 A-B */
1510 if (!(antcomb->scan) &&
1511 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1512 ant_conf->fast_div_bias = 0x1;
1514 ant_conf->fast_div_bias = 0x2;
1515 ant_conf->main_gaintb = 0;
1516 ant_conf->alt_gaintb = 0;
1518 case 0x21: /* LNA1 LNA2 */
1519 ant_conf->fast_div_bias = 0x1;
1520 ant_conf->main_gaintb = 0;
1521 ant_conf->alt_gaintb = 0;
1523 case 0x23: /* LNA1 A+B */
1524 if (!(antcomb->scan) &&
1525 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1526 ant_conf->fast_div_bias = 0x1;
1528 ant_conf->fast_div_bias = 0x2;
1529 ant_conf->main_gaintb = 0;
1530 ant_conf->alt_gaintb = 0;
1532 case 0x30: /* A+B A-B */
1533 ant_conf->fast_div_bias = 0x1;
1534 ant_conf->main_gaintb = 0;
1535 ant_conf->alt_gaintb = 0;
1537 case 0x31: /* A+B LNA2 */
1538 ant_conf->fast_div_bias = 0x1;
1539 ant_conf->main_gaintb = 0;
1540 ant_conf->alt_gaintb = 0;
1542 case 0x32: /* A+B LNA1 */
1543 ant_conf->fast_div_bias = 0x1;
1544 ant_conf->main_gaintb = 0;
1545 ant_conf->alt_gaintb = 0;
1553 /* Antenna diversity and combining */
1554 static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1556 struct ath_hw_antcomb_conf div_ant_conf;
1557 struct ath_ant_comb *antcomb = &sc->ant_comb;
1558 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
1560 int main_rssi = rs->rs_rssi_ctl0;
1561 int alt_rssi = rs->rs_rssi_ctl1;
1562 int rx_ant_conf, main_ant_conf;
1563 bool short_scan = false;
1565 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
1567 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
1570 /* Record the packet only when both main_rssi and alt_rssi are positive */
1571 if (main_rssi > 0 && alt_rssi > 0) {
1572 antcomb->total_pkt_count++;
1573 antcomb->main_total_rssi += main_rssi;
1574 antcomb->alt_total_rssi += alt_rssi;
1575 if (main_ant_conf == rx_ant_conf)
1576 antcomb->main_recv_cnt++;
1578 antcomb->alt_recv_cnt++;
1581 /* Short scan check */
1582 if (antcomb->scan && antcomb->alt_good) {
1583 if (time_after(jiffies, antcomb->scan_start_time +
1584 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
1587 if (antcomb->total_pkt_count ==
1588 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
1589 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1590 antcomb->total_pkt_count);
1591 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
1596 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
1597 rs->rs_moreaggr) && !short_scan)
1600 if (antcomb->total_pkt_count) {
1601 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1602 antcomb->total_pkt_count);
1603 main_rssi_avg = (antcomb->main_total_rssi /
1604 antcomb->total_pkt_count);
1605 alt_rssi_avg = (antcomb->alt_total_rssi /
1606 antcomb->total_pkt_count);
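/*
 * alt_ratio is the percentage of sampled packets received on the alternative
 * chain; the RSSI averages are plain means over the same sample window.
 */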
1610 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
1611 curr_alt_set = div_ant_conf.alt_lna_conf;
1612 curr_main_set = div_ant_conf.main_lna_conf;
1616 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
1617 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1618 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
1620 antcomb->alt_good = true;
1622 antcomb->alt_good = false;
1626 antcomb->scan = true;
1627 antcomb->scan_not_start = true;
1630 if (!antcomb->scan) {
1631 if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
1632 alt_ratio, curr_main_set, curr_alt_set,
1633 alt_rssi_avg, main_rssi_avg)) {
1634 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
1635 /* Switch main and alt LNA */
1636 div_ant_conf.main_lna_conf =
1637 ATH_ANT_DIV_COMB_LNA2;
1638 div_ant_conf.alt_lna_conf =
1639 ATH_ANT_DIV_COMB_LNA1;
1640 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
1641 div_ant_conf.main_lna_conf =
1642 ATH_ANT_DIV_COMB_LNA1;
1643 div_ant_conf.alt_lna_conf =
1644 ATH_ANT_DIV_COMB_LNA2;
1648 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
1649 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
1650 /* Set alt to another LNA */
1651 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
1652 div_ant_conf.alt_lna_conf =
1653 ATH_ANT_DIV_COMB_LNA1;
1654 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
1655 div_ant_conf.alt_lna_conf =
1656 ATH_ANT_DIV_COMB_LNA2;
1661 if ((alt_rssi_avg < (main_rssi_avg +
1662 div_ant_conf.lna1_lna2_delta)))
1666 if (!antcomb->scan_not_start) {
1667 switch (curr_alt_set) {
1668 case ATH_ANT_DIV_COMB_LNA2:
1669 antcomb->rssi_lna2 = alt_rssi_avg;
1670 antcomb->rssi_lna1 = main_rssi_avg;
1671 antcomb->scan = true;
1673 div_ant_conf.main_lna_conf =
1674 ATH_ANT_DIV_COMB_LNA1;
1675 div_ant_conf.alt_lna_conf =
1676 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1678 case ATH_ANT_DIV_COMB_LNA1:
1679 antcomb->rssi_lna1 = alt_rssi_avg;
1680 antcomb->rssi_lna2 = main_rssi_avg;
1681 antcomb->scan = true;
1683 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1684 div_ant_conf.alt_lna_conf =
1685 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1687 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
1688 antcomb->rssi_add = alt_rssi_avg;
1689 antcomb->scan = true;
1691 div_ant_conf.alt_lna_conf =
1692 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1694 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
1695 antcomb->rssi_sub = alt_rssi_avg;
1696 antcomb->scan = false;
1697 if (antcomb->rssi_lna2 >
1698 (antcomb->rssi_lna1 +
1699 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
1700 /* use LNA2 as main LNA */
1701 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
1702 (antcomb->rssi_add > antcomb->rssi_sub)) {
1704 div_ant_conf.main_lna_conf =
1705 ATH_ANT_DIV_COMB_LNA2;
1706 div_ant_conf.alt_lna_conf =
1707 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1708 } else if (antcomb->rssi_sub >
1709 antcomb->rssi_lna1) {
1711 div_ant_conf.main_lna_conf =
1712 ATH_ANT_DIV_COMB_LNA2;
1713 div_ant_conf.alt_lna_conf =
1714 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1717 div_ant_conf.main_lna_conf =
1718 ATH_ANT_DIV_COMB_LNA2;
1719 div_ant_conf.alt_lna_conf =
1720 ATH_ANT_DIV_COMB_LNA1;
1723 /* use LNA1 as main LNA */
1724 if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
1725 (antcomb->rssi_add > antcomb->rssi_sub)) {
1727 div_ant_conf.main_lna_conf =
1728 ATH_ANT_DIV_COMB_LNA1;
1729 div_ant_conf.alt_lna_conf =
1730 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1731 } else if (antcomb->rssi_sub >
1732 antcomb->rssi_lna1) {
1734 div_ant_conf.main_lna_conf =
1735 ATH_ANT_DIV_COMB_LNA1;
1736 div_ant_conf.alt_lna_conf =
1737 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1740 div_ant_conf.main_lna_conf =
1741 ATH_ANT_DIV_COMB_LNA1;
1742 div_ant_conf.alt_lna_conf =
1743 ATH_ANT_DIV_COMB_LNA2;
1751 if (!antcomb->alt_good) {
1752 antcomb->scan_not_start = false;
1753 /* Set alt to another LNA */
1754 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
1755 div_ant_conf.main_lna_conf =
1756 ATH_ANT_DIV_COMB_LNA2;
1757 div_ant_conf.alt_lna_conf =
1758 ATH_ANT_DIV_COMB_LNA1;
1759 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
1760 div_ant_conf.main_lna_conf =
1761 ATH_ANT_DIV_COMB_LNA1;
1762 div_ant_conf.alt_lna_conf =
1763 ATH_ANT_DIV_COMB_LNA2;
1769 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
1770 main_rssi_avg, alt_rssi_avg,
1773 antcomb->quick_scan_cnt++;
1776 ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
1777 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
1779 antcomb->scan_start_time = jiffies;
1780 antcomb->total_pkt_count = 0;
1781 antcomb->main_total_rssi = 0;
1782 antcomb->alt_total_rssi = 0;
1783 antcomb->main_recv_cnt = 0;
1784 antcomb->alt_recv_cnt = 0;
1787 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1790 struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
1791 struct ieee80211_rx_status *rxs;
1792 struct ath_hw *ah = sc->sc_ah;
1793 struct ath_common *common = ath9k_hw_common(ah);
1795 * The hw can technically differ from common->hw when using ath9k
1796 * virtual wiphy so to account for that we iterate over the active
1797 * wiphys and find the appropriate wiphy and therefore hw.
1799 struct ieee80211_hw *hw = sc->hw;
1800 struct ieee80211_hdr *hdr;
1802 bool decrypt_error = false;
1803 struct ath_rx_status rs;
1804 enum ath9k_rx_qtype qtype;
1805 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
1807 u8 rx_status_len = ah->caps.rx_status_len;
1810 unsigned long flags;
1813 dma_type = DMA_BIDIRECTIONAL;
1815 dma_type = DMA_FROM_DEVICE;
1817 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
1818 spin_lock_bh(&sc->rx.rxbuflock);
1820 tsf = ath9k_hw_gettsf64(ah);
1821 tsf_lower = tsf & 0xffffffff;
1824 /* If handling rx interrupt and flush is in progress => exit */
1825 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
1828 memset(&rs, 0, sizeof(rs));
1830 bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
1832 bf = ath_get_next_rx_buf(sc, &rs);
1842 * Take frame header from the first fragment and RX status from the last one.
1846 hdr_skb = sc->rx.frag;
1850 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1851 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1853 ath_debug_stat_rx(sc, &rs);
1856 * If we're asked to flush receive queue, directly
1857 * chain it back at the queue without processing it.
1860 goto requeue_drop_frag;
1862 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1863 rxs, &decrypt_error);
1865 goto requeue_drop_frag;
1867 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
1868 if (rs.rs_tstamp > tsf_lower &&
1869 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
1870 rxs->mactime -= 0x100000000ULL;
1872 if (rs.rs_tstamp < tsf_lower &&
1873 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1874 rxs->mactime += 0x100000000ULL;
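/*
 * rs_tstamp carries only the low 32 bits of the TSF; the two adjustments
 * above repair the 64-bit mactime when the timestamp and the TSF snapshot
 * fall on opposite sides of a 32-bit wraparound.
 */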
1876 /* Ensure we always have an skb to requeue once we are done
1877 * processing the current buffer's skb */
1878 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
1880 /* If there is no memory we ignore the current RX'd frame,
1881 * tell hardware it can give us a new frame using the old
1882 * skb and put it at the tail of the sc->rx.rxbuf list for processing.
1885 goto requeue_drop_frag;
1887 /* Unmap the frame */
1888 dma_unmap_single(sc->dev, bf->bf_buf_addr,
1892 skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
1893 if (ah->caps.rx_status_len)
1894 skb_pull(skb, ah->caps.rx_status_len);
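/*
 * On EDMA chips the in-band status block precedes the frame in the buffer;
 * it is counted in the skb_put() above and then pulled off so skb->data
 * points at the 802.11 header.
 */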
1897 ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
1898 rxs, decrypt_error);
1900 /* We will now give hardware our shiny new allocated skb */
1901 bf->bf_mpdu = requeue_skb;
1902 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
1905 if (unlikely(dma_mapping_error(sc->dev,
1906 bf->bf_buf_addr))) {
1907 dev_kfree_skb_any(requeue_skb);
1909 bf->bf_buf_addr = 0;
1910 ath_err(common, "dma_mapping_error() on RX\n");
1911 ieee80211_rx(hw, skb);
1917 * rs_more indicates chained descriptors which can be
1918 * used to link buffers together for a sort of
1919 * scatter-gather operation.
1922 /* too many fragments - cannot handle frame */
1923 dev_kfree_skb_any(sc->rx.frag);
1924 dev_kfree_skb_any(skb);
1932 int space = skb->len - skb_tailroom(hdr_skb);
1936 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
1938 goto requeue_drop_frag;
1941 skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
1943 dev_kfree_skb_any(skb);
1948 * change the default rx antenna if rx diversity chooses the
1949 * other antenna 3 times in a row.
1951 if (sc->rx.defant != rs.rs_antenna) {
1952 if (++sc->rx.rxotherant >= 3)
1953 ath_setdefantenna(sc, rs.rs_antenna);
1955 sc->rx.rxotherant = 0;
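/*
 * RX_FLAG_MMIC_STRIPPED was set in ath9k_rx_accept() for clean TKIP frames;
 * trim the 8-byte Michael MIC off the tail here so mac80211 never sees it.
 */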
1958 if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
1959 skb_trim(skb, skb->len - 8);
1961 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1963 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
1965 PS_WAIT_FOR_PSPOLL_DATA)) ||
1966 ath9k_check_auto_sleep(sc))
1968 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1970 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
1971 ath_ant_comb_scan(sc, &rs);
1973 ieee80211_rx(hw, skb);
1977 dev_kfree_skb_any(sc->rx.frag);
1982 list_add_tail(&bf->list, &sc->rx.rxbuf);
1983 ath_rx_edma_buf_link(sc, qtype);
1985 list_move_tail(&bf->list, &sc->rx.rxbuf);
1986 ath_rx_buf_link(sc, bf);
1991 spin_unlock_bh(&sc->rx.rxbuflock);