2 * Copyright (c) 2008-2011 Atheros Communications Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/dma-mapping.h>
19 #include "ar9003_mac.h"
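/*
 * Stash the owning ath_buf pointer in the skb control buffer so the EDMA
 * receive path can map a dequeued skb back to its buffer.
 */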
21 #define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
23 static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
24 int mindelta, int main_rssi_avg,
25 int alt_rssi_avg, int pkt_count)
27 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
28 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
29 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
32 static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
33 int curr_main_set, int curr_alt_set,
34 int alt_rssi_avg, int main_rssi_avg)
39 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
44 if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
45 (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
46 (alt_rssi_avg >= (main_rssi_avg - 5))) ||
47 ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
48 (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
49 (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
60 static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
62 return sc->ps_enabled &&
63 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
67 * Setup and link descriptors.
69 * 11N: we can no longer afford to self-link the last descriptor.
70 * The MAC acknowledges BA status as soon as it has copied frames to the
71 * host buffer (or rx fifo). If the last descriptor is self-linked, this
72 * can acknowledge packets to the sender that were never delivered.
74 static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
76 struct ath_hw *ah = sc->sc_ah;
77 struct ath_common *common = ath9k_hw_common(ah);
82 ds->ds_link = 0; /* link to null */
83 ds->ds_data = bf->bf_buf_addr;
85 /* virtual addr of the beginning of the buffer. */
88 ds->ds_vdata = skb->data;
91 * setup rx descriptors. The rx_bufsize here tells the hardware
92 * how much data it can DMA to us and that we are prepared
93 * to process the contents of the buffer.
95 ath9k_hw_setuprxdesc(ah, ds,
99 if (sc->rx.rxlink == NULL)
100 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
102 *sc->rx.rxlink = bf->bf_daddr;
104 sc->rx.rxlink = &ds->ds_link;
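/*
 * Requeue via a one-buffer holding area: link the previously held buffer
 * into the rx chain and hold back the current one, so the last descriptor
 * given to the hardware is never self-linked.
 */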
107 static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
110 ath_rx_buf_link(sc, sc->rx.buf_hold);
112 sc->rx.buf_hold = bf;
115 static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
117 /* XXX block beacon interrupts */
118 ath9k_hw_setantenna(sc->sc_ah, antenna);
119 sc->rx.defant = antenna;
120 sc->rx.rxotherant = 0;
123 static void ath_opmode_init(struct ath_softc *sc)
125 struct ath_hw *ah = sc->sc_ah;
126 struct ath_common *common = ath9k_hw_common(ah);
130 /* configure rx filter */
131 rfilt = ath_calcrxfilter(sc);
132 ath9k_hw_setrxfilter(ah, rfilt);
134 /* configure bssid mask */
135 ath_hw_setbssidmask(common);
137 /* configure operational mode */
138 ath9k_hw_setopmode(ah);
140 /* calculate and install multicast filter */
141 mfilt[0] = mfilt[1] = ~0;
142 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
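/*
 * EDMA receive (AR9003 and later): the hardware prepends a self-describing
 * rx status block to each buffer, so instead of a linked descriptor chain
 * the driver keeps per-priority (HP/LP) FIFOs of buffers and hands them to
 * the MAC in order.
 */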
145 static bool ath_rx_edma_buf_link(struct ath_softc *sc,
146 enum ath9k_rx_qtype qtype)
148 struct ath_hw *ah = sc->sc_ah;
149 struct ath_rx_edma *rx_edma;
153 rx_edma = &sc->rx.rx_edma[qtype];
154 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
157 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
158 list_del_init(&bf->list);
162 memset(skb->data, 0, ah->caps.rx_status_len);
163 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
164 ah->caps.rx_status_len, DMA_TO_DEVICE);
166 SKB_CB_ATHBUF(skb) = bf;
167 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
168 skb_queue_tail(&rx_edma->rx_fifo, skb);
173 static void ath_rx_addbuffer_edma(struct ath_softc *sc,
174 enum ath9k_rx_qtype qtype, int size)
176 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
179 if (list_empty(&sc->rx.rxbuf)) {
180 ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
184 while (!list_empty(&sc->rx.rxbuf)) {
187 if (!ath_rx_edma_buf_link(sc, qtype))
195 static void ath_rx_remove_buffer(struct ath_softc *sc,
196 enum ath9k_rx_qtype qtype)
199 struct ath_rx_edma *rx_edma;
202 rx_edma = &sc->rx.rx_edma[qtype];
204 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
205 bf = SKB_CB_ATHBUF(skb);
207 list_add_tail(&bf->list, &sc->rx.rxbuf);
211 static void ath_rx_edma_cleanup(struct ath_softc *sc)
213 struct ath_hw *ah = sc->sc_ah;
214 struct ath_common *common = ath9k_hw_common(ah);
217 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
218 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
220 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
222 dma_unmap_single(sc->dev, bf->bf_buf_addr,
225 dev_kfree_skb_any(bf->bf_mpdu);
231 INIT_LIST_HEAD(&sc->rx.rxbuf);
233 kfree(sc->rx.rx_bufptr);
234 sc->rx.rx_bufptr = NULL;
237 static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
239 skb_queue_head_init(&rx_edma->rx_fifo);
240 skb_queue_head_init(&rx_edma->rx_buffers);
241 rx_edma->rx_fifo_hwsize = size;
244 static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
246 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
247 struct ath_hw *ah = sc->sc_ah;
253 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
254 ah->caps.rx_status_len);
256 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
257 ah->caps.rx_lp_qdepth);
258 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
259 ah->caps.rx_hp_qdepth);
261 size = sizeof(struct ath_buf) * nbufs;
262 bf = kzalloc(size, GFP_KERNEL);
266 INIT_LIST_HEAD(&sc->rx.rxbuf);
267 sc->rx.rx_bufptr = bf;
269 for (i = 0; i < nbufs; i++, bf++) {
270 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
276 memset(skb->data, 0, common->rx_bufsize);
279 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
282 if (unlikely(dma_mapping_error(sc->dev,
284 dev_kfree_skb_any(skb);
288 "dma_mapping_error() on RX init\n");
293 list_add_tail(&bf->list, &sc->rx.rxbuf);
299 ath_rx_edma_cleanup(sc);
303 static void ath_edma_start_recv(struct ath_softc *sc)
305 spin_lock_bh(&sc->rx.rxbuflock);
307 ath9k_hw_rxena(sc->sc_ah);
309 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
310 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
312 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
313 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
317 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
319 spin_unlock_bh(&sc->rx.rxbuflock);
322 static void ath_edma_stop_recv(struct ath_softc *sc)
324 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
325 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
328 int ath_rx_init(struct ath_softc *sc, int nbufs)
330 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
335 spin_lock_init(&sc->sc_pcu_lock);
336 sc->sc_flags &= ~SC_OP_RXFLUSH;
337 spin_lock_init(&sc->rx.rxbuflock);
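/*
 * Each rx buffer holds up to half of the maximum 802.11 MPDU plus the
 * self-describing rx status block (rx_status_len is zero on pre-EDMA
 * hardware).
 */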
339 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
340 sc->sc_ah->caps.rx_status_len;
342 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
343 return ath_rx_edma_init(sc, nbufs);
345 ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
346 common->cachelsz, common->rx_bufsize);
348 /* Initialize rx descriptors */
350 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
354 "failed to allocate rx descriptors: %d\n",
359 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
360 skb = ath_rxbuf_alloc(common, common->rx_bufsize,
368 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
371 if (unlikely(dma_mapping_error(sc->dev,
373 dev_kfree_skb_any(skb);
377 "dma_mapping_error() on RX init\n");
382 sc->rx.rxlink = NULL;
392 void ath_rx_cleanup(struct ath_softc *sc)
394 struct ath_hw *ah = sc->sc_ah;
395 struct ath_common *common = ath9k_hw_common(ah);
399 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
400 ath_rx_edma_cleanup(sc);
403 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
406 dma_unmap_single(sc->dev, bf->bf_buf_addr,
415 if (sc->rx.rxdma.dd_desc_len != 0)
416 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
421 * Calculate the receive filter according to the
422 * operating mode and state:
424 * o always accept unicast, broadcast, and multicast traffic
425 * o maintain current state of phy error reception (the hal
426 * may enable phy error frames for noise immunity work)
427 * o probe request frames are accepted only when operating in
428 * hostap, adhoc, or monitor modes
429 * o enable promiscuous mode according to the interface state
431 * - when operating in adhoc mode so the 802.11 layer creates
432 * node table entries for peers,
433 * - when operating in station mode for collecting rssi data when
434 * the station is otherwise quiet, or
435 * - when operating as a repeater so we see repeater-sta beacons
439 u32 ath_calcrxfilter(struct ath_softc *sc)
443 rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
444 | ATH9K_RX_FILTER_MCAST;
446 if (sc->rx.rxfilter & FIF_PROBE_REQ)
447 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
450 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
451 * mode interface or when in monitor mode. AP mode does not need this
452 * since it receives all in-BSS frames anyway.
454 if (sc->sc_ah->is_monitoring)
455 rfilt |= ATH9K_RX_FILTER_PROM;
457 if (sc->rx.rxfilter & FIF_CONTROL)
458 rfilt |= ATH9K_RX_FILTER_CONTROL;
460 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
462 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
463 rfilt |= ATH9K_RX_FILTER_MYBEACON;
465 rfilt |= ATH9K_RX_FILTER_BEACON;
467 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
468 (sc->rx.rxfilter & FIF_PSPOLL))
469 rfilt |= ATH9K_RX_FILTER_PSPOLL;
471 if (conf_is_ht(&sc->hw->conf))
472 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
474 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
475 /* The following may also be needed for other older chips */
476 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
477 rfilt |= ATH9K_RX_FILTER_PROM;
478 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
483 #undef RX_FILTER_PRESERVE
486 int ath_startrecv(struct ath_softc *sc)
488 struct ath_hw *ah = sc->sc_ah;
489 struct ath_buf *bf, *tbf;
491 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
492 ath_edma_start_recv(sc);
496 spin_lock_bh(&sc->rx.rxbuflock);
497 if (list_empty(&sc->rx.rxbuf))
500 sc->rx.buf_hold = NULL;
501 sc->rx.rxlink = NULL;
502 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
503 ath_rx_buf_link(sc, bf);
506 /* We could have deleted elements so the list may be empty now */
507 if (list_empty(&sc->rx.rxbuf))
510 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
511 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
516 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
518 spin_unlock_bh(&sc->rx.rxbuflock);
523 bool ath_stoprecv(struct ath_softc *sc)
525 struct ath_hw *ah = sc->sc_ah;
526 bool stopped, reset = false;
528 spin_lock_bh(&sc->rx.rxbuflock);
529 ath9k_hw_abortpcurecv(ah);
530 ath9k_hw_setrxfilter(ah, 0);
531 stopped = ath9k_hw_stopdmarecv(ah, &reset);
533 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
534 ath_edma_stop_recv(sc);
536 sc->rx.rxlink = NULL;
537 spin_unlock_bh(&sc->rx.rxbuflock);
539 if (!(ah->ah_flags & AH_UNPLUGGED) &&
540 unlikely(!stopped)) {
541 ath_err(ath9k_hw_common(sc->sc_ah),
542 "Could not stop RX, we could be "
543 "confusing the DMA engine when we start RX up\n");
544 ATH_DBG_WARN_ON_ONCE(!stopped);
546 return stopped && !reset;
549 void ath_flushrecv(struct ath_softc *sc)
551 sc->sc_flags |= SC_OP_RXFLUSH;
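/* Drain the high priority EDMA queue first, then the LP/legacy queue. */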
552 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
553 ath_rx_tasklet(sc, 1, true);
554 ath_rx_tasklet(sc, 1, false);
555 sc->sc_flags &= ~SC_OP_RXFLUSH;
558 static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
560 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
561 struct ieee80211_mgmt *mgmt;
562 u8 *pos, *end, id, elen;
563 struct ieee80211_tim_ie *tim;
565 mgmt = (struct ieee80211_mgmt *)skb->data;
566 pos = mgmt->u.beacon.variable;
567 end = skb->data + skb->len;
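/*
 * Walk the beacon's information elements looking for the TIM IE; once the
 * DTIM count reaches zero, the low bit of bitmap_ctrl indicates whether the
 * AP has buffered broadcast/multicast frames to deliver.
 */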
569 while (pos + 2 < end) {
572 if (pos + elen > end)
575 if (id == WLAN_EID_TIM) {
576 if (elen < sizeof(*tim))
578 tim = (struct ieee80211_tim_ie *) pos;
579 if (tim->dtim_count != 0)
581 return tim->bitmap_ctrl & 0x01;
590 static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
592 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
594 if (skb->len < 24 + 8 + 2 + 2)
597 sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
599 if (sc->ps_flags & PS_BEACON_SYNC) {
600 sc->ps_flags &= ~PS_BEACON_SYNC;
601 ath_dbg(common, ATH_DBG_PS,
602 "Reconfigure Beacon timers based on timestamp from the AP\n");
606 if (ath_beacon_dtim_pending_cab(skb)) {
608 * Remain awake waiting for buffered broadcast/multicast
609 * frames. If the last broadcast/multicast frame is not
610 * received properly, the next beacon frame will work as
611 * a backup trigger for returning into NETWORK SLEEP state,
612 * so we are waiting for it as well.
614 ath_dbg(common, ATH_DBG_PS,
615 "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
616 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
620 if (sc->ps_flags & PS_WAIT_FOR_CAB) {
622 * This can happen if a broadcast frame is dropped or the AP
623 * fails to send a frame indicating that all CAB frames have
624 * been delivered.
626 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
627 ath_dbg(common, ATH_DBG_PS,
628 "PS wait for CAB frames timed out\n");
632 static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
634 struct ieee80211_hdr *hdr;
635 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
637 hdr = (struct ieee80211_hdr *)skb->data;
639 /* Process Beacon and CAB receive in PS state */
640 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
642 ath_rx_ps_beacon(sc, skb);
643 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
644 (ieee80211_is_data(hdr->frame_control) ||
645 ieee80211_is_action(hdr->frame_control)) &&
646 is_multicast_ether_addr(hdr->addr1) &&
647 !ieee80211_has_moredata(hdr->frame_control)) {
649 * No more broadcast/multicast frames to be received at this
650 * point.
652 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
653 ath_dbg(common, ATH_DBG_PS,
654 "All PS CAB frames received, back to sleep\n");
655 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
656 !is_multicast_ether_addr(hdr->addr1) &&
657 !ieee80211_has_morefrags(hdr->frame_control)) {
658 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
659 ath_dbg(common, ATH_DBG_PS,
660 "Going back to sleep after having received PS-Poll data (0x%lx)\n",
661 sc->ps_flags & (PS_WAIT_FOR_BEACON |
663 PS_WAIT_FOR_PSPOLL_DATA |
664 PS_WAIT_FOR_TX_ACK));
668 static bool ath_edma_get_buffers(struct ath_softc *sc,
669 enum ath9k_rx_qtype qtype)
671 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
672 struct ath_hw *ah = sc->sc_ah;
673 struct ath_common *common = ath9k_hw_common(ah);
678 skb = skb_peek(&rx_edma->rx_fifo);
682 bf = SKB_CB_ATHBUF(skb);
685 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
686 common->rx_bufsize, DMA_FROM_DEVICE);
688 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
689 if (ret == -EINPROGRESS) {
690 /* let the hardware own the buffer again */
691 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
692 common->rx_bufsize, DMA_FROM_DEVICE);
696 __skb_unlink(skb, &rx_edma->rx_fifo);
697 if (ret == -EINVAL) {
698 /* corrupt descriptor, skip this one and the following one */
699 list_add_tail(&bf->list, &sc->rx.rxbuf);
700 ath_rx_edma_buf_link(sc, qtype);
701 skb = skb_peek(&rx_edma->rx_fifo);
705 bf = SKB_CB_ATHBUF(skb);
708 __skb_unlink(skb, &rx_edma->rx_fifo);
709 list_add_tail(&bf->list, &sc->rx.rxbuf);
710 ath_rx_edma_buf_link(sc, qtype);
713 skb_queue_tail(&rx_edma->rx_buffers, skb);
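/*
 * Drain every completed buffer from the rx FIFO into rx_buffers, then hand
 * back the oldest one along with its decoded rx status.
 */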
718 static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
719 struct ath_rx_status *rs,
720 enum ath9k_rx_qtype qtype)
722 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
726 while (ath_edma_get_buffers(sc, qtype));
727 skb = __skb_dequeue(&rx_edma->rx_buffers);
731 bf = SKB_CB_ATHBUF(skb);
732 ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
736 static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
737 struct ath_rx_status *rs)
739 struct ath_hw *ah = sc->sc_ah;
740 struct ath_common *common = ath9k_hw_common(ah);
745 if (list_empty(&sc->rx.rxbuf)) {
746 sc->rx.rxlink = NULL;
750 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
751 if (bf == sc->rx.buf_hold)
757 * Must provide the virtual address of the current
758 * descriptor, the physical address, and the virtual
759 * address of the next descriptor in the h/w chain.
760 * This allows the HAL to look ahead to see if the
761 * hardware is done with a descriptor by checking the
762 * done bit in the following descriptor and the address
763 * of the current descriptor the DMA engine is working
764 * on. All this is necessary because of our use of
765 * a self-linked list to avoid rx overruns.
767 ret = ath9k_hw_rxprocdesc(ah, ds, rs);
768 if (ret == -EINPROGRESS) {
769 struct ath_rx_status trs;
771 struct ath_desc *tds;
773 memset(&trs, 0, sizeof(trs));
774 if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
775 sc->rx.rxlink = NULL;
779 tbf = list_entry(bf->list.next, struct ath_buf, list);
782 * On some hardware the descriptor status words could
783 * get corrupted, including the done bit. Because of
784 * this, check if the next descriptor's done bit is
785 * set or not.
787 * If the next descriptor's done bit is set, the current
788 * descriptor has been corrupted. Force s/w to discard
789 * this descriptor and continue...
793 ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
794 if (ret == -EINPROGRESS)
803 * Synchronize the DMA transfer with CPU before
804 * 1. accessing the frame
805 * 2. requeueing the same buffer to h/w
807 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
814 /* Assumes you've already done the endian to CPU conversion */
815 static bool ath9k_rx_accept(struct ath_common *common,
816 struct ieee80211_hdr *hdr,
817 struct ieee80211_rx_status *rxs,
818 struct ath_rx_status *rx_stats,
821 struct ath_softc *sc = (struct ath_softc *) common->priv;
822 bool is_mc, is_valid_tkip, strip_mic, mic_error;
823 struct ath_hw *ah = common->ah;
825 u8 rx_status_len = ah->caps.rx_status_len;
827 fc = hdr->frame_control;
829 is_mc = !!is_multicast_ether_addr(hdr->addr1);
830 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
831 test_bit(rx_stats->rs_keyix, common->tkip_keymap);
832 strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
833 !(rx_stats->rs_status &
834 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
835 ATH9K_RXERR_KEYMISS));
838 * Key miss events are only relevant for pairwise keys where the
839 * descriptor does contain a valid key index. This has been observed
840 * mostly with CCMP encryption.
842 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
843 !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
844 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
846 if (!rx_stats->rs_datalen)
849 * rs_status follows rs_datalen so if rs_datalen is too large
850 * we can take a hint that hardware corrupted it, so ignore
851 * those frames.
853 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
856 /* Only use error bits from the last fragment */
857 if (rx_stats->rs_more)
860 mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
861 !ieee80211_has_morefrags(fc) &&
862 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
863 (rx_stats->rs_status & ATH9K_RXERR_MIC);
866 * The rx_stats->rs_status will not be set until the end of the
867 * chained descriptors so it can be ignored if rs_more is set. The
868 * rs_more will be false at the last element of the chained
869 * descriptors.
871 if (rx_stats->rs_status != 0) {
874 if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
875 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
878 if (rx_stats->rs_status & ATH9K_RXERR_PHY)
881 if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
882 (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
883 *decrypt_error = true;
888 * Reject error frames with the exception of
889 * decryption and MIC failures. For monitor mode,
890 * we also ignore the CRC error.
892 status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
895 if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
896 status_mask |= ATH9K_RXERR_CRC;
898 if (rx_stats->rs_status & ~status_mask)
903 * For unicast frames the MIC error bit can have false positives,
904 * so all MIC error reports need to be validated in software.
905 * False negatives are not common, so skip software verification
906 * if the hardware considers the MIC valid.
909 rxs->flag |= RX_FLAG_MMIC_STRIPPED;
910 else if (is_mc && mic_error)
911 rxs->flag |= RX_FLAG_MMIC_ERROR;
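/*
 * Map the hardware rate code to a mac80211 rate index: MCS rates have bit 7
 * set and carry the MCS number in the low bits, while legacy rates are
 * looked up in the current band's bitrate table.
 */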
916 static int ath9k_process_rate(struct ath_common *common,
917 struct ieee80211_hw *hw,
918 struct ath_rx_status *rx_stats,
919 struct ieee80211_rx_status *rxs)
921 struct ieee80211_supported_band *sband;
922 enum ieee80211_band band;
925 band = hw->conf.channel->band;
926 sband = hw->wiphy->bands[band];
928 if (rx_stats->rs_rate & 0x80) {
930 rxs->flag |= RX_FLAG_HT;
931 if (rx_stats->rs_flags & ATH9K_RX_2040)
932 rxs->flag |= RX_FLAG_40MHZ;
933 if (rx_stats->rs_flags & ATH9K_RX_GI)
934 rxs->flag |= RX_FLAG_SHORT_GI;
935 rxs->rate_idx = rx_stats->rs_rate & 0x7f;
939 for (i = 0; i < sband->n_bitrates; i++) {
940 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
944 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
945 rxs->flag |= RX_FLAG_SHORTPRE;
952 * No valid hardware bitrate found -- we should not get here
953 * because hardware has already validated this frame as OK.
955 ath_dbg(common, ATH_DBG_ANY,
956 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
962 static void ath9k_process_rssi(struct ath_common *common,
963 struct ieee80211_hw *hw,
964 struct ieee80211_hdr *hdr,
965 struct ath_rx_status *rx_stats)
967 struct ath_softc *sc = hw->priv;
968 struct ath_hw *ah = common->ah;
971 if (!rx_stats->is_mybeacon ||
972 ((ah->opmode != NL80211_IFTYPE_STATION) &&
973 (ah->opmode != NL80211_IFTYPE_ADHOC)))
976 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
977 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
979 last_rssi = sc->last_rssi;
980 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
981 rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
982 ATH_RSSI_EP_MULTIPLIER);
983 if (rx_stats->rs_rssi < 0)
984 rx_stats->rs_rssi = 0;
986 /* Update Beacon RSSI, this is used by ANI. */
987 ah->stats.avgbrssi = rx_stats->rs_rssi;
991 * For Decrypt or Demic errors, we only mark the packet status here and push
992 * the frame up to let mac80211 handle the actual error case, be it a missing
993 * decryption key or a real decryption error. This lets us keep statistics there.
995 static int ath9k_rx_skb_preprocess(struct ath_common *common,
996 struct ieee80211_hw *hw,
997 struct ieee80211_hdr *hdr,
998 struct ath_rx_status *rx_stats,
999 struct ieee80211_rx_status *rx_status,
1000 bool *decrypt_error)
1002 struct ath_hw *ah = common->ah;
1004 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
1007 * everything but the rate is checked here; the rate check is done
1008 * separately to avoid a second rate lookup for each frame.
1010 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
1013 /* Only use status info from the last fragment */
1014 if (rx_stats->rs_more)
1017 ath9k_process_rssi(common, hw, hdr, rx_stats);
1019 if (ath9k_process_rate(common, hw, rx_stats, rx_status))
1022 rx_status->band = hw->conf.channel->band;
1023 rx_status->freq = hw->conf.channel->center_freq;
1024 rx_status->signal = ah->noise + rx_stats->rs_rssi;
1025 rx_status->antenna = rx_stats->rs_antenna;
1026 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
1031 static void ath9k_rx_skb_postprocess(struct ath_common *common,
1032 struct sk_buff *skb,
1033 struct ath_rx_status *rx_stats,
1034 struct ieee80211_rx_status *rxs,
1037 struct ath_hw *ah = common->ah;
1038 struct ieee80211_hdr *hdr;
1039 int hdrlen, padpos, padsize;
1043 /* see if any padding is done by the hw and remove it */
1044 hdr = (struct ieee80211_hdr *) skb->data;
1045 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1046 fc = hdr->frame_control;
1047 padpos = ath9k_cmn_padpos(hdr->frame_control);
1049 /* The MAC header is padded to have 32-bit boundary if the
1050 * packet payload is non-zero. The general calculation for
1051 * padsize would take into account odd header lengths:
1052 * padsize = (4 - padpos % 4) % 4; However, since only
1053 * even-length headers are used, padding can only be 0 or 2
1054 * bytes and we can optimize this a bit. In addition, we must
1055 * not try to remove padding from short control frames that do
1056 * not have payload. */
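/* For example, a QoS data header gives padpos = 26, so padsize = 2 and two
 * pad bytes are removed; a plain data header gives padpos = 24 and needs no
 * adjustment. */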
1057 padsize = padpos & 3;
1058 if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
1059 memmove(skb->data + padsize, skb->data, padpos);
1060 skb_pull(skb, padsize);
1063 keyix = rx_stats->rs_keyix;
1065 if ((keyix != ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
1066 ieee80211_has_protected(fc)) {
1067 rxs->flag |= RX_FLAG_DECRYPTED;
1068 } else if (ieee80211_has_protected(fc)
1069 && !decrypt_error && skb->len >= hdrlen + 4) {
1070 keyix = skb->data[hdrlen + 3] >> 6;
1072 if (test_bit(keyix, common->keymap))
1073 rxs->flag |= RX_FLAG_DECRYPTED;
1075 if (ah->sw_mgmt_crypto &&
1076 (rxs->flag & RX_FLAG_DECRYPTED) &&
1077 ieee80211_is_mgmt(fc))
1078 /* Use software decrypt for management frames. */
1079 rxs->flag &= ~RX_FLAG_DECRYPTED;
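/*
 * The antenna diversity helpers below encode the (main, alt) LNA
 * configuration as the nibble pair (main_lna_conf << 4) | alt_lna_conf,
 * which is what the hex case labels in the switch statements refer to.
 */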
1082 static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
1083 struct ath_hw_antcomb_conf ant_conf,
1086 antcomb->quick_scan_cnt = 0;
1088 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
1089 antcomb->rssi_lna2 = main_rssi_avg;
1090 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
1091 antcomb->rssi_lna1 = main_rssi_avg;
1093 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
1094 case 0x10: /* LNA2 A-B */
1095 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1096 antcomb->first_quick_scan_conf =
1097 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1098 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1100 case 0x20: /* LNA1 A-B */
1101 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1102 antcomb->first_quick_scan_conf =
1103 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1104 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1106 case 0x21: /* LNA1 LNA2 */
1107 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
1108 antcomb->first_quick_scan_conf =
1109 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1110 antcomb->second_quick_scan_conf =
1111 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1113 case 0x12: /* LNA2 LNA1 */
1114 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
1115 antcomb->first_quick_scan_conf =
1116 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1117 antcomb->second_quick_scan_conf =
1118 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1120 case 0x13: /* LNA2 A+B */
1121 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1122 antcomb->first_quick_scan_conf =
1123 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1124 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1126 case 0x23: /* LNA1 A+B */
1127 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1128 antcomb->first_quick_scan_conf =
1129 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1130 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1137 static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
1138 struct ath_hw_antcomb_conf *div_ant_conf,
1139 int main_rssi_avg, int alt_rssi_avg,
1143 switch (antcomb->quick_scan_cnt) {
1145 /* set main to the main conf, and alt to the first conf */
1146 div_ant_conf->main_lna_conf = antcomb->main_conf;
1147 div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
1150 /* set main to the main conf, and alt to the second conf */
1151 div_ant_conf->main_lna_conf = antcomb->main_conf;
1152 div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
1153 antcomb->rssi_first = main_rssi_avg;
1154 antcomb->rssi_second = alt_rssi_avg;
1156 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1158 if (ath_is_alt_ant_ratio_better(alt_ratio,
1159 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1160 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1161 main_rssi_avg, alt_rssi_avg,
1162 antcomb->total_pkt_count))
1163 antcomb->first_ratio = true;
1165 antcomb->first_ratio = false;
1166 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1167 if (ath_is_alt_ant_ratio_better(alt_ratio,
1168 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1169 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1170 main_rssi_avg, alt_rssi_avg,
1171 antcomb->total_pkt_count))
1172 antcomb->first_ratio = true;
1174 antcomb->first_ratio = false;
1176 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1177 (alt_rssi_avg > main_rssi_avg +
1178 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1179 (alt_rssi_avg > main_rssi_avg)) &&
1180 (antcomb->total_pkt_count > 50))
1181 antcomb->first_ratio = true;
1183 antcomb->first_ratio = false;
1187 antcomb->alt_good = false;
1188 antcomb->scan_not_start = false;
1189 antcomb->scan = false;
1190 antcomb->rssi_first = main_rssi_avg;
1191 antcomb->rssi_third = alt_rssi_avg;
1193 if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
1194 antcomb->rssi_lna1 = alt_rssi_avg;
1195 else if (antcomb->second_quick_scan_conf ==
1196 ATH_ANT_DIV_COMB_LNA2)
1197 antcomb->rssi_lna2 = alt_rssi_avg;
1198 else if (antcomb->second_quick_scan_conf ==
1199 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
1200 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
1201 antcomb->rssi_lna2 = main_rssi_avg;
1202 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
1203 antcomb->rssi_lna1 = main_rssi_avg;
1206 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
1207 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
1208 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1210 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
1212 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1213 if (ath_is_alt_ant_ratio_better(alt_ratio,
1214 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1215 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1216 main_rssi_avg, alt_rssi_avg,
1217 antcomb->total_pkt_count))
1218 antcomb->second_ratio = true;
1220 antcomb->second_ratio = false;
1221 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1222 if (ath_is_alt_ant_ratio_better(alt_ratio,
1223 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1224 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1225 main_rssi_avg, alt_rssi_avg,
1226 antcomb->total_pkt_count))
1227 antcomb->second_ratio = true;
1229 antcomb->second_ratio = false;
1231 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1232 (alt_rssi_avg > main_rssi_avg +
1233 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1234 (alt_rssi_avg > main_rssi_avg)) &&
1235 (antcomb->total_pkt_count > 50))
1236 antcomb->second_ratio = true;
1238 antcomb->second_ratio = false;
1241 /* set alt to the conf with maximum ratio */
1242 if (antcomb->first_ratio && antcomb->second_ratio) {
1243 if (antcomb->rssi_second > antcomb->rssi_third) {
1245 if ((antcomb->first_quick_scan_conf ==
1246 ATH_ANT_DIV_COMB_LNA1) ||
1247 (antcomb->first_quick_scan_conf ==
1248 ATH_ANT_DIV_COMB_LNA2))
1249 /* Set alt LNA1 or LNA2 */
1250 if (div_ant_conf->main_lna_conf ==
1251 ATH_ANT_DIV_COMB_LNA2)
1252 div_ant_conf->alt_lna_conf =
1253 ATH_ANT_DIV_COMB_LNA1;
1255 div_ant_conf->alt_lna_conf =
1256 ATH_ANT_DIV_COMB_LNA2;
1258 /* Set alt to A+B or A-B */
1259 div_ant_conf->alt_lna_conf =
1260 antcomb->first_quick_scan_conf;
1261 } else if ((antcomb->second_quick_scan_conf ==
1262 ATH_ANT_DIV_COMB_LNA1) ||
1263 (antcomb->second_quick_scan_conf ==
1264 ATH_ANT_DIV_COMB_LNA2)) {
1265 /* Set alt LNA1 or LNA2 */
1266 if (div_ant_conf->main_lna_conf ==
1267 ATH_ANT_DIV_COMB_LNA2)
1268 div_ant_conf->alt_lna_conf =
1269 ATH_ANT_DIV_COMB_LNA1;
1271 div_ant_conf->alt_lna_conf =
1272 ATH_ANT_DIV_COMB_LNA2;
1274 /* Set alt to A+B or A-B */
1275 div_ant_conf->alt_lna_conf =
1276 antcomb->second_quick_scan_conf;
1278 } else if (antcomb->first_ratio) {
1280 if ((antcomb->first_quick_scan_conf ==
1281 ATH_ANT_DIV_COMB_LNA1) ||
1282 (antcomb->first_quick_scan_conf ==
1283 ATH_ANT_DIV_COMB_LNA2))
1284 /* Set alt LNA1 or LNA2 */
1285 if (div_ant_conf->main_lna_conf ==
1286 ATH_ANT_DIV_COMB_LNA2)
1287 div_ant_conf->alt_lna_conf =
1288 ATH_ANT_DIV_COMB_LNA1;
1290 div_ant_conf->alt_lna_conf =
1291 ATH_ANT_DIV_COMB_LNA2;
1293 /* Set alt to A+B or A-B */
1294 div_ant_conf->alt_lna_conf =
1295 antcomb->first_quick_scan_conf;
1296 } else if (antcomb->second_ratio) {
1298 if ((antcomb->second_quick_scan_conf ==
1299 ATH_ANT_DIV_COMB_LNA1) ||
1300 (antcomb->second_quick_scan_conf ==
1301 ATH_ANT_DIV_COMB_LNA2))
1302 /* Set alt LNA1 or LNA2 */
1303 if (div_ant_conf->main_lna_conf ==
1304 ATH_ANT_DIV_COMB_LNA2)
1305 div_ant_conf->alt_lna_conf =
1306 ATH_ANT_DIV_COMB_LNA1;
1308 div_ant_conf->alt_lna_conf =
1309 ATH_ANT_DIV_COMB_LNA2;
1311 /* Set alt to A+B or A-B */
1312 div_ant_conf->alt_lna_conf =
1313 antcomb->second_quick_scan_conf;
1315 /* main is largest */
1316 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
1317 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
1318 /* Set alt LNA1 or LNA2 */
1319 if (div_ant_conf->main_lna_conf ==
1320 ATH_ANT_DIV_COMB_LNA2)
1321 div_ant_conf->alt_lna_conf =
1322 ATH_ANT_DIV_COMB_LNA1;
1324 div_ant_conf->alt_lna_conf =
1325 ATH_ANT_DIV_COMB_LNA2;
1327 /* Set alt to A+B or A-B */
1328 div_ant_conf->alt_lna_conf = antcomb->main_conf;
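/*
 * Program the per-chip fast diversity bias (and, for the newer div groups,
 * the main/alt gain tables) for the chosen main/alt LNA combination.
 */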
1336 static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
1337 struct ath_ant_comb *antcomb, int alt_ratio)
1339 if (ant_conf->div_group == 0) {
1340 /* Adjust the fast_div_bias based on main and alt lna conf */
1341 switch ((ant_conf->main_lna_conf << 4) |
1342 ant_conf->alt_lna_conf) {
1343 case 0x01: /* A-B LNA2 */
1344 ant_conf->fast_div_bias = 0x3b;
1346 case 0x02: /* A-B LNA1 */
1347 ant_conf->fast_div_bias = 0x3d;
1349 case 0x03: /* A-B A+B */
1350 ant_conf->fast_div_bias = 0x1;
1352 case 0x10: /* LNA2 A-B */
1353 ant_conf->fast_div_bias = 0x7;
1355 case 0x12: /* LNA2 LNA1 */
1356 ant_conf->fast_div_bias = 0x2;
1358 case 0x13: /* LNA2 A+B */
1359 ant_conf->fast_div_bias = 0x7;
1361 case 0x20: /* LNA1 A-B */
1362 ant_conf->fast_div_bias = 0x6;
1364 case 0x21: /* LNA1 LNA2 */
1365 ant_conf->fast_div_bias = 0x0;
1367 case 0x23: /* LNA1 A+B */
1368 ant_conf->fast_div_bias = 0x6;
1370 case 0x30: /* A+B A-B */
1371 ant_conf->fast_div_bias = 0x1;
1373 case 0x31: /* A+B LNA2 */
1374 ant_conf->fast_div_bias = 0x3b;
1376 case 0x32: /* A+B LNA1 */
1377 ant_conf->fast_div_bias = 0x3d;
1382 } else if (ant_conf->div_group == 1) {
1383 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1384 switch ((ant_conf->main_lna_conf << 4) |
1385 ant_conf->alt_lna_conf) {
1386 case 0x01: /* A-B LNA2 */
1387 ant_conf->fast_div_bias = 0x1;
1388 ant_conf->main_gaintb = 0;
1389 ant_conf->alt_gaintb = 0;
1391 case 0x02: /* A-B LNA1 */
1392 ant_conf->fast_div_bias = 0x1;
1393 ant_conf->main_gaintb = 0;
1394 ant_conf->alt_gaintb = 0;
1396 case 0x03: /* A-B A+B */
1397 ant_conf->fast_div_bias = 0x1;
1398 ant_conf->main_gaintb = 0;
1399 ant_conf->alt_gaintb = 0;
1401 case 0x10: /* LNA2 A-B */
1402 if (!(antcomb->scan) &&
1403 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1404 ant_conf->fast_div_bias = 0x3f;
1406 ant_conf->fast_div_bias = 0x1;
1407 ant_conf->main_gaintb = 0;
1408 ant_conf->alt_gaintb = 0;
1410 case 0x12: /* LNA2 LNA1 */
1411 ant_conf->fast_div_bias = 0x1;
1412 ant_conf->main_gaintb = 0;
1413 ant_conf->alt_gaintb = 0;
1415 case 0x13: /* LNA2 A+B */
1416 if (!(antcomb->scan) &&
1417 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1418 ant_conf->fast_div_bias = 0x3f;
1420 ant_conf->fast_div_bias = 0x1;
1421 ant_conf->main_gaintb = 0;
1422 ant_conf->alt_gaintb = 0;
1424 case 0x20: /* LNA1 A-B */
1425 if (!(antcomb->scan) &&
1426 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1427 ant_conf->fast_div_bias = 0x3f;
1429 ant_conf->fast_div_bias = 0x1;
1430 ant_conf->main_gaintb = 0;
1431 ant_conf->alt_gaintb = 0;
1433 case 0x21: /* LNA1 LNA2 */
1434 ant_conf->fast_div_bias = 0x1;
1435 ant_conf->main_gaintb = 0;
1436 ant_conf->alt_gaintb = 0;
1438 case 0x23: /* LNA1 A+B */
1439 if (!(antcomb->scan) &&
1440 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1441 ant_conf->fast_div_bias = 0x3f;
1443 ant_conf->fast_div_bias = 0x1;
1444 ant_conf->main_gaintb = 0;
1445 ant_conf->alt_gaintb = 0;
1447 case 0x30: /* A+B A-B */
1448 ant_conf->fast_div_bias = 0x1;
1449 ant_conf->main_gaintb = 0;
1450 ant_conf->alt_gaintb = 0;
1452 case 0x31: /* A+B LNA2 */
1453 ant_conf->fast_div_bias = 0x1;
1454 ant_conf->main_gaintb = 0;
1455 ant_conf->alt_gaintb = 0;
1457 case 0x32: /* A+B LNA1 */
1458 ant_conf->fast_div_bias = 0x1;
1459 ant_conf->main_gaintb = 0;
1460 ant_conf->alt_gaintb = 0;
1465 } else if (ant_conf->div_group == 2) {
1466 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1467 switch ((ant_conf->main_lna_conf << 4) |
1468 ant_conf->alt_lna_conf) {
1469 case 0x01: /* A-B LNA2 */
1470 ant_conf->fast_div_bias = 0x1;
1471 ant_conf->main_gaintb = 0;
1472 ant_conf->alt_gaintb = 0;
1474 case 0x02: /* A-B LNA1 */
1475 ant_conf->fast_div_bias = 0x1;
1476 ant_conf->main_gaintb = 0;
1477 ant_conf->alt_gaintb = 0;
1479 case 0x03: /* A-B A+B */
1480 ant_conf->fast_div_bias = 0x1;
1481 ant_conf->main_gaintb = 0;
1482 ant_conf->alt_gaintb = 0;
1484 case 0x10: /* LNA2 A-B */
1485 if (!(antcomb->scan) &&
1486 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1487 ant_conf->fast_div_bias = 0x1;
1489 ant_conf->fast_div_bias = 0x2;
1490 ant_conf->main_gaintb = 0;
1491 ant_conf->alt_gaintb = 0;
1493 case 0x12: /* LNA2 LNA1 */
1494 ant_conf->fast_div_bias = 0x1;
1495 ant_conf->main_gaintb = 0;
1496 ant_conf->alt_gaintb = 0;
1498 case 0x13: /* LNA2 A+B */
1499 if (!(antcomb->scan) &&
1500 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1501 ant_conf->fast_div_bias = 0x1;
1503 ant_conf->fast_div_bias = 0x2;
1504 ant_conf->main_gaintb = 0;
1505 ant_conf->alt_gaintb = 0;
1507 case 0x20: /* LNA1 A-B */
1508 if (!(antcomb->scan) &&
1509 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1510 ant_conf->fast_div_bias = 0x1;
1512 ant_conf->fast_div_bias = 0x2;
1513 ant_conf->main_gaintb = 0;
1514 ant_conf->alt_gaintb = 0;
1516 case 0x21: /* LNA1 LNA2 */
1517 ant_conf->fast_div_bias = 0x1;
1518 ant_conf->main_gaintb = 0;
1519 ant_conf->alt_gaintb = 0;
1521 case 0x23: /* LNA1 A+B */
1522 if (!(antcomb->scan) &&
1523 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1524 ant_conf->fast_div_bias = 0x1;
1526 ant_conf->fast_div_bias = 0x2;
1527 ant_conf->main_gaintb = 0;
1528 ant_conf->alt_gaintb = 0;
1530 case 0x30: /* A+B A-B */
1531 ant_conf->fast_div_bias = 0x1;
1532 ant_conf->main_gaintb = 0;
1533 ant_conf->alt_gaintb = 0;
1535 case 0x31: /* A+B LNA2 */
1536 ant_conf->fast_div_bias = 0x1;
1537 ant_conf->main_gaintb = 0;
1538 ant_conf->alt_gaintb = 0;
1540 case 0x32: /* A+B LNA1 */
1541 ant_conf->fast_div_bias = 0x1;
1542 ant_conf->main_gaintb = 0;
1543 ant_conf->alt_gaintb = 0;
1551 /* Antenna diversity and combining */
1552 static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1554 struct ath_hw_antcomb_conf div_ant_conf;
1555 struct ath_ant_comb *antcomb = &sc->ant_comb;
1556 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
1558 int main_rssi = rs->rs_rssi_ctl0;
1559 int alt_rssi = rs->rs_rssi_ctl1;
1560 int rx_ant_conf, main_ant_conf;
1561 bool short_scan = false;
1563 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
1565 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
1568 /* Record packet only when both main_rssi and alt_rssi are positive */
1569 if (main_rssi > 0 && alt_rssi > 0) {
1570 antcomb->total_pkt_count++;
1571 antcomb->main_total_rssi += main_rssi;
1572 antcomb->alt_total_rssi += alt_rssi;
1573 if (main_ant_conf == rx_ant_conf)
1574 antcomb->main_recv_cnt++;
1576 antcomb->alt_recv_cnt++;
1579 /* Short scan check */
1580 if (antcomb->scan && antcomb->alt_good) {
1581 if (time_after(jiffies, antcomb->scan_start_time +
1582 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
1585 if (antcomb->total_pkt_count ==
1586 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
1587 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1588 antcomb->total_pkt_count);
1589 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
1594 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
1595 rs->rs_moreaggr) && !short_scan)
1598 if (antcomb->total_pkt_count) {
1599 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1600 antcomb->total_pkt_count);
1601 main_rssi_avg = (antcomb->main_total_rssi /
1602 antcomb->total_pkt_count);
1603 alt_rssi_avg = (antcomb->alt_total_rssi /
1604 antcomb->total_pkt_count);
1608 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
1609 curr_alt_set = div_ant_conf.alt_lna_conf;
1610 curr_main_set = div_ant_conf.main_lna_conf;
1614 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
1615 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1616 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
1618 antcomb->alt_good = true;
1620 antcomb->alt_good = false;
1624 antcomb->scan = true;
1625 antcomb->scan_not_start = true;
1628 if (!antcomb->scan) {
1629 if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
1630 alt_ratio, curr_main_set, curr_alt_set,
1631 alt_rssi_avg, main_rssi_avg)) {
1632 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
1633 /* Switch main and alt LNA */
1634 div_ant_conf.main_lna_conf =
1635 ATH_ANT_DIV_COMB_LNA2;
1636 div_ant_conf.alt_lna_conf =
1637 ATH_ANT_DIV_COMB_LNA1;
1638 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
1639 div_ant_conf.main_lna_conf =
1640 ATH_ANT_DIV_COMB_LNA1;
1641 div_ant_conf.alt_lna_conf =
1642 ATH_ANT_DIV_COMB_LNA2;
1646 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
1647 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
1648 /* Set alt to another LNA */
1649 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
1650 div_ant_conf.alt_lna_conf =
1651 ATH_ANT_DIV_COMB_LNA1;
1652 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
1653 div_ant_conf.alt_lna_conf =
1654 ATH_ANT_DIV_COMB_LNA2;
1659 if ((alt_rssi_avg < (main_rssi_avg +
1660 div_ant_conf.lna1_lna2_delta)))
1664 if (!antcomb->scan_not_start) {
1665 switch (curr_alt_set) {
1666 case ATH_ANT_DIV_COMB_LNA2:
1667 antcomb->rssi_lna2 = alt_rssi_avg;
1668 antcomb->rssi_lna1 = main_rssi_avg;
1669 antcomb->scan = true;
1671 div_ant_conf.main_lna_conf =
1672 ATH_ANT_DIV_COMB_LNA1;
1673 div_ant_conf.alt_lna_conf =
1674 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1676 case ATH_ANT_DIV_COMB_LNA1:
1677 antcomb->rssi_lna1 = alt_rssi_avg;
1678 antcomb->rssi_lna2 = main_rssi_avg;
1679 antcomb->scan = true;
1681 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1682 div_ant_conf.alt_lna_conf =
1683 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1685 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
1686 antcomb->rssi_add = alt_rssi_avg;
1687 antcomb->scan = true;
1689 div_ant_conf.alt_lna_conf =
1690 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1692 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
1693 antcomb->rssi_sub = alt_rssi_avg;
1694 antcomb->scan = false;
1695 if (antcomb->rssi_lna2 >
1696 (antcomb->rssi_lna1 +
1697 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
1698 /* use LNA2 as main LNA */
1699 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
1700 (antcomb->rssi_add > antcomb->rssi_sub)) {
1702 div_ant_conf.main_lna_conf =
1703 ATH_ANT_DIV_COMB_LNA2;
1704 div_ant_conf.alt_lna_conf =
1705 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1706 } else if (antcomb->rssi_sub >
1707 antcomb->rssi_lna1) {
1709 div_ant_conf.main_lna_conf =
1710 ATH_ANT_DIV_COMB_LNA2;
1711 div_ant_conf.alt_lna_conf =
1712 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1715 div_ant_conf.main_lna_conf =
1716 ATH_ANT_DIV_COMB_LNA2;
1717 div_ant_conf.alt_lna_conf =
1718 ATH_ANT_DIV_COMB_LNA1;
1721 /* use LNA1 as main LNA */
1722 if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
1723 (antcomb->rssi_add > antcomb->rssi_sub)) {
1725 div_ant_conf.main_lna_conf =
1726 ATH_ANT_DIV_COMB_LNA1;
1727 div_ant_conf.alt_lna_conf =
1728 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1729 } else if (antcomb->rssi_sub >
1730 antcomb->rssi_lna1) {
1732 div_ant_conf.main_lna_conf =
1733 ATH_ANT_DIV_COMB_LNA1;
1734 div_ant_conf.alt_lna_conf =
1735 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1738 div_ant_conf.main_lna_conf =
1739 ATH_ANT_DIV_COMB_LNA1;
1740 div_ant_conf.alt_lna_conf =
1741 ATH_ANT_DIV_COMB_LNA2;
1749 if (!antcomb->alt_good) {
1750 antcomb->scan_not_start = false;
1751 /* Set alt to another LNA */
1752 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
1753 div_ant_conf.main_lna_conf =
1754 ATH_ANT_DIV_COMB_LNA2;
1755 div_ant_conf.alt_lna_conf =
1756 ATH_ANT_DIV_COMB_LNA1;
1757 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
1758 div_ant_conf.main_lna_conf =
1759 ATH_ANT_DIV_COMB_LNA1;
1760 div_ant_conf.alt_lna_conf =
1761 ATH_ANT_DIV_COMB_LNA2;
1767 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
1768 main_rssi_avg, alt_rssi_avg,
1771 antcomb->quick_scan_cnt++;
1774 ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
1775 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
1777 antcomb->scan_start_time = jiffies;
1778 antcomb->total_pkt_count = 0;
1779 antcomb->main_total_rssi = 0;
1780 antcomb->alt_total_rssi = 0;
1781 antcomb->main_recv_cnt = 0;
1782 antcomb->alt_recv_cnt = 0;
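/*
 * Main receive path: pull completed buffers off the rx queue, allocate a
 * replacement skb before handing the frame to mac80211, stitch together
 * chained (rs_more) fragments, update power-save and antenna diversity
 * state, and requeue the buffer to the hardware.
 */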
1785 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1788 struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
1789 struct ieee80211_rx_status *rxs;
1790 struct ath_hw *ah = sc->sc_ah;
1791 struct ath_common *common = ath9k_hw_common(ah);
1792 struct ieee80211_hw *hw = sc->hw;
1793 struct ieee80211_hdr *hdr;
1795 struct ath_rx_status rs;
1796 enum ath9k_rx_qtype qtype;
1797 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
1799 u8 rx_status_len = ah->caps.rx_status_len;
1802 unsigned long flags;
1805 dma_type = DMA_BIDIRECTIONAL;
1807 dma_type = DMA_FROM_DEVICE;
1809 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
1810 spin_lock_bh(&sc->rx.rxbuflock);
1812 tsf = ath9k_hw_gettsf64(ah);
1813 tsf_lower = tsf & 0xffffffff;
1816 bool decrypt_error = false;
1817 /* If handling rx interrupt and flush is in progress => exit */
1818 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
1821 memset(&rs, 0, sizeof(rs));
1823 bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
1825 bf = ath_get_next_rx_buf(sc, &rs);
1835 * Take frame header from the first fragment and RX status from
1836 * the last one.
1839 hdr_skb = sc->rx.frag;
1843 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1844 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1845 if (ieee80211_is_beacon(hdr->frame_control) &&
1846 !compare_ether_addr(hdr->addr3, common->curbssid))
1847 rs.is_mybeacon = true;
1849 rs.is_mybeacon = false;
1851 ath_debug_stat_rx(sc, &rs);
1854 * If we're asked to flush the receive queue, directly
1855 * chain the buffer back to the queue without processing it.
1857 if (sc->sc_flags & SC_OP_RXFLUSH)
1858 goto requeue_drop_frag;
1860 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1861 rxs, &decrypt_error);
1863 goto requeue_drop_frag;
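/*
 * Extend the 32-bit rx timestamp to a full 64-bit TSF value, adjusting by
 * one 2^32 period if the timestamp wrapped relative to the TSF snapshot
 * taken at the start of the loop.
 */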
1865 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
1866 if (rs.rs_tstamp > tsf_lower &&
1867 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
1868 rxs->mactime -= 0x100000000ULL;
1870 if (rs.rs_tstamp < tsf_lower &&
1871 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1872 rxs->mactime += 0x100000000ULL;
1874 /* Ensure we always have an skb to requeue once we are done
1875 * processing the current buffer's skb */
1876 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
1878 /* If there is no memory we ignore the current RX'd frame,
1879 * tell hardware it can give us a new frame using the old
1880 * skb and put it at the tail of the sc->rx.rxbuf list for
1881 * processing. */
1883 goto requeue_drop_frag;
1885 /* Unmap the frame */
1886 dma_unmap_single(sc->dev, bf->bf_buf_addr,
1890 skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
1891 if (ah->caps.rx_status_len)
1892 skb_pull(skb, ah->caps.rx_status_len);
1895 ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
1896 rxs, decrypt_error);
1898 /* We will now give hardware our shiny new allocated skb */
1899 bf->bf_mpdu = requeue_skb;
1900 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
1903 if (unlikely(dma_mapping_error(sc->dev,
1904 bf->bf_buf_addr))) {
1905 dev_kfree_skb_any(requeue_skb);
1907 bf->bf_buf_addr = 0;
1908 ath_err(common, "dma_mapping_error() on RX\n");
1909 ieee80211_rx(hw, skb);
1915 * rs_more indicates chained descriptors which can be
1916 * used to link buffers together for a sort of
1917 * scatter-gather operation.
1920 /* too many fragments - cannot handle frame */
1921 dev_kfree_skb_any(sc->rx.frag);
1922 dev_kfree_skb_any(skb);
1930 int space = skb->len - skb_tailroom(hdr_skb);
1934 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
1936 goto requeue_drop_frag;
1939 skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
1941 dev_kfree_skb_any(skb);
1946 * change the default rx antenna if rx diversity chooses the
1947 * other antenna 3 times in a row.
1949 if (sc->rx.defant != rs.rs_antenna) {
1950 if (++sc->rx.rxotherant >= 3)
1951 ath_setdefantenna(sc, rs.rs_antenna);
1953 sc->rx.rxotherant = 0;
1956 if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
1957 skb_trim(skb, skb->len - 8);
1959 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1961 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
1963 PS_WAIT_FOR_PSPOLL_DATA)) ||
1964 ath9k_check_auto_sleep(sc))
1965 ath_rx_ps(sc, skb, rs.is_mybeacon);
1966 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1968 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
1969 ath_ant_comb_scan(sc, &rs);
1971 ieee80211_rx(hw, skb);
1975 dev_kfree_skb_any(sc->rx.frag);
1979 list_add_tail(&bf->list, &sc->rx.rxbuf);
1984 ath_rx_edma_buf_link(sc, qtype);
1986 ath_rx_buf_relink(sc, bf);
1991 spin_unlock_bh(&sc->rx.rxbuflock);
1993 if (!(ah->imask & ATH9K_INT_RXEOL)) {
1994 ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
1995 ath9k_hw_set_interrupts(ah);