/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>
int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
								bool skip_pcu);
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};
static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,  .hw_value = ATH5K_RATE_CODE_6M,  .flags = 0 },
	{ .bitrate = 90,  .hw_value = ATH5K_RATE_CODE_9M,  .flags = 0 },
	{ .bitrate = 120, .hw_value = ATH5K_RATE_CODE_12M, .flags = 0 },
	{ .bitrate = 180, .hw_value = ATH5K_RATE_CODE_18M, .flags = 0 },
	{ .bitrate = 240, .hw_value = ATH5K_RATE_CODE_24M, .flags = 0 },
	{ .bitrate = 360, .hw_value = ATH5K_RATE_CODE_36M, .flags = 0 },
	{ .bitrate = 480, .hw_value = ATH5K_RATE_CODE_48M, .flags = 0 },
	{ .bitrate = 540, .hw_value = ATH5K_RATE_CODE_54M, .flags = 0 },
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	/* The rx timestamp wrapped since the TSF was read: step back one
	 * 15-bit period before merging in the low bits. */
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}
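/*
 * Worked example (illustrative, not from the original source): with a
 * hardware TSF of 0x10000123 and a 15-bit rx timestamp rstamp = 0x7f00,
 * the low 15 TSF bits (0x0123) are smaller than rstamp, so the frame
 * arrived before the last 15-bit rollover. Stepping back by 0x8000
 * gives 0x0fff8123, and merging yields
 * (0x0fff8123 & ~0x7fff) | 0x7f00 = 0x0fffff00.
 */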
static const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};
/***********************\
* Driver Initialization *
\***********************/
static int ath5k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_softc *sc = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);

	return ath_reg_notifier_apply(wiphy, request, regulatory);
}
/********************\
* Channel/mode setup *
\********************/
/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
static bool ath5k_is_standard_channel(short chan)
{
	return ((chan <= 14) ||
		/* UNII 1,2 */
		((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165));
}
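/*
 * Example (illustrative): channel 149 has (149 & 3) == 1 and lies in
 * [149, 165], so it is kept; channel 34 fails every test above and is
 * filtered out unless the all_channels module parameter is set.
 */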
static unsigned int
ath5k_copy_channels(struct ath5k_hw *ah,
		struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int i, count, size, chfreq, freq, ch;
	enum ieee80211_band band;

	if (!test_bit(mode, ah->ah_modes))
		return 0;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		chfreq = CHANNEL_5GHZ;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		chfreq = CHANNEL_2GHZ;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
		return 0;
	}

	for (i = 0, count = 0; i < size && max > 0; i++) {
		ch = i + 1;
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;

		if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
			continue;

		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
		channels[count].band = band;
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}

		count++;
		max--;
	}

	return count;
}
static void
ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
{
	u8 i;

	for (i = 0; i < AR5K_MAX_RATES; i++)
		sc->rate_idx[b->band][i] = -1;

	for (i = 0; i < b->n_bitrates; i++) {
		sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}
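/*
 * Illustrative note (not in the original): after this runs for the 2GHz
 * band, sc->rate_idx[IEEE80211_BAND_2GHZ][ATH5K_RATE_CODE_11M] holds the
 * mac80211 bitrate index for 11M, and unknown hardware rate codes stay
 * at -1 so ath5k_hw_to_driver_rix() can reject them.
 */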
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(sc->channels);

	/* 2GHz band */
	sband = &sc->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = sc->channels;
		sband->n_channels = ath5k_copy_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = sc->channels;
		sband->n_channels = ath5k_copy_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(sc, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
		sband = &sc->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &sc->channels[count_c];
		sband->n_channels = ath5k_copy_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(sc, sband);

	ath5k_debug_dump_bands(sc);

	return 0;
}
/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart things, like ath5k_init does.
 *
 * Called with sc->lock.
 */
static int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
{
	ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  sc->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(sc, chan, true);
}
static void
ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
{
	sc->curmode = mode;

	if (mode == AR5K_MODE_11A) {
		sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
	} else {
		sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
	}
}
struct ath_vif_iter_data {
	const u8	*hw_macaddr;
	u8		mask[ETH_ALEN];
	u8		active_mac[ETH_ALEN]; /* first active MAC */
	bool		need_set_hw_addr;
	bool		found_active;
	bool		any_assoc;
	enum nl80211_iftype opmode;
};
static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
}
static void
ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct ath_vif_iter_data iter_data;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;

	if (vif)
		ath_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
						   &iter_data);
	memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);

	sc->opmode = iter_data.opmode;
	if (sc->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		sc->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(sc->ah, sc->opmode);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  sc->opmode, ath_opmode_to_string(sc->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(sc->ah))
		ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
}
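/*
 * Worked example (illustrative): with hardware MAC 00:11:22:33:44:55 and
 * a second interface at 00:11:22:33:44:66, the addresses differ only in
 * the last byte, 0x55 ^ 0x66 = 0x33, so the iterator clears those bits
 * and the final mask byte becomes ~0x33 = 0xcc; the hardware then
 * ignores exactly those bits when matching the BSSID.
 */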
static void
ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif)
{
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;

	/* configure rx filter */
	rfilt = sc->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);

	/* configure operational mode */
	ath5k_update_bssid_mask_and_opmode(sc, vif);
}
static inline int
ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = sc->rate_idx[sc->curband->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}
/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
			  common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(sc->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(sc->dev, *skb_addr))) {
		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}
static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;
	return 0;
}
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;

	return htype;
}
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);

	rate = ieee80211_get_tx_rate(sc->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(sc->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	memset(mrr_rate, 0, sizeof(mrr_rate));
	memset(mrr_tries, 0, sizeof(mrr_tries));
	for (i = 0; i < 3; i++) {
		rate = ieee80211_get_alt_retry_rate(sc->hw, info, i);
		if (!rate)
			break;

		mrr_rate[i] = rate->hw_value;
		mrr_tries[i] = info->control.rates[i + 1].count;
	}

	ath5k_hw_setup_mrr_tx_desc(ah, ds,
		mrr_rate[0], mrr_tries[0],
		mrr_rate[1], mrr_tries[1],
		mrr_rate[2], mrr_tries[2]);

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}
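/*
 * Illustrative note (not in the original): if mac80211 hands us a rate
 * series such as {54M x1, 24M x2, 6M x3}, the descriptor above is armed
 * with 54M for the first attempt, and the MRR series programmed via
 * ath5k_hw_setup_mrr_tx_desc() (24M twice, then 6M three times) is what
 * the hardware falls back to when ACKs fail.
 */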
/*******************\
* Descriptors setup *
\*******************/
static int
ath5k_desc_alloc(struct ath5k_softc *sc)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	sc->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	sc->desc = dma_alloc_coherent(sc->dev, sc->desc_len,
				&sc->desc_daddr, GFP_KERNEL);
	if (sc->desc == NULL) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = sc->desc;
	da = sc->desc_daddr;
	ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, sc->desc_len, (unsigned long long)sc->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(sc, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	sc->bufptr = bf;

	INIT_LIST_HEAD(&sc->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->rxbuf);
	}

	INIT_LIST_HEAD(&sc->txbuf);
	sc->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++,
			da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&sc->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
err:
	sc->desc = NULL;
	return ret;
}
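/*
 * Illustrative layout note (an assumption; the exact counts live in the
 * driver headers, typically ATH_RXBUF=40, ATH_TXBUF=200, ATH_BCBUF=4):
 * that would make this one coherent allocation of 245 descriptors, with
 * da stepping through it in sizeof(struct ath5k_desc) increments so each
 * ath5k_buf records both the CPU and the DMA address of its descriptor.
 */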
static void
ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);

	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}
static void
ath5k_desc_free(struct ath5k_softc *sc)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &sc->txbuf, list)
		ath5k_txbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->rxbuf, list)
		ath5k_rxbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->bcbuf, list)
		ath5k_txbuf_free_skb(sc, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
	sc->desc = NULL;
	sc->desc_daddr = 0;

	kfree(sc->bufptr);
	sc->bufptr = NULL;
}
/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_softc *sc,
		int qtype, int subtype)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
			AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	if (qnum >= ARRAY_SIZE(sc->txqs)) {
		ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
			qnum, ARRAY_SIZE(sc->txqs));
		ath5k_hw_release_tx_queue(ah, qnum);
		return ERR_PTR(-EINVAL);
	}
	txq = &sc->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_poll_mark = false;
	}
	return &sc->txqs[qnum];
}
static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}
static int
ath5k_beaconq_config(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret)
		goto err;

	if (sc->opmode == NL80211_IFTYPE_AP ||
	    sc->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (sc->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}
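/*
 * Example (illustrative): with the common beacon interval of 100 TU the
 * CAB ready time above becomes 100 * 80 / 100 = 80 TU, i.e. buffered
 * multicast traffic may burst for at most 80% of each beacon period
 * after the beacon goes out.
 */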
/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @sc: The &struct ath5k_softc
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_softc *sc)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(sc, bf);

				ath5k_txbuf_free_skb(sc, bf);

				spin_lock_bh(&sc->txbuflock);
				list_move_tail(&bf->list, &sc->txbuf);
				sc->txbuf_len++;
				txq->txq_len--;
				spin_unlock_bh(&sc->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}
static void
ath5k_txq_release(struct ath5k_softc *sc)
{
	struct ath5k_txq *txq = sc->txqs;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
			txq->setup = false;
		}
}
/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&sc->rxbuflock);
	sc->rxlink = NULL;
	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0) {
			spin_unlock_bh(&sc->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&sc->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_mode_setup(sc, NULL);	/* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}
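/*
 * Example (illustrative): with a 32-byte cache line, rx_bufsize becomes
 * roundup(2352, 32) = 2368 bytes, so every rx buffer starts and ends on
 * a cache-line boundary.
 */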
/*
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(sc, ah);
}
static unsigned int
ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	 * the hw does not set the index used to decrypt. In such cases
	 * get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}
static void
ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;

	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(sc->ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);

		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);

		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 byte (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}

		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= sc->nexttbtt)
			ath5k_beacon_update_timers(sc, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(sc->ah, sc->bintval)) {
			ath5k_beacon_update_timers(sc, bc_tstamp);
			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}
static void
ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);

	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
		return;

	ewma_add(&ah->ah_beacon_rssi_avg, rssi);

	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}
/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;

	if (ieee80211_has_a4(frame_control)) {
		padpos += ETH_ALEN;
	}
	if (ieee80211_is_data_qos(frame_control)) {
		padpos += IEEE80211_QOS_CTL_LEN;
	}

	return padpos;
}

/*
 * This function expects an 802.11 frame and returns the number of
 * padding bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -1;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		return padsize;
	}

	return 0;
}
/*
 * The MAC header is padded to have 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit. We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * padding bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len >= padpos + padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
	}

	return 0;
}
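/*
 * Worked example (illustrative): a QoS data frame has a 26-byte header,
 * so padpos = 24 + IEEE80211_QOS_CTL_LEN = 26 and padsize = 26 & 3 = 2;
 * two pad bytes are inserted after the header on tx and stripped again
 * on rx. A plain 24-byte header yields padsize = 0, and both helpers
 * are then no-ops.
 */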
static void
ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
	 * frame, but I'm not sure.
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply to that. This affects IBSS merge only
	 * right now, so it's not too bad...
	 */
	rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_TSFT;

	rxs->freq = sc->curchan->center_freq;
	rxs->band = sc->curband->band;

	rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;

	rxs->antenna = rs->rs_antenna;

	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		sc->stats.antenna_rx[rs->rs_antenna]++;
	else
		sc->stats.antenna_rx[0]++; /* invalid */

	rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);

	if (rxs->rate_idx >= 0 && rs->rs_rate ==
	    sc->curband->bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;

	ath5k_debug_dump_skb(sc, skb, "RX ", 0);

	ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);

	/* check beacons in IBSS mode */
	if (sc->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(sc, skb, rxs);

	ieee80211_rx(sc->hw, skb);
}
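/*
 * Example (illustrative): rs_rssi is reported relative to the noise
 * floor, so with ah_noise_floor = -95 dBm and rs_rssi = 40 the frame is
 * handed to mac80211 with signal = -55 dBm.
 */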
/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
{
	sc->stats.rx_all_count++;
	sc->stats.rx_bytes_count += rs->rs_datalen;

	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			sc->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			sc->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			sc->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error. If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it. This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			sc->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			sc->stats.rxerr_mic++;
			return true;
		}

		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}

	if (unlikely(rs->rs_more)) {
		sc->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}
static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&sc->rxbuflock);
	if (list_empty(&sc->rxbuf)) {
		ATH5K_WARN(sc, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr)
			break;

		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(sc, "error in processing rx descriptor\n");
			sc->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(sc, &rs)) {
			next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(sc->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(sc, skb, &rs);

			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &sc->rxbuf);
	} while (ath5k_rxbuf_setup(sc, bf) == 0);
unlock:
	spin_unlock(&sc->rxbuflock);
}
/*************\
* TX Handling *
\*************/

int
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	ath5k_debug_dump_skb(sc, skb, "TX ", 1);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	if (txq->txq_len >= ATH5K_TXQ_LEN_MAX)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&sc->txbuflock, flags);
	if (list_empty(&sc->txbuf)) {
		ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&sc->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	sc->txbuf_len--;
	if (list_empty(&sc->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&sc->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
		bf->skb = NULL;
		spin_lock_irqsave(&sc->txbuflock, flags);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
		spin_unlock_irqrestore(&sc->txbuflock, flags);
		goto drop_packet;
	}
	return NETDEV_TX_OK;

drop_packet:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
static void
ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
			 struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	int i;

	sc->stats.tx_all_count++;
	sc->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	ieee80211_tx_info_clear_status(info);
	for (i = 0; i < 4; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		if (ts->ts_rate[i]) {
			r->idx = ath5k_hw_to_driver_rix(sc, ts->ts_rate[i]);
			r->count = ts->ts_retry[i];
		} else {
			r->idx = -1;
			r->count = 0;
		}
	}

	/* count the successful attempt as well */
	info->status.rates[ts->ts_final_idx].count++;

	if (unlikely(ts->ts_status)) {
		sc->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			sc->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			sc->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			sc->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;
	}

	/*
	 * Remove MAC header padding before giving the frame
	 * back to mac80211.
	 */
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		sc->stats.antenna_tx[ts->ts_antenna]++;
	else
		sc->stats.antenna_tx[0]++; /* invalid */

	ieee80211_tx_status(sc->hw, skb);
}
static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(sc,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(sc, skb, &ts);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
			spin_lock(&sc->txbuflock);
			list_move_tail(&bf->list, &sc->txbuf);
			sc->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&sc->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(sc->hw, txq->qnum);
}
static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_softc *sc = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i)))
			ath5k_tx_processq(sc, &sc->txqs[i]);
}
/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds;
	int ret = 0;
	u8 antenna;
	u32 flags;
	const int padsize = 0;

	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);

	if (dma_mapping_error(sc->dev, bf->skbaddr)) {
		ATH5K_ERR(sc, "beacon DMA mapping failed\n");
		return -EIO;
	}

	ds = bf->desc;
	antenna = ah->ah_tx_ant;

	flags = AR5K_TXDESC_NOACK;
	if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
	} else
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automatically, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = sc->bsent & 4 ? 2 : 1;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
			ieee80211_get_tx_rate(sc->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}
/*
 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_vif *avf = (void *)vif->drv_priv;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	ath5k_debug_dump_skb(sc, skb, "BC ", 1);

	ath5k_txbuf_free_skb(sc, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(sc, avf->bbuf);
	if (ret)
		avf->bbuf->skb = NULL;
out:
	return ret;
}
/*
 * Transmit a beacon frame at SWBA. Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklet)
 * or user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;

	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out. If
	 * not, don't try to post another: skip this
	 * period and wait for the next. Missed beacons
	 * indicate a problem and should not occur. If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) {
		sc->bmisscount++;
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", sc->bmisscount);
		if (sc->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
				"stuck beacon time (%u missed)\n",
				sc->bmisscount);
			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(sc->hw, &sc->reset_work);
		}
		return;
	}
	if (unlikely(sc->bmisscount != 0)) {
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"resume beacon xmit after %u misses\n",
			sc->bmisscount);
		sc->bmisscount = 0;
	}

	if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) ||
	    sc->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
		int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
		vif = sc->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, sc->bintval, slot, vif);
	} else /* only one interface */
		vif = sc->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;
	if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
		     sc->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
		return;
	}

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, sc->bhalq))) {
		ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (sc->opmode == NL80211_IFTYPE_AP ||
	    sc->opmode == NL80211_IFTYPE_MESH_POINT)
		ath5k_beacon_update(sc->hw, vif);

	ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, sc->bhalq);
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		sc->bhalq, (unsigned long long)bf->daddr, bf->desc);

	skb = ieee80211_get_buffered_bc(sc->hw, vif);
	while (skb) {
		ath5k_tx_queue(sc->hw, skb, sc->cabq);
		skb = ieee80211_get_buffered_bc(sc->hw, vif);
	}

	sc->bsent++;
}
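/*
 * Example (illustrative): with bintval = 100 TU and ATH_BCBUF = 4
 * staggered slots, a TSF sitting 25 TU into the beacon period gives
 * slot = (25 * 4) / 100 = 1, so the vif stored in bslot[2] transmits
 * on this SWBA.
 */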
/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @sc: struct ath5k_softc pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when a new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
void
ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
{
	struct ath5k_hw *ah = sc->ah;
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;

	intval = sc->bintval & AR5K_BEACON_PERIOD;
	if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(sc, "intval %u is too low, min 15\n",
				   intval);
	}
	if (WARN_ON(!intval))
		return;

	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);

	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);

#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
	 * configuration we need to make sure it is bigger than that. */

	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
		 * beacon received, SW merge happened but HW TSF not yet updated.
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STA.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}

	sc->nexttbtt = nexttbtt;

	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon(ah, nexttbtt, intval);

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}
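/*
 * Worked example (illustrative): say intval = 100 TU, hw_tu = 1234,
 * bc_tu = 1180 and FUDGE = 13. Then hw_tu + FUDGE - bc_tu = 67, which
 * rounds up to 100, so nexttbtt = 1180 + 100 = 1280 TU: exactly one
 * beacon interval after the received beacon and safely ahead of the
 * local timer.
 */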
/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @sc: struct ath5k_softc pointer we are operating on
 *
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
 * interrupts to detect TSF updates only.
 */
void
ath5k_beacon_config(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	unsigned long flags;

	spin_lock_irqsave(&sc->block, flags);
	sc->bmisscount = 0;
	sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

	if (sc->enable_beacon) {
		/*
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
		 * only once here.
		 * We use the SWBA interrupt only to keep track of the beacon
		 * timers in order to detect automatic TSF updates.
		 */
		ath5k_beaconq_config(sc);

		sc->imask |= AR5K_INT_SWBA;

		if (sc->opmode == NL80211_IFTYPE_ADHOC) {
			if (ath5k_hw_hasveol(ah))
				ath5k_beacon_send(sc);
		} else
			ath5k_beacon_update_timers(sc, -1);
	} else {
		ath5k_hw_stop_beacon_queue(sc->ah, sc->bhalq);
	}

	ath5k_hw_set_imr(ah, sc->imask);
	spin_unlock_irqrestore(&sc->block, flags);
}
static void ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_softc *sc = (struct ath5k_softc *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect whether
	 * automatic TSF updates happened.
	 */
	if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL supported */
		u64 tsf = ath5k_hw_get_tsf64(sc->ah);
		sc->nexttbtt += sc->bintval;
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			  "SWBA nexttbtt: %x hw_tu: %x "
			  "TSF: %llx\n",
			  sc->nexttbtt,
			  TSF_TO_TU(tsf),
			  (unsigned long long) tsf);
	} else {
		spin_lock(&sc->block);
		ath5k_beacon_send(sc);
		spin_unlock(&sc->block);
	}
}
/********************\
* Interrupt handling *
\********************/
static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
		/* run ANI only when full calibration is not active */
		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ah_sc->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		tasklet_schedule(&ah->ah_sc->calib);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}
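/*
 * Illustrative timing note (an assumption; the actual values are set by
 * the ATH5K_TUNE_CALIBRATION_INTERVAL_* tunables in the driver headers):
 * with the usual defaults this schedules the ANI tasklet roughly once a
 * second and a full PHY calibration about once a minute, whichever
 * deadline has expired at interrupt time.
 */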
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_softc *sc = dev_id;
	struct ath5k_hw *ah = sc->ah;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
		((ath5k_get_bus_type(ah) != ATH_AHB) &&
				!ath5k_hw_is_intr_pending(ah))))
		return IRQ_NONE;

	do {
		ath5k_hw_get_isr(ah, &status);	/* NB: clears IRQ too */
		ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, sc->imask);
		if (unlikely(status & AR5K_INT_FATAL)) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
					"fatal int, resetting\n");
			ieee80211_queue_work(sc->hw, &sc->reset_work);
		} else if (unlikely(status & AR5K_INT_RXORN)) {
			/*
			 * Receive buffers are full. Either the bus is busy or
			 * the CPU is not fast enough to process all received
			 * frames.
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset -
			 * this guess is copied from the HAL.
			 */
			sc->stats.rxorn_intr++;
			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
						"rx overrun, resetting\n");
				ieee80211_queue_work(sc->hw, &sc->reset_work);
			}
			else
				tasklet_schedule(&sc->rxtq);
		} else {
			if (status & AR5K_INT_SWBA) {
				tasklet_hi_schedule(&sc->beacontq);
			}
			if (status & AR5K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 * RXE bit is written, but it doesn't work at
				 * least on older hardware revs.
				 */
				sc->stats.rxeol_intr++;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				ath5k_hw_update_tx_triglevel(ah, true);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				tasklet_schedule(&sc->rxtq);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
					| AR5K_INT_TXERR | AR5K_INT_TXEOL))
				tasklet_schedule(&sc->txtq);
			if (status & AR5K_INT_BMISS) {
				/* TODO */
			}
			if (status & AR5K_INT_MIB) {
				sc->stats.mib_intr++;
				ath5k_hw_update_mib_counters(ah);
				ath5k_ani_mib_intr(ah);
			}
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&sc->rf_kill.toggleq);
		}

		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);

	if (unlikely(!counter))
		ATH5K_WARN(sc, "too many interrupts, giving up for now\n");

	ath5k_intr_calibration_poll(ah);

	return IRQ_HANDLED;
}
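
/*
 * Note on the return values above (illustrative only): with IRQF_SHARED
 * the kernel walks every handler registered on the line, so a handler
 * must claim only interrupts raised by its own device:
 *
 *	static irqreturn_t some_isr(int irq, void *dev_id)
 *	{
 *		if (!device_raised_irq(dev_id))	// hypothetical helper
 *			return IRQ_NONE;	// not ours, try next handler
 *		// ...ack and dispatch...
 *		return IRQ_HANDLED;
 *	}
 */
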
/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_tasklet_calibrate(unsigned long data)
{
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;

	/* Only full calibration for now */
	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

	ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(sc->curchan->center_freq),
		sc->curchan->hw_value);

	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
		ieee80211_queue_work(sc->hw, &sc->reset_work);
	}
	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
		ATH5K_ERR(sc, "calibration of channel %u failed\n",
			ieee80211_frequency_to_channel(
				sc->curchan->center_freq));

	/* Noise floor calibration interrupts rx/tx path while I/Q calibration
	 * doesn't.
	 * TODO: We should stop TX here, so that it doesn't interfere.
	 * Note that stopping the queues is not enough to stop TX! */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
		ah->ah_cal_next_nf = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
		ath5k_hw_update_noise_floor(ah);
	}

	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
}
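
/*
 * Resets are queued to sc->reset_work above rather than done in place
 * because a tasklet runs in softirq context and must not sleep, while
 * ath5k_reset() takes sc->lock (a mutex). The pattern, sketched:
 *
 *	// atomic context (tasklet/irq): just queue the work
 *	ieee80211_queue_work(sc->hw, &sc->reset_work);
 *	// process context (worker): may take mutexes and sleep
 */
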
static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;

	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}
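
/*
 * Both calibration tasklets bracket their work with a bit in
 * ah->ah_cal_mask; ath5k_intr_calibration_poll() reads it so ANI and
 * full calibration do not run concurrently. The protocol, sketched:
 *
 *	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;	// announce
 *	// ...calibrate...
 *	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;	// done
 */
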
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
			if (txq->txq_len > 1) {
				if (txq->txq_poll_mark) {
					ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
						"TX queue stuck %d\n",
						txq->qnum);
					needreset = true;
					txq->txq_stuck++;
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
			"TX queues stuck, resetting\n");
		ath5k_reset(sc, NULL, true);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}
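
/*
 * The poll above is a two-pass watchdog (sketch): the first pass only
 * marks a busy queue, and the tx completion path clears the mark when
 * frames actually complete. Only a queue still marked on the next pass,
 * i.e. one that made no progress for a full poll interval, is stuck:
 *
 *	if (txq->txq_poll_mark)			// still marked -> stuck
 *		needreset = true;
 *	else
 *		txq->txq_poll_mark = true;	// mark, check next pass
 */
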
/*************************\
* Initialization routines *
\*************************/
int
ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
	SET_IEEE80211_DEV(hw, sc->dev);
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	__set_bit(ATH_STAT_INVALID, sc->status);

	sc->opmode = NL80211_IFTYPE_STATION;
	sc->bintval = 1000;
	mutex_init(&sc->lock);
	spin_lock_init(&sc->rxbuflock);
	spin_lock_init(&sc->txbuflock);
	spin_lock_init(&sc->block);

	/* Setup interrupt handler */
	ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
	if (ret) {
		ATH5K_ERR(sc, "request_irq failed\n");
		goto err;
	}

	/* If we passed the test, malloc an ath5k_hw struct */
	sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
	if (!sc->ah) {
		ret = -ENOMEM;
		ATH5K_ERR(sc, "out of memory\n");
		goto err_irq;
	}

	sc->ah->ah_sc = sc;
	sc->ah->ah_iobase = sc->iobase;
	common = ath5k_hw_common(sc->ah);
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = sc->ah;
	common->hw = hw;
	common->priv = sc;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
	ret = ath5k_hw_init(sc);
	if (ret)
		goto err_free;

	/* set up multi-rate retry capabilities */
	if (sc->ah->ah_version == AR5K_AR5212) {
		hw->max_rates = 4;
		hw->max_rate_tries = 11;
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

	ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
			sc->ah->ah_mac_srev,
			sc->ah->ah_phy_revision);

	if (!sc->ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (sc->ah->ah_radio_5ghz_revision &&
				!sc->ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
				sc->ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!test_bit(AR5K_MODE_11B,
				sc->ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				ATH5K_INFO(sc, "RF%s multiband radio found"
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (sc->ah->ah_radio_5ghz_revision &&
				sc->ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					sc->ah->ah_radio_5ghz_revision),
					sc->ah->ah_radio_5ghz_revision);
			ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					sc->ah->ah_radio_2ghz_revision),
					sc->ah->ah_radio_2ghz_revision);
		}
	}

	ath5k_debug_init_device(sc);

	/* ready to process interrupts */
	__clear_bit(ATH_STAT_INVALID, sc->status);

	return 0;
err_ah:
	ath5k_hw_deinit(sc->ah);
err_free:
	kfree(sc->ah);
err_irq:
	free_irq(sc->irq, sc);
err:
	return ret;
}
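
/*
 * The err_* labels above unwind resources in reverse order of
 * acquisition, the usual kernel error-path idiom (generic sketch with
 * hypothetical helpers, not driver code):
 *
 *	ret = acquire_a();
 *	if (ret)
 *		goto err;
 *	ret = acquire_b();
 *	if (ret)
 *		goto err_a;	// release only what we already hold
 *	return 0;
 * err_a:
 *	release_a();
 * err:
 *	return ret;
 */
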
static int
ath5k_stop_locked(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, sc->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(sc->hw);

	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
		ath5k_led_off(sc);
		ath5k_hw_set_imr(ah, 0);
		synchronize_irq(sc->irq);
		ath5k_rx_stop(sc);
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(sc);
		ath5k_hw_phy_disable(ah);
	}

	return 0;
}
static int
ath5k_init_hw(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;

	mutex_lock(&sc->lock);

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);

	/*
	 * Stop anything previously setup. This is safe
	 * whether this is the first time through or not.
	 */
	ath5k_stop_locked(sc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->curchan = sc->hw->conf.channel;
	sc->curband = &sc->sbands[sc->curchan->band];
	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;

	ret = ath5k_reset(sc, NULL, false);
	if (ret)
		goto done;

	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;

	for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
		sc->bslot[i] = NULL;

	ret = 0;
done:
	mmiowb();
	mutex_unlock(&sc->lock);

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

	return ret;
}
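
/*
 * sc->imask above is the set of interrupt sources the driver wants,
 * with AR5K_INT_GLOBAL acting as the master enable. A sketch of how a
 * source is added at runtime (as ath5k_beacon_config() does for SWBA):
 *
 *	sc->imask |= AR5K_INT_SWBA;		// want software beacon alerts
 *	ath5k_hw_set_imr(ah, sc->imask);	// program the IMR
 */
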
static void stop_tasklets(struct ath5k_softc *sc)
{
	tasklet_kill(&sc->rxtq);
	tasklet_kill(&sc->txtq);
	tasklet_kill(&sc->calib);
	tasklet_kill(&sc->beacontq);
	tasklet_kill(&sc->ani_tasklet);
}
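
/*
 * Note: tasklet_kill() waits for a running tasklet to finish, so it may
 * only be called from process context. The teardown order used by the
 * reset and stop paths, sketched:
 *
 *	ath5k_hw_set_imr(ah, 0);	// no new interrupts
 *	synchronize_irq(sc->irq);	// ISR no longer running
 *	stop_tasklets(sc);		// now safe to kill bottom halves
 */
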
/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
static int
ath5k_stop_hw(struct ath5k_softc *sc)
{
	int ret;

	mutex_lock(&sc->lock);
	ret = ath5k_stop_locked(sc);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system). This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results in weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
		ret = ath5k_hw_on_hold(sc->ah);

		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
	}

	mmiowb();
	mutex_unlock(&sc->lock);

	stop_tasklets(sc);

	cancel_delayed_work_sync(&sc->tx_complete_work);

	ath5k_rfkill_hw_stop(sc->ah);

	return ret;
}
/*
 * Reset the hardware. If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * This should be called with sc->lock held.
 */
static int
ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
							bool skip_pcu)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");

	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(sc->irq);
	stop_tasklets(sc);

	/* Save ani mode and disable ANI during
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ah_sc->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(sc);
	if (chan) {
		sc->curchan = chan;
		sc->curband = &sc->sbands[chan->band];
	}
	ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
								skip_pcu);
	if (ret) {
		ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
		goto err;
	}

	ret = ath5k_rx_start(sc);
	if (ret) {
		ATH5K_ERR(sc, "can't start recv logic\n");
		goto err;
	}

	ath5k_ani_init(ah, ani_mode);

	ah->ah_cal_next_full = jiffies;
	ah->ah_cal_next_ani = jiffies;
	ah->ah_cal_next_nf = jiffies;
	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);

	/* clear survey data and cycle counters */
	memset(&sc->survey, 0, sizeof(sc->survey));
	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
	spin_unlock_bh(&common->cc_lock);

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 */
	/* ath5k_chan_change(sc, c); */

	ath5k_beacon_config(sc);
	/* intrs are enabled by ath5k_beacon_config */

	ieee80211_wake_queues(sc->hw);

	return 0;
err:
	return ret;
}
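
/*
 * ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8) above sets up the kernel's
 * exponentially weighted moving average from <linux/average.h>: 1024 is
 * the fixed-point scaling factor, 8 the weight (larger = smoother).
 * A sketch of the standard API used elsewhere for the beacon RSSI:
 *
 *	ewma_add(&ah->ah_beacon_rssi_avg, rssi);	// feed a sample
 *	avg = ewma_read(&ah->ah_beacon_rssi_avg);	// current average
 */
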
static void ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
		reset_work);

	mutex_lock(&sc->lock);
	ath5k_reset(sc, NULL, true);
	mutex_unlock(&sc->lock);
}
static int
ath5k_init(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
	struct ath5k_txq *txq;
	u8 mac[ETH_ALEN] = {};
	int ret;

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor. MACs that don't have support will
	 * return false w/o doing anything. MACs that do
	 * support it will return true w/o doing anything.
	 */
	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);

	if (ret < 0)
		goto err;
	if (ret > 0)
		__set_bit(ATH_STAT_MRRETRY, sc->status);

	/*
	 * Collect the channel list. The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(sc, "can't get channels\n");
		goto err;
	}

	/* NB: setup here so ath5k_rate_update is happy */
	if (test_bit(AR5K_MODE_11A, ah->ah_modes))
		ath5k_setcurmode(sc, AR5K_MODE_11A);
	else
		ath5k_setcurmode(sc, AR5K_MODE_11B);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(sc);
	if (ret) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
		ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
		goto err_desc;
	}
	sc->bhalq = ret;
	sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(sc->cabq)) {
		ATH5K_ERR(sc, "can't setup cab queue\n");
		ret = PTR_ERR(sc->cabq);
		goto err_bhal;
	}

	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		 * directly use the mac80211 queue number without any mapping
		 * (see the note after this function) */
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}

	tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
	tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
	tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
	tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
	tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);

	INIT_WORK(&sc->reset_work, ath5k_reset_work);
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);

	ret = ath5k_eeprom_read_mac(ah, mac);
	if (ret) {
		ATH5K_ERR(sc, "unable to read address from EEPROM\n");
		goto err_queues;
	}

	SET_IEEE80211_PERM_ADDR(hw, mac);
	memcpy(&sc->lladdr, mac, ETH_ALEN);
	/* All MAC address bits matter for ACKs */
	ath5k_update_bssid_mask_and_opmode(sc, NULL);

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
		ATH5K_ERR(sc, "can't initialize regulatory system\n");
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		ATH5K_ERR(sc, "can't register ieee80211 hw\n");
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

	ath5k_init_leds(sc);

	ath5k_sysfs_register(sc);

	return 0;
err_queues:
	ath5k_txq_release(sc);
err_bhal:
	ath5k_hw_release_tx_queue(ah, sc->bhalq);
err_desc:
	ath5k_desc_free(sc);
err:
	return ret;
}
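
/*
 * Note on the queue setup above: creating the data queues in VO, VI,
 * BE, BK order makes the hw queue index equal to mac80211's queue
 * number, so the tx path can index sc->txqs directly by the skb's
 * queue mapping (an illustrative sketch, not driver code):
 *
 *	u16 qnum = skb_get_queue_mapping(skb);
 *	ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
 */
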
void
ath5k_deinit_softc(struct ath5k_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ath5k_debug_finish_device(sc);
	ieee80211_unregister_hw(hw);
	ath5k_desc_free(sc);
	ath5k_txq_release(sc);
	ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
	ath5k_unregister_leds(sc);

	ath5k_sysfs_unregister(sc);
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
	ath5k_hw_deinit(sc->ah);
	free_irq(sc->irq, sc);
}
bool
ath_any_vif_assoc(struct ath5k_softc *sc)
{
	struct ath_vif_iter_data iter_data;
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
						   &iter_data);
	return iter_data.any_assoc;
}
void
set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;

	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
	sc->filter_flags = rfilt;
}
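
/*
 * The read-modify-write pattern above is how rx filter changes are made
 * throughout the driver, so unrelated filter bits survive a toggle
 * (illustrative sketch, not driver code):
 *
 *	u32 rfilt = ath5k_hw_get_rx_filter(ah);	// current bits
 *	rfilt |= AR5K_RX_FILTER_PROM;		// e.g. add promiscuous
 *	ath5k_hw_set_rx_filter(ah, rfilt);	// write back
 */
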