/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");

/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
								bool skip_pcu);

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 },
	{ "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 },
	{ "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 },
	{ "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 },
	{ "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 },
	{ "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 },
	{ "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 },
#else
	{ "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
	{ "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
	{ "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
	{ "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B },
	{ "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 },
	{ "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 },
	{ "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 },
	{ "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A },
	{ "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 },
	{ "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 },
	{ "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 },
	{ "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 },
	{ "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 },
	{ "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 },
	{ "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 },
	{ "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
	{ "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
	{ "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
#endif
	{ "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
	{ "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
	{ "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
	{ "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A },
	{ "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 },
	{ "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 },
	{ "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A },
	{ "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B },
	{ "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 },
	{ "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A },
	{ "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
	{ "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
	{ "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
	{ "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
	{ "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
	{ "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
};
static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}
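/*
 * Illustrative example (not from the original source): the hardware
 * reports only the low 15 bits of the receive timestamp in rstamp.
 * With TSF = 0x12348001 and rstamp = 0x7f00, (tsf & 0x7fff) = 0x0001
 * is smaller than rstamp, so the frame arrived before the last 15-bit
 * rollover; subtracting 0x8000 gives 0x12340001, and merging yields
 * (0x12340001 & ~0x7fff) | 0x7f00 = 0x12347f00, a time just before
 * the current TSF as expected.
 */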
static const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};
/***********************\
* Driver Initialization *
\***********************/
static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_softc *sc = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);

	return ath_reg_notifier_apply(wiphy, request, regulatory);
}
/********************\
* Channel/mode setup *
\********************/
/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
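/*
 * Illustrative example (not in the original source): with all_channels
 * unset, the 5GHz test above admits e.g. channels 36/40/.../64 and
 * 100/104/.../140 ((chan & 3) == 0) and 149/153/157/161/165
 * ((chan & 3) == 1), plus the listed 802.11j channels; an in-between
 * channel such as 38 is skipped.
 */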
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, chfreq, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		chfreq = CHANNEL_5GHZ;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		chfreq = CHANNEL_2GHZ;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;

		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
			continue;

		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
		channels[count].band = band;
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}

		count++;
	}

	return count;
}
static void
ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
{
	u8 i;

	for (i = 0; i < AR5K_MAX_RATES; i++)
		sc->rate_idx[b->band][i] = -1;

	for (i = 0; i < b->n_bitrates; i++) {
		sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}
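/*
 * Illustrative note (not in the original source): after this runs for
 * a band, a hardware rate code such as ATH5K_RATE_CODE_11M maps back
 * to the index of the 11M entry in ath5k_rates[], and the matching
 * short-preamble code (with AR5K_SET_SHORT_PREAMBLE or'ed in) maps to
 * the same index; codes never written stay at -1 so later lookups can
 * reject them.
 */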
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(sc->channels);

	/* 2GHz band */
	sband = &sc->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = sc->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4-bit rate codes
		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = sc->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(sc, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
		sband = &sc->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &sc->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(sc, sband);

	ath5k_debug_dump_bands(sc);

	return 0;
}
/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart everything, a la ath5k_init().
 *
 * Called with sc->lock.
 */
static int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
{
	ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  sc->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(sc, chan, true);
}
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}
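/*
 * Illustrative example (not in the original source): with a hardware
 * address of 00:11:22:33:44:55 and a second interface using
 * 00:11:22:33:44:66, the per-byte XOR is 00:00:00:00:00:33, so the
 * mask computed above becomes ff:ff:ff:ff:ff:cc; the hardware then
 * ignores exactly the bits in which the two addresses differ when
 * matching the BSSID.
 */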
void
ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
						   &iter_data);
	memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);

	sc->opmode = iter_data.opmode;
	if (sc->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		sc->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(sc->ah, sc->opmode);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  sc->opmode, ath_opmode_to_string(sc->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(sc->ah))
		ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		sc->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = sc->filter_flags;
	ath5k_hw_set_rx_filter(sc->ah, rfilt);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
static int
ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = sc->rate_idx[sc->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(sc->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(sc->dev, *skb_addr))) {
		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}
static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;
	return 0;
}
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;

	return htype;
}
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);

	rate = ieee80211_get_tx_rate(sc->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(sc->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	memset(mrr_rate, 0, sizeof(mrr_rate));
	memset(mrr_tries, 0, sizeof(mrr_tries));
	for (i = 0; i < 3; i++) {
		rate = ieee80211_get_alt_retry_rate(sc->hw, info, i);
		if (!rate)
			break;

		mrr_rate[i] = rate->hw_value;
		mrr_tries[i] = info->control.rates[i + 1].count;
	}

	ath5k_hw_setup_mrr_tx_desc(ah, ds,
		mrr_rate[0], mrr_tries[0],
		mrr_rate[1], mrr_tries[1],
		mrr_rate[2], mrr_tries[2]);

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}
/*******************\
* Descriptors setup *
\*******************/
static int
ath5k_desc_alloc(struct ath5k_softc *sc)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	sc->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	sc->desc = dma_alloc_coherent(sc->dev, sc->desc_len,
				&sc->desc_daddr, GFP_KERNEL);
	if (sc->desc == NULL) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = sc->desc;
	da = sc->desc_daddr;
	ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, sc->desc_len, (unsigned long long)sc->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(sc, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	sc->bufptr = bf;

	INIT_LIST_HEAD(&sc->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->rxbuf);
	}

	INIT_LIST_HEAD(&sc->txbuf);
	sc->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&sc->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
err:
	sc->desc = NULL;
	return ret;
}
static void
ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_desc_free(struct ath5k_softc *sc)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &sc->txbuf, list)
		ath5k_txbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->rxbuf, list)
		ath5k_rxbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->bcbuf, list)
		ath5k_txbuf_free_skb(sc, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
	sc->desc = NULL;
	sc->desc_daddr = 0;

	kfree(sc->bufptr);
	sc->bufptr = NULL;
}

/**************\
* Queues setup *
\**************/
static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_softc *sc,
		int qtype, int subtype)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
			AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	if (qnum >= ARRAY_SIZE(sc->txqs)) {
		ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
			qnum, ARRAY_SIZE(sc->txqs));
		ath5k_hw_release_tx_queue(ah, qnum);
		return ERR_PTR(-EINVAL);
	}
	txq = &sc->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &sc->txqs[qnum];
}
static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}
static int
ath5k_beaconq_config(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret)
		goto err;

	if (sc->opmode == NL80211_IFTYPE_AP ||
	    sc->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (sc->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}
/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @sc: The &struct ath5k_softc
 *
 * Empty tx buffers from all queues in preparation
 * for a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_softc *sc)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(sc, bf);

				ath5k_txbuf_free_skb(sc, bf);

				spin_lock_bh(&sc->txbuflock);
				list_move_tail(&bf->list, &sc->txbuf);
				sc->txbuf_len++;
				txq->txq_len--;
				spin_unlock_bh(&sc->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}
static void
ath5k_txq_release(struct ath5k_softc *sc)
{
	struct ath5k_txq *txq = sc->txqs;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
			txq->setup = false;
		}
}

/*************\
* RX Handling *
\*************/
/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&sc->rxbuflock);
	sc->rxlink = NULL;
	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0) {
			spin_unlock_bh(&sc->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&sc->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}
/*
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(sc, ah);
}
static unsigned int
ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}
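/*
 * Illustrative note (not in the original source): for a protected
 * frame the 4-byte IV follows the header, and the top two bits of its
 * last byte carry the key ID, hence skb->data[hlen + 3] >> 6 above.
 * E.g. an IV whose last byte is 0x40 (0b01000000) selects default
 * key 1.
 */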
static void
ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;

	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(sc->ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);

		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);

		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 bytes (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}

		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= sc->nexttbtt)
			ath5k_beacon_update_timers(sc, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(sc->ah, sc->bintval)) {
			ath5k_beacon_update_timers(sc, bc_tstamp);
			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}
static void
ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);

	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
		return;

	ewma_add(&ah->ah_beacon_rssi_avg, rssi);

	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}
/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;

	if (ieee80211_has_a4(frame_control))
		padpos += ETH_ALEN;

	if (ieee80211_is_data_qos(frame_control))
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
}

/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len > padpos) {

		if (skb_headroom(skb) < padsize)
			return -1;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		return padsize;
	}

	return 0;
}
/*
 * The MAC header is padded to have 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit. We must not try to
 * remove padding from short control frames that do not have a
 * header aligned with a 4-byte boundary.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len >= padpos + padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
	}

	return 0;
}
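/*
 * Illustrative example (not in the original source): a QoS data frame
 * with three addresses has padpos = 24 + 2 = 26, so padsize = 26 & 3
 * = 2; ath5k_add_padding() inserts two bytes between header and
 * payload on transmit, and ath5k_remove_padding() removes them again
 * before the frame is handed back up.
 */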
static void
ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
	 * frame, but i'm not sure.
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply to that. This affects IBSS merge only
	 * right now, so it's not too bad...
	 */
	rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_MACTIME_MPDU;

	rxs->freq = sc->curchan->center_freq;
	rxs->band = sc->curchan->band;

	rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;

	rxs->antenna = rs->rs_antenna;

	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		sc->stats.antenna_rx[rs->rs_antenna]++;
	else
		sc->stats.antenna_rx[0]++; /* invalid */

	rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);

	if (rxs->rate_idx >= 0 && rs->rs_rate ==
	    sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;

	trace_ath5k_rx(sc, skb);

	ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);

	/* check beacons in IBSS mode */
	if (sc->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(sc, skb, rxs);

	ieee80211_rx(sc->hw, skb);
}
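/*
 * Illustrative example (not in the original source): the hardware
 * reports RSSI relative to the noise floor, so with a noise floor of
 * -95 dBm and rs_rssi = 40 the signal reported above works out to
 * -55 dBm.
 */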
/**
 * ath5k_receive_frame_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
{
	sc->stats.rx_all_count++;
	sc->stats.rx_bytes_count += rs->rs_datalen;

	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			sc->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			sc->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			sc->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error.  If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it.  This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			sc->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			sc->stats.rxerr_mic++;
			return true;
		}

		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}

	if (unlikely(rs->rs_more)) {
		sc->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}
static void
ath5k_set_current_imask(struct ath5k_softc *sc)
{
	enum ath5k_int imask;
	unsigned long flags;

	spin_lock_irqsave(&sc->irqlock, flags);
	imask = sc->imask;
	if (sc->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (sc->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(sc->ah, imask);
	spin_unlock_irqrestore(&sc->irqlock, flags);
}
static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&sc->rxbuflock);
	if (list_empty(&sc->rxbuf)) {
		ATH5K_WARN(sc, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr)
			break;

		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(sc, "error in processing rx descriptor\n");
			sc->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(sc, &rs)) {
			next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(sc->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(sc, skb, &rs);

			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &sc->rxbuf);
	} while (ath5k_rxbuf_setup(sc, bf) == 0);
unlock:
	spin_unlock(&sc->rxbuflock);
	sc->rx_pending = false;
	ath5k_set_current_imask(sc);
}

/*************\
* TX Handling *
\*************/
void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	trace_ath5k_tx(sc, skb, txq);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	if (txq->txq_len >= txq->txq_max)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&sc->txbuflock, flags);
	if (list_empty(&sc->txbuf)) {
		ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&sc->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	sc->txbuf_len--;
	if (list_empty(&sc->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&sc->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
		bf->skb = NULL;
		spin_lock_irqsave(&sc->txbuflock, flags);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
		spin_unlock_irqrestore(&sc->txbuflock, flags);
		goto drop_packet;
	}
	return;

drop_packet:
	dev_kfree_skb_any(skb);
}
static void
ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;

	sc->stats.tx_all_count++;
	sc->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}

	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		sc->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			sc->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			sc->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			sc->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	 * Remove MAC header padding before giving the frame
	 * back to mac80211.
	 */
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		sc->stats.antenna_tx[ts->ts_antenna]++;
	else
		sc->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(sc, skb, txq, ts);
	ieee80211_tx_status(sc->hw, skb);
}
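/*
 * Illustrative example (not in the original source): if rate control
 * set up {54M x4, 24M x4} and the frame was ACKed on the second rate
 * after one retry, ts_final_idx = 1 and ts_final_retry = 1; the code
 * above then reports 4 tries at 54M and 1 + 1 (the successful
 * attempt) tries at 24M, with rates[2].idx = -1 terminating the list.
 */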
static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(sc,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(sc, skb, txq, &ts);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
			spin_lock(&sc->txbuflock);
			list_move_tail(&bf->list, &sc->txbuf);
			sc->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&sc->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(sc->hw, txq->qnum);
}
static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_softc *sc = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i)))
			ath5k_tx_processq(sc, &sc->txqs[i]);

	sc->tx_pending = false;
	ath5k_set_current_imask(sc);
}

/*****************\
* Beacon handling *
\*****************/
/*
 * Setup the beacon frame for transmit.
 */
static int
ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds;
	int ret = 0;
	u8 antenna;
	u32 flags;
	const int padsize = 0;

	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);

	if (dma_mapping_error(sc->dev, bf->skbaddr)) {
		ATH5K_ERR(sc, "beacon DMA mapping failed\n");
		return -EIO;
	}

	ds = bf->desc;
	antenna = ah->ah_tx_ant;

	flags = AR5K_TXDESC_NOACK;
	if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
	} else
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automatically, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = sc->bsent & 4 ? 2 : 1;


	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
			ieee80211_get_tx_rate(sc->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}
/*
 * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_vif *avf = (void *)vif->drv_priv;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	ath5k_txbuf_free_skb(sc, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(sc, avf->bbuf);
	if (ret)
		avf->bbuf->skb = NULL;
out:
	return ret;
}
/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;

	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out.  If
	 * not, don't try to post another: skip this
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) {
		sc->bmisscount++;
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", sc->bmisscount);
		if (sc->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
				"stuck beacon time (%u missed)\n",
				sc->bmisscount);
			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(sc->hw, &sc->reset_work);
		}
		return;
	}
	if (unlikely(sc->bmisscount != 0)) {
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"resume beacon xmit after %u misses\n",
			sc->bmisscount);
		sc->bmisscount = 0;
	}

	if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) ||
			sc->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
		int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
		vif = sc->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, sc->bintval, slot, vif);
	} else /* only one interface */
		vif = sc->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;
	if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
			sc->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
		return;
	}

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, sc->bhalq))) {
		ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (sc->opmode == NL80211_IFTYPE_AP ||
			sc->opmode == NL80211_IFTYPE_MESH_POINT)
		ath5k_beacon_update(sc->hw, vif);

	trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);

	ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, sc->bhalq);
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		sc->bhalq, (unsigned long long)bf->daddr, bf->desc);

	skb = ieee80211_get_buffered_bc(sc->hw, vif);
	while (skb) {
		ath5k_tx_queue(sc->hw, skb, sc->cabq);
		skb = ieee80211_get_buffered_bc(sc->hw, vif);
	}

	sc->bsent++;
}
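/*
 * Illustrative example (not in the original source, and assuming the
 * usual ATH_BCBUF of 4): with bintval = 100 TU, a SWBA arriving at
 * tsftu % 100 == 75 gives slot = (75 * 4) / 100 = 3, so
 * sc->bslot[(3 + 1) % 4] = sc->bslot[0] transmits; each vif thus
 * beacons at its own fixed offset within the beacon interval.
 */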
/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @sc: struct ath5k_softc pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when a new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
void
ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
{
	struct ath5k_hw *ah = sc->ah;
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;

	intval = sc->bintval & AR5K_BEACON_PERIOD;
	if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(sc, "intval %u is too low, min 15\n",
				   intval);
	}
	if (WARN_ON(!intval))
		return;

	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);

	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);

#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
	 * configuration we need to make sure it is bigger than that. */

	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
		 * beacon received, SW merge happened but HW TSF not yet updated.
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STA.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}
#undef FUDGE

	sc->nexttbtt = nexttbtt;

	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon(ah, nexttbtt, intval);

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}
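/*
 * Illustrative example (not in the original source): with intval =
 * 100 TU, hw_tu = 12345 and FUDGE = 13 (AR5K_TUNE_SW_BEACON_RESP + 3,
 * assuming the default response time of 10), the bc_tsf == -1 case
 * computes nexttbtt = roundup(12358, 100) = 12400, i.e. the next TBTT
 * on the beacon-period grid that is safely ahead of the current TSF.
 */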
/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @sc: struct ath5k_softc pointer we are operating on
 *
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
 * interrupts to detect TSF updates only.
 */
void
ath5k_beacon_config(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	unsigned long flags;

	spin_lock_irqsave(&sc->block, flags);
	sc->bmisscount = 0;
	sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

	if (sc->enable_beacon) {
		/*
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
		 * only once here.
		 * We use the SWBA interrupt only to keep track of the beacon
		 * timers in order to detect automatic TSF updates.
		 */
		ath5k_beaconq_config(sc);

		sc->imask |= AR5K_INT_SWBA;

		if (sc->opmode == NL80211_IFTYPE_ADHOC) {
			if (ath5k_hw_hasveol(ah))
				ath5k_beacon_send(sc);
		} else
			ath5k_beacon_update_timers(sc, -1);
	} else {
		ath5k_hw_stop_beacon_queue(sc->ah, sc->bhalq);
	}

	ath5k_hw_set_imr(ah, sc->imask);
	mmiowb();
	spin_unlock_irqrestore(&sc->block, flags);
}
static void ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_softc *sc = (struct ath5k_softc *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect whether
	 * automatic TSF updates happened.
	 */
	if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL supported */
		u64 tsf = ath5k_hw_get_tsf64(sc->ah);
		sc->nexttbtt += sc->bintval;
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
				sc->nexttbtt,
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
		spin_lock(&sc->block);
		ath5k_beacon_send(sc);
		spin_unlock(&sc->block);
	}
}
/********************\
* Interrupt handling *
\********************/
static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
		/* run ANI only when full calibration is not active */
		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ah_sc->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		tasklet_schedule(&ah->ah_sc->calib);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}
static void
ath5k_schedule_rx(struct ath5k_softc *sc)
{
	sc->rx_pending = true;
	tasklet_schedule(&sc->rxtq);
}

static void
ath5k_schedule_tx(struct ath5k_softc *sc)
{
	sc->tx_pending = true;
	tasklet_schedule(&sc->txtq);
}
irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_softc *sc = dev_id;
	struct ath5k_hw *ah = sc->ah;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
		((ath5k_get_bus_type(ah) != ATH_AHB) &&
				!ath5k_hw_is_intr_pending(ah))))
		return IRQ_NONE;

	do {
		ath5k_hw_get_isr(ah, &status);		/* NB: clears IRQ too */
		ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, sc->imask);
		if (unlikely(status & AR5K_INT_FATAL)) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				  "fatal int, resetting\n");
			ieee80211_queue_work(sc->hw, &sc->reset_work);
		} else if (unlikely(status & AR5K_INT_RXORN)) {
			/*
			 * Receive buffers are full. Either the bus is busy or
			 * the CPU is not fast enough to process all received
			 * frames.
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset -
			 * this guess is copied from the HAL.
			 */
			sc->stats.rxorn_intr++;
			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
					  "rx overrun, resetting\n");
				ieee80211_queue_work(sc->hw, &sc->reset_work);
			} else
				ath5k_schedule_rx(sc);
		} else {
			if (status & AR5K_INT_SWBA)
				tasklet_hi_schedule(&sc->beacontq);

			if (status & AR5K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work at
				 *     least on older hardware revs.
				 */
				sc->stats.rxeol_intr++;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				ath5k_hw_update_tx_triglevel(ah, true);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_schedule_rx(sc);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
					| AR5K_INT_TXERR | AR5K_INT_TXEOL))
				ath5k_schedule_tx(sc);
			if (status & AR5K_INT_BMISS) {
				/* TODO */
			}
			if (status & AR5K_INT_MIB) {
				sc->stats.mib_intr++;
				ath5k_hw_update_mib_counters(ah);
				ath5k_ani_mib_intr(ah);
			}
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&sc->rf_kill.toggleq);

		}

		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);

	if (sc->rx_pending || sc->tx_pending)
		ath5k_set_current_imask(sc);

	if (unlikely(!counter))
		ATH5K_WARN(sc, "too many interrupts, giving up for now\n");

	ath5k_intr_calibration_poll(ah);

	return IRQ_HANDLED;
}
/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_tasklet_calibrate(unsigned long data)
{
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;

	/* Only full calibration for now */
	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

	ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(sc->curchan->center_freq),
		sc->curchan->hw_value);

	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
		ieee80211_queue_work(sc->hw, &sc->reset_work);
	}
	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
		ATH5K_ERR(sc, "calibration of channel %u failed\n",
			ieee80211_frequency_to_channel(
				sc->curchan->center_freq));

	/* Noise floor calibration interrupts rx/tx path while I/Q calibration
	 * doesn't.
	 * TODO: We should stop TX here, so that it doesn't interfere.
	 * Note that stopping the queues is not enough to stop TX! */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
		ah->ah_cal_next_nf = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
		ath5k_hw_update_noise_floor(ah);
	}

	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
}
static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;

	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}
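/*
 * TX watchdog: every poll interval each active queue that still holds
 * frames is marked; a queue found already marked on the next pass has
 * not completed a single frame in the whole interval and is considered
 * stuck, which triggers a reset.
 */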
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

	mutex_lock(&sc->lock);

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
			if (txq->txq_len > 1) {
				if (txq->txq_poll_mark) {
					ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
					txq->txq_stuck++;
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
			  "TX queues stuck, resetting\n");
		ath5k_reset(sc, NULL, true);
	}

	mutex_unlock(&sc->lock);

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}
/*************************\
* Initialization routines *
\*************************/
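/*
 * Rough bring-up order: the bus glue calls ath5k_init_softc(), which
 * sets up the ieee80211_hw, requests the IRQ, attaches the hardware
 * via ath5k_hw_init() and finally finishes driver setup in ath5k_init()
 * before interrupts are accepted.
 */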
int __devinit
ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
	SET_IEEE80211_DEV(hw, sc->dev);
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	__set_bit(ATH_STAT_INVALID, sc->status);

	sc->opmode = NL80211_IFTYPE_STATION;
	sc->bintval = 1000;
	mutex_init(&sc->lock);
	spin_lock_init(&sc->rxbuflock);
	spin_lock_init(&sc->txbuflock);
	spin_lock_init(&sc->block);
	spin_lock_init(&sc->irqlock);

	/* Setup interrupt handler */
	ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
	if (ret) {
		ATH5K_ERR(sc, "request_irq failed\n");
		goto err;
	}

	/* If we passed the test, malloc an ath5k_hw struct */
	sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
	if (!sc->ah) {
		ret = -ENOMEM;
		ATH5K_ERR(sc, "out of memory\n");
		goto err_irq;
	}

	sc->ah->ah_sc = sc;
	sc->ah->ah_iobase = sc->iobase;
	common = ath5k_hw_common(sc->ah);
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = sc->ah;
	common->hw = hw;
	common->priv = sc;
	common->clockrate = 40;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
	ret = ath5k_hw_init(sc);
	if (ret)
		goto err_free_ah;

	/* set up multi-rate retry capabilities */
	if (sc->ah->ah_version == AR5K_AR5212) {
		hw->max_rates = 4;
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

	ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
					sc->ah->ah_mac_srev,
					sc->ah->ah_phy_revision);

	if (!sc->ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (sc->ah->ah_radio_5ghz_revision &&
			!sc->ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
				sc->ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!test_bit(AR5K_MODE_11B,
				sc->ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				ATH5K_INFO(sc, "RF%s multiband radio found"
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (sc->ah->ah_radio_5ghz_revision &&
				sc->ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					sc->ah->ah_radio_5ghz_revision),
					sc->ah->ah_radio_5ghz_revision);
			ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					sc->ah->ah_radio_2ghz_revision),
					sc->ah->ah_radio_2ghz_revision);
		}
	}

	ath5k_debug_init_device(sc);

	/* ready to process interrupts */
	__clear_bit(ATH_STAT_INVALID, sc->status);

	return 0;
err_ah:
	ath5k_hw_deinit(sc->ah);
err_free_ah:
	kfree(sc->ah);
err_irq:
	free_irq(sc->irq, sc);
err:
	return ret;
}
static int
ath5k_stop_locked(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, sc->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(sc->hw);

	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
		ath5k_led_off(sc);
		ath5k_hw_set_imr(ah, 0);
		synchronize_irq(sc->irq);
		ath5k_rx_stop(sc);
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(sc);
		ath5k_hw_phy_disable(ah);
	}

	return 0;
}
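/*
 * Bring the hardware up: reset it into a known state, start RX and
 * program the initial interrupt mask. Takes sc->lock itself;
 * presumably invoked from mac80211's start() callback via the ops glue.
 */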
int ath5k_init_hw(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;

	mutex_lock(&sc->lock);

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);

	/*
	 * Stop anything previously setup. This is safe
	 * whether this is the first time through or not.
	 */
	ath5k_stop_locked(sc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->curchan = sc->hw->conf.channel;
	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;

	ret = ath5k_reset(sc, NULL, false);
	if (ret)
		goto done;

	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;

	for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
		sc->bslot[i] = NULL;

	ret = 0;
done:
	mmiowb();
	mutex_unlock(&sc->lock);

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

	return ret;
}
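/*
 * Kill all pending tasklets; used on stop and around a reset so that no
 * tasklet touches the hardware while it is being reconfigured.
 */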
static void ath5k_stop_tasklets(struct ath5k_softc *sc)
{
	sc->rx_pending = false;
	sc->tx_pending = false;
	tasklet_kill(&sc->rxtq);
	tasklet_kill(&sc->txtq);
	tasklet_kill(&sc->calib);
	tasklet_kill(&sc->beacontq);
	tasklet_kill(&sc->ani_tasklet);
}
/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
int ath5k_stop_hw(struct ath5k_softc *sc)
{
	int ret;

	mutex_lock(&sc->lock);
	ret = ath5k_stop_locked(sc);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system). This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results in weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
		ret = ath5k_hw_on_hold(sc->ah);

		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
	}

	mmiowb();
	mutex_unlock(&sc->lock);

	ath5k_stop_tasklets(sc);

	cancel_delayed_work_sync(&sc->tx_complete_work);

	ath5k_rfkill_hw_stop(sc->ah);

	return ret;
}
/*
 * Reset the hardware. If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * This should be called with sc->lock.
 */
static int
ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
							bool skip_pcu)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;
	bool fast;

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");

	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(sc->irq);
	ath5k_stop_tasklets(sc);

	/* Save ani mode and disable ANI during
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ah_sc->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(sc);
	if (chan)
		sc->curchan = chan;

	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;

	ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, fast, skip_pcu);
	if (ret) {
		ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
		goto err;
	}

	ret = ath5k_rx_start(sc);
	if (ret) {
		ATH5K_ERR(sc, "can't start recv logic\n");
		goto err;
	}

	ath5k_ani_init(ah, ani_mode);

	ah->ah_cal_next_full = jiffies;
	ah->ah_cal_next_ani = jiffies;
	ah->ah_cal_next_nf = jiffies;
	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);

	/* clear survey data and cycle counters */
	memset(&sc->survey, 0, sizeof(sc->survey));
	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
	spin_unlock_bh(&common->cc_lock);

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
/*	ath5k_chan_change(sc, c); */

	ath5k_beacon_config(sc);
	/* intrs are enabled by ath5k_beacon_config */

	ieee80211_wake_queues(sc->hw);

	return 0;
err:
	return ret;
}
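/*
 * Deferred reset worker: ath5k_reset() sleeps (callers hold sc->lock),
 * so interrupt and tasklet context queue sc->reset_work instead of
 * resetting the chip directly.
 */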
static void ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
		reset_work);

	mutex_lock(&sc->lock);
	ath5k_reset(sc, NULL, true);
	mutex_unlock(&sc->lock);
}
static int __devinit
ath5k_init(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
	struct ath5k_txq *txq;
	u8 mac[ETH_ALEN] = {};
	int ret;

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor. MACs that don't have support will
	 * return false w/o doing anything. MACs that do
	 * support it will return true w/o doing anything.
	 */
	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
	if (ret < 0)
		goto err;
	if (ret > 0)
		__set_bit(ATH_STAT_MRRETRY, sc->status);

	/*
	 * Collect the channel list. The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(sc, "can't get channels\n");
		goto err;
	}

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(sc);
	if (ret) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
		ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
		goto err_desc;
	}
	sc->bhalq = ret;
	sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(sc->cabq)) {
		ATH5K_ERR(sc, "can't setup cab queue\n");
		ret = PTR_ERR(sc->cabq);
		goto err_bhal;
	}

	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		 * directly use the mac80211 queue number without any mapping */
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}

	tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
	tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
	tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
	tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
	tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);

	INIT_WORK(&sc->reset_work, ath5k_reset_work);
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);

	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
	if (ret) {
		ATH5K_ERR(sc, "unable to read address from EEPROM\n");
		goto err_queues;
	}

	SET_IEEE80211_PERM_ADDR(hw, mac);
	memcpy(&sc->lladdr, mac, ETH_ALEN);
	/* All MAC address bits matter for ACKs */
	ath5k_update_bssid_mask_and_opmode(sc, NULL);

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
		ATH5K_ERR(sc, "can't initialize regulatory system\n");
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		ATH5K_ERR(sc, "can't register ieee80211 hw\n");
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

	ath5k_init_leds(sc);
	ath5k_sysfs_register(sc);

	return 0;
err_queues:
	ath5k_txq_release(sc);
err_bhal:
	ath5k_hw_release_tx_queue(ah, sc->bhalq);
err_desc:
	ath5k_desc_free(sc);
err:
	return ret;
}
void
ath5k_deinit_softc(struct ath5k_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ieee80211_unregister_hw(hw);
	ath5k_desc_free(sc);
	ath5k_txq_release(sc);
	ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
	ath5k_unregister_leds(sc);

	ath5k_sysfs_unregister(sc);
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
	ath5k_hw_deinit(sc->ah);
	kfree(sc->ah);
	free_irq(sc->irq, sc);
}
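/*
 * Check whether any active interface is currently associated; only the
 * any_assoc result of the iteration is of interest here, the remaining
 * iter_data fields are initialized so that ath5k_vif_iter() leaves the
 * hardware address state untouched.
 */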
bool
ath5k_any_vif_assoc(struct ath5k_softc *sc)
{
	struct ath5k_vif_iter_data iter_data;
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
						   &iter_data);
	return iter_data.any_assoc;
}
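/*
 * Enable/disable reception of beacon frames by toggling the
 * corresponding bit in the hardware RX filter; the resulting filter
 * word is cached in sc->filter_flags so later filter updates start
 * from the current state.
 */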
void
ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;

	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
	sc->filter_flags = rfilt;
}