/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #include <linux/slab.h>
21 static char *dev_info = "ath9k";
23 MODULE_AUTHOR("Atheros Communications");
24 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26 MODULE_LICENSE("Dual BSD/GPL");
28 static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
29 module_param_named(debug, ath9k_debug, uint, 0);
30 MODULE_PARM_DESC(debug, "Debugging mask");
32 int modparam_nohwcrypt;
33 module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
34 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
37 module_param_named(blink, led_blink, int, 0444);
38 MODULE_PARM_DESC(blink, "Enable LED blink on activity");
40 static int ath9k_btcoex_enable;
41 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
42 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
59 /* Some 2 GHz radios are actually tunable on 2312-2732
60 * on 5 MHz steps, we support the channels which we know
61 * we have calibration data for all cards though to make
63 static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
64 CHAN2G(2412, 0), /* Channel 1 */
65 CHAN2G(2417, 1), /* Channel 2 */
66 CHAN2G(2422, 2), /* Channel 3 */
67 CHAN2G(2427, 3), /* Channel 4 */
68 CHAN2G(2432, 4), /* Channel 5 */
69 CHAN2G(2437, 5), /* Channel 6 */
70 CHAN2G(2442, 6), /* Channel 7 */
71 CHAN2G(2447, 7), /* Channel 8 */
72 CHAN2G(2452, 8), /* Channel 9 */
73 CHAN2G(2457, 9), /* Channel 10 */
74 CHAN2G(2462, 10), /* Channel 11 */
75 CHAN2G(2467, 11), /* Channel 12 */
76 CHAN2G(2472, 12), /* Channel 13 */
77 CHAN2G(2484, 13), /* Channel 14 */
80 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
81 * on 5 MHz steps, we support the channels which we know
82 * we have calibration data for all cards though to make
84 static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
85 /* _We_ call this UNII 1 */
86 CHAN5G(5180, 14), /* Channel 36 */
87 CHAN5G(5200, 15), /* Channel 40 */
88 CHAN5G(5220, 16), /* Channel 44 */
89 CHAN5G(5240, 17), /* Channel 48 */
90 /* _We_ call this UNII 2 */
91 CHAN5G(5260, 18), /* Channel 52 */
92 CHAN5G(5280, 19), /* Channel 56 */
93 CHAN5G(5300, 20), /* Channel 60 */
94 CHAN5G(5320, 21), /* Channel 64 */
95 /* _We_ call this "Middle band" */
96 CHAN5G(5500, 22), /* Channel 100 */
97 CHAN5G(5520, 23), /* Channel 104 */
98 CHAN5G(5540, 24), /* Channel 108 */
99 CHAN5G(5560, 25), /* Channel 112 */
100 CHAN5G(5580, 26), /* Channel 116 */
101 CHAN5G(5600, 27), /* Channel 120 */
102 CHAN5G(5620, 28), /* Channel 124 */
103 CHAN5G(5640, 29), /* Channel 128 */
104 CHAN5G(5660, 30), /* Channel 132 */
105 CHAN5G(5680, 31), /* Channel 136 */
106 CHAN5G(5700, 32), /* Channel 140 */
107 /* _We_ call this UNII 3 */
108 CHAN5G(5745, 33), /* Channel 149 */
109 CHAN5G(5765, 34), /* Channel 153 */
110 CHAN5G(5785, 35), /* Channel 157 */
111 CHAN5G(5805, 36), /* Channel 161 */
112 CHAN5G(5825, 37), /* Channel 165 */
115 /* Atheros hardware rate code addition for short premble */
116 #define SHPCHECK(__hw_rate, __flags) \
117 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
119 #define RATE(_bitrate, _hw_rate, _flags) { \
120 .bitrate = (_bitrate), \
122 .hw_value = (_hw_rate), \
123 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
126 static struct ieee80211_rate ath9k_legacy_rates[] = {
128 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
129 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
130 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Read and write, they both share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is required
 * as the FIFO on these devices can only accept sanely 2 requests.
 */
149 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
151 struct ath_hw *ah = (struct ath_hw *) hw_priv;
152 struct ath_common *common = ath9k_hw_common(ah);
153 struct ath_softc *sc = (struct ath_softc *) common->priv;
155 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
157 spin_lock_irqsave(&sc->sc_serial_rw, flags);
158 iowrite32(val, sc->mem + reg_offset);
159 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
161 iowrite32(val, sc->mem + reg_offset);
164 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
166 struct ath_hw *ah = (struct ath_hw *) hw_priv;
167 struct ath_common *common = ath9k_hw_common(ah);
168 struct ath_softc *sc = (struct ath_softc *) common->priv;
171 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
173 spin_lock_irqsave(&sc->sc_serial_rw, flags);
174 val = ioread32(sc->mem + reg_offset);
175 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
177 val = ioread32(sc->mem + reg_offset);
181 static const struct ath_ops ath9k_common_ops = {
182 .read = ath9k_ioread32,
183 .write = ath9k_iowrite32,
/**************************/
/*     Initialization     */
/**************************/
190 static void setup_ht_cap(struct ath_softc *sc,
191 struct ieee80211_sta_ht_cap *ht_info)
193 struct ath_hw *ah = sc->sc_ah;
194 struct ath_common *common = ath9k_hw_common(ah);
195 u8 tx_streams, rx_streams;
198 ht_info->ht_supported = true;
199 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
200 IEEE80211_HT_CAP_SM_PS |
201 IEEE80211_HT_CAP_SGI_40 |
202 IEEE80211_HT_CAP_DSSSCCK40;
204 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
205 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
207 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
208 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
210 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
211 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
213 if (AR_SREV_9300_20_OR_LATER(ah))
218 if (AR_SREV_9280_20_OR_LATER(ah)) {
219 if (max_streams >= 2)
220 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
221 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
224 /* set up supported mcs set */
225 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
226 tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
227 rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
229 ath_print(common, ATH_DBG_CONFIG,
230 "TX streams %d, RX streams: %d\n",
231 tx_streams, rx_streams);
233 if (tx_streams != rx_streams) {
234 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
235 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
236 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
239 for (i = 0; i < rx_streams; i++)
240 ht_info->mcs.rx_mask[i] = 0xff;
242 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
245 static int ath9k_reg_notifier(struct wiphy *wiphy,
246 struct regulatory_request *request)
248 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
249 struct ath_wiphy *aphy = hw->priv;
250 struct ath_softc *sc = aphy->sc;
251 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
253 return ath_reg_notifier_apply(wiphy, request, reg);
257 * This function will allocate both the DMA descriptor structure, and the
258 * buffers it contains. These are used to contain the descriptors used
261 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
262 struct list_head *head, const char *name,
263 int nbuf, int ndesc, bool is_tx)
265 #define DS2PHYS(_dd, _ds) \
266 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
267 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
268 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
269 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
272 int i, bsize, error, desc_len;
274 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
277 INIT_LIST_HEAD(head);
280 desc_len = sc->sc_ah->caps.tx_desc_len;
282 desc_len = sizeof(struct ath_desc);
284 /* ath_desc must be a multiple of DWORDs */
285 if ((desc_len % 4) != 0) {
286 ath_err(common, "ath_desc not DWORD aligned\n");
287 BUG_ON((desc_len % 4) != 0);
292 dd->dd_desc_len = desc_len * nbuf * ndesc;
295 * Need additional DMA memory because we can't use
296 * descriptors that cross the 4K page boundary. Assume
297 * one skipped descriptor per 4K page.
299 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
301 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
304 while (ndesc_skipped) {
305 dma_len = ndesc_skipped * desc_len;
306 dd->dd_desc_len += dma_len;
308 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
312 /* allocate descriptors */
313 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
314 &dd->dd_desc_paddr, GFP_KERNEL);
315 if (dd->dd_desc == NULL) {
319 ds = (u8 *) dd->dd_desc;
320 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
321 name, ds, (u32) dd->dd_desc_len,
322 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
324 /* allocate buffers */
325 bsize = sizeof(struct ath_buf) * nbuf;
326 bf = kzalloc(bsize, GFP_KERNEL);
333 for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
335 bf->bf_daddr = DS2PHYS(dd, ds);
337 if (!(sc->sc_ah->caps.hw_caps &
338 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
340 * Skip descriptor addresses which can cause 4KB
341 * boundary crossing (addr + length) with a 32 dword
344 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
345 BUG_ON((caddr_t) bf->bf_desc >=
346 ((caddr_t) dd->dd_desc +
349 ds += (desc_len * ndesc);
351 bf->bf_daddr = DS2PHYS(dd, ds);
354 list_add_tail(&bf->list, head);
358 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
361 memset(dd, 0, sizeof(*dd));
363 #undef ATH_DESC_4KB_BOUND_CHECK
364 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
368 static void ath9k_init_crypto(struct ath_softc *sc)
370 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
373 /* Get the hardware key cache size. */
374 common->keymax = sc->sc_ah->caps.keycache_size;
375 if (common->keymax > ATH_KEYMAX) {
376 ath_print(common, ATH_DBG_ANY,
377 "Warning, using only %u entries in %u key cache\n",
378 ATH_KEYMAX, common->keymax);
379 common->keymax = ATH_KEYMAX;
383 * Reset the key cache since some parts do not
384 * reset the contents on initial power up.
386 for (i = 0; i < common->keymax; i++)
387 ath_hw_keyreset(common, (u16) i);
390 * Check whether the separate key cache entries
391 * are required to handle both tx+rx MIC keys.
392 * With split mic keys the number of stations is limited
393 * to 27 otherwise 59.
395 if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
396 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
399 static int ath9k_init_btcoex(struct ath_softc *sc)
404 switch (sc->sc_ah->btcoex_hw.scheme) {
405 case ATH_BTCOEX_CFG_NONE:
407 case ATH_BTCOEX_CFG_2WIRE:
408 ath9k_hw_btcoex_init_2wire(sc->sc_ah);
410 case ATH_BTCOEX_CFG_3WIRE:
411 ath9k_hw_btcoex_init_3wire(sc->sc_ah);
412 r = ath_init_btcoex_timer(sc);
415 txq = sc->tx.txq_map[WME_AC_BE];
416 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
417 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
427 static int ath9k_init_queues(struct ath_softc *sc)
431 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
432 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
434 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
437 for (i = 0; i < WME_NUM_AC; i++)
438 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
443 static int ath9k_init_channels_rates(struct ath_softc *sc)
447 BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
448 ARRAY_SIZE(ath9k_5ghz_chantable) !=
451 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
452 channels = kmemdup(ath9k_2ghz_chantable,
453 sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
457 sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
458 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
459 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
460 ARRAY_SIZE(ath9k_2ghz_chantable);
461 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
462 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
463 ARRAY_SIZE(ath9k_legacy_rates);
466 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
467 channels = kmemdup(ath9k_5ghz_chantable,
468 sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
470 if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
471 kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
475 sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
476 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
477 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
478 ARRAY_SIZE(ath9k_5ghz_chantable);
479 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
480 ath9k_legacy_rates + 4;
481 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
482 ARRAY_SIZE(ath9k_legacy_rates) - 4;
487 static void ath9k_init_misc(struct ath_softc *sc)
489 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
492 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
494 sc->config.txpowlimit = ATH_TXPOWER_MAX;
496 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
497 sc->sc_flags |= SC_OP_TXAGGR;
498 sc->sc_flags |= SC_OP_RXAGGR;
501 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
502 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
504 ath9k_hw_set_diversity(sc->sc_ah, true);
505 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
507 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
509 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
511 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
512 sc->beacon.bslot[i] = NULL;
513 sc->beacon.bslot_aphy[i] = NULL;
516 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
517 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
520 static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
521 const struct ath_bus_ops *bus_ops)
523 struct ath_hw *ah = NULL;
524 struct ath_common *common;
528 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
532 ah->hw_version.devid = devid;
533 ah->hw_version.subsysid = subsysid;
536 if (!sc->dev->platform_data)
537 ah->ah_flags |= AH_USE_EEPROM;
539 common = ath9k_hw_common(ah);
540 common->ops = &ath9k_common_ops;
541 common->bus_ops = bus_ops;
545 common->debug_mask = ath9k_debug;
546 common->btcoex_enabled = ath9k_btcoex_enable == 1;
547 spin_lock_init(&common->cc_lock);
549 spin_lock_init(&sc->wiphy_lock);
550 spin_lock_init(&sc->sc_serial_rw);
551 spin_lock_init(&sc->sc_pm_lock);
552 mutex_init(&sc->mutex);
553 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
554 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
558 * Cache line size is used to size and align various
559 * structures used to communicate with the hardware.
561 ath_read_cachesize(common, &csz);
562 common->cachelsz = csz << 2; /* convert to bytes */
564 /* Initializes the hardware for all supported chipsets */
565 ret = ath9k_hw_init(ah);
569 ret = ath9k_init_queues(sc);
573 ret = ath9k_init_btcoex(sc);
577 ret = ath9k_init_channels_rates(sc);
581 ath9k_init_crypto(sc);
587 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
588 if (ATH_TXQ_SETUP(sc, i))
589 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
593 tasklet_kill(&sc->intr_tq);
594 tasklet_kill(&sc->bcon_tasklet);
602 static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
604 struct ieee80211_supported_band *sband;
605 struct ieee80211_channel *chan;
606 struct ath_hw *ah = sc->sc_ah;
607 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
610 sband = &sc->sbands[band];
611 for (i = 0; i < sband->n_channels; i++) {
612 chan = &sband->channels[i];
613 ah->curchan = &ah->channels[chan->hw_value];
614 ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
615 ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
616 chan->max_power = reg->max_power_level / 2;
620 static void ath9k_init_txpower_limits(struct ath_softc *sc)
622 struct ath_hw *ah = sc->sc_ah;
623 struct ath9k_channel *curchan = ah->curchan;
625 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
626 ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
627 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
628 ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
630 ah->curchan = curchan;
633 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
635 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
637 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
638 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
639 IEEE80211_HW_SIGNAL_DBM |
640 IEEE80211_HW_SUPPORTS_PS |
641 IEEE80211_HW_PS_NULLFUNC_STACK |
642 IEEE80211_HW_SPECTRUM_MGMT |
643 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
645 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
646 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
648 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
649 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
651 hw->wiphy->interface_modes =
652 BIT(NL80211_IFTYPE_P2P_GO) |
653 BIT(NL80211_IFTYPE_P2P_CLIENT) |
654 BIT(NL80211_IFTYPE_AP) |
655 BIT(NL80211_IFTYPE_WDS) |
656 BIT(NL80211_IFTYPE_STATION) |
657 BIT(NL80211_IFTYPE_ADHOC) |
658 BIT(NL80211_IFTYPE_MESH_POINT);
660 if (AR_SREV_5416(sc->sc_ah))
661 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
665 hw->channel_change_time = 5000;
666 hw->max_listen_interval = 10;
667 hw->max_rate_tries = 10;
668 hw->sta_data_size = sizeof(struct ath_node);
669 hw->vif_data_size = sizeof(struct ath_vif);
671 #ifdef CONFIG_ATH9K_RATE_CONTROL
672 hw->rate_control_algorithm = "ath9k_rate_control";
675 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
676 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
677 &sc->sbands[IEEE80211_BAND_2GHZ];
678 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
679 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
680 &sc->sbands[IEEE80211_BAND_5GHZ];
682 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
683 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
684 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
685 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
686 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
689 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
692 int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
693 const struct ath_bus_ops *bus_ops)
695 struct ieee80211_hw *hw = sc->hw;
696 struct ath_wiphy *aphy = hw->priv;
697 struct ath_common *common;
700 struct ath_regulatory *reg;
702 /* Bring up device */
703 error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
708 common = ath9k_hw_common(ah);
709 ath9k_set_hw_capab(sc, hw);
711 /* Initialize regulatory */
712 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
717 reg = &common->regulatory;
720 error = ath_tx_init(sc, ATH_TXBUF);
725 error = ath_rx_init(sc, ATH_RXBUF);
729 ath9k_init_txpower_limits(sc);
731 /* Register with mac80211 */
732 error = ieee80211_register_hw(hw);
736 error = ath9k_init_debug(ah);
738 ath_err(common, "Unable to create debugfs files\n");
742 /* Handle world regulatory */
743 if (!ath_is_world_regd(reg)) {
744 error = regulatory_hint(hw->wiphy, reg->alpha2);
749 INIT_WORK(&sc->hw_check_work, ath_hw_check);
750 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
751 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
752 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
753 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
754 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
757 ath_start_rfkill_poll(sc);
759 pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
760 PM_QOS_DEFAULT_VALUE);
765 ieee80211_unregister_hw(hw);
773 ath9k_deinit_softc(sc);
778 /*****************************/
779 /* De-Initialization */
780 /*****************************/
782 static void ath9k_deinit_softc(struct ath_softc *sc)
786 if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
787 kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
789 if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
790 kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
792 if ((sc->btcoex.no_stomp_timer) &&
793 sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
794 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
796 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
797 if (ATH_TXQ_SETUP(sc, i))
798 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
800 ath9k_hw_deinit(sc->sc_ah);
802 tasklet_kill(&sc->intr_tq);
803 tasklet_kill(&sc->bcon_tasklet);
809 void ath9k_deinit_device(struct ath_softc *sc)
811 struct ieee80211_hw *hw = sc->hw;
816 wiphy_rfkill_stop_polling(sc->hw->wiphy);
819 for (i = 0; i < sc->num_sec_wiphy; i++) {
820 struct ath_wiphy *aphy = sc->sec_wiphy[i];
823 sc->sec_wiphy[i] = NULL;
824 ieee80211_unregister_hw(aphy->hw);
825 ieee80211_free_hw(aphy->hw);
828 ieee80211_unregister_hw(hw);
829 pm_qos_remove_request(&sc->pm_qos_req);
832 ath9k_deinit_softc(sc);
833 kfree(sc->sec_wiphy);
836 void ath_descdma_cleanup(struct ath_softc *sc,
837 struct ath_descdma *dd,
838 struct list_head *head)
840 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
843 INIT_LIST_HEAD(head);
844 kfree(dd->dd_bufptr);
845 memset(dd, 0, sizeof(*dd));
/************************/
/*     Module Hooks     */
/************************/
852 static int __init ath9k_init(void)
856 /* Register rate control algorithm */
857 error = ath_rate_control_register();
860 "ath9k: Unable to register rate control "
866 error = ath_pci_init();
869 "ath9k: No PCI devices found, driver not installed.\n");
871 goto err_rate_unregister;
874 error = ath_ahb_init();
886 ath_rate_control_unregister();
890 module_init(ath9k_init);
892 static void __exit ath9k_exit(void)
896 ath_rate_control_unregister();
897 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
899 module_exit(ath9k_exit);