2 * Copyright (c) 2008-2009 Atheros Communications Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
/*
 * Iterator state used to collect the MAC addresses of active interfaces:
 * a heap-grown flat array of ETH_ALEN-byte addresses plus a count.
 * NOTE(review): the struct members are not visible in this excerpt;
 * inferred from usage in ath9k_vif_iter() -- confirm against full file.
 */
19 struct ath9k_vif_iter_data {
/*
 * Callback for ieee80211_iterate_active_interfaces_atomic(): append the
 * interface MAC address @mac to the flat address array carried in @data,
 * growing the buffer by ETH_ALEN bytes with krealloc().
 * NOTE(review): lines are elided in this excerpt (the krealloc() NULL
 * check and the count increment are not visible) -- confirm in full file.
 */
24 static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
26 struct ath9k_vif_iter_data *iter_data = data;
29 nbuf = krealloc(iter_data->addr, (iter_data->count + 1) * ETH_ALEN,
/* Copy the new MAC into the freshly grown tail slot. */
34 memcpy(nbuf + iter_data->count * ETH_ALEN, mac, ETH_ALEN);
35 iter_data->addr = nbuf;
/*
 * Compute and program the hardware BSSID mask.  The mask keeps a 1 in
 * every bit position that is identical across all active MAC addresses
 * (primary MAC plus every active interface on all virtual wiphys), so
 * the hardware will accept frames addressed to any of them.
 */
39 void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
41 struct ath_wiphy *aphy = hw->priv;
42 struct ath_softc *sc = aphy->sc;
43 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
44 struct ath9k_vif_iter_data iter_data;
49 * Add primary MAC address even if it is not in active use since it
50 * will be configured to the hardware as the starting point and the
51 * BSSID mask will need to be changed if another address is active.
/* NOTE(review): the kmalloc() NULL-check branch appears to be elided
 * from this excerpt -- confirm against the full file. */
53 iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC);
55 memcpy(iter_data.addr, common->macaddr, ETH_ALEN);
60 /* Get list of all active MAC addresses */
61 spin_lock_bh(&sc->wiphy_lock);
62 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
64 for (i = 0; i < sc->num_sec_wiphy; i++) {
65 if (sc->sec_wiphy[i] == NULL)
67 ieee80211_iterate_active_interfaces_atomic(
68 sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
70 spin_unlock_bh(&sc->wiphy_lock);
72 /* Generate an address mask to cover all active addresses */
73 memset(mask, 0, ETH_ALEN);
/* OR together the XOR of every address pair: a bit that differs between
 * any two active addresses ends up set in mask[]. */
74 for (i = 0; i < iter_data.count; i++) {
75 u8 *a1 = iter_data.addr + i * ETH_ALEN;
76 for (j = i + 1; j < iter_data.count; j++) {
77 u8 *a2 = iter_data.addr + j * ETH_ALEN;
78 mask[0] |= a1[0] ^ a2[0];
79 mask[1] |= a1[1] ^ a2[1];
80 mask[2] |= a1[2] ^ a2[2];
81 mask[3] |= a1[3] ^ a2[3];
82 mask[4] |= a1[4] ^ a2[4];
83 mask[5] |= a1[5] ^ a2[5];
87 kfree(iter_data.addr);
89 /* Invert the mask and configure hardware */
90 common->bssidmask[0] = ~mask[0];
91 common->bssidmask[1] = ~mask[1];
92 common->bssidmask[2] = ~mask[2];
93 common->bssidmask[3] = ~mask[3];
94 common->bssidmask[4] = ~mask[4];
95 common->bssidmask[5] = ~mask[5];
97 ath_hw_setbssidmask(common);
/*
 * Allocate and register a new secondary virtual wiphy.  Finds (or grows,
 * via krealloc under wiphy_lock) a free slot in sc->sec_wiphy, derives a
 * locally administered MAC address from the primary MAC and the slot
 * index, then registers the new hw with mac80211.
 * NOTE(review): several lines (error checks, aphy initialization, the
 * low-byte addr[5] XOR at elided line 149, return paths) are not visible
 * in this excerpt.
 */
100 int ath9k_wiphy_add(struct ath_softc *sc)
103 struct ath_wiphy *aphy;
104 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
105 struct ieee80211_hw *hw;
108 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
/* Reuse the first empty slot if one exists. */
112 spin_lock_bh(&sc->wiphy_lock);
113 for (i = 0; i < sc->num_sec_wiphy; i++) {
114 if (sc->sec_wiphy[i] == NULL)
118 if (i == sc->num_sec_wiphy) {
119 /* No empty slot available; increase array length */
120 struct ath_wiphy **n;
121 n = krealloc(sc->sec_wiphy,
122 (sc->num_sec_wiphy + 1) *
123 sizeof(struct ath_wiphy *),
/* Allocation failure path: drop the lock and free the new hw. */
126 spin_unlock_bh(&sc->wiphy_lock);
127 ieee80211_free_hw(hw);
135 SET_IEEE80211_DEV(hw, sc->dev);
140 sc->sec_wiphy[i] = aphy;
141 spin_unlock_bh(&sc->wiphy_lock);
143 memcpy(addr, common->macaddr, ETH_ALEN);
144 addr[0] |= 0x02; /* Locally managed address */
146 * XOR virtual wiphy index into the least significant bits to generate
147 * a different MAC address for each virtual wiphy.
150 addr[4] ^= (i & 0xff00) >> 8;
151 addr[3] ^= (i & 0xff0000) >> 16;
153 SET_IEEE80211_PERM_ADDR(hw, addr);
155 ath9k_set_hw_capab(sc, hw);
157 error = ieee80211_register_hw(hw);
160 /* Make sure wiphy scheduler is started (if enabled) */
161 ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
/*
 * Remove a secondary virtual wiphy: clear its slot in sc->sec_wiphy
 * under wiphy_lock, then unregister and free its mac80211 hw.  The lock
 * is dropped before calling into mac80211 (those calls can sleep).
 * NOTE(review): the return statements are elided from this excerpt.
 */
167 int ath9k_wiphy_del(struct ath_wiphy *aphy)
169 struct ath_softc *sc = aphy->sc;
172 spin_lock_bh(&sc->wiphy_lock);
173 for (i = 0; i < sc->num_sec_wiphy; i++) {
174 if (aphy == sc->sec_wiphy[i]) {
175 sc->sec_wiphy[i] = NULL;
176 spin_unlock_bh(&sc->wiphy_lock);
177 ieee80211_unregister_hw(aphy->hw);
178 ieee80211_free_hw(aphy->hw);
/* Not found: just release the lock. */
182 spin_unlock_bh(&sc->wiphy_lock);
/*
 * Build and transmit a 24-byte Null-function data frame to @bssid.
 * With ps non-zero the Power Management bit is set and the frame is
 * tagged ATH9K_INT_PAUSE, otherwise ATH9K_INT_UNPAUSE; TX status
 * reporting is requested so ath9k_tx_status() can advance the
 * pause/unpause state machine.  The skb is freed on TX failure.
 * NOTE(review): the skb NULL check after dev_alloc_skb() and the return
 * statements are elided from this excerpt.
 */
186 static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
187 struct ieee80211_vif *vif, const u8 *bssid,
190 struct ath_softc *sc = aphy->sc;
191 struct ath_tx_control txctl;
193 struct ieee80211_hdr *hdr;
195 struct ieee80211_tx_info *info;
197 skb = dev_alloc_skb(24);
200 hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
202 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
203 IEEE80211_FCTL_TODS);
/* ps != 0: tell the AP we are entering power save. */
205 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
206 hdr->frame_control = fc;
207 memcpy(hdr->addr1, bssid, ETH_ALEN);
208 memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
209 memcpy(hdr->addr3, bssid, ETH_ALEN);
/* Request TX status; try rate index 0 up to 4 times, then stop. */
211 info = IEEE80211_SKB_CB(skb);
212 memset(info, 0, sizeof(*info));
213 info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
214 info->control.vif = vif;
215 info->control.rates[0].idx = 0;
216 info->control.rates[0].count = 4;
217 info->control.rates[1].idx = -1;
/* Send on the voice (highest priority) hardware queue. */
219 memset(&txctl, 0, sizeof(struct ath_tx_control));
220 txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
221 txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE;
223 if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
228 dev_kfree_skb_any(skb);
/*
 * Return true if any wiphy (primary or secondary) is still in the
 * ATH_WIPHY_PAUSING state.  Caller must hold wiphy_lock.
 */
232 static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
235 if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
237 for (i = 0; i < sc->num_sec_wiphy; i++) {
238 if (sc->sec_wiphy[i] &&
239 sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
/* Locked wrapper around __ath9k_wiphy_pausing(). */
245 static bool ath9k_wiphy_pausing(struct ath_softc *sc)
248 spin_lock_bh(&sc->wiphy_lock);
249 ret = __ath9k_wiphy_pausing(sc);
250 spin_unlock_bh(&sc->wiphy_lock);
/*
 * Return true if any wiphy (primary or secondary) is in the
 * ATH_WIPHY_SCAN state.  Caller must hold wiphy_lock.
 */
254 static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
257 if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
259 for (i = 0; i < sc->num_sec_wiphy; i++) {
260 if (sc->sec_wiphy[i] &&
261 sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
/* Locked wrapper around __ath9k_wiphy_scanning(). */
267 bool ath9k_wiphy_scanning(struct ath_softc *sc)
270 spin_lock_bh(&sc->wiphy_lock);
271 ret = __ath9k_wiphy_scanning(sc);
272 spin_unlock_bh(&sc->wiphy_lock);
276 static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);

278 /* caller must hold wiphy_lock */
/*
 * Unpause @aphy only if it is tuned to the currently selected channel;
 * wiphys on other channels stay paused.
 */
279 static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
283 if (aphy->chan_idx != aphy->sc->chan_idx)
284 return; /* wiphy not on the selected channel */
285 __ath9k_wiphy_unpause(aphy);
/*
 * After a channel change: unpause every wiphy that is on the newly
 * selected channel (primary first, then all secondaries).
 * NOTE(review): __ath9k_wiphy_unpause_ch() presumably tolerates NULL
 * sec_wiphy[] slots -- its guard is not visible in this excerpt.
 */
288 static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
291 spin_lock_bh(&sc->wiphy_lock);
292 __ath9k_wiphy_unpause_ch(sc->pri_wiphy);
293 for (i = 0; i < sc->num_sec_wiphy; i++)
294 __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
295 spin_unlock_bh(&sc->wiphy_lock);
/*
 * Deferred channel-change work: once all pending wiphys have been
 * paused, retune the shared hardware to sc->chan_idx on behalf of
 * sc->next_wiphy and then unpause the wiphys on the new channel.
 * NOTE(review): the NULL check of aphy and the error-path details are
 * elided from this excerpt -- confirm against the full file.
 */
298 void ath9k_wiphy_chan_work(struct work_struct *work)
300 struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
301 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
302 struct ath_wiphy *aphy = sc->next_wiphy;
308 * All pending interfaces paused; ready to change
312 /* Change channels */
313 mutex_lock(&sc->mutex);
314 /* XXX: remove me eventually */
315 ath9k_update_ichannel(sc, aphy->hw,
316 &sc->sc_ah->channels[sc->chan_idx]);
318 /* sync hw configuration for hw code */
319 common->hw = aphy->hw;
321 ath_update_chainmask(sc, sc->chan_is_ht);
322 if (ath_set_channel(sc, aphy->hw,
323 &sc->sc_ah->channels[sc->chan_idx]) < 0) {
324 printk(KERN_DEBUG "ath9k: Failed to set channel for new "
326 mutex_unlock(&sc->mutex);
329 mutex_unlock(&sc->mutex);
/* Wake the wiphys that live on the newly selected channel. */
331 ath9k_wiphy_unpause_channel(sc);
335 * ath9k version of ieee80211_tx_status() for TX frames that are generated
336 * internally in the driver.
/*
 * For a pause nullfunc frame sent while this wiphy is PAUSING: mark the
 * wiphy PAUSED (even without an ACK from the AP) and, once no wiphy is
 * still pausing, queue chan_work to perform the pending channel change.
 * NOTE(review): the tail of the function (e.g. freeing the skb) is
 * elided from this excerpt.
 */
338 void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
340 struct ath_wiphy *aphy = hw->priv;
341 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
343 if ((tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE) &&
344 aphy->state == ATH_WIPHY_PAUSING) {
345 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
346 printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
347 "frame\n", wiphy_name(hw->wiphy));
349 * The AP did not reply; ignore this to allow us to
353 aphy->state = ATH_WIPHY_PAUSED;
354 if (!ath9k_wiphy_pausing(aphy->sc)) {
356 * Drop from tasklet to work to allow mutex for channel
/* Only the primary wiphy hw is used for queuing work. */
359 ieee80211_queue_work(aphy->sc->hw,
360 &aphy->sc->chan_work);
/*
 * Mark @aphy PAUSED immediately (no nullfunc handshake) and kick off the
 * pending channel-change work if no other wiphy is still pausing.
 */
367 static void ath9k_mark_paused(struct ath_wiphy *aphy)
369 struct ath_softc *sc = aphy->sc;
370 aphy->state = ATH_WIPHY_PAUSED;
371 if (!__ath9k_wiphy_pausing(sc))
372 ieee80211_queue_work(sc->hw, &sc->chan_work);
/*
 * Per-interface pause step.  Associated stations send a PS nullfunc to
 * the AP and wait for its TX status; unassociated stations and AP
 * interfaces are marked paused straight away (AP beaconing stops via
 * the aphy->state change).
 * NOTE(review): the switch statement, break statements and default case
 * are elided from this excerpt.
 */
375 static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
377 struct ath_wiphy *aphy = data;
378 struct ath_vif *avp = (void *) vif->drv_priv;
381 case NL80211_IFTYPE_STATION:
382 if (!vif->bss_conf.assoc) {
383 ath9k_mark_paused(aphy);
386 /* TODO: could avoid this if already in PS mode */
387 if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
388 printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
390 ath9k_mark_paused(aphy);
393 case NL80211_IFTYPE_AP:
394 /* Beacon transmission is paused by aphy->state change */
395 ath9k_mark_paused(aphy);
402 /* caller must hold wiphy_lock */
/*
 * Begin pausing @aphy: stop its mac80211 queues, enter PAUSING, and run
 * the per-interface pause step for each active interface.
 */
403 static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
405 ieee80211_stop_queues(aphy->hw);
406 aphy->state = ATH_WIPHY_PAUSING;
408 * TODO: handle PAUSING->PAUSED for the case where there are multiple
409 * active vifs (now we do it on the first vif getting ready; should be
412 ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
/* Locked wrapper around __ath9k_wiphy_pause(). */
417 int ath9k_wiphy_pause(struct ath_wiphy *aphy)
420 spin_lock_bh(&aphy->sc->wiphy_lock);
421 ret = __ath9k_wiphy_pause(aphy);
422 spin_unlock_bh(&aphy->sc->wiphy_lock);
/*
 * Per-interface unpause step: associated stations send a nullfunc with
 * the PM bit cleared (ps=0) to leave power save; AP beaconing resumes
 * via the aphy->state change, so nothing to do here for AP interfaces.
 * NOTE(review): the switch statement and break statements are elided
 * from this excerpt.
 */
426 static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
428 struct ath_wiphy *aphy = data;
429 struct ath_vif *avp = (void *) vif->drv_priv;
432 case NL80211_IFTYPE_STATION:
433 if (!vif->bss_conf.assoc)
435 ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
437 case NL80211_IFTYPE_AP:
438 /* Beacon transmission is re-enabled by aphy->state change */
445 /* caller must hold wiphy_lock */
/*
 * Unpause @aphy: run the per-interface unpause step, mark the wiphy
 * ACTIVE, and wake its mac80211 queues.
 */
446 static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
448 ieee80211_iterate_active_interfaces_atomic(aphy->hw,
449 ath9k_unpause_iter, aphy);
450 aphy->state = ATH_WIPHY_ACTIVE;
451 ieee80211_wake_queues(aphy->hw);
/* Locked wrapper around __ath9k_wiphy_unpause(). */
455 int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
458 spin_lock_bh(&aphy->sc->wiphy_lock);
459 ret = __ath9k_wiphy_unpause(aphy);
460 spin_unlock_bh(&aphy->sc->wiphy_lock);
/*
 * Force every non-inactive wiphy directly into the PAUSED state, without
 * the nullfunc handshake.  Used as recovery when a pause sequence has
 * timed out.  Caller must hold wiphy_lock.
 */
464 static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
467 if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
468 sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
469 for (i = 0; i < sc->num_sec_wiphy; i++) {
470 if (sc->sec_wiphy[i] &&
471 sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
472 sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
476 /* caller must hold wiphy_lock */
/* Start the pause sequence on every currently ACTIVE wiphy. */
477 static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
480 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
481 __ath9k_wiphy_pause(sc->pri_wiphy);
482 for (i = 0; i < sc->num_sec_wiphy; i++) {
483 if (sc->sec_wiphy[i] &&
484 sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
485 __ath9k_wiphy_pause(sc->sec_wiphy[i]);
/*
 * Request that the shared hardware be switched to @aphy's channel.
 * Refuses while a scan is in progress, and returns -EBUSY while a
 * previous select is still pausing wiphys -- with a hardware
 * disable/enable recovery path if that pause has been stuck for more
 * than ~HZ/2.  Otherwise records the target channel/wiphy, pauses all
 * active wiphys, and queues chan_work immediately if nothing needs time
 * to pause.
 * NOTE(review): several return statements and closing braces are elided
 * from this excerpt.
 */
489 int ath9k_wiphy_select(struct ath_wiphy *aphy)
491 struct ath_softc *sc = aphy->sc;
494 spin_lock_bh(&sc->wiphy_lock);
495 if (__ath9k_wiphy_scanning(sc)) {
497 * For now, we are using mac80211 sw scan and it expects to
498 * have full control over channel changes, so avoid wiphy
499 * scheduling during a scan. This could be optimized if the
500 * scanning control were moved into the driver.
502 spin_unlock_bh(&sc->wiphy_lock);
505 if (__ath9k_wiphy_pausing(sc)) {
/* Track how long the previous select has been stuck. */
506 if (sc->wiphy_select_failures == 0)
507 sc->wiphy_select_first_fail = jiffies;
508 sc->wiphy_select_failures++;
509 if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
511 printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
512 "out; disable/enable hw to recover\n");
513 __ath9k_wiphy_mark_all_paused(sc);
515 * TODO: this workaround to fix hardware is unlikely to
516 * be specific to virtual wiphy changes. It can happen
517 * on normal channel change, too, and as such, this
518 * should really be made more generic. For example,
519 * tricker radio disable/enable on GTT interrupt burst
520 * (say, 10 GTT interrupts received without any TX
521 * frame being completed)
523 spin_unlock_bh(&sc->wiphy_lock);
524 ath_radio_disable(sc, aphy->hw);
525 ath_radio_enable(sc, aphy->hw);
526 /* Only the primary wiphy hw is used for queuing work */
527 ieee80211_queue_work(aphy->sc->hw,
528 &aphy->sc->chan_work);
529 return -EBUSY; /* previous select still in progress */
531 spin_unlock_bh(&sc->wiphy_lock);
532 return -EBUSY; /* previous select still in progress */
534 sc->wiphy_select_failures = 0;
536 /* Store the new channel */
537 sc->chan_idx = aphy->chan_idx;
538 sc->chan_is_ht = aphy->chan_is_ht;
539 sc->next_wiphy = aphy;
541 __ath9k_wiphy_pause_all(sc);
542 now = !__ath9k_wiphy_pausing(aphy->sc);
543 spin_unlock_bh(&sc->wiphy_lock);
546 /* Ready to request channel change immediately */
547 ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
551 * wiphys will be unpaused in ath9k_tx_status() once channel has been
552 * changed if any wiphy needs time to become paused.
/*
 * Return whether any wiphy (primary or secondary) has been started,
 * i.e. is in any state other than ATH_WIPHY_INACTIVE.
 * NOTE(review): the return statements are elided from this excerpt.
 */
558 bool ath9k_wiphy_started(struct ath_softc *sc)
561 spin_lock_bh(&sc->wiphy_lock);
562 if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
563 spin_unlock_bh(&sc->wiphy_lock);
566 for (i = 0; i < sc->num_sec_wiphy; i++) {
567 if (sc->sec_wiphy[i] &&
568 sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
569 spin_unlock_bh(&sc->wiphy_lock);
573 spin_unlock_bh(&sc->wiphy_lock);
/*
 * Forcibly pause @aphy in favor of @selected: during a scan every other
 * wiphy is paused regardless of channel; otherwise wiphys already on the
 * selected channel are left running.  No nullfunc handshake -- the
 * state is set and queues are stopped directly.
 */
577 static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
578 struct ath_wiphy *selected)
580 if (selected->state == ATH_WIPHY_SCAN) {
581 if (aphy == selected)
584 * Pause all other wiphys for the duration of the scan even if
585 * they are on the current channel now.
587 } else if (aphy->chan_idx == selected->chan_idx)
589 aphy->state = ATH_WIPHY_PAUSED;
590 ieee80211_stop_queues(aphy->hw);
/*
 * Forcibly pause every ACTIVE wiphy except (depending on channel/scan
 * state) @selected, via ath9k_wiphy_pause_chan().
 */
593 void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
594 struct ath_wiphy *selected)
597 spin_lock_bh(&sc->wiphy_lock);
598 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
599 ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
600 for (i = 0; i < sc->num_sec_wiphy; i++) {
601 if (sc->sec_wiphy[i] &&
602 sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
603 ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
605 spin_unlock_bh(&sc->wiphy_lock);
/*
 * Periodic wiphy scheduler work: round-robin over the secondary wiphys
 * (falling back to the primary) to pick the next non-inactive wiphy,
 * select it via ath9k_wiphy_select(), and re-arm the delayed work for
 * the next scheduling interval.  Bails out early if the scheduler has
 * been disabled (interval 0).
 * NOTE(review): parts of the selection logic, the aphy NULL check before
 * ath9k_wiphy_select(), and several braces are elided from this excerpt.
 */
608 void ath9k_wiphy_work(struct work_struct *work)
610 struct ath_softc *sc = container_of(work, struct ath_softc,
612 struct ath_wiphy *aphy = NULL;
615 spin_lock_bh(&sc->wiphy_lock);
617 if (sc->wiphy_scheduler_int == 0) {
618 /* wiphy scheduler is disabled */
619 spin_unlock_bh(&sc->wiphy_lock);
/* Advance round-robin index over the secondary wiphys. */
624 sc->wiphy_scheduler_index++;
625 while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
626 aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
627 if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
630 sc->wiphy_scheduler_index++;
/* Wrapped around: fall back to the primary wiphy if it is active. */
634 sc->wiphy_scheduler_index = 0;
635 if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
640 /* No wiphy is ready to be scheduled */
642 aphy = sc->pri_wiphy;
645 spin_unlock_bh(&sc->wiphy_lock);
648 aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
649 ath9k_wiphy_select(aphy)) {
650 printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
/* Re-arm for the next scheduling interval. */
654 ieee80211_queue_delayed_work(sc->hw,
656 sc->wiphy_scheduler_int);
/*
 * Set the wiphy scheduler interval to @msec_int milliseconds (0
 * disables it).  Cancels any pending run synchronously, then re-arms
 * the delayed work if the scheduler is enabled.
 */
659 void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
661 cancel_delayed_work_sync(&sc->wiphy_work);
662 sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
663 if (sc->wiphy_scheduler_int)
664 ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
665 sc->wiphy_scheduler_int);
668 /* caller must hold wiphy_lock */
/*
 * Return true only if the primary and every secondary wiphy are idle.
 * NOTE(review): the loop body and return statements are elided from
 * this excerpt.
 */
669 bool ath9k_all_wiphys_idle(struct ath_softc *sc)
672 if (!sc->pri_wiphy->idle)
674 for (i = 0; i < sc->num_sec_wiphy; i++) {
675 struct ath_wiphy *aphy = sc->sec_wiphy[i];
684 /* caller must hold wiphy_lock */
/*
 * Record @aphy's idle flag (logged at ATH_DBG_CONFIG level).
 * NOTE(review): the assignment of aphy->idle itself is elided from this
 * excerpt -- only the debug print is visible.
 */
685 void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
687 struct ath_softc *sc = aphy->sc;
690 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
691 "Marking %s as %s\n",
692 wiphy_name(aphy->hw->wiphy),
693 idle ? "idle" : "not-idle");
695 /* Only bother starting a queue on an active virtual wiphy */
/*
 * Wake @skb_queue on the primary wiphy and on every secondary wiphy
 * that is in the ACTIVE state; wiphys in other states are skipped.
 * NOTE(review): several lines (the hw reassignment per aphy, NULL-slot
 * continue, braces) are elided from this excerpt -- note that as shown,
 * hw still refers to the primary wiphy inside the loop.
 */
696 void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
698 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
701 spin_lock_bh(&sc->wiphy_lock);
703 /* Start the primary wiphy */
704 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
705 ieee80211_wake_queue(hw, skb_queue);
709 /* Now start the secondary wiphy queues */
710 for (i = 0; i < sc->num_sec_wiphy; i++) {
711 struct ath_wiphy *aphy = sc->sec_wiphy[i];
714 if (aphy->state != ATH_WIPHY_ACTIVE)
718 ieee80211_wake_queue(hw, skb_queue);
723 spin_unlock_bh(&sc->wiphy_lock);
726 /* Go ahead and propagate information to all virtual wiphys, it won't hurt */
/*
 * Stop @skb_queue on the primary wiphy and on every secondary wiphy,
 * regardless of wiphy state (stopping is harmless on paused wiphys).
 * NOTE(review): lines are elided from this excerpt (per-aphy hw
 * reassignment, NULL-slot check) and the function's end lies beyond the
 * visible range.
 */
727 void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
729 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
732 spin_lock_bh(&sc->wiphy_lock);
734 /* Stop the primary wiphy */
735 ieee80211_stop_queue(hw, skb_queue);
737 /* Now stop the secondary wiphy queues */
738 for (i = 0; i < sc->num_sec_wiphy; i++) {
739 struct ath_wiphy *aphy = sc->sec_wiphy[i];
743 ieee80211_stop_queue(hw, skb_queue);
745 spin_unlock_bh(&sc->wiphy_lock);