3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
/* Number of attempts to boot the firmware before giving up entirely. */
#define WL1271_BOOT_RETRIES 3
/* Module parameters; applied to wl->conf in wlcore_adjust_conf(). */
58 static char *fwlog_param;
/* -1 means "not given on the command line"; any other value overrides conf. */
59 static int bug_on_recovery = -1;
60 static int no_recovery = -1;
/* Forward declarations for helpers defined later in this file. */
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Tell the firmware the STA peer is now authorized (802.11 AUTHORIZED state).
 * Only meaningful for an associated STA-type vif; the STA_STATE_SENT bit
 * guarantees the peer-state command is issued at most once per association.
 */
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* already sent the peer-state command for this association - nothing to do */
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
86 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory-change notifier: for every 5 GHz channel flagged as
 * radar, force passive scanning and forbid IBSS, then push the resulting
 * regulatory domain configuration down to the firmware.
 */
90 static void wl1271_reg_notify(struct wiphy *wiphy,
91 struct regulatory_request *request)
93 struct ieee80211_supported_band *band;
94 struct ieee80211_channel *ch;
96 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
97 struct wl1271 *wl = hw->priv;
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
/* disabled channels need no adjustment */
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
/* propagate the updated regdomain to the firmware */
111 wlcore_regdomain_config(wl);
/*
 * Enable/disable RX streaming in the firmware via ACX and mirror the new
 * state in the vif's RX_STREAMING_STARTED flag. Caller must hold wl->mutex.
 */
114 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
119 /* we should hold wl->mutex */
120 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
133 * this function is being called when the rx_streaming interval
134 * has been changed or rx_streaming should be disabled
136 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
139 int period = wl->conf.rx_streaming.interval;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
145 /* reconfigure/disable according to new streaming_period */
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
148 (wl->conf.rx_streaming.always ||
149 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 ret = wl1271_set_rx_streaming(wl, wlvif, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Work item: enable RX streaming for the vif if it is associated and the
 * configuration (always-on or soft-gemini coexistence) calls for it, then
 * arm the inactivity timer that will disable streaming again later.
 */
160 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
163 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
164 rx_streaming_enable_work);
165 struct wl1271 *wl = wlvif->wl;
167 mutex_lock(&wl->mutex);
/* bail out if already started, not associated, or not configured for it */
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
171 (!wl->conf.rx_streaming.always &&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
175 if (!wl->conf.rx_streaming.interval)
178 ret = wl1271_ps_elp_wakeup(wl);
182 ret = wl1271_set_rx_streaming(wl, wlvif, true);
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif->rx_streaming_timer,
188 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
191 wl1271_ps_elp_sleep(wl);
193 mutex_unlock(&wl->mutex);
/*
 * Work item: disable RX streaming for the vif (queued by the inactivity
 * timer below). No-op if streaming was never started.
 */
196 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
199 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
200 rx_streaming_disable_work);
201 struct wl1271 *wl = wlvif->wl;
203 mutex_lock(&wl->mutex);
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
208 ret = wl1271_ps_elp_wakeup(wl);
212 ret = wl1271_set_rx_streaming(wl, wlvif, false);
217 wl1271_ps_elp_sleep(wl);
219 mutex_unlock(&wl->mutex);
/*
 * Inactivity timer callback: cannot sleep in timer context, so defer the
 * actual disable (which takes wl->mutex) to the disable work item.
 */
222 static void wl1271_rx_streaming_timer(unsigned long data)
224 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
225 struct wl1271 *wl = wlvif->wl;
226 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
229 /* wl->mutex must be taken */
/*
 * Re-arm the Tx stuck-detection watchdog. The watchdog is only relevant
 * while the FW holds allocated Tx blocks; otherwise there is nothing to
 * time out and the delayed work is left alone.
 */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl->tx_allocated_blocks == 0)
236 cancel_delayed_work(&wl->tx_watchdog_work);
237 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
238 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Tx watchdog expiry: the FW has not released any Tx blocks for a full
 * timeout period. Benign causes (ROC in progress, active scan, AP with
 * connected stations that may be sleeping) just re-arm the timer; with no
 * such excuse, Tx is considered stuck and FW recovery is triggered.
 */
241 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 struct delayed_work *dwork;
246 dwork = container_of(work, struct delayed_work, work);
247 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249 mutex_lock(&wl->mutex);
251 if (unlikely(wl->state != WLCORE_STATE_ON))
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl->tx_allocated_blocks == 0))
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
262 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
263 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
264 wl->conf.tx.tx_watchdog_timeout);
265 wl12xx_rearm_tx_watchdog_locked(wl);
270 * if a scan is in progress, we might not have any Tx for a long
273 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
275 wl->conf.tx.tx_watchdog_timeout);
276 wl12xx_rearm_tx_watchdog_locked(wl);
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will most hopefully discover it when all
284 * stations are removed due to inactivity.
286 if (wl->active_sta_count) {
287 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
289 wl->conf.tx.tx_watchdog_timeout,
290 wl->active_sta_count);
291 wl12xx_rearm_tx_watchdog_locked(wl);
/* no legitimate excuse found - declare Tx stuck and recover the FW */
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl->conf.tx.tx_watchdog_timeout);
297 wl12xx_queue_recovery_work(wl);
300 mutex_unlock(&wl->mutex);
/*
 * Override the default driver configuration with the optional module
 * parameters (fwlog mode, bug_on_recovery, no_recovery) given at load time.
 */
303 static void wlcore_adjust_conf(struct wl1271 *wl)
305 /* Adjust settings according to optional module parameters */
308 if (!strcmp(fwlog_param, "continuous")) {
309 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
310 } else if (!strcmp(fwlog_param, "ondemand")) {
311 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
312 } else if (!strcmp(fwlog_param, "dbgpins")) {
313 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
314 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
315 } else if (!strcmp(fwlog_param, "disable")) {
316 wl->conf.fwlog.mem_blocks = 0;
317 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
/* unrecognized value: keep the defaults and complain */
319 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
/* -1 means the parameter was not set; keep the conf default */
323 if (bug_on_recovery != -1)
324 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
326 if (no_recovery != -1)
327 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Regulate host-side (high-level) power save for a single AP link based on
 * the FW's per-link PS bitmap and the number of packets pending in FW.
 */
330 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
331 struct wl12xx_vif *wlvif,
336 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
339 * Wake up from high level PS if the STA is asleep with too little
340 * packets in FW or if the STA is awake.
342 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
343 wl12xx_ps_link_end(wl, wlvif, hlid);
346 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 * Make an exception if this is the only connected link. In this
348 * case FW-memory congestion is less of a problem.
349 * Note that a single connected STA means 3 active links, since we must
350 * account for the global and broadcast AP links. The "fw_ps" check
351 * assures us the third link is a STA connected to the AP. Otherwise
352 * the FW would not set the PSM bit.
354 else if (wl->active_link_count > 3 && fw_ps &&
355 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached per-link FW PS bitmap from the FW status block and
 * re-evaluate PS regulation for every station link of this AP vif.
 */
359 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
360 struct wl12xx_vif *wlvif,
361 struct wl_fw_status_2 *status)
366 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
367 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
368 wl1271_debug(DEBUG_PSM,
369 "link ps prev 0x%x cur 0x%x changed 0x%x",
370 wl->ap_fw_ps_map, cur_fw_ps_map,
371 wl->ap_fw_ps_map ^ cur_fw_ps_map);
373 wl->ap_fw_ps_map = cur_fw_ps_map;
376 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
377 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
378 wl->links[hlid].allocated_pkts);
/*
 * Read the FW status block over the bus and update all host-side counters
 * derived from it: per-queue/per-link freed packet counts, total freed Tx
 * blocks, available Tx blocks, the Tx watchdog, per-link PS status for AP
 * vifs, and the host-chipset time offset. Caller must hold wl->mutex.
 */
381 static int wlcore_fw_status(struct wl1271 *wl,
382 struct wl_fw_status_1 *status_1,
383 struct wl_fw_status_2 *status_2)
385 struct wl12xx_vif *wlvif;
387 u32 old_tx_blk_count = wl->tx_blocks_available;
388 int avail, freed_blocks;
392 struct wl1271_link *lnk;
394 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
395 sizeof(*status_2) + wl->fw_status_priv_len;
397 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
402 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
403 "drv_rx_counter = %d, tx_results_counter = %d)",
405 status_1->fw_rx_counter,
406 status_1->drv_rx_counter,
407 status_1->tx_results_counter);
409 for (i = 0; i < NUM_TX_QUEUES; i++) {
410 /* prevent wrap-around in freed-packets counter */
411 wl->tx_allocated_pkts[i] -=
412 (status_2->counters.tx_released_pkts[i] -
413 wl->tx_pkts_freed[i]) & 0xff;
415 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
419 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
423 /* prevent wrap-around in freed-packets counter */
424 diff = (status_2->counters.tx_lnk_free_pkts[i] -
425 lnk->prev_freed_pkts) & 0xff;
430 lnk->allocated_pkts -= diff;
431 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
433 /* accumulate the prev_freed_pkts counter */
434 lnk->total_freed_pkts += diff;
437 /* prevent wrap-around in total blocks counter */
438 if (likely(wl->tx_blocks_freed <=
439 le32_to_cpu(status_2->total_released_blks)))
440 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
/* 32-bit counter wrapped: account for the wrap explicitly */
443 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
444 le32_to_cpu(status_2->total_released_blks);
446 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
448 wl->tx_allocated_blocks -= freed_blocks;
451 * If the FW freed some blocks:
452 * If we still have allocated blocks - re-arm the timer, Tx is
453 * not stuck. Otherwise, cancel the timer (no Tx currently).
456 if (wl->tx_allocated_blocks)
457 wl12xx_rearm_tx_watchdog_locked(wl);
459 cancel_delayed_work(&wl->tx_watchdog_work);
462 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
465 * The FW might change the total number of TX memblocks before
466 * we get a notification about blocks being released. Thus, the
467 * available blocks calculation might yield a temporary result
468 * which is lower than the actual available blocks. Keeping in
469 * mind that only blocks that were allocated can be moved from
470 * TX to RX, tx_blocks_available should never decrease here.
472 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
475 /* if more blocks are available now, tx work can be scheduled */
476 if (wl->tx_blocks_available > old_tx_blk_count)
477 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
479 /* for AP update num of allocated TX blocks per link and ps status */
480 wl12xx_for_each_wlvif_ap(wl, wlvif) {
481 wl12xx_irq_update_links_status(wl, wlvif, status_2);
484 /* update the host-chipset time offset */
486 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
487 (s64)le32_to_cpu(status_2->fw_localtime);
489 wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
/*
 * Drain the deferred RX and TX-status queues into mac80211. Uses the *_ni
 * variants, so it must run in process context, not in the hardirq handler.
 */
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
498 /* Pass all received frames to the network stack */
499 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 ieee80211_rx_ni(wl->hw, skb);
502 /* Return sent skbs to the network stack */
503 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes the deferred queues, looping until the RX queue
 * stays empty (new frames may be deferred while we are flushing).
 */
507 static void wl1271_netstack_work(struct work_struct *work)
510 container_of(work, struct wl1271, netstack_work);
513 wl1271_flush_deferred_work(wl);
514 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on IRQ servicing iterations per invocation, to avoid livelock. */
517 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt servicing loop, called with wl->mutex held from the
 * threaded IRQ handler. Reads FW status, dispatches RX, Tx completions and
 * FW events, and kicks off recovery on watchdog interrupts.
 */
519 static int wlcore_irq_locked(struct wl1271 *wl)
523 int loopcount = WL1271_IRQ_MAX_LOOPS;
525 unsigned int defer_count;
529 * In case edge triggered interrupt must be used, we cannot iterate
530 * more than once without introducing race conditions with the hardirq.
532 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
535 wl1271_debug(DEBUG_IRQ, "IRQ work");
537 if (unlikely(wl->state != WLCORE_STATE_ON))
540 ret = wl1271_ps_elp_wakeup(wl);
544 while (!done && loopcount--) {
546 * In order to avoid a race with the hardirq, clear the flag
547 * before acknowledging the chip. Since the mutex is held,
548 * wl1271_ps_elp_wakeup cannot be called concurrently.
550 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
551 smp_mb__after_clear_bit();
553 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
557 wlcore_hw_tx_immediate_compl(wl);
559 intr = le32_to_cpu(wl->fw_status_1->intr);
560 intr &= WLCORE_ALL_INTR_MASK;
566 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
567 wl1271_error("HW watchdog interrupt received! starting recovery.");
568 wl->watchdog_recovery = true;
571 /* restarting the chip. ignore any other interrupt. */
575 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
576 wl1271_error("SW watchdog interrupt received! "
577 "starting recovery.");
578 wl->watchdog_recovery = true;
581 /* restarting the chip. ignore any other interrupt. */
585 if (likely(intr & WL1271_ACX_INTR_DATA)) {
586 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
588 ret = wlcore_rx(wl, wl->fw_status_1);
592 /* Check if any tx blocks were freed */
593 spin_lock_irqsave(&wl->wl_lock, flags);
594 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
595 wl1271_tx_total_queue_count(wl) > 0) {
596 spin_unlock_irqrestore(&wl->wl_lock, flags);
598 * In order to avoid starvation of the TX path,
599 * call the work function directly.
601 ret = wlcore_tx_work_locked(wl);
605 spin_unlock_irqrestore(&wl->wl_lock, flags);
608 /* check for tx results */
609 ret = wlcore_hw_tx_delayed_compl(wl);
613 /* Make sure the deferred queues don't get too long */
614 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
615 skb_queue_len(&wl->deferred_rx_queue);
616 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
617 wl1271_flush_deferred_work(wl);
620 if (intr & WL1271_ACX_INTR_EVENT_A) {
621 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
622 ret = wl1271_event_handle(wl, 0);
627 if (intr & WL1271_ACX_INTR_EVENT_B) {
628 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
629 ret = wl1271_event_handle(wl, 1);
634 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
635 wl1271_debug(DEBUG_IRQ,
636 "WL1271_ACX_INTR_INIT_COMPLETE");
638 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
639 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
642 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler. Completes a pending ELP wakeup, defers the work if
 * the device is suspended (marking it pending and disabling the IRQ), and
 * otherwise services the interrupt under wl->mutex, queuing recovery or TX
 * work as needed afterwards.
 */
648 static irqreturn_t wlcore_irq(int irq, void *cookie)
652 struct wl1271 *wl = cookie;
654 /* complete the ELP completion */
655 spin_lock_irqsave(&wl->wl_lock, flags);
656 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
658 complete(wl->elp_compl);
659 wl->elp_compl = NULL;
662 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
663 /* don't enqueue a work right now. mark it as pending */
664 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
665 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
666 disable_irq_nosync(wl->irq);
667 pm_wakeup_event(wl->dev, 0);
668 spin_unlock_irqrestore(&wl->wl_lock, flags);
671 spin_unlock_irqrestore(&wl->wl_lock, flags);
673 /* TX might be handled here, avoid redundant work */
674 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
675 cancel_work_sync(&wl->tx_work);
677 mutex_lock(&wl->mutex);
679 ret = wlcore_irq_locked(wl);
681 wl12xx_queue_recovery_work(wl);
683 spin_lock_irqsave(&wl->wl_lock, flags);
684 /* In case TX was not handled here, queue TX work */
685 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
686 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
687 wl1271_tx_total_queue_count(wl) > 0)
688 ieee80211_queue_work(wl->hw, &wl->tx_work);
689 spin_unlock_irqrestore(&wl->wl_lock, flags);
691 mutex_unlock(&wl->mutex);
/* Accumulator for the active-interface iteration in wl12xx_get_vif_count(). */
696 struct vif_counter_data {
699 struct ieee80211_vif *cur_vif;
/* set to true if cur_vif was seen among the active interfaces */
700 bool cur_vif_running;
/* Iterator callback: count active vifs and flag whether cur_vif is running. */
703 static void wl12xx_vif_count_iter(void *data, u8 *mac,
704 struct ieee80211_vif *vif)
706 struct vif_counter_data *counter = data;
709 if (counter->cur_vif == vif)
710 counter->cur_vif_running = true;
713 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count the active mac80211 interfaces on this hw and record whether
 * cur_vif is among them, filling *data.
 */
714 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
715 struct ieee80211_vif *cur_vif,
716 struct vif_counter_data *data)
718 memset(data, 0, sizeof(*data));
719 data->cur_vif = cur_vif;
721 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
722 wl12xx_vif_count_iter, data);
/*
 * Select and load the appropriate firmware image (PLT, multi-role or
 * single-role) into wl->fw via request_firmware(). A no-op when the
 * requested type is already loaded. The blob is copied into vmalloc'ed
 * memory so the struct firmware can be released immediately.
 */
725 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
727 const struct firmware *fw;
729 enum wl12xx_fw_type fw_type;
733 fw_type = WL12XX_FW_TYPE_PLT;
734 fw_name = wl->plt_fw_name;
737 * we can't call wl12xx_get_vif_count() here because
738 * wl->mutex is taken, so use the cached last_vif_count value
740 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
741 fw_type = WL12XX_FW_TYPE_MULTI;
742 fw_name = wl->mr_fw_name;
744 fw_type = WL12XX_FW_TYPE_NORMAL;
745 fw_name = wl->sr_fw_name;
/* the right firmware is already loaded - nothing to do */
749 if (wl->fw_type == fw_type)
752 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
754 ret = request_firmware(&fw, fw_name, wl->dev);
757 wl1271_error("could not get firmware %s: %d", fw_name, ret);
762 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* mark type NONE until the copy fully succeeds */
769 wl->fw_type = WL12XX_FW_TYPE_NONE;
770 wl->fw_len = fw->size;
771 wl->fw = vmalloc(wl->fw_len);
774 wl1271_error("could not allocate memory for the firmware");
779 memcpy(wl->fw, fw->data, wl->fw_len);
781 wl->fw_type = fw_type;
783 release_firmware(fw);
/*
 * Schedule FW recovery: move the state machine to RESTARTING, disable
 * interrupts and queue the recovery work. Only acts when the device is ON,
 * which prevents recursive recoveries.
 */
788 void wl12xx_queue_recovery_work(struct wl1271 *wl)
790 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
792 /* Avoid a recursive recovery */
793 if (wl->state == WLCORE_STATE_ON) {
794 wl->state = WLCORE_STATE_RESTARTING;
795 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
796 wlcore_disable_interrupts_nosync(wl);
797 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append one FW log memory block to the host-side fwlog buffer. The block
 * is a length-value list; we first walk it to find the end of valid data,
 * then copy as much as still fits in the PAGE_SIZE fwlog buffer.
 */
801 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
805 /* The FW log is a length-value list, find where the log end */
806 while (len < maxlen) {
807 if (memblock[len] == 0)
/* next entry would run past the block - stop here */
809 if (len + memblock[len] + 1 > maxlen)
811 len += memblock[len] + 1;
814 /* Make sure we have enough room */
815 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
817 /* Fill the FW log file, consumed by the sysfs fwlog entry */
818 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
819 wl->fwlog_size += len;
/* Sentinel "next block" address terminating the log in continuous mode. */
824 #define WLCORE_FW_LOG_END 0x2000000
/*
 * On a FW panic, walk the firmware's linked list of log memory blocks and
 * copy their contents into the host fwlog buffer, then wake any reader
 * blocked on the sysfs fwlog entry.
 */
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
834 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
835 (wl->conf.fwlog.mem_blocks == 0))
838 wl1271_info("Reading FW panic log");
840 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
845 * Make sure the chip is awake and the logger isn't active.
846 * Do not send a stop fwlog command if the fw is hanged or if
847 * dbgpins are used (due to some fw bug).
849 if (wl1271_ps_elp_wakeup(wl))
851 if (!wl->watchdog_recovery &&
852 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
853 wl12xx_cmd_stop_fwlog(wl);
855 /* Read the first memory block address */
856 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
860 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
/* in continuous mode each block also carries an RX descriptor header */
864 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
865 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
866 end_of_log = WLCORE_FW_LOG_END;
868 offset = sizeof(addr);
872 /* Traverse the memory blocks linked list */
874 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
875 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
881 * Memory blocks are linked to one another. The first 4 bytes
882 * of each memory block hold the hardware address of the next
883 * one. The last memory block points to the first one in
884 * on demand mode and is equal to 0x2000000 in continuous mode.
886 addr = le32_to_cpup((__le32 *)block);
887 if (!wl12xx_copy_fwlog(wl, block + offset,
888 WL12XX_HW_BLOCK_SIZE - offset))
890 } while (addr && (addr != end_of_log));
/* notify readers blocked on the sysfs fwlog entry */
892 wake_up_interruptible(&wl->fwlog_waitq);
/*
 * Log diagnostic information for a recovery: FW version, the FW program
 * counter and interrupt status, temporarily switching to the BOOT
 * partition to read them and restoring the WORK partition afterwards.
 */
898 static void wlcore_print_recovery(struct wl1271 *wl)
904 wl1271_info("Hardware recovery in progress. FW ver: %s",
905 wl->chip.fw_ver_str);
907 /* change partitions momentarily so we can read the FW pc */
908 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
912 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
916 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
920 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
921 pc, hint_sts, ++wl->recovery_count);
923 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * FW recovery work item: dump panic diagnostics (unless the recovery was
 * intentional), optionally BUG() or skip recovery per configuration, then
 * tear down all vifs, stop the chip and ask mac80211 to restart the hw.
 */
927 static void wl1271_recovery_work(struct work_struct *work)
930 container_of(work, struct wl1271, recovery_work);
931 struct wl12xx_vif *wlvif;
932 struct ieee80211_vif *vif;
934 mutex_lock(&wl->mutex);
936 if (wl->state == WLCORE_STATE_OFF || wl->plt)
939 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
940 wl12xx_read_fwlog_panic(wl);
941 wlcore_print_recovery(wl);
/* debug aid: crash the kernel on unintended recoveries if configured */
944 BUG_ON(wl->conf.recovery.bug_on_recovery &&
945 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
947 if (wl->conf.recovery.no_recovery) {
948 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
952 /* Prevent spurious TX during FW restart */
953 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
955 /* reboot the chipset */
956 while (!list_empty(&wl->wlvif_list)) {
957 wlvif = list_first_entry(&wl->wlvif_list,
958 struct wl12xx_vif, list);
959 vif = wl12xx_wlvif_to_vif(wlvif);
960 __wl1271_op_remove_interface(wl, vif, false);
963 wlcore_op_stop_locked(wl);
965 ieee80211_restart_hw(wl->hw);
968 * Its safe to enable TX now - the queues are stopped after a request
971 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
974 wl->watchdog_recovery = false;
975 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
976 mutex_unlock(&wl->mutex);
/* Wake the firmware from ELP by writing WAKE_UP to the ELP control register. */
979 static int wlcore_fw_wakeup(struct wl1271 *wl)
981 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the FW status buffers (status_1 and status_2 share one
 * allocation; status_2 points just past the variable-length status_1) and
 * the Tx result interface buffer. Frees status on tx_res_if failure.
 */
984 static int wl1271_setup(struct wl1271 *wl)
986 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
987 sizeof(*wl->fw_status_2) +
988 wl->fw_status_priv_len, GFP_KERNEL);
989 if (!wl->fw_status_1)
992 wl->fw_status_2 = (struct wl_fw_status_2 *)
993 (((u8 *) wl->fw_status_1) +
994 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
996 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
997 if (!wl->tx_res_if) {
998 kfree(wl->fw_status_1);
/*
 * Power the chip on, reset the IO, select the BOOT partition and wake the
 * FW from ELP. Powers back off on failure.
 */
1005 static int wl12xx_set_power_on(struct wl1271 *wl)
1009 msleep(WL1271_PRE_POWER_ON_SLEEP);
1010 ret = wl1271_power_on(wl);
1013 msleep(WL1271_POWER_ON_SLEEP);
1014 wl1271_io_reset(wl);
1017 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1021 /* ELP module wake up */
1022 ret = wlcore_fw_wakeup(wl);
/* error path: undo the power-on */
1030 wl1271_power_off(wl);
/*
 * Full chip bring-up path: power on, configure the bus block size,
 * allocate runtime buffers and fetch the firmware image (PLT or normal).
 */
1034 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1038 ret = wl12xx_set_power_on(wl);
1043 * For wl127x based devices we could use the default block
1044 * size (512 bytes), but due to a bug in the sdio driver, we
1045 * need to set it explicitly after the chip is powered on. To
1046 * simplify the code and since the performance impact is
1047 * negligible, we use the same block size for all different
1050 * Check if the bus supports blocksize alignment and, if it
1051 * doesn't, make sure we don't have the quirk.
1053 if (!wl1271_set_block_size(wl))
1054 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1056 /* TODO: make sure the lower driver has set things up correctly */
1058 ret = wl1271_setup(wl);
1062 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (Production Line Testing) mode: boot the chip with the PLT
 * firmware, retrying up to WL1271_BOOT_RETRIES times. Fails if the device
 * is not currently OFF. Updates the wiphy hw/fw version info on success.
 */
1070 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1072 int retries = WL1271_BOOT_RETRIES;
1073 struct wiphy *wiphy = wl->hw->wiphy;
1075 static const char* const PLT_MODE[] = {
1083 mutex_lock(&wl->mutex);
1085 wl1271_notice("power up");
1087 if (wl->state != WLCORE_STATE_OFF) {
1088 wl1271_error("cannot go into PLT state because not "
1089 "in off state: %d", wl->state);
1094 /* Indicate to lower levels that we are now in PLT mode */
1096 wl->plt_mode = plt_mode;
1100 ret = wl12xx_chip_wakeup(wl, true);
1104 ret = wl->ops->plt_init(wl);
1108 wl->state = WLCORE_STATE_ON;
1109 wl1271_notice("firmware booted in PLT mode %s (%s)",
1111 wl->chip.fw_ver_str);
1113 /* update hw/fw version info in wiphy struct */
1114 wiphy->hw_version = wl->chip.id;
1115 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1116 sizeof(wiphy->fw_version));
/* boot attempt failed: power off and retry (or give up) */
1121 wl1271_power_off(wl);
1125 wl->plt_mode = PLT_OFF;
1127 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1128 WL1271_BOOT_RETRIES);
1130 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts before changing state (so the IRQ
 * handler cannot run half-way), flush/cancel all pending work, then power
 * the chip off and mark the device OFF.
 */
1135 int wl1271_plt_stop(struct wl1271 *wl)
1139 wl1271_notice("power down");
1142 * Interrupts must be disabled before setting the state to OFF.
1143 * Otherwise, the interrupt handler might be called and exit without
1144 * reading the interrupt status.
1146 wlcore_disable_interrupts(wl);
1147 mutex_lock(&wl->mutex);
1149 mutex_unlock(&wl->mutex);
1152 * This will not necessarily enable interrupts as interrupts
1153 * may have been disabled when op_stop was called. It will,
1154 * however, balance the above call to disable_interrupts().
1156 wlcore_enable_interrupts(wl);
1158 wl1271_error("cannot power down because not in PLT "
1159 "state: %d", wl->state);
1164 mutex_unlock(&wl->mutex);
/* flush pending deferred work outside the mutex to avoid deadlock */
1166 wl1271_flush_deferred_work(wl);
1167 cancel_work_sync(&wl->netstack_work);
1168 cancel_work_sync(&wl->recovery_work);
1169 cancel_delayed_work_sync(&wl->elp_work);
1170 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1172 mutex_lock(&wl->mutex);
1173 wl1271_power_off(wl);
1175 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1176 wl->state = WLCORE_STATE_OFF;
1178 wl->plt_mode = PLT_OFF;
1180 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback. Maps the skb to a link (hlid) and AC queue,
 * enqueues it on the per-link queue under wl_lock, applies the high
 * watermark (stopping the queue when a vif's AC backlog grows too long)
 * and kicks the TX work unless TX is already being handled.
 */
1186 static void wl1271_op_tx(struct ieee80211_hw *hw,
1187 struct ieee80211_tx_control *control,
1188 struct sk_buff *skb)
1190 struct wl1271 *wl = hw->priv;
1191 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1192 struct ieee80211_vif *vif = info->control.vif;
1193 struct wl12xx_vif *wlvif = NULL;
1194 unsigned long flags;
1199 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1200 ieee80211_free_txskb(hw, skb);
1204 wlvif = wl12xx_vif_to_data(vif);
1205 mapping = skb_get_queue_mapping(skb);
1206 q = wl1271_tx_get_queue(mapping);
1208 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1210 spin_lock_irqsave(&wl->wl_lock, flags);
1213 * drop the packet if the link is invalid or the queue is stopped
1214 * for any reason but watermark. Watermark is a "soft"-stop so we
1215 * allow these packets through.
1217 if (hlid == WL12XX_INVALID_LINK_ID ||
1218 (!test_bit(hlid, wlvif->links_map)) ||
1219 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1220 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1221 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1222 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1223 ieee80211_free_txskb(hw, skb);
1227 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1229 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1231 wl->tx_queue_count[q]++;
1232 wlvif->tx_queue_count[q]++;
1235 * The workqueue is slow to process the tx_queue and we need stop
1236 * the queue here, otherwise the queue will get too long.
1238 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1239 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1240 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1241 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1242 wlcore_stop_queue_locked(wl, wlvif, q,
1243 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1247 * The chip specific setup must run before the first TX packet -
1248 * before that, the tx_work will not be initialized!
1251 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1252 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1253 ieee80211_queue_work(wl->hw, &wl->tx_work);
1256 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet (requested by the FW when it is low
 * on RX memory blocks). If the FW TX path is idle, push it out immediately
 * via wlcore_tx_work_locked(); otherwise the threaded IRQ handler will
 * schedule TX work. At most one dummy packet is pending at a time.
 */
1259 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1261 unsigned long flags;
1264 /* no need to queue a new dummy packet if one is already pending */
1265 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1268 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1270 spin_lock_irqsave(&wl->wl_lock, flags);
1271 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1272 wl->tx_queue_count[q]++;
1273 spin_unlock_irqrestore(&wl->wl_lock, flags);
1275 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1276 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1277 return wlcore_tx_work_locked(wl);
1280 * If the FW TX is busy, TX work will be scheduled by the threaded
1281 * interrupt handler function
1287 * The size of the dummy packet should be at least 1400 bytes. However, in
1288 * order to minimize the number of bus transactions, aligning it to 512 bytes
1289 * boundaries could be beneficial, performance wise
1291 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the reusable dummy packet: a zeroed NULLFUNC data frame padded to
 * TOTAL_TX_DUMMY_PACKET_SIZE, with headroom reserved for the Tx HW
 * descriptor and the TID forced to management.
 */
1293 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1295 struct sk_buff *skb;
1296 struct ieee80211_hdr_3addr *hdr;
1297 unsigned int dummy_packet_size;
1299 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1300 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1302 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1304 wl1271_warning("Failed to allocate a dummy packet skb");
/* leave room for the HW descriptor that is prepended before DMA */
1308 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1310 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1311 memset(hdr, 0, sizeof(*hdr));
1312 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1313 IEEE80211_STYPE_NULLFUNC |
1314 IEEE80211_FCTL_TODS);
1316 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1318 /* Dummy packets require the TID to be management */
1319 skb->priority = WL1271_TID_MGMT;
1321 /* Initialize all fields that might be used */
1322 skb_set_queue_mapping(skb, 0);
1323 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a WoWLAN wake pattern against FW RX-filter limits: count the
 * contiguous masked segments ("fields") the pattern decomposes into, and
 * reject it if it needs more fields than the FW supports or its flattened
 * fields buffer would exceed the FW size limit.
 */
1331 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1333 int num_fields = 0, in_field = 0, fields_size = 0;
1334 int i, pattern_len = 0;
1337 wl1271_warning("No mask in WoWLAN pattern");
1342 * The pattern is broken up into segments of bytes at different offsets
1343 * that need to be checked by the FW filter. Each segment is called
1344 * a field in the FW API. We verify that the total number of fields
1345 * required for this pattern won't exceed FW limits (8)
1346 * as well as the total fields buffer won't exceed the FW limit.
1347 * Note that if there's a pattern which crosses Ethernet/IP header
1348 * boundary a new field is required.
1350 for (i = 0; i < p->pattern_len; i++) {
1351 if (test_bit(i, (unsigned long *)p->mask)) {
/* a field crossing the Ethernet header boundary must be split */
1356 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1358 fields_size += pattern_len +
1359 RX_FILTER_FIELD_OVERHEAD;
1367 fields_size += pattern_len +
1368 RX_FILTER_FIELD_OVERHEAD;
/* account for a field still open when the pattern ends */
1375 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1379 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1380 wl1271_warning("RX Filter too complex. Too many segments");
1384 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1385 wl1271_warning("RX filter pattern is too big");
1392 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1394 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1397 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1404 for (i = 0; i < filter->num_fields; i++)
1405 kfree(filter->fields[i].pattern);
1410 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1411 u16 offset, u8 flags,
1412 u8 *pattern, u8 len)
1414 struct wl12xx_rx_filter_field *field;
1416 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1417 wl1271_warning("Max fields per RX filter. can't alloc another");
1421 field = &filter->fields[filter->num_fields];
1423 field->pattern = kzalloc(len, GFP_KERNEL);
1424 if (!field->pattern) {
1425 wl1271_warning("Failed to allocate RX filter pattern");
1429 filter->num_fields++;
1431 field->offset = cpu_to_le16(offset);
1432 field->flags = flags;
1434 memcpy(field->pattern, pattern, len);
1439 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1441 int i, fields_size = 0;
1443 for (i = 0; i < filter->num_fields; i++)
1444 fields_size += filter->fields[i].len +
1445 sizeof(struct wl12xx_rx_filter_field) -
1451 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1455 struct wl12xx_rx_filter_field *field;
1457 for (i = 0; i < filter->num_fields; i++) {
1458 field = (struct wl12xx_rx_filter_field *)buf;
1460 field->offset = filter->fields[i].offset;
1461 field->flags = filter->fields[i].flags;
1462 field->len = filter->fields[i].len;
1464 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1465 buf += sizeof(struct wl12xx_rx_filter_field) -
1466 sizeof(u8 *) + field->len;
1471 * Allocates an RX filter returned through f
1472 * which needs to be freed using rx_filter_free()
1474 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1475 struct cfg80211_wowlan_trig_pkt_pattern *p,
1476 struct wl12xx_rx_filter **f)
1479 struct wl12xx_rx_filter *filter;
1483 filter = wl1271_rx_filter_alloc();
1485 wl1271_warning("Failed to alloc rx filter");
1491 while (i < p->pattern_len) {
1492 if (!test_bit(i, (unsigned long *)p->mask)) {
1497 for (j = i; j < p->pattern_len; j++) {
1498 if (!test_bit(j, (unsigned long *)p->mask))
1501 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1502 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1506 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1508 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1510 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1511 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1516 ret = wl1271_rx_filter_alloc_field(filter,
1519 &p->pattern[i], len);
1526 filter->action = FILTER_SIGNAL;
1532 wl1271_rx_filter_free(filter);
1538 static int wl1271_configure_wowlan(struct wl1271 *wl,
1539 struct cfg80211_wowlan *wow)
1543 if (!wow || wow->any || !wow->n_patterns) {
1544 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1549 ret = wl1271_rx_filter_clear_all(wl);
1556 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1559 /* Validate all incoming patterns before clearing current FW state */
1560 for (i = 0; i < wow->n_patterns; i++) {
1561 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1563 wl1271_warning("Bad wowlan pattern %d", i);
1568 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1572 ret = wl1271_rx_filter_clear_all(wl);
1576 /* Translate WoWLAN patterns into filters */
1577 for (i = 0; i < wow->n_patterns; i++) {
1578 struct cfg80211_wowlan_trig_pkt_pattern *p;
1579 struct wl12xx_rx_filter *filter = NULL;
1581 p = &wow->patterns[i];
1583 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1585 wl1271_warning("Failed to create an RX filter from "
1586 "wowlan pattern %d", i);
1590 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1592 wl1271_rx_filter_free(filter);
1597 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1603 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1604 struct wl12xx_vif *wlvif,
1605 struct cfg80211_wowlan *wow)
1609 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1612 ret = wl1271_ps_elp_wakeup(wl);
1616 ret = wl1271_configure_wowlan(wl, wow);
1620 if ((wl->conf.conn.suspend_wake_up_event ==
1621 wl->conf.conn.wake_up_event) &&
1622 (wl->conf.conn.suspend_listen_interval ==
1623 wl->conf.conn.listen_interval))
1626 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1627 wl->conf.conn.suspend_wake_up_event,
1628 wl->conf.conn.suspend_listen_interval);
1631 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1634 wl1271_ps_elp_sleep(wl);
1640 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1641 struct wl12xx_vif *wlvif)
1645 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1648 ret = wl1271_ps_elp_wakeup(wl);
1652 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1654 wl1271_ps_elp_sleep(wl);
1660 static int wl1271_configure_suspend(struct wl1271 *wl,
1661 struct wl12xx_vif *wlvif,
1662 struct cfg80211_wowlan *wow)
1664 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1665 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1666 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1667 return wl1271_configure_suspend_ap(wl, wlvif);
1671 static void wl1271_configure_resume(struct wl1271 *wl,
1672 struct wl12xx_vif *wlvif)
1675 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1676 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1678 if ((!is_ap) && (!is_sta))
1681 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1684 ret = wl1271_ps_elp_wakeup(wl);
1689 wl1271_configure_wowlan(wl, NULL);
1691 if ((wl->conf.conn.suspend_wake_up_event ==
1692 wl->conf.conn.wake_up_event) &&
1693 (wl->conf.conn.suspend_listen_interval ==
1694 wl->conf.conn.listen_interval))
1697 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1698 wl->conf.conn.wake_up_event,
1699 wl->conf.conn.listen_interval);
1702 wl1271_error("resume: wake up conditions failed: %d",
1706 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1710 wl1271_ps_elp_sleep(wl);
1713 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1714 struct cfg80211_wowlan *wow)
1716 struct wl1271 *wl = hw->priv;
1717 struct wl12xx_vif *wlvif;
1720 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1723 /* we want to perform the recovery before suspending */
1724 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1725 wl1271_warning("postponing suspend to perform recovery");
1729 wl1271_tx_flush(wl);
1731 mutex_lock(&wl->mutex);
1732 wl->wow_enabled = true;
1733 wl12xx_for_each_wlvif(wl, wlvif) {
1734 ret = wl1271_configure_suspend(wl, wlvif, wow);
1736 mutex_unlock(&wl->mutex);
1737 wl1271_warning("couldn't prepare device to suspend");
1741 mutex_unlock(&wl->mutex);
1742 /* flush any remaining work */
1743 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1746 * disable and re-enable interrupts in order to flush
1749 wlcore_disable_interrupts(wl);
1752 * set suspended flag to avoid triggering a new threaded_irq
1753 * work. no need for spinlock as interrupts are disabled.
1755 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1757 wlcore_enable_interrupts(wl);
1758 flush_work(&wl->tx_work);
1759 flush_delayed_work(&wl->elp_work);
1764 static int wl1271_op_resume(struct ieee80211_hw *hw)
1766 struct wl1271 *wl = hw->priv;
1767 struct wl12xx_vif *wlvif;
1768 unsigned long flags;
1769 bool run_irq_work = false, pending_recovery;
1772 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1774 WARN_ON(!wl->wow_enabled);
1777 * re-enable irq_work enqueuing, and call irq_work directly if
1778 * there is a pending work.
1780 spin_lock_irqsave(&wl->wl_lock, flags);
1781 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1782 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1783 run_irq_work = true;
1784 spin_unlock_irqrestore(&wl->wl_lock, flags);
1786 mutex_lock(&wl->mutex);
1788 /* test the recovery flag before calling any SDIO functions */
1789 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1793 wl1271_debug(DEBUG_MAC80211,
1794 "run postponed irq_work directly");
1796 /* don't talk to the HW if recovery is pending */
1797 if (!pending_recovery) {
1798 ret = wlcore_irq_locked(wl);
1800 wl12xx_queue_recovery_work(wl);
1803 wlcore_enable_interrupts(wl);
1806 if (pending_recovery) {
1807 wl1271_warning("queuing forgotten recovery on resume");
1808 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1812 wl12xx_for_each_wlvif(wl, wlvif) {
1813 wl1271_configure_resume(wl, wlvif);
1817 wl->wow_enabled = false;
1818 mutex_unlock(&wl->mutex);
1824 static int wl1271_op_start(struct ieee80211_hw *hw)
1826 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1829 * We have to delay the booting of the hardware because
1830 * we need to know the local MAC address before downloading and
1831 * initializing the firmware. The MAC address cannot be changed
1832 * after boot, and without the proper MAC address, the firmware
1833 * will not function properly.
1835 * The MAC address is first known when the corresponding interface
1836 * is added. That is where we will initialize the hardware.
1842 static void wlcore_op_stop_locked(struct wl1271 *wl)
1846 if (wl->state == WLCORE_STATE_OFF) {
1847 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1849 wlcore_enable_interrupts(wl);
1855 * this must be before the cancel_work calls below, so that the work
1856 * functions don't perform further work.
1858 wl->state = WLCORE_STATE_OFF;
1861 * Use the nosync variant to disable interrupts, so the mutex could be
1862 * held while doing so without deadlocking.
1864 wlcore_disable_interrupts_nosync(wl);
1866 mutex_unlock(&wl->mutex);
1868 wlcore_synchronize_interrupts(wl);
1869 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1870 cancel_work_sync(&wl->recovery_work);
1871 wl1271_flush_deferred_work(wl);
1872 cancel_delayed_work_sync(&wl->scan_complete_work);
1873 cancel_work_sync(&wl->netstack_work);
1874 cancel_work_sync(&wl->tx_work);
1875 cancel_delayed_work_sync(&wl->elp_work);
1876 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1878 /* let's notify MAC80211 about the remaining pending TX frames */
1879 mutex_lock(&wl->mutex);
1880 wl12xx_tx_reset(wl);
1882 wl1271_power_off(wl);
1884 * In case a recovery was scheduled, interrupts were disabled to avoid
1885 * an interrupt storm. Now that the power is down, it is safe to
1886 * re-enable interrupts to balance the disable depth
1888 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1889 wlcore_enable_interrupts(wl);
1891 wl->band = IEEE80211_BAND_2GHZ;
1894 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1895 wl->channel_type = NL80211_CHAN_NO_HT;
1896 wl->tx_blocks_available = 0;
1897 wl->tx_allocated_blocks = 0;
1898 wl->tx_results_count = 0;
1899 wl->tx_packets_count = 0;
1900 wl->time_offset = 0;
1901 wl->ap_fw_ps_map = 0;
1903 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1904 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1905 memset(wl->links_map, 0, sizeof(wl->links_map));
1906 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1907 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1908 wl->active_sta_count = 0;
1909 wl->active_link_count = 0;
1911 /* The system link is always allocated */
1912 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1913 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1914 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1917 * this is performed after the cancel_work calls and the associated
1918 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1919 * get executed before all these vars have been reset.
1923 wl->tx_blocks_freed = 0;
1925 for (i = 0; i < NUM_TX_QUEUES; i++) {
1926 wl->tx_pkts_freed[i] = 0;
1927 wl->tx_allocated_pkts[i] = 0;
1930 wl1271_debugfs_reset(wl);
1932 kfree(wl->fw_status_1);
1933 wl->fw_status_1 = NULL;
1934 wl->fw_status_2 = NULL;
1935 kfree(wl->tx_res_if);
1936 wl->tx_res_if = NULL;
1937 kfree(wl->target_mem_map);
1938 wl->target_mem_map = NULL;
1941 * FW channels must be re-calibrated after recovery,
1942 * clear the last Reg-Domain channel configuration.
1944 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1947 static void wlcore_op_stop(struct ieee80211_hw *hw)
1949 struct wl1271 *wl = hw->priv;
1951 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1953 mutex_lock(&wl->mutex);
1955 wlcore_op_stop_locked(wl);
1957 mutex_unlock(&wl->mutex);
1960 static void wlcore_channel_switch_work(struct work_struct *work)
1962 struct delayed_work *dwork;
1964 struct ieee80211_vif *vif;
1965 struct wl12xx_vif *wlvif;
1968 dwork = container_of(work, struct delayed_work, work);
1969 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1972 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1974 mutex_lock(&wl->mutex);
1976 if (unlikely(wl->state != WLCORE_STATE_ON))
1979 /* check the channel switch is still ongoing */
1980 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1983 vif = wl12xx_wlvif_to_vif(wlvif);
1984 ieee80211_chswitch_done(vif, false);
1986 ret = wl1271_ps_elp_wakeup(wl);
1990 wl12xx_cmd_stop_channel_switch(wl, wlvif);
1992 wl1271_ps_elp_sleep(wl);
1994 mutex_unlock(&wl->mutex);
1997 static void wlcore_connection_loss_work(struct work_struct *work)
1999 struct delayed_work *dwork;
2001 struct ieee80211_vif *vif;
2002 struct wl12xx_vif *wlvif;
2004 dwork = container_of(work, struct delayed_work, work);
2005 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2008 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2010 mutex_lock(&wl->mutex);
2012 if (unlikely(wl->state != WLCORE_STATE_ON))
2015 /* Call mac80211 connection loss */
2016 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2019 vif = wl12xx_wlvif_to_vif(wlvif);
2020 ieee80211_connection_loss(vif);
2022 mutex_unlock(&wl->mutex);
2025 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2027 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2028 WL12XX_MAX_RATE_POLICIES);
2029 if (policy >= WL12XX_MAX_RATE_POLICIES)
2032 __set_bit(policy, wl->rate_policies_map);
2037 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2039 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2042 __clear_bit(*idx, wl->rate_policies_map);
2043 *idx = WL12XX_MAX_RATE_POLICIES;
2046 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2048 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2049 WLCORE_MAX_KLV_TEMPLATES);
2050 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2053 __set_bit(policy, wl->klv_templates_map);
2058 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2060 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2063 __clear_bit(*idx, wl->klv_templates_map);
2064 *idx = WLCORE_MAX_KLV_TEMPLATES;
2067 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2069 switch (wlvif->bss_type) {
2070 case BSS_TYPE_AP_BSS:
2072 return WL1271_ROLE_P2P_GO;
2074 return WL1271_ROLE_AP;
2076 case BSS_TYPE_STA_BSS:
2078 return WL1271_ROLE_P2P_CL;
2080 return WL1271_ROLE_STA;
2083 return WL1271_ROLE_IBSS;
2086 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2088 return WL12XX_INVALID_ROLE_TYPE;
2091 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2093 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2096 /* clear everything but the persistent data */
2097 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2099 switch (ieee80211_vif_type_p2p(vif)) {
2100 case NL80211_IFTYPE_P2P_CLIENT:
2103 case NL80211_IFTYPE_STATION:
2104 wlvif->bss_type = BSS_TYPE_STA_BSS;
2106 case NL80211_IFTYPE_ADHOC:
2107 wlvif->bss_type = BSS_TYPE_IBSS;
2109 case NL80211_IFTYPE_P2P_GO:
2112 case NL80211_IFTYPE_AP:
2113 wlvif->bss_type = BSS_TYPE_AP_BSS;
2116 wlvif->bss_type = MAX_BSS_TYPE;
2120 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2121 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2122 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2124 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2125 wlvif->bss_type == BSS_TYPE_IBSS) {
2126 /* init sta/ibss data */
2127 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2128 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2129 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2130 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2131 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2132 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2133 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2134 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2137 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2138 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2139 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2140 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2141 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2142 wl12xx_allocate_rate_policy(wl,
2143 &wlvif->ap.ucast_rate_idx[i]);
2144 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2146 * TODO: check if basic_rate shouldn't be
2147 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2148 * instead (the same thing for STA above).
2150 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2151 /* TODO: this seems to be used only for STA, check it */
2152 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2155 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2156 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2157 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2160 * mac80211 configures some values globally, while we treat them
2161 * per-interface. thus, on init, we have to copy them from wl
2163 wlvif->band = wl->band;
2164 wlvif->channel = wl->channel;
2165 wlvif->power_level = wl->power_level;
2166 wlvif->channel_type = wl->channel_type;
2168 INIT_WORK(&wlvif->rx_streaming_enable_work,
2169 wl1271_rx_streaming_enable_work);
2170 INIT_WORK(&wlvif->rx_streaming_disable_work,
2171 wl1271_rx_streaming_disable_work);
2172 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2173 wlcore_channel_switch_work);
2174 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2175 wlcore_connection_loss_work);
2176 INIT_LIST_HEAD(&wlvif->list);
2178 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2179 (unsigned long) wlvif);
2183 static int wl12xx_init_fw(struct wl1271 *wl)
2185 int retries = WL1271_BOOT_RETRIES;
2186 bool booted = false;
2187 struct wiphy *wiphy = wl->hw->wiphy;
2192 ret = wl12xx_chip_wakeup(wl, false);
2196 ret = wl->ops->boot(wl);
2200 ret = wl1271_hw_init(wl);
2208 mutex_unlock(&wl->mutex);
2209 /* Unlocking the mutex in the middle of handling is
2210 inherently unsafe. In this case we deem it safe to do,
2211 because we need to let any possibly pending IRQ out of
2212 the system (and while we are WLCORE_STATE_OFF the IRQ
2213 work function will not do anything.) Also, any other
2214 possible concurrent operations will fail due to the
2215 current state, hence the wl1271 struct should be safe. */
2216 wlcore_disable_interrupts(wl);
2217 wl1271_flush_deferred_work(wl);
2218 cancel_work_sync(&wl->netstack_work);
2219 mutex_lock(&wl->mutex);
2221 wl1271_power_off(wl);
2225 wl1271_error("firmware boot failed despite %d retries",
2226 WL1271_BOOT_RETRIES);
2230 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2232 /* update hw/fw version info in wiphy struct */
2233 wiphy->hw_version = wl->chip.id;
2234 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2235 sizeof(wiphy->fw_version));
2238 * Now we know if 11a is supported (info from the NVS), so disable
2239 * 11a channels if not supported
2241 if (!wl->enable_11a)
2242 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2244 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2245 wl->enable_11a ? "" : "not ");
2247 wl->state = WLCORE_STATE_ON;
2252 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2254 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2258 * Check whether a fw switch (i.e. moving from one loaded
2259 * fw to another) is needed. This function is also responsible
2260 * for updating wl->last_vif_count, so it must be called before
2261 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2264 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2265 struct vif_counter_data vif_counter_data,
2268 enum wl12xx_fw_type current_fw = wl->fw_type;
2269 u8 vif_count = vif_counter_data.counter;
2271 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2274 /* increase the vif count if this is a new vif */
2275 if (add && !vif_counter_data.cur_vif_running)
2278 wl->last_vif_count = vif_count;
2280 /* no need for fw change if the device is OFF */
2281 if (wl->state == WLCORE_STATE_OFF)
2284 /* no need for fw change if a single fw is used */
2285 if (!wl->mr_fw_name)
2288 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2290 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2297 * Enter "forced psm". Make sure the sta is in psm against the ap,
2298 * to make the fw switch a bit more disconnection-persistent.
2300 static void wl12xx_force_active_psm(struct wl1271 *wl)
2302 struct wl12xx_vif *wlvif;
2304 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2305 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2309 struct wlcore_hw_queue_iter_data {
2310 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2312 struct ieee80211_vif *vif;
2313 /* is the current vif among those iterated */
2317 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2318 struct ieee80211_vif *vif)
2320 struct wlcore_hw_queue_iter_data *iter_data = data;
2322 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2325 if (iter_data->cur_running || vif == iter_data->vif) {
2326 iter_data->cur_running = true;
2330 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2333 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2334 struct wl12xx_vif *wlvif)
2336 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2337 struct wlcore_hw_queue_iter_data iter_data = {};
2340 iter_data.vif = vif;
2342 /* mark all bits taken by active interfaces */
2343 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2344 IEEE80211_IFACE_ITER_RESUME_ALL,
2345 wlcore_hw_queue_iter, &iter_data);
2347 /* the current vif is already running in mac80211 (resume/recovery) */
2348 if (iter_data.cur_running) {
2349 wlvif->hw_queue_base = vif->hw_queue[0];
2350 wl1271_debug(DEBUG_MAC80211,
2351 "using pre-allocated hw queue base %d",
2352 wlvif->hw_queue_base);
2354 /* interface type might have changed type */
2355 goto adjust_cab_queue;
2358 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2359 WLCORE_NUM_MAC_ADDRESSES);
2360 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2363 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2364 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2365 wlvif->hw_queue_base);
2367 for (i = 0; i < NUM_TX_QUEUES; i++) {
2368 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2369 /* register hw queues in mac80211 */
2370 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2374 /* the last places are reserved for cab queues per interface */
2375 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2376 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2377 wlvif->hw_queue_base / NUM_TX_QUEUES;
2379 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2384 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2385 struct ieee80211_vif *vif)
2387 struct wl1271 *wl = hw->priv;
2388 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2389 struct vif_counter_data vif_count;
2393 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2394 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2396 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2397 ieee80211_vif_type_p2p(vif), vif->addr);
2399 wl12xx_get_vif_count(hw, vif, &vif_count);
2401 mutex_lock(&wl->mutex);
2402 ret = wl1271_ps_elp_wakeup(wl);
2407 * in some very corner case HW recovery scenarios its possible to
2408 * get here before __wl1271_op_remove_interface is complete, so
2409 * opt out if that is the case.
2411 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2412 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2418 ret = wl12xx_init_vif_data(wl, vif);
2423 role_type = wl12xx_get_role_type(wl, wlvif);
2424 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2429 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2433 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2434 wl12xx_force_active_psm(wl);
2435 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2436 mutex_unlock(&wl->mutex);
2437 wl1271_recovery_work(&wl->recovery_work);
2442 * TODO: after the nvs issue will be solved, move this block
2443 * to start(), and make sure here the driver is ON.
2445 if (wl->state == WLCORE_STATE_OFF) {
2447 * we still need this in order to configure the fw
2448 * while uploading the nvs
2450 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2452 ret = wl12xx_init_fw(wl);
2457 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2458 role_type, &wlvif->role_id);
2462 ret = wl1271_init_vif_specific(wl, vif);
2466 list_add(&wlvif->list, &wl->wlvif_list);
2467 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2469 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2474 wl1271_ps_elp_sleep(wl);
2476 mutex_unlock(&wl->mutex);
2481 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2482 struct ieee80211_vif *vif,
2483 bool reset_tx_queues)
2485 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2487 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2489 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2491 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2494 /* because of hardware recovery, we may get here twice */
2495 if (wl->state == WLCORE_STATE_OFF)
2498 wl1271_info("down");
2500 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2501 wl->scan_wlvif == wlvif) {
2503 * Rearm the tx watchdog just before idling scan. This
2504 * prevents just-finished scans from triggering the watchdog
2506 wl12xx_rearm_tx_watchdog_locked(wl);
2508 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2509 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2510 wl->scan_wlvif = NULL;
2511 wl->scan.req = NULL;
2512 ieee80211_scan_completed(wl->hw, true);
2515 if (wl->sched_vif == wlvif) {
2516 ieee80211_sched_scan_stopped(wl->hw);
2517 wl->sched_vif = NULL;
2520 if (wl->roc_vif == vif) {
2522 ieee80211_remain_on_channel_expired(wl->hw);
2525 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2526 /* disable active roles */
2527 ret = wl1271_ps_elp_wakeup(wl);
2531 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2532 wlvif->bss_type == BSS_TYPE_IBSS) {
2533 if (wl12xx_dev_role_started(wlvif))
2534 wl12xx_stop_dev(wl, wlvif);
2537 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2541 wl1271_ps_elp_sleep(wl);
2544 wl12xx_tx_reset_wlvif(wl, wlvif);
2546 /* clear all hlids (except system_hlid) */
2547 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2549 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2550 wlvif->bss_type == BSS_TYPE_IBSS) {
2551 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2552 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2553 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2554 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2555 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2557 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2558 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2559 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2560 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2561 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2562 wl12xx_free_rate_policy(wl,
2563 &wlvif->ap.ucast_rate_idx[i]);
2564 wl1271_free_ap_keys(wl, wlvif);
2567 dev_kfree_skb(wlvif->probereq);
2568 wlvif->probereq = NULL;
2569 if (wl->last_wlvif == wlvif)
2570 wl->last_wlvif = NULL;
2571 list_del(&wlvif->list);
2572 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2573 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2574 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2582 * Last AP, have more stations. Configure sleep auth according to STA.
2583 * Don't do thin on unintended recovery.
2585 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2586 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2589 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2590 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2591 /* Configure for power according to debugfs */
2592 if (sta_auth != WL1271_PSM_ILLEGAL)
2593 wl1271_acx_sleep_auth(wl, sta_auth);
2594 /* Configure for ELP power saving */
2596 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2600 mutex_unlock(&wl->mutex);
2602 del_timer_sync(&wlvif->rx_streaming_timer);
2603 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2604 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2605 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2607 mutex_lock(&wl->mutex);
2610 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2611 struct ieee80211_vif *vif)
2613 struct wl1271 *wl = hw->priv;
2614 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2615 struct wl12xx_vif *iter;
2616 struct vif_counter_data vif_count;
2618 wl12xx_get_vif_count(hw, vif, &vif_count);
2619 mutex_lock(&wl->mutex);
2621 if (wl->state == WLCORE_STATE_OFF ||
2622 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2626 * wl->vif can be null here if someone shuts down the interface
2627 * just when hardware recovery has been started.
2629 wl12xx_for_each_wlvif(wl, iter) {
2633 __wl1271_op_remove_interface(wl, vif, true);
2636 WARN_ON(iter != wlvif);
2637 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2638 wl12xx_force_active_psm(wl);
2639 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2640 wl12xx_queue_recovery_work(wl);
2643 mutex_unlock(&wl->mutex);
2646 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2647 struct ieee80211_vif *vif,
2648 enum nl80211_iftype new_type, bool p2p)
2650 struct wl1271 *wl = hw->priv;
2653 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2654 wl1271_op_remove_interface(hw, vif);
2656 vif->type = new_type;
2658 ret = wl1271_op_add_interface(hw, vif);
2660 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2664 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2667 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2670 * One of the side effects of the JOIN command is that is clears
2671 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2672 * to a WPA/WPA2 access point will therefore kill the data-path.
2673 * Currently the only valid scenario for JOIN during association
2674 * is on roaming, in which case we will also be given new keys.
2675 * Keep the below message for now, unless it starts bothering
2676 * users who really like to roam a lot :)
2678 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2679 wl1271_info("JOIN while associated.");
2681 /* clear encryption type */
2682 wlvif->encryption_type = KEY_NONE;
2685 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2687 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2689 * TODO: this is an ugly workaround for wl12xx fw
2690 * bug - we are not able to tx/rx after the first
2691 * start_sta, so make dummy start+stop calls,
2692 * and then call start_sta again.
2693 * this should be fixed in the fw.
2695 wl12xx_cmd_role_start_sta(wl, wlvif);
2696 wl12xx_cmd_role_stop_sta(wl, wlvif);
2699 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Find the SSID IE in an skb (IEs start at @offset into the frame) and
 * cache ssid/ssid_len on the vif. Errors out when the IE is missing or
 * longer than IEEE80211_MAX_SSID_LEN.
 */
2705 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2709 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2713 wl1271_error("No SSID in IEs!");
2718 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2719 wl1271_error("SSID is too long!");
/* ptr points at the IE header; the SSID payload starts at ptr+2 */
2723 wlvif->ssid_len = ssid_len;
2724 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Derive the vif's SSID from the probe request mac80211 built for the AP
 * we are (re)associating with. Only supported for STA vifs.
 */
2728 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2730 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2731 struct sk_buff *skb;
2734 /* we currently only support setting the ssid from the ap probe req */
2735 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2738 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs begin after the fixed probe-request header */
2742 ieoffset = offsetof(struct ieee80211_mgmt,
2743 u.probe_req.variable);
2744 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Bring a STA vif into the associated state: cache the association
 * parameters from bss_conf, build the maintenance templates (ps-poll,
 * probe-req, klv null-data), enable connection monitoring and the
 * keep-alive machinery, and sync the fw power-save mode with mac80211's
 * default (ACTIVE).
 * NOTE(review): the error-check lines between commands are elided in this
 * excerpt; the calls below are order-sensitive (see the comment at 2791).
 */
2750 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2751 struct ieee80211_bss_conf *bss_conf,
/* snapshot association parameters from mac80211 */
2757 wlvif->aid = bss_conf->aid;
2758 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2759 wlvif->beacon_int = bss_conf->beacon_int;
2760 wlvif->wmm_enabled = bss_conf->qos;
2762 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2765 * with wl1271, we don't need to update the
2766 * beacon_int and dtim_period, because the firmware
2767 * updates it by itself when the first beacon is
2768 * received after a join.
2770 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2775 * Get a template for hardware connection maintenance
2777 dev_kfree_skb(wlvif->probereq);
2778 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2781 ieoffset = offsetof(struct ieee80211_mgmt,
2782 u.probe_req.variable);
2783 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2785 /* enable the connection monitoring feature */
2786 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2791 * The join command disable the keep-alive mode, shut down its process,
2792 * and also clear the template config, so we need to reset it all after
2793 * the join. The acx_aid starts the keep-alive process, and the order
2794 * of the commands below is relevant.
2796 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2800 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2804 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2808 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2809 wlvif->sta.klv_template_id,
2810 ACX_KEEP_ALIVE_TPL_VALID);
2815 * The default fw psm configuration is AUTO, while mac80211 default
2816 * setting is off (ACTIVE), so sync the fw with the correct value.
2818 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2824 wl1271_tx_enabled_rates_get(wl,
2827 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): drop the STA_ASSOCIATED / IBSS_JOINED flag,
 * free the cached probe-request template, disable connection monitoring
 * and keep-alive, abort any in-flight channel switch, and invalidate the
 * keep-alive template. Returns early (lines elided here) when the vif was
 * not associated/joined in the first place.
 */
2835 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2838 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2840 /* make sure we are connected (sta) joined */
2842 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2845 /* make sure we are joined (ibss) */
2847 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2851 /* use defaults when not associated */
2854 /* free probe-request template */
2855 dev_kfree_skb(wlvif->probereq);
2856 wlvif->probereq = NULL;
2858 /* disable connection monitor features */
2859 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2863 /* Disable the keep-alive feature */
2864 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
/* abort a channel switch that was still in progress */
2869 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2870 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2872 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2873 ieee80211_chswitch_done(vif, false);
2874 cancel_delayed_work(&wlvif->channel_switch_work);
2877 /* invalidate keep-alive template */
2878 wl1271_acx_keep_alive_config(wl, wlvif,
2879 wlvif->sta.klv_template_id,
2880 ACX_KEEP_ALIVE_TPL_INVALID);
2885 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2887 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2888 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Apply a hw-config change to one vif. Currently only pushes a new tx
 * power level to the firmware when it differs from the cached value.
 */
2891 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2892 struct ieee80211_conf *conf, u32 changed)
2896 if (conf->power_level != wlvif->power_level) {
2897 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
/* cache only after the firmware accepted the new level */
2901 wlvif->power_level = conf->power_level;
/*
 * mac80211 op: global hardware configuration change. Records the new
 * power level, then (with the chip awake, under wl->mutex) re-applies the
 * config to every active vif via wl12xx_config_vif().
 */
2907 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2909 struct wl1271 *wl = hw->priv;
2910 struct wl12xx_vif *wlvif;
2911 struct ieee80211_conf *conf = &hw->conf;
2914 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2916 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2918 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2921 mutex_lock(&wl->mutex);
2923 if (changed & IEEE80211_CONF_CHANGE_POWER)
2924 wl->power_level = conf->power_level;
2926 if (unlikely(wl->state != WLCORE_STATE_ON))
2929 ret = wl1271_ps_elp_wakeup(wl);
2933 /* configure each interface */
2934 wl12xx_for_each_wlvif(wl, wlvif) {
2935 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2941 wl1271_ps_elp_sleep(wl);
2944 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in wl1271_op_prepare_multicast() and
 * consumed in wl1271_op_configure_filter().
 * NOTE(review): additional fields (enabled flag, mc_list_length) are used
 * by those functions but their declarations are elided in this excerpt.
 */
2949 struct wl1271_filter_params {
2952 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 op: snapshot the multicast address list into a freshly
 * allocated wl1271_filter_params. Runs in atomic context (GFP_ATOMIC).
 * The pointer is smuggled back to configure_filter() through the u64
 * return value; configure_filter() owns (frees) it from then on.
 * When the list exceeds ACX_MC_ADDRESS_GROUP_MAX, filtering is disabled
 * instead of copying a truncated list.
 */
2955 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2956 struct netdev_hw_addr_list *mc_list)
2958 struct wl1271_filter_params *fp;
2959 struct netdev_hw_addr *ha;
2961 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2963 wl1271_error("Out of memory setting filters.");
2967 /* update multicast filtering parameters */
2968 fp->mc_list_length = 0;
2969 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2970 fp->enabled = false;
2973 netdev_hw_addr_list_for_each(ha, mc_list) {
2974 memcpy(fp->mc_list[fp->mc_list_length],
2975 ha->addr, ETH_ALEN);
2976 fp->mc_list_length++;
2980 return (u64)(unsigned long)fp;
/*
 * Receive-filter flags this driver can honor; configure_filter() masks
 * both 'changed' and 'total' with this set.
 * NOTE(review): continuation lines of this macro are elided in this
 * excerpt — confirm the full flag list against the original file.
 */
2983 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2986 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 op: apply rx-filter changes. For non-AP vifs, programs the
 * firmware multicast group-address table from the snapshot built in
 * prepare_multicast (or disables it under FIF_ALLMULTI). The firmware
 * has no direct filter API for the rest — see the comment at 3032.
 * NOTE(review): kfree(fp) presumably happens on the elided exit path.
 */
2990 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2991 unsigned int changed,
2992 unsigned int *total, u64 multicast)
2994 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2995 struct wl1271 *wl = hw->priv;
2996 struct wl12xx_vif *wlvif;
3000 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3001 " total %x", changed, *total);
3003 mutex_lock(&wl->mutex);
3005 *total &= WL1271_SUPPORTED_FILTERS;
3006 changed &= WL1271_SUPPORTED_FILTERS;
3008 if (unlikely(wl->state != WLCORE_STATE_ON))
3011 ret = wl1271_ps_elp_wakeup(wl);
3015 wl12xx_for_each_wlvif(wl, wlvif) {
3016 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3017 if (*total & FIF_ALLMULTI)
3018 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3022 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3025 fp->mc_list_length);
3032 * the fw doesn't provide an api to configure the filters. instead,
3033 * the filters configuration is based on the active roles / ROC
3038 wl1271_ps_elp_sleep(wl);
3041 mutex_unlock(&wl->mutex);
/*
 * Record an AP key to be programmed later (once the AP role has started,
 * see wl1271_ap_init_hwenc). Finds a free slot in wlvif->ap.recorded_keys,
 * rejects oversized keys and duplicate ids, and stores a kzalloc'd copy.
 */
3045 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3046 u8 id, u8 key_type, u8 key_size,
3047 const u8 *key, u8 hlid, u32 tx_seq_32,
3050 struct wl1271_ap_key *ap_key;
3053 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3055 if (key_size > MAX_KEY_SIZE)
3059 * Find next free entry in ap_keys. Also check we are not replacing
3062 for (i = 0; i < MAX_NUM_KEYS; i++) {
3063 if (wlvif->ap.recorded_keys[i] == NULL)
3066 if (wlvif->ap.recorded_keys[i]->id == id) {
3067 wl1271_warning("trying to record key replacement");
/* no free slot found */
3072 if (i == MAX_NUM_KEYS)
3075 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3080 ap_key->key_type = key_type;
3081 ap_key->key_size = key_size;
3082 memcpy(ap_key->key, key, key_size);
3083 ap_key->hlid = hlid;
3084 ap_key->tx_seq_32 = tx_seq_32;
3085 ap_key->tx_seq_16 = tx_seq_16;
3087 wlvif->ap.recorded_keys[i] = ap_key;
3091 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3095 for (i = 0; i < MAX_NUM_KEYS; i++) {
3096 kfree(wlvif->ap.recorded_keys[i]);
3097 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Program all keys recorded before the AP role started. Keys with no
 * specific hlid go to the broadcast link. If any WEP key was added, also
 * push the default WEP key index. The recorded list is freed at the end
 * regardless of outcome.
 */
3101 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3104 struct wl1271_ap_key *key;
3105 bool wep_key_added = false;
3107 for (i = 0; i < MAX_NUM_KEYS; i++) {
3109 if (wlvif->ap.recorded_keys[i] == NULL)
3112 key = wlvif->ap.recorded_keys[i];
3114 if (hlid == WL12XX_INVALID_LINK_ID)
3115 hlid = wlvif->ap.bcast_hlid;
3117 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3118 key->id, key->key_type,
3119 key->key_size, key->key,
3120 hlid, key->tx_seq_32,
3125 if (key->key_type == KEY_WEP)
3126 wep_key_added = true;
3129 if (wep_key_added) {
3130 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3131 wlvif->ap.bcast_hlid);
3137 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key add/remove dispatcher.
 * AP path: resolve the hlid (per-station or broadcast); if the AP role
 * has not started yet, record the key for later (wl1271_record_ap_key),
 * otherwise program it directly with cmd_set_ap_key.
 * STA path: target the station's address or broadcast; silently ignore
 * unicast KEY_REMOVE (the fw clears those on the next JOIN) and removals
 * after the hlid is gone; program via cmd_set_sta_key, and make sure a
 * default WEP key index is configured for WEP.
 */
3141 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3142 u16 action, u8 id, u8 key_type,
3143 u8 key_size, const u8 *key, u32 tx_seq_32,
3144 u16 tx_seq_16, struct ieee80211_sta *sta)
3147 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3150 struct wl1271_station *wl_sta;
/* per-station key when sta is given, broadcast/group key otherwise */
3154 wl_sta = (struct wl1271_station *)sta->drv_priv;
3155 hlid = wl_sta->hlid;
3157 hlid = wlvif->ap.bcast_hlid;
3160 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3162 * We do not support removing keys after AP shutdown.
3163 * Pretend we do to make mac80211 happy.
3165 if (action != KEY_ADD_OR_REPLACE)
3168 ret = wl1271_record_ap_key(wl, wlvif, id,
3170 key, hlid, tx_seq_32,
3173 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3174 id, key_type, key_size,
3175 key, hlid, tx_seq_32,
3183 static const u8 bcast_addr[ETH_ALEN] = {
3184 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3187 addr = sta ? sta->addr : bcast_addr;
3189 if (is_zero_ether_addr(addr)) {
3190 /* We dont support TX only encryption */
3194 /* The wl1271 does not allow to remove unicast keys - they
3195 will be cleared automatically on next CMD_JOIN. Ignore the
3196 request silently, as we dont want the mac80211 to emit
3197 an error message. */
3198 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3201 /* don't remove key if hlid was already deleted */
3202 if (action == KEY_REMOVE &&
3203 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3206 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3207 id, key_type, key_size,
3208 key, addr, tx_seq_32,
3213 /* the default WEP key needs to be configured at least once */
3214 if (key_type == KEY_WEP) {
3215 ret = wl12xx_cmd_set_default_wep_key(wl,
/*
 * mac80211 op: set_key entry point. For ciphers whose key may change the
 * fw spare-block accounting (GEM, TKIP) the tx queues are stopped and
 * flushed around the operation, then the hw-specific set_key op is called
 * with the chip awake under wl->mutex.
 */
3226 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3227 struct ieee80211_vif *vif,
3228 struct ieee80211_sta *sta,
3229 struct ieee80211_key_conf *key_conf)
3231 struct wl1271 *wl = hw->priv;
3233 bool might_change_spare =
3234 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3235 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3237 if (might_change_spare) {
3239 * stop the queues and flush to ensure the next packets are
3240 * in sync with FW spare block accounting
3242 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3243 wl1271_tx_flush(wl);
3246 mutex_lock(&wl->mutex);
3248 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3250 goto out_wake_queues;
3253 ret = wl1271_ps_elp_wakeup(wl);
3255 goto out_wake_queues;
3257 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3259 wl1271_ps_elp_sleep(wl);
/* re-enable anything we stopped above */
3262 if (might_change_spare)
3263 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3265 mutex_unlock(&wl->mutex);
/*
 * Generic set_key implementation shared by the hw variants: resolve the
 * target hlid, seed the tx sequence counters from the link's freed-packet
 * count, map the cipher suite to a fw key type, then add/replace or
 * remove the key via wl1271_set_key(). On a unicast (or WEP) key change
 * for a STA vif, the ARP response template is rebuilt so it is encrypted
 * with the new key type.
 */
3270 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3271 struct ieee80211_vif *vif,
3272 struct ieee80211_sta *sta,
3273 struct ieee80211_key_conf *key_conf)
3275 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3282 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3284 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3285 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3286 key_conf->cipher, key_conf->keyidx,
3287 key_conf->keylen, key_conf->flags);
3288 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
/* pick the link the key applies to */
3290 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3292 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3293 hlid = wl_sta->hlid;
3295 hlid = wlvif->ap.bcast_hlid;
3298 hlid = wlvif->sta.hlid;
3300 if (hlid != WL12XX_INVALID_LINK_ID) {
3301 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3302 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3303 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
/* map the 802.11 cipher suite onto the firmware key type */
3306 switch (key_conf->cipher) {
3307 case WLAN_CIPHER_SUITE_WEP40:
3308 case WLAN_CIPHER_SUITE_WEP104:
3311 key_conf->hw_key_idx = key_conf->keyidx;
3313 case WLAN_CIPHER_SUITE_TKIP:
3314 key_type = KEY_TKIP;
3315 key_conf->hw_key_idx = key_conf->keyidx;
3317 case WLAN_CIPHER_SUITE_CCMP:
3319 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3321 case WL1271_CIPHER_SUITE_GEM:
3325 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3332 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3333 key_conf->keyidx, key_type,
3334 key_conf->keylen, key_conf->key,
3335 tx_seq_32, tx_seq_16, sta);
3337 wl1271_error("Could not add or replace key");
3342 * reconfiguring arp response if the unicast (or common)
3343 * encryption key type was changed
3345 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3346 (sta || key_type == KEY_WEP) &&
3347 wlvif->encryption_type != key_type) {
3348 wlvif->encryption_type = key_type;
3349 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3351 wl1271_warning("build arp rsp failed: %d", ret);
3358 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3359 key_conf->keyidx, key_type,
3360 key_conf->keylen, key_conf->key,
3363 wl1271_error("Could not remove key");
3369 wl1271_error("Unsupported key cmd 0x%x", cmd);
3375 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * Push the current regulatory domain to the firmware on chips that need
 * it (WLCORE_QUIRK_REGDOMAIN_CONF). Triggers a recovery if the command
 * fails.
 */
3377 void wlcore_regdomain_config(struct wl1271 *wl)
3381 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3384 mutex_lock(&wl->mutex);
3386 if (unlikely(wl->state != WLCORE_STATE_ON))
3389 ret = wl1271_ps_elp_wakeup(wl);
3393 ret = wlcore_cmd_regdomain_config_locked(wl);
3395 wl12xx_queue_recovery_work(wl);
3399 wl1271_ps_elp_sleep(wl);
3401 mutex_unlock(&wl->mutex);
/*
 * mac80211 op: start a hardware scan. Uses the first requested SSID (if
 * any), refuses to scan while any role is in remain-on-channel, and
 * delegates to wlcore_scan().
 */
3404 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3405 struct ieee80211_vif *vif,
3406 struct cfg80211_scan_request *req)
3408 struct wl1271 *wl = hw->priv;
3413 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3416 ssid = req->ssids[0].ssid;
3417 len = req->ssids[0].ssid_len;
3420 mutex_lock(&wl->mutex);
3422 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3424 * We cannot return -EBUSY here because cfg80211 will expect
3425 * a call to ieee80211_scan_completed if we do - in this case
3426 * there won't be any call.
3432 ret = wl1271_ps_elp_wakeup(wl);
3436 /* fail if there is any role in ROC */
3437 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3438 /* don't allow scanning right now */
3443 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3445 wl1271_ps_elp_sleep(wl);
3447 mutex_unlock(&wl->mutex);
/*
 * mac80211 op: abort an in-progress hardware scan. Stops the fw scan if
 * it has not already finished, rearms the tx watchdog, resets the scan
 * state, and reports the scan as aborted to mac80211. The completion
 * work is cancelled outside the mutex to avoid deadlocking with it.
 */
3452 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3453 struct ieee80211_vif *vif)
3455 struct wl1271 *wl = hw->priv;
3456 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3459 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3461 mutex_lock(&wl->mutex);
3463 if (unlikely(wl->state != WLCORE_STATE_ON))
3466 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3469 ret = wl1271_ps_elp_wakeup(wl);
3473 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3474 ret = wl->ops->scan_stop(wl, wlvif);
3480 * Rearm the tx watchdog just before idling scan. This
3481 * prevents just-finished scans from triggering the watchdog
3483 wl12xx_rearm_tx_watchdog_locked(wl);
3485 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3486 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3487 wl->scan_wlvif = NULL;
3488 wl->scan.req = NULL;
/* 'true' => tell mac80211 the scan was aborted */
3489 ieee80211_scan_completed(wl->hw, true);
3492 wl1271_ps_elp_sleep(wl);
3494 mutex_unlock(&wl->mutex);
3496 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 op: start a scheduled (periodic) scan via the hw-specific op
 * and remember which vif owns it in wl->sched_vif.
 */
3499 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3500 struct ieee80211_vif *vif,
3501 struct cfg80211_sched_scan_request *req,
3502 struct ieee80211_sched_scan_ies *ies)
3504 struct wl1271 *wl = hw->priv;
3505 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3508 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3510 mutex_lock(&wl->mutex);
3512 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3517 ret = wl1271_ps_elp_wakeup(wl);
3521 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3525 wl->sched_vif = wlvif;
3528 wl1271_ps_elp_sleep(wl);
3530 mutex_unlock(&wl->mutex);
/*
 * mac80211 op: stop the scheduled scan via the hw-specific op, with the
 * chip awake under wl->mutex.
 */
3534 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3535 struct ieee80211_vif *vif)
3537 struct wl1271 *wl = hw->priv;
3538 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3541 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3543 mutex_lock(&wl->mutex);
3545 if (unlikely(wl->state != WLCORE_STATE_ON))
3548 ret = wl1271_ps_elp_wakeup(wl);
3552 wl->ops->sched_scan_stop(wl, wlvif);
3554 wl1271_ps_elp_sleep(wl);
3556 mutex_unlock(&wl->mutex);
/*
 * mac80211 op: push a new fragmentation threshold to the firmware.
 */
3559 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3561 struct wl1271 *wl = hw->priv;
3564 mutex_lock(&wl->mutex);
3566 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3571 ret = wl1271_ps_elp_wakeup(wl);
3575 ret = wl1271_acx_frag_threshold(wl, value);
3577 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3579 wl1271_ps_elp_sleep(wl);
3582 mutex_unlock(&wl->mutex);
/*
 * mac80211 op: push a new RTS threshold to the firmware, once per active
 * vif (the setting is per-role in this firmware).
 */
3587 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3589 struct wl1271 *wl = hw->priv;
3590 struct wl12xx_vif *wlvif;
3593 mutex_lock(&wl->mutex);
3595 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3600 ret = wl1271_ps_elp_wakeup(wl);
3604 wl12xx_for_each_wlvif(wl, wlvif) {
3605 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3607 wl1271_warning("set rts threshold failed: %d", ret);
3609 wl1271_ps_elp_sleep(wl);
3612 mutex_unlock(&wl->mutex);
/*
 * Strip one information element (by EID) out of a management frame skb
 * in place: locate the IE after @ieoffset, close the gap with memmove,
 * and trim the skb by the IE's length.
 * NOTE(review): the NULL-check / length computation lines are elided in
 * this excerpt.
 */
3617 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3620 const u8 *next, *end = skb->data + skb->len;
3621 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3622 skb->len - ieoffset);
3627 memmove(ie, next, end - next);
3628 skb_trim(skb, skb->len - len);
/*
 * Same as wl12xx_remove_ie() but keyed on a vendor-specific IE (OUI +
 * OUI type): remove it from the skb in place and trim the frame.
 */
3631 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3632 unsigned int oui, u8 oui_type,
3636 const u8 *next, *end = skb->data + skb->len;
3637 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3638 skb->data + ieoffset,
3639 skb->len - ieoffset);
3644 memmove(ie, next, end - next);
3645 skb_trim(skb, skb->len - len);
/*
 * Upload the probe-response template mac80211 built for an AP vif and
 * mark it as explicitly set (so beacon-derived templates won't override
 * it later — see wlcore_set_beacon_template).
 */
3648 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3649 struct ieee80211_vif *vif)
3651 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3652 struct sk_buff *skb;
3655 skb = ieee80211_proberesp_get(wl->hw, vif);
3659 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3660 CMD_TEMPL_AP_PROBE_RESPONSE,
3669 wl1271_debug(DEBUG_AP, "probe response updated");
3670 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Upload a probe-response template for hidden-SSID legacy setups: when
 * the vif has no SSID cached, splice the real SSID from bss_conf into
 * the beacon-derived frame (replacing its SSID IE) before uploading.
 * Bails out if the rebuilt template would exceed the template size or
 * the source frame lacks an SSID IE.
 */
3676 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3677 struct ieee80211_vif *vif,
3679 size_t probe_rsp_len,
3682 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3683 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3684 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3685 int ssid_ie_offset, ie_offset, templ_len;
3688 /* no need to change probe response if the SSID is set correctly */
3689 if (wlvif->ssid_len > 0)
3690 return wl1271_cmd_template_set(wl, wlvif->role_id,
3691 CMD_TEMPL_AP_PROBE_RESPONSE,
3696 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3697 wl1271_error("probe_rsp template too big");
3701 /* start searching from IE offset */
3702 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3704 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3705 probe_rsp_len - ie_offset);
3707 wl1271_error("No SSID in beacon!");
3711 ssid_ie_offset = ptr - probe_rsp_data;
/* skip over the original (possibly empty) SSID IE */
3712 ptr += (ptr[1] + 2);
3714 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3716 /* insert SSID from bss_conf */
3717 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3718 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3719 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3720 bss_conf->ssid, bss_conf->ssid_len);
3721 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* append the remaining IEs after the replaced SSID */
3723 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3724 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3725 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3727 return wl1271_cmd_template_set(wl, wlvif->role_id,
3728 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss changes: slot time, preamble length and CTS
 * protection. Failures are only logged (warnings), except where the
 * elided lines may return early — confirm against the full file.
 */
3734 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3735 struct ieee80211_vif *vif,
3736 struct ieee80211_bss_conf *bss_conf,
3739 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3742 if (changed & BSS_CHANGED_ERP_SLOT) {
3743 if (bss_conf->use_short_slot)
3744 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3746 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3748 wl1271_warning("Set slot time failed %d", ret);
3753 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3754 if (bss_conf->use_short_preamble)
3755 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3757 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3760 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3761 if (bss_conf->use_cts_prot)
3762 ret = wl1271_acx_cts_protect(wl, wlvif,
3765 ret = wl1271_acx_cts_protect(wl, wlvif,
3766 CTSPROTECT_DISABLE);
3768 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Upload the current beacon as a firmware template, and (unless a probe
 * response was already set explicitly by userspace) derive a probe
 * response template from it: strip the TIM and P2P IEs, rewrite the
 * frame control to PROBE_RESP, and upload. Also refreshes the vif's
 * cached SSID and WMM state from the beacon. The skb is freed on all
 * paths.
 */
3777 static int wlcore_set_beacon_template(struct wl1271 *wl,
3778 struct ieee80211_vif *vif,
3781 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3782 struct ieee80211_hdr *hdr;
3785 int ieoffset = offsetof(struct ieee80211_mgmt,
3787 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3795 wl1271_debug(DEBUG_MASTER, "beacon updated");
3797 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3799 dev_kfree_skb(beacon);
3802 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3803 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3805 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3810 dev_kfree_skb(beacon);
/* WMM is considered enabled iff the beacon carries the WMM vendor IE */
3814 wlvif->wmm_enabled =
3815 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3816 WLAN_OUI_TYPE_MICROSOFT_WMM,
3817 beacon->data + ieoffset,
3818 beacon->len - ieoffset);
3821 * In case we already have a probe-resp beacon set explicitly
3822 * by usermode, don't use the beacon data.
3824 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3827 /* remove TIM ie from probe response */
3828 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3831 * remove p2p ie from probe response.
3832 * the fw reponds to probe requests that don't include
3833 * the p2p ie. probe requests with p2p ie will be passed,
3834 * and will be responded by the supplicant (the spec
3835 * forbids including the p2p ie when responding to probe
3836 * requests that didn't include it).
3838 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3839 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* turn the beacon frame into a probe response frame */
3841 hdr = (struct ieee80211_hdr *) beacon->data;
3842 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3843 IEEE80211_STYPE_PROBE_RESP);
3845 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3850 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3851 CMD_TEMPL_PROBE_RESPONSE,
3856 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss changes: cache a new beacon interval, refresh
 * the AP probe-response template, and re-upload the beacon template.
 * Logs an error if any of it fails.
 */
3864 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3865 struct ieee80211_vif *vif,
3866 struct ieee80211_bss_conf *bss_conf,
3869 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3870 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3873 if (changed & BSS_CHANGED_BEACON_INT) {
3874 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3875 bss_conf->beacon_int);
3877 wlvif->beacon_int = bss_conf->beacon_int;
3880 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3881 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3883 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3886 if (changed & BSS_CHANGED_BEACON) {
3887 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3894 wl1271_error("beacon info change failed: %d", ret);
3898 /* AP mode changes */
/*
 * Handle bss_info changes for an AP vif: rebuild rate policies and
 * templates on basic-rate changes, start/stop the AP role when beaconing
 * is enabled/disabled (programming recorded keys on start), apply ERP
 * changes, and push HT operation-mode updates.
 */
3899 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3900 struct ieee80211_vif *vif,
3901 struct ieee80211_bss_conf *bss_conf,
3904 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3907 if (changed & BSS_CHANGED_BASIC_RATES) {
3908 u32 rates = bss_conf->basic_rates;
3910 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3912 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3913 wlvif->basic_rate_set);
3915 ret = wl1271_init_ap_rates(wl, wlvif);
3917 wl1271_error("AP rate policy change failed %d", ret);
/* rate change invalidates the templates; rebuild them all */
3921 ret = wl1271_ap_init_templates(wl, vif);
3925 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3929 ret = wlcore_set_beacon_template(wl, vif, true);
3934 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3938 if (changed & BSS_CHANGED_BEACON_ENABLED) {
3939 if (bss_conf->enable_beacon) {
3940 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3941 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* program keys recorded before the role existed */
3945 ret = wl1271_ap_init_hwenc(wl, wlvif);
3949 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3950 wl1271_debug(DEBUG_AP, "started AP");
3953 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3954 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3958 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3959 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3961 wl1271_debug(DEBUG_AP, "stopped AP");
3966 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3970 /* Handle HT information change */
3971 if ((changed & BSS_CHANGED_HT) &&
3972 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
3973 ret = wl1271_acx_set_ht_information(wl, wlvif,
3974 bss_conf->ht_operation_mode);
3976 wl1271_warning("Set ht information failed %d", ret);
/*
 * Configure a STA vif for a new BSSID: recompute the basic/peer rate
 * sets from bss_conf and the AP's supported rates, stop any scheduled
 * scan owned by this vif (sched_scan is only supported while
 * disconnected), rebuild the null-data / qos-null templates, cache the
 * SSID, and mark the vif IN_USE.
 */
3985 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3986 struct ieee80211_bss_conf *bss_conf,
3992 wl1271_debug(DEBUG_MAC80211,
3993 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3994 bss_conf->bssid, bss_conf->aid,
3995 bss_conf->beacon_int,
3996 bss_conf->basic_rates, sta_rate_set);
3998 wlvif->beacon_int = bss_conf->beacon_int;
3999 rates = bss_conf->basic_rates;
4000 wlvif->basic_rate_set =
4001 wl1271_tx_enabled_rates_get(wl, rates,
4004 wl1271_tx_min_rate_get(wl,
4005 wlvif->basic_rate_set);
4009 wl1271_tx_enabled_rates_get(wl,
4013 /* we only support sched_scan while not connected */
4014 if (wl->sched_vif == wlvif)
4015 wl->ops->sched_scan_stop(wl, wlvif);
4017 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4021 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4025 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4029 wlcore_set_ssid(wl, wlvif);
4031 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): restore the per-band default rates, stop the
 * STA role if it was in use, and clear the IN_USE flag.
 */
4036 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4040 /* revert back to minimum rates for the current band */
4041 wl1271_set_band_rate(wl, wlvif);
4042 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4044 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4048 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4049 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4050 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4055 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4058 /* STA/IBSS mode changes */
/*
 * Handle bss_info changes for a STA or IBSS vif. In order: beacon info,
 * IBSS join/leave, CQM thresholds, peer capability lookup, BSSID
 * set/clear, IBSS rates, ERP changes, the JOIN command (when do_join was
 * set along the way), association state, power-save mode, HT peer
 * capabilities/operation mode, and finally ARP filtering.
 * NOTE(review): many error-check/goto lines and some branch lines are
 * elided in this excerpt — the original flow should be confirmed against
 * the full file before modifying.
 */
4059 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4060 struct ieee80211_vif *vif,
4061 struct ieee80211_bss_conf *bss_conf,
4064 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4065 bool do_join = false;
4066 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4067 bool ibss_joined = false;
4068 u32 sta_rate_set = 0;
4070 struct ieee80211_sta *sta;
4071 bool sta_exists = false;
4072 struct ieee80211_sta_ht_cap sta_ht_cap;
4075 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4081 if (changed & BSS_CHANGED_IBSS) {
4082 if (bss_conf->ibss_joined) {
4083 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
/* IBSS left: tear down association state and the STA role */
4086 wlcore_unset_assoc(wl, wlvif);
4087 wl12xx_cmd_role_stop_sta(wl, wlvif);
4091 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4094 /* Need to update the SSID (for filtering etc) */
4095 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4098 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4099 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4100 bss_conf->enable_beacon ? "enabled" : "disabled");
4105 if (changed & BSS_CHANGED_CQM) {
4106 bool enable = false;
4107 if (bss_conf->cqm_rssi_thold)
4109 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4110 bss_conf->cqm_rssi_thold,
4111 bss_conf->cqm_rssi_hyst);
4114 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4117 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4118 BSS_CHANGED_ASSOC)) {
/* under RCU (elided): snapshot the AP's rates and HT caps */
4120 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4122 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4124 /* save the supp_rates of the ap */
4125 sta_rate_set = sta->supp_rates[wlvif->band];
4126 if (sta->ht_cap.ht_supported)
4128 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4129 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4130 sta_ht_cap = sta->ht_cap;
4137 if (changed & BSS_CHANGED_BSSID) {
4138 if (!is_zero_ether_addr(bss_conf->bssid)) {
4139 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4144 /* Need to update the BSSID (for filtering etc) */
4147 ret = wlcore_clear_bssid(wl, wlvif);
4153 if (changed & BSS_CHANGED_IBSS) {
4154 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4155 bss_conf->ibss_joined);
4157 if (bss_conf->ibss_joined) {
4158 u32 rates = bss_conf->basic_rates;
4159 wlvif->basic_rate_set =
4160 wl1271_tx_enabled_rates_get(wl, rates,
4163 wl1271_tx_min_rate_get(wl,
4164 wlvif->basic_rate_set);
4166 /* by default, use 11b + OFDM rates */
4167 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4168 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4174 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
/* issue the JOIN if any of the above requested it */
4179 ret = wlcore_join(wl, wlvif);
4181 wl1271_warning("cmd join failed %d", ret);
4186 if (changed & BSS_CHANGED_ASSOC) {
4187 if (bss_conf->assoc) {
4188 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4193 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4194 wl12xx_set_authorized(wl, wlvif);
4196 wlcore_unset_assoc(wl, wlvif);
4200 if (changed & BSS_CHANGED_PS) {
4201 if ((bss_conf->ps) &&
4202 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4203 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4207 if (wl->conf.conn.forced_ps) {
4208 ps_mode = STATION_POWER_SAVE_MODE;
4209 ps_mode_str = "forced";
4211 ps_mode = STATION_AUTO_PS_MODE;
4212 ps_mode_str = "auto";
4215 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4217 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4219 wl1271_warning("enter %s ps failed %d",
4221 } else if (!bss_conf->ps &&
4222 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4223 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4225 ret = wl1271_ps_set_mode(wl, wlvif,
4226 STATION_ACTIVE_MODE);
4228 wl1271_warning("exit auto ps failed %d", ret);
4232 /* Handle new association with HT. Do this after join. */
4234 (changed & BSS_CHANGED_HT)) {
4236 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4238 ret = wlcore_hw_set_peer_cap(wl,
4244 wl1271_warning("Set ht cap failed %d", ret);
4250 ret = wl1271_acx_set_ht_information(wl, wlvif,
4251 bss_conf->ht_operation_mode);
4253 wl1271_warning("Set ht information failed %d",
4260 /* Handle arp filtering. Done after join. */
4261 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4262 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4263 __be32 addr = bss_conf->arp_addr_list[0];
4264 wlvif->sta.qos = bss_conf->qos;
4265 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4267 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4268 wlvif->ip_addr = addr;
4270 * The template should have been configured only upon
4271 * association. however, it seems that the correct ip
4272 * isn't being set (when sending), so we have to
4273 * reconfigure the template upon every ip change.
4275 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4277 wl1271_warning("build arp rsp failed: %d", ret);
4281 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4282 (ACX_ARP_FILTER_ARP_FILTERING |
4283 ACX_ARP_FILTER_AUTO_ARP),
/* no single IP / not associated: disable the fw ARP filter */
4287 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 op: bss_info_changed dispatcher. Cancels a pending
 * connection-loss work when association state changes on a STA vif,
 * flushes tx before disabling AP beacons, then routes to the AP or
 * STA/IBSS handler with the chip awake under wl->mutex.
 */
4298 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4299 struct ieee80211_vif *vif,
4300 struct ieee80211_bss_conf *bss_conf,
4303 struct wl1271 *wl = hw->priv;
4304 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4305 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4308 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4309 wlvif->role_id, (int)changed);
4312 * make sure to cancel pending disconnections if our association
4315 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4316 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4318 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4319 !bss_conf->enable_beacon)
4320 wl1271_tx_flush(wl);
4322 mutex_lock(&wl->mutex);
4324 if (unlikely(wl->state != WLCORE_STATE_ON))
4327 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4330 ret = wl1271_ps_elp_wakeup(wl);
4335 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4337 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4339 wl1271_ps_elp_sleep(wl);
4342 mutex_unlock(&wl->mutex);
/*
 * mac80211 .add_chanctx callback: only logs the new channel context;
 * no per-context hardware setup is done here (the return statement is
 * elided in this listing).
 */
4345 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4346 struct ieee80211_chanctx_conf *ctx)
4348 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4349 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4350 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .remove_chanctx callback: log-only counterpart of
 * wlcore_op_add_chanctx; no hardware teardown is visible here.
 */
4354 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4355 struct ieee80211_chanctx_conf *ctx)
4357 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4358 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4359 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .change_chanctx callback: logs the channel, chandef type and
 * the 'changed' bitmask; no action is taken on the hardware here.
 */
4362 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4363 struct ieee80211_chanctx_conf *ctx,
4366 wl1271_debug(DEBUG_MAC80211,
4367 "mac80211 change chanctx %d (type %d) changed 0x%x",
4368 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4369 cfg80211_get_chandef_type(&ctx->def), changed);
/*
 * mac80211 .assign_vif_chanctx callback: record the context's band,
 * channel and channel type in the per-vif state, then refresh the
 * default rate set for the new band. All vif state updates happen
 * under wl->mutex.
 */
4372 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4373 struct ieee80211_vif *vif,
4374 struct ieee80211_chanctx_conf *ctx)
4376 struct wl1271 *wl = hw->priv;
4377 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4378 int channel = ieee80211_frequency_to_channel(
4379 ctx->def.chan->center_freq);
4381 wl1271_debug(DEBUG_MAC80211,
4382 "mac80211 assign chanctx (role %d) %d (type %d)",
4383 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4385 mutex_lock(&wl->mutex);
/* cache the channel parameters; used later when starting/joining */
4387 wlvif->band = ctx->def.chan->band;
4388 wlvif->channel = channel;
4389 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4391 /* update default rates according to the band */
4392 wl1271_set_band_rate(wl, wlvif);
4394 mutex_unlock(&wl->mutex);
/*
 * mac80211 .unassign_vif_chanctx callback: log the unassignment and
 * flush pending TX so no frames remain queued for the old channel.
 */
4399 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4400 struct ieee80211_vif *vif,
4401 struct ieee80211_chanctx_conf *ctx)
4403 struct wl1271 *wl = hw->priv;
4404 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4406 wl1271_debug(DEBUG_MAC80211,
4407 "mac80211 unassign chanctx (role %d) %d (type %d)",
4409 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4410 cfg80211_get_chandef_type(&ctx->def));
4412 wl1271_tx_flush(wl);
/*
 * mac80211 .conf_tx callback: push per-queue EDCA parameters (cw_min,
 * cw_max, aifs, txop) and TID configuration to the firmware.
 * NOTE(review): elided lines appear to select ps_scheme from the
 * queue's UAPSD flag and to handle wakeup failure — confirm against
 * the full source.
 */
4415 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4416 struct ieee80211_vif *vif, u16 queue,
4417 const struct ieee80211_tx_queue_params *params)
4419 struct wl1271 *wl = hw->priv;
4420 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4424 mutex_lock(&wl->mutex);
4426 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
/* pick UPSD trigger vs. legacy power-save scheme for this queue */
4429 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4431 ps_scheme = CONF_PS_SCHEME_LEGACY;
4433 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4436 ret = wl1271_ps_elp_wakeup(wl);
4441 * the txop is confed in units of 32us by the mac80211,
/* hence the << 5 conversion (32us units -> firmware units) below */
4444 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4445 params->cw_min, params->cw_max,
4446 params->aifs, params->txop << 5);
4450 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4451 CONF_CHANNEL_TYPE_EDCF,
4452 wl1271_tx_get_queue(queue),
4453 ps_scheme, CONF_ACK_POLICY_LEGACY,
4457 wl1271_ps_elp_sleep(wl);
4460 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_tsf callback: query the firmware TSF via
 * wl12xx_acx_tsf_info(). mactime defaults to ULLONG_MAX, which is what
 * gets returned if the chip is off or the ACX query fails.
 */
4465 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4466 struct ieee80211_vif *vif)
4469 struct wl1271 *wl = hw->priv;
4470 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4471 u64 mactime = ULLONG_MAX;
4474 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4476 mutex_lock(&wl->mutex);
4478 if (unlikely(wl->state != WLCORE_STATE_ON))
4481 ret = wl1271_ps_elp_wakeup(wl);
4485 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4490 wl1271_ps_elp_sleep(wl);
4493 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_survey callback: reports only the current channel from
 * hw->conf (the idx bounds check and filled flags are elided in this
 * listing); no per-channel statistics are gathered from the firmware.
 */
4497 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4498 struct survey_info *survey)
4500 struct ieee80211_conf *conf = &hw->conf;
4505 survey->channel = conf->channel;
/*
 * Allocate a firmware link (HLID) for a new AP-mode station.
 * Fails when AP_MAX_STATIONS is reached or no free link is available.
 * On success, restores the station's saved TX security sequence counter
 * (total_freed_pkts) so recovery/resume does not reset the PN, marks
 * the HLID in the vif's station map and bumps active_sta_count.
 */
4510 static int wl1271_allocate_sta(struct wl1271 *wl,
4511 struct wl12xx_vif *wlvif,
4512 struct ieee80211_sta *sta)
4514 struct wl1271_station *wl_sta;
4518 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4519 wl1271_warning("could not allocate HLID - too much stations");
4523 wl_sta = (struct wl1271_station *)sta->drv_priv;
4524 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4526 wl1271_warning("could not allocate HLID - too many links");
4530 /* use the previous security seq, if this is a recovery/resume */
4531 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4533 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4534 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4535 wl->active_sta_count++;
/*
 * Free the firmware link (HLID) of an AP-mode station.
 * Saves the link's last security sequence counter back into the
 * station's drv_priv so it survives recovery/suspend, padding it during
 * recovery to account for frames whose completion was never reported.
 * Rearms the TX watchdog when the last station goes away.
 */
4539 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4541 struct wl1271_station *wl_sta;
4542 struct ieee80211_sta *sta;
4543 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4545 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4548 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4549 __clear_bit(hlid, &wl->ap_ps_map);
4550 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4553 * save the last used PN in the private part of iee80211_sta,
4554 * in case of recovery/suspend
/* NOTE(review): sta may be NULL if mac80211 already removed it —
 * the NULL check appears to be on an elided line; confirm. */
4557 sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
4559 wl_sta = (void *)sta->drv_priv;
4560 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
4563 * increment the initial seq number on recovery to account for
4564 * transmitted packets that we haven't yet got in the FW status
4566 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
4567 wl_sta->total_freed_pkts +=
4568 WL1271_TX_SQN_POST_RECOVERY_PADDING;
4572 wl12xx_free_link(wl, wlvif, &hlid);
4573 wl->active_sta_count--;
4576 * rearm the tx watchdog when the last STA is freed - give the FW a
4577 * chance to return STA-buffered packets before complaining.
4579 if (wl->active_sta_count == 0)
4580 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode station: allocate an HLID via wl1271_allocate_sta(),
 * then tell the firmware about the peer. If the ADD_PEER command fails,
 * the just-allocated HLID is released again.
 */
4583 static int wl12xx_sta_add(struct wl1271 *wl,
4584 struct wl12xx_vif *wlvif,
4585 struct ieee80211_sta *sta)
4587 struct wl1271_station *wl_sta;
4591 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4593 ret = wl1271_allocate_sta(wl, wlvif, sta);
4597 wl_sta = (struct wl1271_station *)sta->drv_priv;
4598 hlid = wl_sta->hlid;
4600 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* undo the HLID allocation if the firmware rejected the peer */
4602 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode station: sanity-check that its HLID is actually
 * allocated, send REMOVE_PEER to the firmware, then free the HLID.
 * NOTE(review): 'id' is used below but its declaration/assignment is on
 * an elided line (presumably id = wl_sta->hlid) — confirm against the
 * full source.
 */
4607 static int wl12xx_sta_remove(struct wl1271 *wl,
4608 struct wl12xx_vif *wlvif,
4609 struct ieee80211_sta *sta)
4611 struct wl1271_station *wl_sta;
4614 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4616 wl_sta = (struct wl1271_station *)sta->drv_priv;
4618 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4621 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4625 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel on this vif's role, but only if no other
 * role currently holds a ROC (roc_map empty) and the vif has a valid
 * role id.
 */
4629 static void wlcore_roc_if_possible(struct wl1271 *wl,
4630 struct wl12xx_vif *wlvif)
/* another role already ROCs -> bail out (return elided in listing) */
4632 if (find_first_bit(wl->roc_map,
4633 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4636 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4639 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
/*
 * Track stations that are mid-connection (authenticating/associating).
 * While at least one station is connecting, keep a ROC active on the
 * vif's role so the device stays on-channel; cancel it (croc) when the
 * last connecting station finishes. in_connection selects enter/exit.
 */
4642 static void wlcore_update_inconn_sta(struct wl1271 *wl,
4643 struct wl12xx_vif *wlvif,
4644 struct wl1271_station *wl_sta,
4647 if (in_connection) {
4648 if (WARN_ON(wl_sta->in_connection))
4650 wl_sta->in_connection = true;
/* first connecting station: request ROC */
4651 if (!wlvif->inconn_count++)
4652 wlcore_roc_if_possible(wl, wlvif);
4654 if (!wl_sta->in_connection)
4657 wl_sta->in_connection = false;
4658 wlvif->inconn_count--;
4659 if (WARN_ON(wlvif->inconn_count < 0))
/* last connecting station gone: cancel the ROC if we hold one */
4662 if (!wlvif->inconn_count)
4663 if (test_bit(wlvif->role_id, wl->roc_map))
4664 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core station state machine, driven by mac80211 sta_state transitions.
 * AP mode: add/remove the peer in firmware and track in-connection
 * stations (for ROC). Both modes: on AUTHORIZED, push peer state (and,
 * in AP mode, HT capabilities) to firmware; STA mode also handles the
 * authorized->assoc downgrade and ROC bookkeeping around connection
 * setup. The elided condition lines select is_ap/is_sta per branch.
 */
4668 static int wl12xx_update_sta_state(struct wl1271 *wl,
4669 struct wl12xx_vif *wlvif,
4670 struct ieee80211_sta *sta,
4671 enum ieee80211_sta_state old_state,
4672 enum ieee80211_sta_state new_state)
4674 struct wl1271_station *wl_sta;
4675 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4676 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4679 wl_sta = (struct wl1271_station *)sta->drv_priv;
4681 /* Add station (AP mode) */
4683 old_state == IEEE80211_STA_NOTEXIST &&
4684 new_state == IEEE80211_STA_NONE) {
4685 ret = wl12xx_sta_add(wl, wlvif, sta);
4689 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4692 /* Remove station (AP mode) */
4694 old_state == IEEE80211_STA_NONE &&
4695 new_state == IEEE80211_STA_NOTEXIST) {
4697 wl12xx_sta_remove(wl, wlvif, sta);
4699 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4702 /* Authorize station (AP mode) */
4704 new_state == IEEE80211_STA_AUTHORIZED) {
4705 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4709 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4714 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4717 /* Authorize station */
4719 new_state == IEEE80211_STA_AUTHORIZED) {
4720 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4721 ret = wl12xx_set_authorized(wl, wlvif);
/* STA mode: dropping back from authorized to assoc clears both flags */
4727 old_state == IEEE80211_STA_AUTHORIZED &&
4728 new_state == IEEE80211_STA_ASSOC) {
4729 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4730 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4733 /* clear ROCs on failure or authorization */
4735 (new_state == IEEE80211_STA_AUTHORIZED ||
4736 new_state == IEEE80211_STA_NOTEXIST)) {
4737 if (test_bit(wlvif->role_id, wl->roc_map))
4738 wl12xx_croc(wl, wlvif->role_id);
/* STA mode: new connection attempt -> ROC if none is active yet */
4742 old_state == IEEE80211_STA_NOTEXIST &&
4743 new_state == IEEE80211_STA_NONE) {
4744 if (find_first_bit(wl->roc_map,
4745 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4746 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4747 wl12xx_roc(wl, wlvif, wlvif->role_id,
4748 wlvif->band, wlvif->channel);
/*
 * mac80211 .sta_state callback: thin locking/wakeup wrapper around
 * wl12xx_update_sta_state(). When the chip is off, downward transitions
 * (new_state < old_state) are reported as success so mac80211 can tear
 * down cleanly (final return visible at line 4783; early-exit path
 * partially elided).
 */
4754 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4755 struct ieee80211_vif *vif,
4756 struct ieee80211_sta *sta,
4757 enum ieee80211_sta_state old_state,
4758 enum ieee80211_sta_state new_state)
4760 struct wl1271 *wl = hw->priv;
4761 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4764 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4765 sta->aid, old_state, new_state);
4767 mutex_lock(&wl->mutex);
4769 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4774 ret = wl1271_ps_elp_wakeup(wl);
4778 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4780 wl1271_ps_elp_sleep(wl);
4782 mutex_unlock(&wl->mutex);
4783 if (new_state < old_state)
/*
 * mac80211 .ampdu_action callback. RX BA sessions are managed by the
 * host: start/stop update the per-link ba_bitmap and the global RX BA
 * session count, bounded by ba_rx_session_count_max. TX BA sessions are
 * handled autonomously by the firmware, so all TX actions fall through
 * to a common (elided) no-op/return path. The HLID is taken from the
 * vif in STA mode and from the station's drv_priv in AP mode.
 */
4788 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4789 struct ieee80211_vif *vif,
4790 enum ieee80211_ampdu_mlme_action action,
4791 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4794 struct wl1271 *wl = hw->priv;
4795 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4797 u8 hlid, *ba_bitmap;
4799 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4802 /* sanity check - the fields in FW are only 8bits wide */
4803 if (WARN_ON(tid > 0xFF))
4806 mutex_lock(&wl->mutex);
4808 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4813 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4814 hlid = wlvif->sta.hlid;
4815 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4816 struct wl1271_station *wl_sta;
4818 wl_sta = (struct wl1271_station *)sta->drv_priv;
4819 hlid = wl_sta->hlid;
4825 ba_bitmap = &wl->links[hlid].ba_bitmap;
4827 ret = wl1271_ps_elp_wakeup(wl);
4831 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4835 case IEEE80211_AMPDU_RX_START:
4836 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4841 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
4843 wl1271_error("exceeded max RX BA sessions");
/* refuse to double-start a session on an already-active TID */
4847 if (*ba_bitmap & BIT(tid)) {
4849 wl1271_error("cannot enable RX BA session on active "
4854 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4857 *ba_bitmap |= BIT(tid);
4858 wl->ba_rx_session_count++;
4862 case IEEE80211_AMPDU_RX_STOP:
4863 if (!(*ba_bitmap & BIT(tid))) {
4865 * this happens on reconfig - so only output a debug
4866 * message for now, and don't fail the function.
4868 wl1271_debug(DEBUG_MAC80211,
4869 "no active RX BA session on tid: %d",
4875 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4878 *ba_bitmap &= ~BIT(tid);
4879 wl->ba_rx_session_count--;
4884 * The BA initiator session management in FW independently.
4885 * Falling break here on purpose for all TX APDU commands.
4887 case IEEE80211_AMPDU_TX_START:
4888 case IEEE80211_AMPDU_TX_STOP_CONT:
4889 case IEEE80211_AMPDU_TX_STOP_FLUSH:
4890 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
4891 case IEEE80211_AMPDU_TX_OPERATIONAL:
4896 wl1271_error("Incorrect ampdu action id=%x\n", action);
4900 wl1271_ps_elp_sleep(wl);
4903 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_bitrate_mask callback: translate the legacy rate mask
 * of each band into the driver's enabled-rate representation and cache
 * it per vif. For an unassociated STA vif the new basic-rate policy is
 * pushed to the firmware immediately; otherwise it takes effect on the
 * next association (via the cached masks).
 */
4908 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4909 struct ieee80211_vif *vif,
4910 const struct cfg80211_bitrate_mask *mask)
4912 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4913 struct wl1271 *wl = hw->priv;
4916 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4917 mask->control[NL80211_BAND_2GHZ].legacy,
4918 mask->control[NL80211_BAND_5GHZ].legacy);
4920 mutex_lock(&wl->mutex);
/* cache a per-band enabled-rate mask for later use */
4922 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4923 wlvif->bitrate_masks[i] =
4924 wl1271_tx_enabled_rates_get(wl,
4925 mask->control[i].legacy,
4928 if (unlikely(wl->state != WLCORE_STATE_ON))
/* only an idle STA vif gets the policy applied right away */
4931 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4932 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4934 ret = wl1271_ps_elp_wakeup(wl);
4938 wl1271_set_band_rate(wl, wlvif);
4940 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4941 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4943 wl1271_ps_elp_sleep(wl);
4946 mutex_unlock(&wl->mutex);
/*
 * mac80211 .channel_switch callback: flush TX, then ask the firmware
 * (via ops->channel_switch) to perform the CSA on every STA vif. If the
 * chip is already off, each vif's switch is reported as failed via
 * ieee80211_chswitch_done(). A delayed work is armed to declare failure
 * if the switch has not completed ~5s after the expected switch time.
 */
4951 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4952 struct ieee80211_channel_switch *ch_switch)
4954 struct wl1271 *wl = hw->priv;
4955 struct wl12xx_vif *wlvif;
4958 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4960 wl1271_tx_flush(wl);
4962 mutex_lock(&wl->mutex);
4964 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4965 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4966 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4967 ieee80211_chswitch_done(vif, false);
4970 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4974 ret = wl1271_ps_elp_wakeup(wl);
4978 /* TODO: change mac80211 to pass vif as param */
4979 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4980 unsigned long delay_usec;
4982 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
4986 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4988 /* indicate failure 5 seconds after channel switch time */
4989 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
4991 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
4992 usecs_to_jiffies(delay_usec) +
4993 msecs_to_jiffies(5000));
4997 wl1271_ps_elp_sleep(wl);
5000 mutex_unlock(&wl->mutex);
/*
 * mac80211 .flush callback: simply drains the TX path; the 'drop'
 * hint is not used by this driver.
 */
5003 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
5005 struct wl1271 *wl = hw->priv;
5007 wl1271_tx_flush(wl);
/*
 * mac80211 .remain_on_channel callback: only one ROC may be active at a
 * time — if roc_vif is set or any role already ROCs, -EBUSY is returned
 * (error assignment elided). Otherwise starts the device on the target
 * channel via wl12xx_start_dev() and arms roc_complete_work to expire
 * the ROC after 'duration' ms.
 */
5010 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5011 struct ieee80211_vif *vif,
5012 struct ieee80211_channel *chan,
5014 enum ieee80211_roc_type type)
5016 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5017 struct wl1271 *wl = hw->priv;
5018 int channel, ret = 0;
5020 channel = ieee80211_frequency_to_channel(chan->center_freq);
5022 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5023 channel, wlvif->role_id);
5025 mutex_lock(&wl->mutex);
5027 if (unlikely(wl->state != WLCORE_STATE_ON))
5030 /* return EBUSY if we can't ROC right now */
5031 if (WARN_ON(wl->roc_vif ||
5032 find_first_bit(wl->roc_map,
5033 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5038 ret = wl1271_ps_elp_wakeup(wl);
5042 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5047 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5048 msecs_to_jiffies(duration));
5050 wl1271_ps_elp_sleep(wl);
5052 mutex_unlock(&wl->mutex);
/*
 * Complete a ROC with wl->mutex already held: no-op if roc_vif was
 * already cleared, defers (error return elided) if the vif is not yet
 * initialized, otherwise stops the device role.
 */
5056 static int __wlcore_roc_completed(struct wl1271 *wl)
5058 struct wl12xx_vif *wlvif;
5061 /* already completed */
5062 if (unlikely(!wl->roc_vif))
5065 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5067 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5070 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked/wakeup wrapper around __wlcore_roc_completed(): takes
 * wl->mutex, bails out if the chip is not on, wakes it from ELP,
 * completes the ROC and puts the chip back to sleep.
 */
5079 static int wlcore_roc_completed(struct wl1271 *wl)
5083 wl1271_debug(DEBUG_MAC80211, "roc complete");
5085 mutex_lock(&wl->mutex);
5087 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5092 ret = wl1271_ps_elp_wakeup(wl);
5096 ret = __wlcore_roc_completed(wl);
5098 wl1271_ps_elp_sleep(wl);
5100 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler armed by wlcore_op_remain_on_channel(): ends the
 * ROC and, on success (condition elided), notifies mac80211 that the
 * remain-on-channel period has expired.
 */
5105 static void wlcore_roc_complete_work(struct work_struct *work)
5107 struct delayed_work *dwork;
5111 dwork = container_of(work, struct delayed_work, work);
5112 wl = container_of(dwork, struct wl1271, roc_complete_work);
5114 ret = wlcore_roc_completed(wl);
5116 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 .cancel_remain_on_channel callback: flush TX, cancel the
 * pending expiry work (cancel_delayed_work_sync is safe here; flush_work
 * would deadlock since we may be called from the same workqueue), then
 * complete the ROC synchronously.
 */
5119 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5121 struct wl1271 *wl = hw->priv;
5123 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5126 wl1271_tx_flush(wl);
5129 * we can't just flush_work here, because it might deadlock
5130 * (as we might get called from the same workqueue)
5132 cancel_delayed_work_sync(&wl->roc_complete_work);
5133 wlcore_roc_completed(wl);
/*
 * mac80211 .sta_rc_update callback: forward rate-control changes
 * straight to the chip-specific handler.
 */
5138 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5139 struct ieee80211_vif *vif,
5140 struct ieee80211_sta *sta,
5143 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5144 struct wl1271 *wl = hw->priv;
5146 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
/*
 * mac80211 .get_rssi callback: read the averaged RSSI from the firmware
 * via wlcore_acx_average_rssi(), under wl->mutex and with ELP wakeup.
 */
5149 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5150 struct ieee80211_vif *vif,
5151 struct ieee80211_sta *sta,
5154 struct wl1271 *wl = hw->priv;
5155 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5158 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5160 mutex_lock(&wl->mutex);
5162 if (unlikely(wl->state != WLCORE_STATE_ON))
5165 ret = wl1271_ps_elp_wakeup(wl);
5169 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5174 wl1271_ps_elp_sleep(wl);
5177 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx_frames_pending callback: true if any frames sit in the
 * driver's TX queues or are in flight inside the firmware.
 */
5182 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5184 struct wl1271 *wl = hw->priv;
5187 mutex_lock(&wl->mutex);
5189 if (unlikely(wl->state != WLCORE_STATE_ON))
5192 /* packets are considered pending if in the TX queue or the FW */
5193 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5195 mutex_unlock(&wl->mutex);
/*
 * 2.4 GHz bitrate table registered with mac80211 (CCK 1-11 Mbps with
 * short-preamble variants, then OFDM 6-54 Mbps). The .bitrate fields
 * are on elided lines; hw_value/hw_value_short map to CONF_HW_BIT_RATE_*
 * firmware rate bits.
 */
5200 /* can't be const, mac80211 writes to this */
5201 static struct ieee80211_rate wl1271_rates[] = {
5203 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5204 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5206 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5207 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5208 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5210 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5211 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5212 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5214 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5215 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5216 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5218 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5219 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5221 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5222 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5224 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5225 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5227 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5228 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5230 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5231 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5233 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5234 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5236 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5237 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5239 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5240 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/*
 * 2.4 GHz channel table (channels 1-14, 2412-2484 MHz) registered with
 * mac80211. max_power is capped at WLCORE_MAX_TXPWR; regulatory rules
 * may lower it at runtime.
 */
5243 /* can't be const, mac80211 writes to this */
5244 static struct ieee80211_channel wl1271_channels[] = {
5245 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5246 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5247 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5248 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5249 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5250 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5251 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5252 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5253 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5254 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5255 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5256 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5257 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5258 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
/* 2.4 GHz supported-band descriptor tying the channel and rate tables
 * together for ieee80211_register_hw(). */
5261 /* can't be const, mac80211 writes to this */
5262 static struct ieee80211_supported_band wl1271_band_2ghz = {
5263 .channels = wl1271_channels,
5264 .n_channels = ARRAY_SIZE(wl1271_channels),
5265 .bitrates = wl1271_rates,
5266 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/*
 * 5 GHz bitrate table: OFDM only (6-54 Mbps), no CCK and no
 * short-preamble flags. The .bitrate fields are on elided lines.
 */
5269 /* 5 GHz data rates for WL1273 */
5270 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5272 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5273 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5275 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5276 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5278 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5279 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5281 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5282 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5284 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5285 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5287 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5288 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5290 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5291 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5293 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5294 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/*
 * 5 GHz channel table registered with mac80211, including the low
 * 5035-5080 MHz channels, UNII-1/2/2e and UNII-3. Power is capped at
 * WLCORE_MAX_TXPWR; regulatory constraints apply at runtime.
 */
5297 /* 5 GHz band channels for WL1273 */
5298 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5299 { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
5300 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5301 { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
5302 { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
5303 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5304 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5305 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5306 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5307 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5308 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5309 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5310 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5311 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5312 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5313 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5314 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5315 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5316 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5317 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5318 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5319 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5320 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5321 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5322 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5323 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5324 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5325 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5326 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5327 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5328 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5329 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5330 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5331 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5332 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz supported-band descriptor; mirrors wl1271_band_2ghz. */
5335 static struct ieee80211_supported_band wl1271_band_5ghz = {
5336 .channels = wl1271_channels_5ghz,
5337 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5338 .bitrates = wl1271_rates_5ghz,
5339 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 callback table for the wlcore driver. Every handler here is
 * defined in this file (most take wl->mutex and use ELP wakeup/sleep
 * around firmware access). CFG80211_TESTMODE_CMD wires in the nl80211
 * testmode entry point when testmode is enabled.
 */
5342 static const struct ieee80211_ops wl1271_ops = {
5343 .start = wl1271_op_start,
5344 .stop = wlcore_op_stop,
5345 .add_interface = wl1271_op_add_interface,
5346 .remove_interface = wl1271_op_remove_interface,
5347 .change_interface = wl12xx_op_change_interface,
5349 .suspend = wl1271_op_suspend,
5350 .resume = wl1271_op_resume,
5352 .config = wl1271_op_config,
5353 .prepare_multicast = wl1271_op_prepare_multicast,
5354 .configure_filter = wl1271_op_configure_filter,
5356 .set_key = wlcore_op_set_key,
5357 .hw_scan = wl1271_op_hw_scan,
5358 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5359 .sched_scan_start = wl1271_op_sched_scan_start,
5360 .sched_scan_stop = wl1271_op_sched_scan_stop,
5361 .bss_info_changed = wl1271_op_bss_info_changed,
5362 .set_frag_threshold = wl1271_op_set_frag_threshold,
5363 .set_rts_threshold = wl1271_op_set_rts_threshold,
5364 .conf_tx = wl1271_op_conf_tx,
5365 .get_tsf = wl1271_op_get_tsf,
5366 .get_survey = wl1271_op_get_survey,
5367 .sta_state = wl12xx_op_sta_state,
5368 .ampdu_action = wl1271_op_ampdu_action,
5369 .tx_frames_pending = wl1271_tx_frames_pending,
5370 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5371 .channel_switch = wl12xx_op_channel_switch,
5372 .flush = wlcore_op_flush,
5373 .remain_on_channel = wlcore_op_remain_on_channel,
5374 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5375 .add_chanctx = wlcore_op_add_chanctx,
5376 .remove_chanctx = wlcore_op_remove_chanctx,
5377 .change_chanctx = wlcore_op_change_chanctx,
5378 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5379 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5380 .sta_rc_update = wlcore_op_sta_rc_update,
5381 .get_rssi = wlcore_op_get_rssi,
5382 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Map a hardware RX rate code to the mac80211 rate index for the given
 * band, via the chip's band_rate_to_idx table. Logs an error for
 * out-of-range or unsupported codes (the fallback return value is on an
 * elided line).
 */
5386 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5392 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5393 wl1271_error("Illegal RX rate from HW: %d", rate);
5397 idx = wl->band_rate_to_idx[band][rate];
5398 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5399 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * sysfs 'bt_coex_state' show handler: prints the current BT coexistence
 * (soft-gemini) enable state plus a small legend, under wl->mutex.
 */
5406 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5407 struct device_attribute *attr,
5410 struct wl1271 *wl = dev_get_drvdata(dev);
5415 mutex_lock(&wl->mutex);
5416 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5418 mutex_unlock(&wl->mutex);
/*
 * sysfs 'bt_coex_state' store handler: parse a decimal 0/1, update
 * wl->sg_enabled and — if the chip is on — push the new soft-gemini
 * state to the firmware. No-op when the value does not change.
 */
5424 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5425 struct device_attribute *attr,
5426 const char *buf, size_t count)
5428 struct wl1271 *wl = dev_get_drvdata(dev);
5432 ret = kstrtoul(buf, 10, &res);
5434 wl1271_warning("incorrect value written to bt_coex_mode");
5438 mutex_lock(&wl->mutex);
5442 if (res == wl->sg_enabled)
5445 wl->sg_enabled = res;
5447 if (unlikely(wl->state != WLCORE_STATE_ON))
5450 ret = wl1271_ps_elp_wakeup(wl);
5454 wl1271_acx_sg_enable(wl, wl->sg_enabled);
5455 wl1271_ps_elp_sleep(wl);
5458 mutex_unlock(&wl->mutex);
/* bt_coex_state: read/write for owner, read-only for group/others */
5462 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5463 wl1271_sysfs_show_bt_coex_state,
5464 wl1271_sysfs_store_bt_coex_state);
/*
 * sysfs 'hw_pg_ver' show handler: prints the hardware PG (package)
 * version read during probe, or "n/a" if it was never determined
 * (hw_pg_ver < 0).
 */
5466 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5467 struct device_attribute *attr,
5470 struct wl1271 *wl = dev_get_drvdata(dev);
5475 mutex_lock(&wl->mutex);
5476 if (wl->hw_pg_ver >= 0)
5477 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5479 len = snprintf(buf, len, "n/a\n");
5480 mutex_unlock(&wl->mutex);
5485 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5486 wl1271_sysfs_show_hw_pg_ver, NULL);
/*
 * sysfs binary 'fwlog' read handler: blocks (interruptibly, via an
 * exclusive wait on fwlog_waitq) until firmware log data is available,
 * then copies up to 'count' bytes out of wl->fwlog and compacts the
 * remaining bytes to the front of the buffer. Seeking is not supported;
 * consumed data is discarded. Holds wl->mutex around buffer access and
 * drops it while sleeping.
 */
5488 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5489 struct bin_attribute *bin_attr,
5490 char *buffer, loff_t pos, size_t count)
5492 struct device *dev = container_of(kobj, struct device, kobj);
5493 struct wl1271 *wl = dev_get_drvdata(dev);
5497 ret = mutex_lock_interruptible(&wl->mutex);
5499 return -ERESTARTSYS;
5501 /* Let only one thread read the log at a time, blocking others */
5502 while (wl->fwlog_size == 0) {
5505 prepare_to_wait_exclusive(&wl->fwlog_waitq,
5507 TASK_INTERRUPTIBLE);
5509 if (wl->fwlog_size != 0) {
5510 finish_wait(&wl->fwlog_waitq, &wait);
5514 mutex_unlock(&wl->mutex);
/* schedule() happens on an elided line; wake up and re-take the lock */
5517 finish_wait(&wl->fwlog_waitq, &wait);
5519 if (signal_pending(current))
5520 return -ERESTARTSYS;
5522 ret = mutex_lock_interruptible(&wl->mutex);
5524 return -ERESTARTSYS;
5527 /* Check if the fwlog is still valid */
5528 if (wl->fwlog_size < 0) {
5529 mutex_unlock(&wl->mutex);
5533 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5534 len = min(count, (size_t)wl->fwlog_size);
5535 wl->fwlog_size -= len;
5536 memcpy(buffer, wl->fwlog, len);
5538 /* Make room for new messages */
5539 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5541 mutex_unlock(&wl->mutex);
/* sysfs binary attribute: owner-readable firmware log node */
5546 static struct bin_attribute fwlog_attr = {
5547 .attr = {.name = "fwlog", .mode = S_IRUSR},
5548 .read = wl1271_sysfs_read_fwlog,
/*
 * Derive the set of WLAN MAC addresses from a base OUI+NIC pair:
 * consecutive NIC values for each supported address (increment elided
 * in this listing), warning if the 24-bit NIC part would wrap. When the
 * chip exposes fewer addresses than WLCORE_NUM_MAC_ADDRESSES, the last
 * slot is cloned from the first with the locally-administered (LAA) bit
 * set. Registers the resulting table with wiphy.
 */
5551 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5555 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5558 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5559 wl1271_warning("NIC part of the MAC address wraps around!");
5561 for (i = 0; i < wl->num_mac_addr; i++) {
5562 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5563 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5564 wl->addresses[i].addr[2] = (u8) oui;
5565 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5566 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5567 wl->addresses[i].addr[5] = (u8) nic;
5571 /* we may be one address short at the most */
5572 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5575 * turn on the LAA bit in the first address and use it as
5578 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5579 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5580 memcpy(&wl->addresses[idx], &wl->addresses[0],
5581 sizeof(wl->addresses[0]));
/* BIT(1) in the first octet's byte marks a locally administered addr */
5583 wl->addresses[idx].addr[2] |= BIT(1);
5586 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5587 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Probe-time hardware identification: power the chip on, read the chip
 * id register, clear the fuse-derived MAC address fields, read the PG
 * version, and fetch the fused MAC via the chip-specific get_mac op if
 * present. Powers the chip back off before returning.
 */
5590 static int wl12xx_get_hw_info(struct wl1271 *wl)
5594 ret = wl12xx_set_power_on(wl);
5598 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5602 wl->fuse_oui_addr = 0;
5603 wl->fuse_nic_addr = 0;
5605 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5609 if (wl->ops->get_mac)
5610 ret = wl->ops->get_mac(wl);
5613 wl1271_power_off(wl);
/*
 * wl1271_register_hw - derive the device MAC addresses (from NVS if
 * present, otherwise from the fuse) and register the hw with mac80211.
 * Idempotent: returns early if already registered.
 * NOTE(review): elided listing — the assignments of oui_addr/nic_addr from
 * the nvs_ptr expressions and several braces/returns are not visible here.
 */
5617 static int wl1271_register_hw(struct wl1271 *wl)
5620 u32 oui_addr = 0, nic_addr = 0;
5622 if (wl->mac80211_registered)
/* 12 bytes is the minimum NVS size that contains a MAC address */
5625 if (wl->nvs_len >= 12) {
5626 /* NOTE: The wl->nvs->nvs element must be first, in
5627 * order to simplify the casting, we assume it is at
5628 * the beginning of the wl->nvs structure.
5630 u8 *nvs_ptr = (u8 *)wl->nvs;
/* byte layout below follows the NVS file format (not contiguous) */
5633 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5635 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5638 /* if the MAC address is zeroed in the NVS derive from fuse */
5639 if (oui_addr == 0 && nic_addr == 0) {
5640 oui_addr = wl->fuse_oui_addr;
5641 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5642 nic_addr = wl->fuse_nic_addr + 1;
5645 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5647 ret = ieee80211_register_hw(wl->hw);
5649 wl1271_error("unable to register mac80211 hw: %d", ret);
5653 wl->mac80211_registered = true;
5655 wl1271_debugfs_init(wl);
5657 wl1271_notice("loaded");
/*
 * wl1271_unregister_hw - stop PLT mode (if active) and unregister the hw
 * from mac80211, clearing the registered flag.
 * NOTE(review): elided listing — the condition guarding wl1271_plt_stop()
 * (presumably a PLT-state check) is not visible here.
 */
5663 static void wl1271_unregister_hw(struct wl1271 *wl)
5666 wl1271_plt_stop(wl);
5668 ieee80211_unregister_hw(wl->hw);
5669 wl->mac80211_registered = false;
/*
 * Interface-count limits advertised to cfg80211: one entry for station
 * interfaces and one for AP/P2P-GO/P2P-client interfaces.
 * NOTE(review): elided listing — the .max fields and the closing brace of
 * the array are not visible here.
 */
5673 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5676 .types = BIT(NL80211_IFTYPE_STATION),
5680 .types = BIT(NL80211_IFTYPE_AP) |
5681 BIT(NL80211_IFTYPE_P2P_GO) |
5682 BIT(NL80211_IFTYPE_P2P_CLIENT),
/*
 * Allowed interface combinations: up to 3 concurrent interfaces subject to
 * wlcore_iface_limits. Not const because num_different_channels is patched
 * at init time from wl->num_channels (see wl1271_init_ieee80211).
 */
5686 static struct ieee80211_iface_combination
5687 wlcore_iface_combinations[] = {
5689 .max_interfaces = 3,
5690 .limits = wlcore_iface_limits,
5691 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
/*
 * wl1271_init_ieee80211 - populate the mac80211/cfg80211 capability
 * structures (hw flags, cipher suites, supported interface modes, scan
 * limits, bands, queues and interface combinations) before registration.
 * NOTE(review): elided listing — several closing braces, the loop-variable
 * declaration and the final return are not visible here.
 */
5695 static int wl1271_init_ieee80211(struct wl1271 *wl)
/* cipher suites advertised to mac80211; GEM is a TI-proprietary suite */
5698 static const u32 cipher_suites[] = {
5699 WLAN_CIPHER_SUITE_WEP40,
5700 WLAN_CIPHER_SUITE_WEP104,
5701 WLAN_CIPHER_SUITE_TKIP,
5702 WLAN_CIPHER_SUITE_CCMP,
5703 WL1271_CIPHER_SUITE_GEM,
5706 /* The tx descriptor buffer */
5707 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* some chips need extra headroom for the TKIP fields */
5709 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5710 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5713 /* FIXME: find a proper value */
5714 wl->hw->channel_change_time = 10000;
5715 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5717 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5718 IEEE80211_HW_SUPPORTS_PS |
5719 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5720 IEEE80211_HW_SUPPORTS_UAPSD |
5721 IEEE80211_HW_HAS_RATE_CONTROL |
5722 IEEE80211_HW_CONNECTION_MONITOR |
5723 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5724 IEEE80211_HW_SPECTRUM_MGMT |
5725 IEEE80211_HW_AP_LINK_PS |
5726 IEEE80211_HW_AMPDU_AGGREGATION |
5727 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5728 IEEE80211_HW_QUEUE_CONTROL;
5730 wl->hw->wiphy->cipher_suites = cipher_suites;
5731 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5733 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5734 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5735 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5736 wl->hw->wiphy->max_scan_ssids = 1;
5737 wl->hw->wiphy->max_sched_scan_ssids = 16;
5738 wl->hw->wiphy->max_match_sets = 16;
5740 * Maximum length of elements in scanning probe request templates
5741 * should be the maximum length possible for a template, without
5742 * the IEEE80211 header of the template
5744 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5745 sizeof(struct ieee80211_header);
5747 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5748 sizeof(struct ieee80211_header);
/* remain-on-channel limit in ms */
5750 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5752 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5753 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5755 /* make sure all our channels fit in the scanned_ch bitmask */
5756 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5757 ARRAY_SIZE(wl1271_channels_5ghz) >
5758 WL1271_MAX_CHANNELS);
5760 * clear channel flags from the previous usage
5761 * and restore max_power & max_antenna_gain values.
5763 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5764 wl1271_band_2ghz.channels[i].flags = 0;
5765 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5766 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5769 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5770 wl1271_band_5ghz.channels[i].flags = 0;
5771 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5772 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5776 * We keep local copies of the band structs because we need to
5777 * modify them on a per-device basis.
5779 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5780 sizeof(wl1271_band_2ghz));
5781 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5782 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5783 sizeof(*wl->ht_cap));
5784 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5785 sizeof(wl1271_band_5ghz));
5786 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5787 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5788 sizeof(*wl->ht_cap));
5790 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5791 &wl->bands[IEEE80211_BAND_2GHZ];
5792 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5793 &wl->bands[IEEE80211_BAND_5GHZ];
5796 * allow 4 queues per mac address we support +
5797 * 1 cab queue per mac + one global offchannel Tx queue
5799 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5801 /* the last queue is the offchannel queue */
5802 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5803 wl->hw->max_rates = 1;
5805 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5807 /* the FW answers probe-requests in AP-mode */
5808 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5809 wl->hw->wiphy->probe_resp_offload =
5810 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5811 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5812 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5814 /* allowed interface combinations */
5815 wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5816 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5817 wl->hw->wiphy->n_iface_combinations =
5818 ARRAY_SIZE(wlcore_iface_combinations);
5820 SET_IEEE80211_DEV(wl->hw, wl->dev);
5822 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5823 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5825 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5830 #define WL1271_DEFAULT_CHANNEL 0
/*
 * wlcore_alloc_hw - allocate the ieee80211_hw and the wl1271 state it
 * embeds, plus all driver-lifetime resources: chip-private area, per-link
 * TX queues, work items, freezable workqueue, aggregation buffer, dummy
 * packet, FW-log page, mailbox and 32-bit bounce buffer. On failure the
 * already-acquired resources are released via the goto-cleanup chain and
 * ERR_PTR(ret) is returned.
 * NOTE(review): elided listing — the error labels, several 'ret = -ENOMEM'
 * assignments and intermediate braces are not visible here.
 */
5832 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5835 struct ieee80211_hw *hw;
5840 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5842 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5844 wl1271_error("could not alloc ieee80211_hw");
5850 memset(wl, 0, sizeof(*wl));
/* chip-specific private area, size requested by the lower driver */
5852 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5854 wl1271_error("could not alloc wl priv");
5856 goto err_priv_alloc;
5859 INIT_LIST_HEAD(&wl->wlvif_list);
/* one skb queue per (link, TX AC queue) pair */
5863 for (i = 0; i < NUM_TX_QUEUES; i++)
5864 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5865 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5867 skb_queue_head_init(&wl->deferred_rx_queue);
5868 skb_queue_head_init(&wl->deferred_tx_queue);
5870 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5871 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5872 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5873 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5874 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5875 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5876 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so in-flight work is frozen across system suspend */
5878 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5879 if (!wl->freezable_wq) {
5884 wl->channel = WL1271_DEFAULT_CHANNEL;
5886 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5887 wl->band = IEEE80211_BAND_2GHZ;
5888 wl->channel_type = NL80211_CHAN_NO_HT;
5890 wl->sg_enabled = true;
5891 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5892 wl->recovery_count = 0;
5895 wl->ap_fw_ps_map = 0;
5897 wl->platform_quirks = 0;
5898 wl->system_hlid = WL12XX_SYSTEM_HLID;
5899 wl->active_sta_count = 0;
5900 wl->active_link_count = 0;
5902 init_waitqueue_head(&wl->fwlog_waitq);
5904 /* The system link is always allocated */
5905 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5907 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5908 for (i = 0; i < wl->num_tx_desc; i++)
5909 wl->tx_frames[i] = NULL;
5911 spin_lock_init(&wl->wl_lock);
5913 wl->state = WLCORE_STATE_OFF;
5914 wl->fw_type = WL12XX_FW_TYPE_NONE;
5915 mutex_init(&wl->mutex);
5916 mutex_init(&wl->flush_mutex);
5917 init_completion(&wl->nvs_loading_complete);
/* aggregation buffer is page-order allocated; freed with free_pages() */
5919 order = get_order(aggr_buf_size);
5920 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5921 if (!wl->aggr_buf) {
5925 wl->aggr_buf_size = aggr_buf_size;
5927 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5928 if (!wl->dummy_packet) {
5933 /* Allocate one page for the FW log */
5934 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5937 goto err_dummy_packet;
5940 wl->mbox_size = mbox_size;
/* GFP_DMA: mailbox may be targeted by the device directly */
5941 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5947 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5948 if (!wl->buffer_32) {
/* error unwind: release resources in reverse order of acquisition */
5959 free_page((unsigned long)wl->fwlog);
5962 dev_kfree_skb(wl->dummy_packet);
5965 free_pages((unsigned long)wl->aggr_buf, order);
5968 destroy_workqueue(wl->freezable_wq);
5971 wl1271_debugfs_exit(wl);
5975 ieee80211_free_hw(hw);
5979 return ERR_PTR(ret);
5981 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * wlcore_free_hw - tear down everything wlcore_alloc_hw (and probe)
 * created: wake any blocked fwlog readers, remove the sysfs files, free
 * the buffers/workqueue and finally the ieee80211_hw itself.
 * NOTE(review): elided listing — frees of wl->mbox, wl->nvs, wl->priv and
 * the final return are among the lines not visible here.
 */
5983 int wlcore_free_hw(struct wl1271 *wl)
5985 /* Unblock any fwlog readers */
5986 mutex_lock(&wl->mutex);
/* -1 signals "log gone" to wl1271_sysfs_read_fwlog waiters */
5987 wl->fwlog_size = -1;
5988 wake_up_interruptible_all(&wl->fwlog_waitq);
5989 mutex_unlock(&wl->mutex);
5991 device_remove_bin_file(wl->dev, &fwlog_attr);
5993 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5995 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5996 kfree(wl->buffer_32);
5998 free_page((unsigned long)wl->fwlog);
5999 dev_kfree_skb(wl->dummy_packet);
6000 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6002 wl1271_debugfs_exit(wl);
6006 wl->fw_type = WL12XX_FW_TYPE_NONE;
6010 kfree(wl->fw_status_1);
6011 kfree(wl->tx_res_if);
6012 destroy_workqueue(wl->freezable_wq);
6015 ieee80211_free_hw(wl->hw);
6019 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * wlcore_nvs_cb - request_firmware_nowait() completion callback, invoked
 * once the NVS calibration file load attempt finishes. Copies the NVS
 * data (if any), performs chip setup, requests the IRQ, configures
 * wakeup/WoWLAN, identifies the chip, initializes mac80211 capabilities,
 * registers the hw and creates the sysfs files. On any failure the
 * goto-cleanup chain (partially elided here) unwinds prior steps.
 * Always releases the firmware and completes nvs_loading_complete so
 * wlcore_remove() cannot block forever.
 * NOTE(review): elided listing — NULL checks, 'goto out*' error branches
 * and several labels are not visible here.
 */
6021 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6023 struct wl1271 *wl = context;
6024 struct platform_device *pdev = wl->pdev;
6025 struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
6026 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6027 unsigned long irqflags;
/* NVS file is optional: keep going without it if the load failed */
6031 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6033 wl1271_error("Could not allocate nvs data");
6036 wl->nvs_len = fw->size;
6038 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6044 ret = wl->ops->setup(wl);
6048 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6050 /* adjust some runtime configuration parameters */
6051 wlcore_adjust_conf(wl);
6053 wl->irq = platform_get_irq(pdev, 0);
6054 wl->platform_quirks = pdata->platform_quirks;
6055 wl->if_ops = pdev_data->if_ops;
/* edge-triggered platforms cannot use a level-triggered oneshot IRQ */
6057 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
6058 irqflags = IRQF_TRIGGER_RISING;
6060 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6062 ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
6063 irqflags, pdev->name, wl);
6065 wl1271_error("request_irq() failed: %d", ret);
6070 ret = enable_irq_wake(wl->irq);
6072 wl->irq_wake_enabled = true;
6073 device_init_wakeup(wl->dev, 1);
/* advertise WoWLAN patterns only if power is kept in suspend */
6074 if (pdata->pwr_in_suspend) {
6075 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
6076 wl->hw->wiphy->wowlan.n_patterns =
6077 WL1271_MAX_RX_FILTERS;
6078 wl->hw->wiphy->wowlan.pattern_min_len = 1;
6079 wl->hw->wiphy->wowlan.pattern_max_len =
6080 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
/* keep the IRQ off until the interface is brought up */
6084 disable_irq(wl->irq);
6086 ret = wl12xx_get_hw_info(wl);
6088 wl1271_error("couldn't get hw info");
6092 ret = wl->ops->identify_chip(wl);
6096 ret = wl1271_init_ieee80211(wl);
6100 ret = wl1271_register_hw(wl);
6104 /* Create sysfs file to control bt coex state */
6105 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
6107 wl1271_error("failed to create sysfs file bt_coex_state");
6111 /* Create sysfs file to get HW PG version */
6112 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
6114 wl1271_error("failed to create sysfs file hw_pg_ver");
6115 goto out_bt_coex_state;
6118 /* Create sysfs file for the FW log */
6119 ret = device_create_bin_file(wl->dev, &fwlog_attr);
6121 wl1271_error("failed to create sysfs file fwlog");
6125 wl->initialized = true;
/* error unwind labels (partially elided) */
6129 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
6132 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
6135 wl1271_unregister_hw(wl);
6138 free_irq(wl->irq, wl);
6144 release_firmware(fw);
6145 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_probe - platform-driver probe entry point used by the chip
 * drivers. Validates that the lower driver supplied ops and a partition
 * table, records the device, then kicks off an asynchronous NVS firmware
 * load; the remaining initialization happens in wlcore_nvs_cb().
 * On request failure, nvs_loading_complete is completed so remove() does
 * not block.
 * NOTE(review): elided listing — error returns and the wl->pdev
 * assignment are among the lines not visible here.
 */
6148 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6152 if (!wl->ops || !wl->ptable)
6155 wl->dev = &pdev->dev;
6157 platform_set_drvdata(pdev, wl);
6159 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6160 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6163 wl1271_error("request_firmware_nowait failed: %d", ret);
6164 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_remove - platform-driver remove entry point. Waits for the async
 * NVS callback to finish, then (only if init succeeded) disables IRQ
 * wakeup, unregisters from mac80211 and releases the IRQ.
 * NOTE(review): elided listing — the early-return body for the
 * !wl->initialized case and the final return are not visible here.
 */
6171 int wlcore_remove(struct platform_device *pdev)
6173 struct wl1271 *wl = platform_get_drvdata(pdev);
/* must not race with wlcore_nvs_cb() still initializing */
6175 wait_for_completion(&wl->nvs_loading_complete);
6176 if (!wl->initialized)
6179 if (wl->irq_wake_enabled) {
6180 device_init_wakeup(wl->dev, 0);
6181 disable_irq_wake(wl->irq);
6183 wl1271_unregister_hw(wl);
6184 free_irq(wl->irq, wl);
6189 EXPORT_SYMBOL_GPL(wlcore_remove);
/*
 * Module parameters and metadata. debug_level selects the DEBUG_* bitmask
 * used by the wl1271_debug() macros; fwlog selects the firmware logger
 * mode; bug_on_recovery/no_recovery alter the FW-recovery behavior.
 */
6191 u32 wl12xx_debug_level = DEBUG_NONE;
6192 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6193 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6194 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6196 module_param_named(fwlog, fwlog_param, charp, 0);
6197 MODULE_PARM_DESC(fwlog,
6198 "FW logger options: continuous, ondemand, dbgpins or disable");
6200 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6201 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6203 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6204 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6206 MODULE_LICENSE("GPL");
6207 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6208 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6209 MODULE_FIRMWARE(WL12XX_NVS_NAME);