pandora-kernel.git: drivers/net/wireless/ath/carl9170/tx.c
1 /*
2  * Atheros CARL9170 driver
3  *
4  * 802.11 xmit & status routines
5  *
6  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7  * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; see the file COPYING.  If not, see
21  * http://www.gnu.org/licenses/.
22  *
23  * This file incorporates work covered by the following copyright and
24  * permission notice:
25  *    Copyright (c) 2007-2008 Atheros Communications, Inc.
26  *
27  *    Permission to use, copy, modify, and/or distribute this software for any
28  *    purpose with or without fee is hereby granted, provided that the above
29  *    copyright notice and this permission notice appear in all copies.
30  *
31  *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32  *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33  *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34  *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35  *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36  *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38  */
39
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <net/mac80211.h>
45 #include "carl9170.h"
46 #include "hw.h"
47 #include "cmd.h"
48
49 static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
50                                                 unsigned int queue)
51 {
52         if (unlikely(modparam_noht)) {
53                 return queue;
54         } else {
55                 /*
56                  * This is just another workaround, until
57                  * someone figures out how to get QoS and
58                  * AMPDU to play nicely together.
59                  */
60
61                 return 2;               /* AC_BE */
62         }
63 }
64
65 static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
66                                               struct sk_buff *skb)
67 {
68         return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
69 }
70
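   /*
    * True when fewer device memory blocks are free than one
    * maximum-sized (IEEE80211_MAX_FRAME_LEN) frame would need.
    */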
71 static bool is_mem_full(struct ar9170 *ar)
72 {
73         return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
74                 atomic_read(&ar->mem_free_blocks));
75 }
76
77 static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
78 {
79         int queue, i;
80         bool mem_full;
81
82         atomic_inc(&ar->tx_total_queued);
83
84         queue = skb_get_queue_mapping(skb);
85         spin_lock_bh(&ar->tx_stats_lock);
86
87         /*
88          * The driver has to accept the frame, regardless of whether the
89          * queue is full to the brim or not. We have to do the queuing internally,
90          * since mac80211 assumes that a driver which can operate with
91          * aggregated frames does not reject frames for this reason.
92          */
93         ar->tx_stats[queue].len++;
94         ar->tx_stats[queue].count++;
95
96         mem_full = is_mem_full(ar);
97         for (i = 0; i < ar->hw->queues; i++) {
98                 if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
99                         ieee80211_stop_queue(ar->hw, i);
100                         ar->queue_stop_timeout[i] = jiffies;
101                 }
102         }
103
104         spin_unlock_bh(&ar->tx_stats_lock);
105 }
106
107 static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
108 {
109         struct ieee80211_tx_info *txinfo;
110         int queue;
111
112         txinfo = IEEE80211_SKB_CB(skb);
113         queue = skb_get_queue_mapping(skb);
114
115         spin_lock_bh(&ar->tx_stats_lock);
116
117         ar->tx_stats[queue].len--;
118
119         if (!is_mem_full(ar)) {
120                 unsigned int i;
121                 for (i = 0; i < ar->hw->queues; i++) {
122                         if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
123                                 continue;
124
125                         if (ieee80211_queue_stopped(ar->hw, i)) {
126                                 unsigned long tmp;
127
128                                 tmp = jiffies - ar->queue_stop_timeout[i];
129                                 if (tmp > ar->max_queue_stop_timeout[i])
130                                         ar->max_queue_stop_timeout[i] = tmp;
131                         }
132
133                         ieee80211_wake_queue(ar->hw, i);
134                 }
135         }
136
137         spin_unlock_bh(&ar->tx_stats_lock);
138         if (atomic_dec_and_test(&ar->tx_total_queued))
139                 complete(&ar->tx_flush);
140 }
141
142 static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
143 {
144         struct _carl9170_tx_superframe *super = (void *) skb->data;
145         unsigned int chunks;
146         int cookie = -1;
147
148         atomic_inc(&ar->mem_allocs);
149
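            /*
             * The frame is accounted for in units of fw.mem_block_size.
             * Example (hypothetical numbers): with a 128 byte block size, a
             * 1566 byte superframe would need DIV_ROUND_UP(1566, 128) = 13
             * memory blocks.
             */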
150         chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
151         if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
152                 atomic_add(chunks, &ar->mem_free_blocks);
153                 return -ENOSPC;
154         }
155
156         spin_lock_bh(&ar->mem_lock);
157         cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
158         spin_unlock_bh(&ar->mem_lock);
159
160         if (unlikely(cookie < 0)) {
161                 atomic_add(chunks, &ar->mem_free_blocks);
162                 return -ENOSPC;
163         }
164
165         super = (void *) skb->data;
166
167         /*
168          * Cookie #0 serves two special purposes:
169          *  1. The firmware might use it to generate BlockAck frames
170          *     in response to incoming BlockAckReqs.
171          *
172          *  2. Prevent double-free bugs.
173          */
174         super->s.cookie = (u8) cookie + 1;
175         return 0;
176 }
177
178 static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
179 {
180         struct _carl9170_tx_superframe *super = (void *) skb->data;
181         int cookie;
182
183         /* make a local copy of the cookie */
184         cookie = super->s.cookie;
185         /* invalidate cookie */
186         super->s.cookie = 0;
187
188         /*
189          * Do an out-of-bounds check on the cookie:
190          *
191          *  * cookie "0" is reserved and won't be assigned to any
192          *    out-going frame. Internally however, it is used to
193          *    mark no longer/un-accounted frames and serves as a
194          *    cheap way of preventing frames from being freed
195          *    twice by _accident_. NB: There is a tiny race...
196          *
197          *  * obviously, the cookie number is limited by the number
198          *    of available memory blocks, so it can
199          *    never exceed the mem_blocks count.
200          */
201         if (unlikely(WARN_ON_ONCE(cookie == 0) ||
202             WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))
203                 return;
204
205         atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
206                    &ar->mem_free_blocks);
207
208         spin_lock_bh(&ar->mem_lock);
209         bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
210         spin_unlock_bh(&ar->mem_lock);
211 }
212
213 /* Called from any context */
214 static void carl9170_tx_release(struct kref *ref)
215 {
216         struct ar9170 *ar;
217         struct carl9170_tx_info *arinfo;
218         struct ieee80211_tx_info *txinfo;
219         struct sk_buff *skb;
220
221         arinfo = container_of(ref, struct carl9170_tx_info, ref);
222         txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
223                               rate_driver_data);
224         skb = container_of((void *) txinfo, struct sk_buff, cb);
225
226         ar = arinfo->ar;
227         if (WARN_ON_ONCE(!ar))
228                 return;
229
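            /*
             * Wipe the tx status area of the ieee80211_tx_info from
             * status.ampdu_ack_len onwards, while keeping the flags and
             * the rate/tries information in status.rates[].
             */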
230         BUILD_BUG_ON(
231             offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);
232
233         memset(&txinfo->status.ampdu_ack_len, 0,
234                sizeof(struct ieee80211_tx_info) -
235                offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
236
237         if (atomic_read(&ar->tx_total_queued))
238                 ar->tx_schedule = true;
239
240         if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
241                 if (!atomic_read(&ar->tx_ampdu_upload))
242                         ar->tx_ampdu_schedule = true;
243
244                 if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
245                         txinfo->status.ampdu_len = txinfo->pad[0];
246                         txinfo->status.ampdu_ack_len = txinfo->pad[1];
247                         txinfo->pad[0] = txinfo->pad[1] = 0;
248                 } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
249                         /*
250                          * drop redundant tx_status reports:
251                          *
252                          * 1. ampdu_ack_len of the final tx_status already
253                          *    includes the feedback of this particular frame.
254                          *
255                          * 2. tx_status_irqsafe only queues up to 128
256                          *    tx feedback reports and discards the rest.
257                          *
258                          * 3. minstrel_ht is picky, it only accepts
259                          *    reports of frames with the TX_STATUS_AMPDU flag.
260                          */
261
262                         dev_kfree_skb_any(skb);
263                         return;
264                 } else {
265                         /*
266                          * Frame has failed, but we want to keep it in
267                          * case it was lost due to a power-state
268                          * transition.
269                          */
270                 }
271         }
272
273         skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
274         ieee80211_tx_status_irqsafe(ar->hw, skb);
275 }
276
277 void carl9170_tx_get_skb(struct sk_buff *skb)
278 {
279         struct carl9170_tx_info *arinfo = (void *)
280                 (IEEE80211_SKB_CB(skb))->rate_driver_data;
281         kref_get(&arinfo->ref);
282 }
283
284 int carl9170_tx_put_skb(struct sk_buff *skb)
285 {
286         struct carl9170_tx_info *arinfo = (void *)
287                 (IEEE80211_SKB_CB(skb))->rate_driver_data;
288
289         return kref_put(&arinfo->ref, carl9170_tx_release);
290 }
291
292 /* Caller must hold the tid_info->lock & rcu_read_lock */
293 static void carl9170_tx_shift_bm(struct ar9170 *ar,
294         struct carl9170_sta_tid *tid_info, u16 seq)
295 {
296         u16 off;
297
298         off = SEQ_DIFF(seq, tid_info->bsn);
299
300         if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
301                 return;
302
303         /*
304          * Sanity check. For each MPDU we set the bit in the bitmap and
305          * clear it once we receive the tx_status.
306          * But if the bit is already cleared then we've been bitten
307          * by a bug.
308          */
309         WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));
310
311         off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
312         if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
313                 return;
314
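            /*
             * Advance the start of the BlockAck window (bsn): if all MPDUs
             * up to snx have been acknowledged, the window moves right up
             * to snx, otherwise only up to the first still-outstanding MPDU.
             */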
315         if (!bitmap_empty(tid_info->bitmap, off))
316                 off = find_first_bit(tid_info->bitmap, off);
317
318         tid_info->bsn += off;
319         tid_info->bsn &= 0x0fff;
320
321         bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
322                            off, CARL9170_BAW_BITS);
323 }
324
325 static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
326         struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
327 {
328         struct _carl9170_tx_superframe *super = (void *) skb->data;
329         struct ieee80211_hdr *hdr = (void *) super->frame_data;
330         struct ieee80211_tx_info *tx_info;
331         struct carl9170_tx_info *ar_info;
332         struct carl9170_sta_info *sta_info;
333         struct ieee80211_sta *sta;
334         struct carl9170_sta_tid *tid_info;
335         struct ieee80211_vif *vif;
336         unsigned int vif_id;
337         u8 tid;
338
339         if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
340             txinfo->flags & IEEE80211_TX_CTL_INJECTED)
341                 return;
342
343         tx_info = IEEE80211_SKB_CB(skb);
344         ar_info = (void *) tx_info->rate_driver_data;
345
346         vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
347                  CARL9170_TX_SUPER_MISC_VIF_ID_S;
348
349         if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
350                 return;
351
352         rcu_read_lock();
353         vif = rcu_dereference(ar->vif_priv[vif_id].vif);
354         if (unlikely(!vif))
355                 goto out_rcu;
356
357         /*
358          * Normally we should use wrappers like ieee80211_get_DA to get
359          * the correct peer ieee80211_sta.
360          *
361          * But there is a problem with indirect traffic (broadcasts, or
362          * data which is designated for other stations) in station mode.
363          * The frame will be directed to the AP for distribution and not
364          * to the actual destination.
365          */
366         sta = ieee80211_find_sta(vif, hdr->addr1);
367         if (unlikely(!sta))
368                 goto out_rcu;
369
370         tid = get_tid_h(hdr);
371
372         sta_info = (void *) sta->drv_priv;
373         tid_info = rcu_dereference(sta_info->agg[tid]);
374         if (!tid_info)
375                 goto out_rcu;
376
377         spin_lock_bh(&tid_info->lock);
378         if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
379                 carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));
380
381         if (sta_info->stats[tid].clear) {
382                 sta_info->stats[tid].clear = false;
383                 sta_info->stats[tid].ampdu_len = 0;
384                 sta_info->stats[tid].ampdu_ack_len = 0;
385         }
386
387         sta_info->stats[tid].ampdu_len++;
388         if (txinfo->status.rates[0].count == 1)
389                 sta_info->stats[tid].ampdu_ack_len++;
390
391         if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
392                 txinfo->pad[0] = sta_info->stats[tid].ampdu_len;
393                 txinfo->pad[1] = sta_info->stats[tid].ampdu_ack_len;
394                 txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
395                 sta_info->stats[tid].clear = true;
396         }
397         spin_unlock_bh(&tid_info->lock);
398
399 out_rcu:
400         rcu_read_unlock();
401 }
402
403 void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
404                         const bool success)
405 {
406         struct ieee80211_tx_info *txinfo;
407
408         carl9170_tx_accounting_free(ar, skb);
409
410         txinfo = IEEE80211_SKB_CB(skb);
411
412         if (success)
413                 txinfo->flags |= IEEE80211_TX_STAT_ACK;
414         else
415                 ar->tx_ack_failures++;
416
417         if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
418                 carl9170_tx_status_process_ampdu(ar, skb, txinfo);
419
420         carl9170_tx_put_skb(skb);
421 }
422
423 /* This function may be called from any context */
424 void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
425 {
426         struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
427
428         atomic_dec(&ar->tx_total_pending);
429
430         if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
431                 atomic_dec(&ar->tx_ampdu_upload);
432
433         if (carl9170_tx_put_skb(skb))
434                 tasklet_hi_schedule(&ar->usb_tasklet);
435 }
436
437 static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
438                                                struct sk_buff_head *queue)
439 {
440         struct sk_buff *skb;
441
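            /*
             * Look up the frame that matches the firmware cookie, unlink
             * it from the status queue and return its device memory blocks.
             */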
442         spin_lock_bh(&queue->lock);
443         skb_queue_walk(queue, skb) {
444                 struct _carl9170_tx_superframe *txc = (void *) skb->data;
445
446                 if (txc->s.cookie != cookie)
447                         continue;
448
449                 __skb_unlink(skb, queue);
450                 spin_unlock_bh(&queue->lock);
451
452                 carl9170_release_dev_space(ar, skb);
453                 return skb;
454         }
455         spin_unlock_bh(&queue->lock);
456
457         return NULL;
458 }
459
460 static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
461         unsigned int tries, struct ieee80211_tx_info *txinfo)
462 {
463         unsigned int i;
464
465         for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
466                 if (txinfo->status.rates[i].idx < 0)
467                         break;
468
469                 if (i == rix) {
470                         txinfo->status.rates[i].count = tries;
471                         i++;
472                         break;
473                 }
474         }
475
476         for (; i < IEEE80211_TX_MAX_RATES; i++) {
477                 txinfo->status.rates[i].idx = -1;
478                 txinfo->status.rates[i].count = 0;
479         }
480 }
481
482 static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
483 {
484         int i;
485         struct sk_buff *skb;
486         struct ieee80211_tx_info *txinfo;
487         struct carl9170_tx_info *arinfo;
488         bool restart = false;
489
490         for (i = 0; i < ar->hw->queues; i++) {
491                 spin_lock_bh(&ar->tx_status[i].lock);
492
493                 skb = skb_peek(&ar->tx_status[i]);
494
495                 if (!skb)
496                         goto next;
497
498                 txinfo = IEEE80211_SKB_CB(skb);
499                 arinfo = (void *) txinfo->rate_driver_data;
500
501                 if (time_is_before_jiffies(arinfo->timeout +
502                     msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)) == true)
503                         restart = true;
504
505 next:
506                 spin_unlock_bh(&ar->tx_status[i].lock);
507         }
508
509         if (restart) {
510                 /*
511                  * At least one queue has been stuck for long enough.
512                  * Give the device a kick and hope it gets back to
513                  * work.
514                  *
515                  * possible reasons may include:
516                  *  - frames got lost/corrupted (bad connection to the device)
517                  *  - stalled rx processing/usb controller hiccups
518                  *  - firmware errors/bugs
519                  *  - every bug you can think of.
520                  *  - all bugs you can't...
521                  *  - ...
522                  */
523                 carl9170_restart(ar, CARL9170_RR_STUCK_TX);
524         }
525 }
526
527 void carl9170_tx_janitor(struct work_struct *work)
528 {
529         struct ar9170 *ar = container_of(work, struct ar9170,
530                                          tx_janitor.work);
531         if (!IS_STARTED(ar))
532                 return;
533
534         ar->tx_janitor_last_run = jiffies;
535
536         carl9170_check_queue_stop_timeout(ar);
537
538         if (!atomic_read(&ar->tx_total_queued))
539                 return;
540
541         ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
542                 msecs_to_jiffies(CARL9170_TX_TIMEOUT));
543 }
544
545 static void __carl9170_tx_process_status(struct ar9170 *ar,
546         const uint8_t cookie, const uint8_t info)
547 {
548         struct sk_buff *skb;
549         struct ieee80211_tx_info *txinfo;
550         struct carl9170_tx_info *arinfo;
551         unsigned int r, t, q;
552         bool success = true;
553
554         q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];
555
556         skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
557         if (!skb) {
558                 /*
559                  * We have lost the race to another thread.
560                  */
561
562                 return;
563         }
564
565         txinfo = IEEE80211_SKB_CB(skb);
566         arinfo = (void *) txinfo->rate_driver_data;
567
568         if (!(info & CARL9170_TX_STATUS_SUCCESS))
569                 success = false;
570
571         r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
572         t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;
573
574         carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
575         carl9170_tx_status(ar, skb, success);
576 }
577
578 void carl9170_tx_process_status(struct ar9170 *ar,
579                                 const struct carl9170_rsp *cmd)
580 {
581         unsigned int i;
582
583         for (i = 0;  i < cmd->hdr.ext; i++) {
584                 if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
585                         print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
586                                              (void *) cmd, cmd->hdr.len + 4);
587                         break;
588                 }
589
590                 __carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
591                                              cmd->_tx_status[i].info);
592         }
593 }
594
595 static __le32 carl9170_tx_physet(struct ar9170 *ar,
596         struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
597 {
598         struct ieee80211_rate *rate = NULL;
599         u32 power, chains;
600         __le32 tmp;
601
602         tmp = cpu_to_le32(0);
603
604         if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
605                 tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<
606                         AR9170_TX_PHY_BW_S);
607         /* this works because 40 MHz is 2 and dup is 3 */
608         if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
609                 tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<
610                         AR9170_TX_PHY_BW_S);
611
612         if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
613                 tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
614
615         if (txrate->flags & IEEE80211_TX_RC_MCS) {
616                 u32 r = txrate->idx;
617                 u8 *txpower;
618
619                 /* heavy clip control */
620                 tmp |= cpu_to_le32((r & 0x7) <<
621                         AR9170_TX_PHY_TX_HEAVY_CLIP_S);
622
623                 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
624                         if (info->band == IEEE80211_BAND_5GHZ)
625                                 txpower = ar->power_5G_ht40;
626                         else
627                                 txpower = ar->power_2G_ht40;
628                 } else {
629                         if (info->band == IEEE80211_BAND_5GHZ)
630                                 txpower = ar->power_5G_ht20;
631                         else
632                                 txpower = ar->power_2G_ht20;
633                 }
634
635                 power = txpower[r & 7];
636
637                 /* +1 dBm for HT40 */
638                 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
639                         power += 2;
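                    /* power is presumably stored in 0.5 dB steps, hence += 2 */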
640
641                 r <<= AR9170_TX_PHY_MCS_S;
642                 BUG_ON(r & ~AR9170_TX_PHY_MCS);
643
644                 tmp |= cpu_to_le32(r & AR9170_TX_PHY_MCS);
645                 tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
646
647                 /*
648                  * green field preamble does not work.
649                  *
650                  * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
651                  * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
652                  */
653         } else {
654                 u8 *txpower;
655                 u32 mod;
656                 u32 phyrate;
657                 u8 idx = txrate->idx;
658
659                 if (info->band != IEEE80211_BAND_2GHZ) {
660                         idx += 4;
661                         txpower = ar->power_5G_leg;
662                         mod = AR9170_TX_PHY_MOD_OFDM;
663                 } else {
664                         if (idx < 4) {
665                                 txpower = ar->power_2G_cck;
666                                 mod = AR9170_TX_PHY_MOD_CCK;
667                         } else {
668                                 mod = AR9170_TX_PHY_MOD_OFDM;
669                                 txpower = ar->power_2G_ofdm;
670                         }
671                 }
672
673                 rate = &__carl9170_ratetable[idx];
674
675                 phyrate = rate->hw_value & 0xF;
676                 power = txpower[(rate->hw_value & 0x30) >> 4];
677                 phyrate <<= AR9170_TX_PHY_MCS_S;
678
679                 tmp |= cpu_to_le32(mod);
680                 tmp |= cpu_to_le32(phyrate);
681
682                 /*
683                  * short preamble seems to be broken too.
684                  *
685                  * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
686                  *      tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
687                  */
688         }
689         power <<= AR9170_TX_PHY_TX_PWR_S;
690         power &= AR9170_TX_PHY_TX_PWR;
691         tmp |= cpu_to_le32(power);
692
693         /* set TX chains */
694         if (ar->eeprom.tx_mask == 1) {
695                 chains = AR9170_TX_PHY_TXCHAIN_1;
696         } else {
697                 chains = AR9170_TX_PHY_TXCHAIN_2;
698
699                 /* >= 36M legacy OFDM - use only one chain */
700                 if (rate && rate->bitrate >= 360 &&
701                     !(txrate->flags & IEEE80211_TX_RC_MCS))
702                         chains = AR9170_TX_PHY_TXCHAIN_1;
703         }
704         tmp |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_S);
705
706         return tmp;
707 }
708
709 static bool carl9170_tx_rts_check(struct ar9170 *ar,
710                                   struct ieee80211_tx_rate *rate,
711                                   bool ampdu, bool multi)
712 {
713         switch (ar->erp_mode) {
714         case CARL9170_ERP_AUTO:
715                 if (ampdu)
716                         break;
717
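                    /* fall through */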
718         case CARL9170_ERP_MAC80211:
719                 if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
720                         break;
721
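                    /* fall through */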
722         case CARL9170_ERP_RTS:
723                 if (likely(!multi))
724                         return true;
725
726         default:
727                 break;
728         }
729
730         return false;
731 }
732
733 static bool carl9170_tx_cts_check(struct ar9170 *ar,
734                                   struct ieee80211_tx_rate *rate)
735 {
736         switch (ar->erp_mode) {
737         case CARL9170_ERP_AUTO:
738         case CARL9170_ERP_MAC80211:
739                 if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
740                         break;
741
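                    /* fall through */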
742         case CARL9170_ERP_CTS:
743                 return true;
744
745         default:
746                 break;
747         }
748
749         return false;
750 }
751
752 static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
753 {
754         struct ieee80211_hdr *hdr;
755         struct _carl9170_tx_superframe *txc;
756         struct carl9170_vif_info *cvif;
757         struct ieee80211_tx_info *info;
758         struct ieee80211_tx_rate *txrate;
759         struct ieee80211_sta *sta;
760         struct carl9170_tx_info *arinfo;
761         unsigned int hw_queue;
762         int i;
763         __le16 mac_tmp;
764         u16 len;
765         bool ampdu, no_ack;
766
767         BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
768         BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
769                      CARL9170_TX_SUPERDESC_LEN);
770
771         BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
772                      AR9170_TX_HWDESC_LEN);
773
774         BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
775
776         BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
777                 ((CARL9170_TX_SUPER_MISC_VIF_ID >>
778                  CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
779
780         hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];
781
782         hdr = (void *)skb->data;
783         info = IEEE80211_SKB_CB(skb);
784         len = skb->len;
785
786         /*
787          * Note: If the frame was sent through a monitor interface,
788          * the ieee80211_vif pointer can be NULL.
789          */
790         if (likely(info->control.vif))
791                 cvif = (void *) info->control.vif->drv_priv;
792         else
793                 cvif = NULL;
794
795         sta = info->control.sta;
796
797         txc = (void *)skb_push(skb, sizeof(*txc));
798         memset(txc, 0, sizeof(*txc));
799
800         SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);
801
802         if (likely(cvif))
803                 SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, cvif->id);
804
805         if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
806                 txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
807
808         if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
809                 txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
810
811         mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
812                               AR9170_TX_MAC_BACKOFF);
813         mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
814                                AR9170_TX_MAC_QOS);
815
816         no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
817         if (unlikely(no_ack))
818                 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
819
820         if (info->control.hw_key) {
821                 len += info->control.hw_key->icv_len;
822
823                 switch (info->control.hw_key->cipher) {
824                 case WLAN_CIPHER_SUITE_WEP40:
825                 case WLAN_CIPHER_SUITE_WEP104:
826                 case WLAN_CIPHER_SUITE_TKIP:
827                         mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_RC4);
828                         break;
829                 case WLAN_CIPHER_SUITE_CCMP:
830                         mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_AES);
831                         break;
832                 default:
833                         WARN_ON(1);
834                         goto err_out;
835                 }
836         }
837
838         ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
839         if (ampdu) {
840                 unsigned int density, factor;
841
842                 if (unlikely(!sta || !cvif))
843                         goto err_out;
844
845                 factor = min_t(unsigned int, 1u,
846                          info->control.sta->ht_cap.ampdu_factor);
847
848                 density = info->control.sta->ht_cap.ampdu_density;
849
850                 if (density) {
851                         /*
852                          * Watch out!
853                          *
854                          * Otus uses slightly different density values than
855                          * those from the 802.11n spec.
856                          */
857
858                         density = max_t(unsigned int, density + 1, 7u);
859                 }
860
861                 SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
862                         txc->s.ampdu_settings, density);
863
864                 SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
865                         txc->s.ampdu_settings, factor);
866
867                 for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
868                         txrate = &info->control.rates[i];
869                         if (txrate->idx >= 0) {
870                                 txc->s.ri[i] =
871                                         CARL9170_TX_SUPER_RI_AMPDU;
872
873                                 if (WARN_ON(!(txrate->flags &
874                                               IEEE80211_TX_RC_MCS))) {
875                                         /*
876                                          * Not sure if it's even possible
877                                          * to aggregate non-ht rates with
878                                          * this HW.
879                                          */
880                                         goto err_out;
881                                 }
882                                 continue;
883                         }
884
885                         txrate->idx = 0;
886                         txrate->count = ar->hw->max_rate_tries;
887                 }
888
889                 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
890         }
891
892         /*
893          * NOTE: For the first rate, the ERP & AMPDU flags are directly
894          * taken from mac_control. For all fallback rates, the firmware
895          * updates the mac_control flags from the rate info field.
896          */
897         for (i = 1; i < CARL9170_TX_MAX_RATES; i++) {
898                 txrate = &info->control.rates[i];
899                 if (txrate->idx < 0)
900                         break;
901
902                 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
903                         txrate->count);
904
905                 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
906                         txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
907                                 CARL9170_TX_SUPER_RI_ERP_PROT_S);
908                 else if (carl9170_tx_cts_check(ar, txrate))
909                         txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
910                                 CARL9170_TX_SUPER_RI_ERP_PROT_S);
911
912                 txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate);
913         }
914
915         txrate = &info->control.rates[0];
916         SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);
917
918         if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
919                 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
920         else if (carl9170_tx_cts_check(ar, txrate))
921                 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
922
923         txc->s.len = cpu_to_le16(skb->len);
924         txc->f.length = cpu_to_le16(len + FCS_LEN);
925         txc->f.mac_control = mac_tmp;
926         txc->f.phy_control = carl9170_tx_physet(ar, info, txrate);
927
928         arinfo = (void *)info->rate_driver_data;
929         arinfo->timeout = jiffies;
930         arinfo->ar = ar;
931         kref_init(&arinfo->ref);
932         return 0;
933
934 err_out:
935         skb_pull(skb, sizeof(*txc));
936         return -EINVAL;
937 }
938
939 static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
940 {
941         struct _carl9170_tx_superframe *super;
942
943         super = (void *) skb->data;
944         super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
945 }
946
947 static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
948 {
949         struct _carl9170_tx_superframe *super;
950         int tmp;
951
952         super = (void *) skb->data;
953
954         tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
955                 CARL9170_TX_SUPER_AMPDU_DENSITY_S;
956
957         /*
958          * If you haven't noticed, carl9170_tx_prepare has already filled
959          * in all ampdu spacing & factor parameters.
960          * Now it's time to check whether the settings have to be
961          * updated by the firmware, or if everything is still the same.
962          *
963          * There's no sane way to handle different density values with
964          * this hardware, so we may as well just do the compare in the
965          * driver.
966          */
967
968         if (tmp != ar->current_density) {
969                 ar->current_density = tmp;
970                 super->s.ampdu_settings |=
971                         CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;
972         }
973
974         tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
975                 CARL9170_TX_SUPER_AMPDU_FACTOR_S;
976
977         if (tmp != ar->current_factor) {
978                 ar->current_factor = tmp;
979                 super->s.ampdu_settings |=
980                         CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
981         }
982 }
983
984 static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
985                                    struct sk_buff *_src)
986 {
987         struct _carl9170_tx_superframe *dest, *src;
988
989         dest = (void *) _dest->data;
990         src = (void *) _src->data;
991
992         /*
993          * The mac80211 rate control algorithm expects that all MPDUs in
994          * an AMPDU share the same tx vectors.
995          * This is not really obvious right now, because the hardware
996          * does the AMPDU setup according to its own rulebook.
997          * Our nicely assembled, strictly monotonic increasing mpdu
998          * chains will be broken up, mashed back together...
999          */
1000
1001         return (dest->f.phy_control == src->f.phy_control);
1002 }
1003
1004 static void carl9170_tx_ampdu(struct ar9170 *ar)
1005 {
1006         struct sk_buff_head agg;
1007         struct carl9170_sta_tid *tid_info;
1008         struct sk_buff *skb, *first;
1009         unsigned int i = 0, done_ampdus = 0;
1010         u16 seq, queue, tmpssn;
1011
1012         atomic_inc(&ar->tx_ampdu_scheduler);
1013         ar->tx_ampdu_schedule = false;
1014
1015         if (atomic_read(&ar->tx_ampdu_upload))
1016                 return;
1017
1018         if (!ar->tx_ampdu_list_len)
1019                 return;
1020
1021         __skb_queue_head_init(&agg);
1022
1023         rcu_read_lock();
1024         tid_info = rcu_dereference(ar->tx_ampdu_iter);
1025         if (WARN_ON_ONCE(!tid_info)) {
1026                 rcu_read_unlock();
1027                 return;
1028         }
1029
1030 retry:
1031         list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
1032                 i++;
1033
1034                 if (tid_info->state < CARL9170_TID_STATE_PROGRESS)
1035                         continue;
1036
1037                 queue = TID_TO_WME_AC(tid_info->tid);
1038
1039                 spin_lock_bh(&tid_info->lock);
1040                 if (tid_info->state != CARL9170_TID_STATE_XMIT)
1041                         goto processed;
1042
1043                 tid_info->counter++;
1044                 first = skb_peek(&tid_info->queue);
1045                 tmpssn = carl9170_get_seq(first);
1046                 seq = tid_info->snx;
1047
1048                 if (unlikely(tmpssn != seq)) {
1049                         tid_info->state = CARL9170_TID_STATE_IDLE;
1050
1051                         goto processed;
1052                 }
1053
1054                 while ((skb = skb_peek(&tid_info->queue))) {
1055                         /* strict 0, 1, ..., n - 1, n frame sequence order */
1056                         if (unlikely(carl9170_get_seq(skb) != seq))
1057                                 break;
1058
1059                         /* don't upload more than AMPDU FACTOR allows. */
1060                         if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
1061                             (tid_info->max - 1)))
1062                                 break;
1063
1064                         if (!carl9170_tx_rate_check(ar, skb, first))
1065                                 break;
1066
1067                         atomic_inc(&ar->tx_ampdu_upload);
1068                         tid_info->snx = seq = SEQ_NEXT(seq);
1069                         __skb_unlink(skb, &tid_info->queue);
1070
1071                         __skb_queue_tail(&agg, skb);
1072
1073                         if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)
1074                                 break;
1075                 }
1076
1077                 if (skb_queue_empty(&tid_info->queue) ||
1078                     carl9170_get_seq(skb_peek(&tid_info->queue)) !=
1079                     tid_info->snx) {
1080                         /*
1081                          * stop the TID if A-MPDU frames are still missing,
1082                          * or whenever the queue is empty.
1083                          */
1084
1085                         tid_info->state = CARL9170_TID_STATE_IDLE;
1086                 }
1087                 done_ampdus++;
1088
1089 processed:
1090                 spin_unlock_bh(&tid_info->lock);
1091
1092                 if (skb_queue_empty(&agg))
1093                         continue;
1094
1095                 /* apply ampdu spacing & factor settings */
1096                 carl9170_set_ampdu_params(ar, skb_peek(&agg));
1097
1098                 /* set aggregation push bit */
1099                 carl9170_set_immba(ar, skb_peek_tail(&agg));
1100
1101                 spin_lock_bh(&ar->tx_pending[queue].lock);
1102                 skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
1103                 spin_unlock_bh(&ar->tx_pending[queue].lock);
1104                 ar->tx_schedule = true;
1105         }
1106         if ((done_ampdus++ == 0) && (i++ == 0))
1107                 goto retry;
1108
1109         rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
1110         rcu_read_unlock();
1111 }
1112
1113 static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
1114                                             struct sk_buff_head *queue)
1115 {
1116         struct sk_buff *skb;
1117         struct ieee80211_tx_info *info;
1118         struct carl9170_tx_info *arinfo;
1119
1120         BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
1121
1122         spin_lock_bh(&queue->lock);
1123         skb = skb_peek(queue);
1124         if (unlikely(!skb))
1125                 goto err_unlock;
1126
1127         if (carl9170_alloc_dev_space(ar, skb))
1128                 goto err_unlock;
1129
1130         __skb_unlink(skb, queue);
1131         spin_unlock_bh(&queue->lock);
1132
1133         info = IEEE80211_SKB_CB(skb);
1134         arinfo = (void *) info->rate_driver_data;
1135
1136         arinfo->timeout = jiffies;
1137
1138         /*
1139          * increase ref count to "2".
1140          * Ref counting is the easiest way to solve the race between
1141          * the urb's completion routine: carl9170_tx_callback and
1142          * wlan tx status functions: carl9170_tx_status/janitor.
1143          */
1144         carl9170_tx_get_skb(skb);
1145
1146         return skb;
1147
1148 err_unlock:
1149         spin_unlock_bh(&queue->lock);
1150         return NULL;
1151 }
1152
1153 void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
1154 {
1155         struct _carl9170_tx_superframe *super;
1156         uint8_t q = 0;
1157
1158         ar->tx_dropped++;
1159
1160         super = (void *)skb->data;
1161         SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
1162                 ar9170_qmap[carl9170_get_queue(ar, skb)]);
1163         __carl9170_tx_process_status(ar, super->s.cookie, q);
1164 }
1165
1166 static void carl9170_tx(struct ar9170 *ar)
1167 {
1168         struct sk_buff *skb;
1169         unsigned int i, q;
1170         bool schedule_garbagecollector = false;
1171
1172         ar->tx_schedule = false;
1173
1174         if (unlikely(!IS_STARTED(ar)))
1175                 return;
1176
1177         carl9170_usb_handle_tx_err(ar);
1178
1179         for (i = 0; i < ar->hw->queues; i++) {
1180                 while (!skb_queue_empty(&ar->tx_pending[i])) {
1181                         skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
1182                         if (unlikely(!skb))
1183                                 break;
1184
1185                         atomic_inc(&ar->tx_total_pending);
1186
1187                         q = __carl9170_get_queue(ar, i);
1188                         /*
1189                          * NB: tx_status[i] vs. tx_status[q],
1190                          * TODO: Move into pick_skb or alloc_dev_space.
1191                          */
1192                         skb_queue_tail(&ar->tx_status[q], skb);
1193
1194                         carl9170_usb_tx(ar, skb);
1195                         schedule_garbagecollector = true;
1196                 }
1197         }
1198
1199         if (!schedule_garbagecollector)
1200                 return;
1201
1202         ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
1203                 msecs_to_jiffies(CARL9170_TX_TIMEOUT));
1204 }
1205
1206 static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
1207         struct ieee80211_sta *sta, struct sk_buff *skb)
1208 {
1209         struct carl9170_sta_info *sta_info;
1210         struct carl9170_sta_tid *agg;
1211         struct sk_buff *iter;
1212         unsigned int max;
1213         u16 tid, seq, qseq, off;
1214         bool run = false;
1215
1216         tid = carl9170_get_tid(skb);
1217         seq = carl9170_get_seq(skb);
1218         sta_info = (void *) sta->drv_priv;
1219
1220         rcu_read_lock();
1221         agg = rcu_dereference(sta_info->agg[tid]);
1222         max = sta_info->ampdu_max_len;
1223
1224         if (!agg)
1225                 goto err_unlock_rcu;
1226
1227         spin_lock_bh(&agg->lock);
1228         if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))
1229                 goto err_unlock;
1230
1231         /* check if sequence is within the BA window */
1232         if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))
1233                 goto err_unlock;
1234
1235         if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))
1236                 goto err_unlock;
1237
1238         off = SEQ_DIFF(seq, agg->bsn);
1239         if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))
1240                 goto err_unlock;
1241
1242         if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
1243                 __skb_queue_tail(&agg->queue, skb);
1244                 agg->hsn = seq;
1245                 goto queued;
1246         }
1247
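             /*
              * The frame arrived out of order: walk the queue from the tail
              * and insert it after the last entry with a lower sequence
              * number, so the TID queue stays sorted by sequence.
              */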
1248         skb_queue_reverse_walk(&agg->queue, iter) {
1249                 qseq = carl9170_get_seq(iter);
1250
1251                 if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
1252                         __skb_queue_after(&agg->queue, iter, skb);
1253                         goto queued;
1254                 }
1255         }
1256
1257         __skb_queue_head(&agg->queue, skb);
1258 queued:
1259
1260         if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
1261                 if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
1262                         agg->state = CARL9170_TID_STATE_XMIT;
1263                         run = true;
1264                 }
1265         }
1266
1267         spin_unlock_bh(&agg->lock);
1268         rcu_read_unlock();
1269
1270         return run;
1271
1272 err_unlock:
1273         spin_unlock_bh(&agg->lock);
1274
1275 err_unlock_rcu:
1276         rcu_read_unlock();
1277         carl9170_tx_status(ar, skb, false);
1278         ar->tx_dropped++;
1279         return false;
1280 }
1281
1282 int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1283 {
1284         struct ar9170 *ar = hw->priv;
1285         struct ieee80211_tx_info *info;
1286         struct ieee80211_sta *sta;
1287         bool run;
1288
1289         if (unlikely(!IS_STARTED(ar)))
1290                 goto err_free;
1291
1292         info = IEEE80211_SKB_CB(skb);
1293         sta = info->control.sta;
1294
1295         if (unlikely(carl9170_tx_prepare(ar, skb)))
1296                 goto err_free;
1297
1298         carl9170_tx_accounting(ar, skb);
1299         /*
1300          * from now on, one has to use carl9170_tx_status to free
1301          * all resources which are associated with the frame.
1302          */
1303
1304         if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1305                 if (WARN_ON_ONCE(!sta))
1306                         goto err_free;
1307
1308                 run = carl9170_tx_ampdu_queue(ar, sta, skb);
1309                 if (run)
1310                         carl9170_tx_ampdu(ar);
1311
1312         } else {
1313                 unsigned int queue = skb_get_queue_mapping(skb);
1314
1315                 skb_queue_tail(&ar->tx_pending[queue], skb);
1316         }
1317
1318         carl9170_tx(ar);
1319         return NETDEV_TX_OK;
1320
1321 err_free:
1322         ar->tx_dropped++;
1323         dev_kfree_skb_any(skb);
1324         return NETDEV_TX_OK;
1325 }
1326
1327 void carl9170_tx_scheduler(struct ar9170 *ar)
1328 {
1329
1330         if (ar->tx_ampdu_schedule)
1331                 carl9170_tx_ampdu(ar);
1332
1333         if (ar->tx_schedule)
1334                 carl9170_tx(ar);
1335 }