drivers/net/wireless/ath/carl9170/tx.c
1 /*
2  * Atheros CARL9170 driver
3  *
4  * 802.11 xmit & status routines
5  *
6  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7  * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; see the file COPYING.  If not, see
21  * http://www.gnu.org/licenses/.
22  *
23  * This file incorporates work covered by the following copyright and
24  * permission notice:
25  *    Copyright (c) 2007-2008 Atheros Communications, Inc.
26  *
27  *    Permission to use, copy, modify, and/or distribute this software for any
28  *    purpose with or without fee is hereby granted, provided that the above
29  *    copyright notice and this permission notice appear in all copies.
30  *
31  *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32  *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33  *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34  *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35  *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36  *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38  */
39
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <net/mac80211.h>
45 #include "carl9170.h"
46 #include "hw.h"
47 #include "cmd.h"
48
49 static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
50                                                 unsigned int queue)
51 {
52         if (unlikely(modparam_noht)) {
53                 return queue;
54         } else {
55                 /*
56                  * This is just another workaround, until
57                  * someone figures out how to get QoS and
58                  * AMPDU to play nicely together.
59                  */
60
61                 return 2;               /* AC_BE */
62         }
63 }
64
65 static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
66                                               struct sk_buff *skb)
67 {
68         return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
69 }
70
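/*
 * Check whether the device memory can still hold a worst-case
 * (IEEE80211_MAX_FRAME_LEN sized) frame, judging by the number of
 * free firmware memory blocks.
 */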
71 static bool is_mem_full(struct ar9170 *ar)
72 {
73         return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
74                 atomic_read(&ar->mem_free_blocks));
75 }
76
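/*
 * Per-queue accounting for every frame mac80211 hands us: bump the
 * queue length and stop any hardware queue that has reached its limit,
 * or all of them once the device memory runs low.
 */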
77 static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
78 {
79         int queue, i;
80         bool mem_full;
81
82         atomic_inc(&ar->tx_total_queued);
83
84         queue = skb_get_queue_mapping(skb);
85         spin_lock_bh(&ar->tx_stats_lock);
86
87         /*
88          * The driver has to accept the frame, regardless of whether the
89          * queue is full to the brim or not. We have to do the queuing
90          * internally, since mac80211 assumes that a driver which can
91          * operate with aggregated frames does not reject frames for this reason.
92          */
93         ar->tx_stats[queue].len++;
94         ar->tx_stats[queue].count++;
95
96         mem_full = is_mem_full(ar);
97         for (i = 0; i < ar->hw->queues; i++) {
98                 if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
99                         ieee80211_stop_queue(ar->hw, i);
100                         ar->queue_stop_timeout[i] = jiffies;
101                 }
102         }
103
104         spin_unlock_bh(&ar->tx_stats_lock);
105 }
106
107 static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
108 {
109         struct ieee80211_tx_info *txinfo;
110         int queue;
111
112         txinfo = IEEE80211_SKB_CB(skb);
113         queue = skb_get_queue_mapping(skb);
114
115         spin_lock_bh(&ar->tx_stats_lock);
116
117         ar->tx_stats[queue].len--;
118
119         if (!is_mem_full(ar)) {
120                 unsigned int i;
121                 for (i = 0; i < ar->hw->queues; i++) {
122                         if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
123                                 continue;
124
125                         if (ieee80211_queue_stopped(ar->hw, i)) {
126                                 unsigned long tmp;
127
128                                 tmp = jiffies - ar->queue_stop_timeout[i];
129                                 if (tmp > ar->max_queue_stop_timeout[i])
130                                         ar->max_queue_stop_timeout[i] = tmp;
131                         }
132
133                         ieee80211_wake_queue(ar->hw, i);
134                 }
135         }
136
137         spin_unlock_bh(&ar->tx_stats_lock);
138         if (atomic_dec_and_test(&ar->tx_total_queued))
139                 complete(&ar->tx_flush);
140 }
141
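/*
 * Reserve enough device memory blocks for this frame and assign it a
 * cookie. The firmware uses the cookie to refer back to the frame in
 * its tx status responses.
 */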
142 static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
143 {
144         struct _carl9170_tx_superframe *super = (void *) skb->data;
145         unsigned int chunks;
146         int cookie = -1;
147
148         atomic_inc(&ar->mem_allocs);
149
150         chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
151         if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
152                 atomic_add(chunks, &ar->mem_free_blocks);
153                 return -ENOSPC;
154         }
155
156         spin_lock_bh(&ar->mem_lock);
157         cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
158         spin_unlock_bh(&ar->mem_lock);
159
160         if (unlikely(cookie < 0)) {
161                 atomic_add(chunks, &ar->mem_free_blocks);
162                 return -ENOSPC;
163         }
164
165         super = (void *) skb->data;
166
167         /*
168          * Cookie #0 serves two special purposes:
169          *  1. The firmware might use it to generate BlockACK frames
170          *     in response to incoming BlockAckReqs.
171          *
172          *  2. Prevent double-free bugs.
173          */
174         super->s.cookie = (u8) cookie + 1;
175         return 0;
176 }
177
178 static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
179 {
180         struct _carl9170_tx_superframe *super = (void *) skb->data;
181         int cookie;
182
183         /* make a local copy of the cookie */
184         cookie = super->s.cookie;
185         /* invalidate cookie */
186         super->s.cookie = 0;
187
188         /*
189          * Do an out-of-bounds check on the cookie:
190          *
191          *  * cookie "0" is reserved and won't be assigned to any
192          *    out-going frame. Internally however, it is used to
193          *    mark no longer/un-accounted frames and serves as a
194          *    cheap way of preventing frames from being freed
195          *    twice by _accident_. NB: There is a tiny race...
196          *
197          *  * obviously, the cookie number is limited by the number
198          *    of available memory blocks, so it can
199          *    never exceed the mem_blocks count.
200          */
201         if (unlikely(WARN_ON_ONCE(cookie == 0) ||
202             WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))
203                 return;
204
205         atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
206                    &ar->mem_free_blocks);
207
208         spin_lock_bh(&ar->mem_lock);
209         bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
210         spin_unlock_bh(&ar->mem_lock);
211 }
212
213 /* Called from any context */
214 static void carl9170_tx_release(struct kref *ref)
215 {
216         struct ar9170 *ar;
217         struct carl9170_tx_info *arinfo;
218         struct ieee80211_tx_info *txinfo;
219         struct sk_buff *skb;
220
221         arinfo = container_of(ref, struct carl9170_tx_info, ref);
222         txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
223                               rate_driver_data);
224         skb = container_of((void *) txinfo, struct sk_buff, cb);
225
226         ar = arinfo->ar;
227         if (WARN_ON_ONCE(!ar))
228                 return;
229
230         BUILD_BUG_ON(
231             offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);
232
233         memset(&txinfo->status.ampdu_ack_len, 0,
234                sizeof(struct ieee80211_tx_info) -
235                offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
236
237         if (atomic_read(&ar->tx_total_queued))
238                 ar->tx_schedule = true;
239
240         if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
241                 if (!atomic_read(&ar->tx_ampdu_upload))
242                         ar->tx_ampdu_schedule = true;
243
244                 if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
245                         struct _carl9170_tx_superframe *super;
246
247                         super = (void *)skb->data;
248                         txinfo->status.ampdu_len = super->s.rix;
249                         txinfo->status.ampdu_ack_len = super->s.cnt;
250                 } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
251                         /*
252                          * drop redundant tx_status reports:
253                          *
254                          * 1. ampdu_ack_len of the final tx_status does
255                          *    include the feedback of this particular frame.
256                          *
257                          * 2. tx_status_irqsafe only queues up to 128
258                          *    tx feedback reports and discards the rest.
259                          *
260                          * 3. minstrel_ht is picky, it only accepts
261                          *    reports of frames with the TX_STATUS_AMPDU flag.
262                          */
263
264                         dev_kfree_skb_any(skb);
265                         return;
266                 } else {
267                         /*
268                          * Frame has failed, but we want to keep it in
269                          * case it was lost due to a power-state
270                          * transition.
271                          */
272                 }
273         }
274
275         skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
276         ieee80211_tx_status_irqsafe(ar->hw, skb);
277 }
278
279 void carl9170_tx_get_skb(struct sk_buff *skb)
280 {
281         struct carl9170_tx_info *arinfo = (void *)
282                 (IEEE80211_SKB_CB(skb))->rate_driver_data;
283         kref_get(&arinfo->ref);
284 }
285
286 int carl9170_tx_put_skb(struct sk_buff *skb)
287 {
288         struct carl9170_tx_info *arinfo = (void *)
289                 (IEEE80211_SKB_CB(skb))->rate_driver_data;
290
291         return kref_put(&arinfo->ref, carl9170_tx_release);
292 }
293
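/*
 * Block-ack window bookkeeping: clear the bitmap bit of the MPDU whose
 * tx status just arrived, then slide the window start (bsn) forward to
 * the oldest frame that is still waiting for its tx status.
 */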
294 /* Caller must hold the tid_info->lock & rcu_read_lock */
295 static void carl9170_tx_shift_bm(struct ar9170 *ar,
296         struct carl9170_sta_tid *tid_info, u16 seq)
297 {
298         u16 off;
299
300         off = SEQ_DIFF(seq, tid_info->bsn);
301
302         if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
303                 return;
304
305         /*
306          * Sanity check. For each MPDU we set the bit in the bitmap and
307          * clear it once we receive the tx_status.
308          * But if the bit is already cleared, then we've been bitten
309          * by a bug.
310          */
311         WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));
312
313         off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
314         if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
315                 return;
316
317         if (!bitmap_empty(tid_info->bitmap, off))
318                 off = find_first_bit(tid_info->bitmap, off);
319
320         tid_info->bsn += off;
321         tid_info->bsn &= 0x0fff;
322
323         bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
324                            off, CARL9170_BAW_BITS);
325 }
326
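/*
 * A-MPDU status handling: update the station's block-ack window and
 * gather per-TID aggregation statistics. When the frame that carries
 * the immediate block-ack request is reported, the collected counters
 * are attached to its tx status (IEEE80211_TX_STAT_AMPDU) for the
 * rate control algorithm.
 */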
327 static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
328         struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
329 {
330         struct _carl9170_tx_superframe *super = (void *) skb->data;
331         struct ieee80211_hdr *hdr = (void *) super->frame_data;
332         struct ieee80211_tx_info *tx_info;
333         struct carl9170_tx_info *ar_info;
334         struct carl9170_sta_info *sta_info;
335         struct ieee80211_sta *sta;
336         struct carl9170_sta_tid *tid_info;
337         struct ieee80211_vif *vif;
338         unsigned int vif_id;
339         u8 tid;
340
341         if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
342             txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
343            (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
344                 return;
345
346         tx_info = IEEE80211_SKB_CB(skb);
347         ar_info = (void *) tx_info->rate_driver_data;
348
349         vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
350                  CARL9170_TX_SUPER_MISC_VIF_ID_S;
351
352         if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
353                 return;
354
355         rcu_read_lock();
356         vif = rcu_dereference(ar->vif_priv[vif_id].vif);
357         if (unlikely(!vif))
358                 goto out_rcu;
359
360         /*
361          * Normally we should use wrappers like ieee80211_get_DA to get
362          * the correct peer ieee80211_sta.
363          *
364          * But there is a problem with indirect traffic (broadcasts, or
365          * data which is designated for other stations) in station mode.
366          * The frame will be directed to the AP for distribution and not
367          * to the actual destination.
368          */
369         sta = ieee80211_find_sta(vif, hdr->addr1);
370         if (unlikely(!sta))
371                 goto out_rcu;
372
373         tid = get_tid_h(hdr);
374
375         sta_info = (void *) sta->drv_priv;
376         tid_info = rcu_dereference(sta_info->agg[tid]);
377         if (!tid_info)
378                 goto out_rcu;
379
380         spin_lock_bh(&tid_info->lock);
381         if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
382                 carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));
383
384         if (sta_info->stats[tid].clear) {
385                 sta_info->stats[tid].clear = false;
386                 sta_info->stats[tid].ampdu_len = 0;
387                 sta_info->stats[tid].ampdu_ack_len = 0;
388         }
389
390         sta_info->stats[tid].ampdu_len++;
391         if (txinfo->status.rates[0].count == 1)
392                 sta_info->stats[tid].ampdu_ack_len++;
393
394         if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
395                 super->s.rix = sta_info->stats[tid].ampdu_len;
396                 super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
397                 txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
398                 sta_info->stats[tid].clear = true;
399         }
400         spin_unlock_bh(&tid_info->lock);
401
402 out_rcu:
403         rcu_read_unlock();
404 }
405
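/*
 * Common tx status path for every frame: drop the tx accounting,
 * translate the firmware's feedback into mac80211 status flags and
 * release our reference on the skb.
 */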
406 void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
407                         const bool success)
408 {
409         struct ieee80211_tx_info *txinfo;
410
411         carl9170_tx_accounting_free(ar, skb);
412
413         txinfo = IEEE80211_SKB_CB(skb);
414
415         if (success)
416                 txinfo->flags |= IEEE80211_TX_STAT_ACK;
417         else
418                 ar->tx_ack_failures++;
419
420         if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
421                 carl9170_tx_status_process_ampdu(ar, skb, txinfo);
422
423         carl9170_tx_put_skb(skb);
424 }
425
426 /* This function may be called from any context */
427 void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
428 {
429         struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
430
431         atomic_dec(&ar->tx_total_pending);
432
433         if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
434                 atomic_dec(&ar->tx_ampdu_upload);
435
436         if (carl9170_tx_put_skb(skb))
437                 tasklet_hi_schedule(&ar->usb_tasklet);
438 }
439
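/*
 * Find the frame that matches the firmware's cookie on the given
 * status queue, unlink it and release its device memory blocks.
 */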
440 static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
441                                                struct sk_buff_head *queue)
442 {
443         struct sk_buff *skb;
444
445         spin_lock_bh(&queue->lock);
446         skb_queue_walk(queue, skb) {
447                 struct _carl9170_tx_superframe *txc = (void *) skb->data;
448
449                 if (txc->s.cookie != cookie)
450                         continue;
451
452                 __skb_unlink(skb, queue);
453                 spin_unlock_bh(&queue->lock);
454
455                 carl9170_release_dev_space(ar, skb);
456                 return skb;
457         }
458         spin_unlock_bh(&queue->lock);
459
460         return NULL;
461 }
462
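/*
 * Fill in the tx status rate table: the firmware reports the index of
 * the rate that was used and the number of tries; that entry's try
 * count is updated and all following entries are invalidated.
 */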
463 static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
464         unsigned int tries, struct ieee80211_tx_info *txinfo)
465 {
466         unsigned int i;
467
468         for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
469                 if (txinfo->status.rates[i].idx < 0)
470                         break;
471
472                 if (i == rix) {
473                         txinfo->status.rates[i].count = tries;
474                         i++;
475                         break;
476                 }
477         }
478
479         for (; i < IEEE80211_TX_MAX_RATES; i++) {
480                 txinfo->status.rates[i].idx = -1;
481                 txinfo->status.rates[i].count = 0;
482         }
483 }
484
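/*
 * Watchdog: if the oldest frame on any tx status queue has been stuck
 * for longer than CARL9170_QUEUE_STUCK_TIMEOUT, the device is most
 * likely wedged and gets restarted.
 */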
485 static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
486 {
487         int i;
488         struct sk_buff *skb;
489         struct ieee80211_tx_info *txinfo;
490         struct carl9170_tx_info *arinfo;
491         bool restart = false;
492
493         for (i = 0; i < ar->hw->queues; i++) {
494                 spin_lock_bh(&ar->tx_status[i].lock);
495
496                 skb = skb_peek(&ar->tx_status[i]);
497
498                 if (!skb)
499                         goto next;
500
501                 txinfo = IEEE80211_SKB_CB(skb);
502                 arinfo = (void *) txinfo->rate_driver_data;
503
504                 if (time_is_before_jiffies(arinfo->timeout +
505                     msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)) == true)
506                         restart = true;
507
508 next:
509                 spin_unlock_bh(&ar->tx_status[i].lock);
510         }
511
512         if (restart) {
513                 /*
514                  * At least one queue has been stuck for long enough.
515                  * Give the device a kick and hope it gets back to
516                  * work.
517                  *
518                  * possible reasons may include:
519                  *  - frames got lost/corrupted (bad connection to the device)
520                  *  - stalled rx processing/usb controller hiccups
521                  *  - firmware errors/bugs
522                  *  - every bug you can think of.
523                  *  - all bugs you can't...
524                  *  - ...
525                  */
526                 carl9170_restart(ar, CARL9170_RR_STUCK_TX);
527         }
528 }
529
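/*
 * If the frame at the head of a TID's aggregation queue has been
 * sitting there for longer than CARL9170_QUEUE_TIMEOUT, tear down the
 * BA session to get the traffic flowing again.
 */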
530 static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
531 {
532         struct carl9170_sta_tid *iter;
533         struct sk_buff *skb;
534         struct ieee80211_tx_info *txinfo;
535         struct carl9170_tx_info *arinfo;
536         struct _carl9170_tx_superframe *super;
537         struct ieee80211_sta *sta;
538         struct ieee80211_vif *vif;
539         struct ieee80211_hdr *hdr;
540         unsigned int vif_id;
541
542         rcu_read_lock();
543         list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
544                 if (iter->state < CARL9170_TID_STATE_IDLE)
545                         continue;
546
547                 spin_lock_bh(&iter->lock);
548                 skb = skb_peek(&iter->queue);
549                 if (!skb)
550                         goto unlock;
551
552                 txinfo = IEEE80211_SKB_CB(skb);
553                 arinfo = (void *)txinfo->rate_driver_data;
554                 if (time_is_after_jiffies(arinfo->timeout +
555                     msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
556                         goto unlock;
557
558                 super = (void *) skb->data;
559                 hdr = (void *) super->frame_data;
560
561                 vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
562                          CARL9170_TX_SUPER_MISC_VIF_ID_S;
563
564                 if (WARN_ON(vif_id >= AR9170_MAX_VIRTUAL_MAC))
565                         goto unlock;
566
567                 vif = rcu_dereference(ar->vif_priv[vif_id].vif);
568                 if (WARN_ON(!vif))
569                         goto unlock;
570
571                 sta = ieee80211_find_sta(vif, hdr->addr1);
572                 if (WARN_ON(!sta))
573                         goto unlock;
574
575                 ieee80211_stop_tx_ba_session(sta, iter->tid);
576 unlock:
577                 spin_unlock_bh(&iter->lock);
578
579         }
580         rcu_read_unlock();
581 }
582
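/*
 * Periodic cleanup worker: checks for stuck queues and stale A-MPDU
 * sessions and re-arms itself for as long as frames are still queued.
 */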
583 void carl9170_tx_janitor(struct work_struct *work)
584 {
585         struct ar9170 *ar = container_of(work, struct ar9170,
586                                          tx_janitor.work);
587         if (!IS_STARTED(ar))
588                 return;
589
590         ar->tx_janitor_last_run = jiffies;
591
592         carl9170_check_queue_stop_timeout(ar);
593         carl9170_tx_ampdu_timeout(ar);
594
595         if (!atomic_read(&ar->tx_total_queued))
596                 return;
597
598         ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
599                 msecs_to_jiffies(CARL9170_TX_TIMEOUT));
600 }
601
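/*
 * Process a single tx status descriptor from the firmware: find the
 * matching frame by its cookie, recover the used rate and try count
 * and complete the frame.
 */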
602 static void __carl9170_tx_process_status(struct ar9170 *ar,
603         const uint8_t cookie, const uint8_t info)
604 {
605         struct sk_buff *skb;
606         struct ieee80211_tx_info *txinfo;
607         struct carl9170_tx_info *arinfo;
608         unsigned int r, t, q;
609         bool success = true;
610
611         q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];
612
613         skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
614         if (!skb) {
615                 /*
616                  * We have lost the race to another thread.
617                  */
618
619                 return;
620         }
621
622         txinfo = IEEE80211_SKB_CB(skb);
623         arinfo = (void *) txinfo->rate_driver_data;
624
625         if (!(info & CARL9170_TX_STATUS_SUCCESS))
626                 success = false;
627
628         r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
629         t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;
630
631         carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
632         carl9170_tx_status(ar, skb, success);
633 }
634
635 void carl9170_tx_process_status(struct ar9170 *ar,
636                                 const struct carl9170_rsp *cmd)
637 {
638         unsigned int i;
639
640         for (i = 0;  i < cmd->hdr.ext; i++) {
641                 if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
642                         print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
643                                              (void *) cmd, cmd->hdr.len + 4);
644                         break;
645                 }
646
647                 __carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
648                                              cmd->_tx_status[i].info);
649         }
650 }
651
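/*
 * Build the PHY control word for one tx rate: bandwidth, guard
 * interval, modulation (CCK/OFDM/HT), MCS or legacy rate, tx power
 * and chain mask.
 */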
652 static __le32 carl9170_tx_physet(struct ar9170 *ar,
653         struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
654 {
655         struct ieee80211_rate *rate = NULL;
656         u32 power, chains;
657         __le32 tmp;
658
659         tmp = cpu_to_le32(0);
660
661         if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
662                 tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<
663                         AR9170_TX_PHY_BW_S);
664         /* this works because 40 MHz is 2 and dup is 3 */
665         if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
666                 tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<
667                         AR9170_TX_PHY_BW_S);
668
669         if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
670                 tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
671
672         if (txrate->flags & IEEE80211_TX_RC_MCS) {
673                 u32 r = txrate->idx;
674                 u8 *txpower;
675
676                 /* heavy clip control */
677                 tmp |= cpu_to_le32((r & 0x7) <<
678                         AR9170_TX_PHY_TX_HEAVY_CLIP_S);
679
680                 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
681                         if (info->band == IEEE80211_BAND_5GHZ)
682                                 txpower = ar->power_5G_ht40;
683                         else
684                                 txpower = ar->power_2G_ht40;
685                 } else {
686                         if (info->band == IEEE80211_BAND_5GHZ)
687                                 txpower = ar->power_5G_ht20;
688                         else
689                                 txpower = ar->power_2G_ht20;
690                 }
691
692                 power = txpower[r & 7];
693
694                 /* +1 dBm for HT40 */
695                 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
696                         power += 2;
697
698                 r <<= AR9170_TX_PHY_MCS_S;
699                 BUG_ON(r & ~AR9170_TX_PHY_MCS);
700
701                 tmp |= cpu_to_le32(r & AR9170_TX_PHY_MCS);
702                 tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
703
704                 /*
705                  * green field preamble does not work.
706                  *
707                  * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
708                  * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
709                  */
710         } else {
711                 u8 *txpower;
712                 u32 mod;
713                 u32 phyrate;
714                 u8 idx = txrate->idx;
715
716                 if (info->band != IEEE80211_BAND_2GHZ) {
717                         idx += 4;
718                         txpower = ar->power_5G_leg;
719                         mod = AR9170_TX_PHY_MOD_OFDM;
720                 } else {
721                         if (idx < 4) {
722                                 txpower = ar->power_2G_cck;
723                                 mod = AR9170_TX_PHY_MOD_CCK;
724                         } else {
725                                 mod = AR9170_TX_PHY_MOD_OFDM;
726                                 txpower = ar->power_2G_ofdm;
727                         }
728                 }
729
730                 rate = &__carl9170_ratetable[idx];
731
732                 phyrate = rate->hw_value & 0xF;
733                 power = txpower[(rate->hw_value & 0x30) >> 4];
734                 phyrate <<= AR9170_TX_PHY_MCS_S;
735
736                 tmp |= cpu_to_le32(mod);
737                 tmp |= cpu_to_le32(phyrate);
738
739                 /*
740                  * short preamble seems to be broken too.
741                  *
742                  * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
743                  *      tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
744                  */
745         }
746         power <<= AR9170_TX_PHY_TX_PWR_S;
747         power &= AR9170_TX_PHY_TX_PWR;
748         tmp |= cpu_to_le32(power);
749
750         /* set TX chains */
751         if (ar->eeprom.tx_mask == 1) {
752                 chains = AR9170_TX_PHY_TXCHAIN_1;
753         } else {
754                 chains = AR9170_TX_PHY_TXCHAIN_2;
755
756                 /* >= 36M legacy OFDM - use only one chain */
757                 if (rate && rate->bitrate >= 360 &&
758                     !(txrate->flags & IEEE80211_TX_RC_MCS))
759                         chains = AR9170_TX_PHY_TXCHAIN_1;
760         }
761         tmp |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_S);
762
763         return tmp;
764 }
765
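/*
 * Decide whether a frame needs RTS/CTS or CTS-to-self protection,
 * based on the configured ERP mode and the rate control flags.
 * Note: the switch statements below rely on case fall-through.
 */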
766 static bool carl9170_tx_rts_check(struct ar9170 *ar,
767                                   struct ieee80211_tx_rate *rate,
768                                   bool ampdu, bool multi)
769 {
770         switch (ar->erp_mode) {
771         case CARL9170_ERP_AUTO:
772                 if (ampdu)
773                         break;
774
775         case CARL9170_ERP_MAC80211:
776                 if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
777                         break;
778
779         case CARL9170_ERP_RTS:
780                 if (likely(!multi))
781                         return true;
782
783         default:
784                 break;
785         }
786
787         return false;
788 }
789
790 static bool carl9170_tx_cts_check(struct ar9170 *ar,
791                                   struct ieee80211_tx_rate *rate)
792 {
793         switch (ar->erp_mode) {
794         case CARL9170_ERP_AUTO:
795         case CARL9170_ERP_MAC80211:
796                 if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
797                         break;
798
799         case CARL9170_ERP_CTS:
800                 return true;
801
802         default:
803                 break;
804         }
805
806         return false;
807 }
808
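/*
 * Prepend the carl9170 tx superframe (superdesc + hardware tx
 * descriptor) to the 802.11 frame and fill in the queue, encryption,
 * A-MPDU and rate/protection settings.
 */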
809 static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
810 {
811         struct ieee80211_hdr *hdr;
812         struct _carl9170_tx_superframe *txc;
813         struct carl9170_vif_info *cvif;
814         struct ieee80211_tx_info *info;
815         struct ieee80211_tx_rate *txrate;
816         struct ieee80211_sta *sta;
817         struct carl9170_tx_info *arinfo;
818         unsigned int hw_queue;
819         int i;
820         __le16 mac_tmp;
821         u16 len;
822         bool ampdu, no_ack;
823
824         BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
825         BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
826                      CARL9170_TX_SUPERDESC_LEN);
827
828         BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
829                      AR9170_TX_HWDESC_LEN);
830
831         BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
832
833         BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
834                 ((CARL9170_TX_SUPER_MISC_VIF_ID >>
835                  CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
836
837         hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];
838
839         hdr = (void *)skb->data;
840         info = IEEE80211_SKB_CB(skb);
841         len = skb->len;
842
843         /*
844          * Note: If the frame was sent through a monitor interface,
845          * the ieee80211_vif pointer can be NULL.
846          */
847         if (likely(info->control.vif))
848                 cvif = (void *) info->control.vif->drv_priv;
849         else
850                 cvif = NULL;
851
852         sta = info->control.sta;
853
854         txc = (void *)skb_push(skb, sizeof(*txc));
855         memset(txc, 0, sizeof(*txc));
856
857         SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);
858
859         if (likely(cvif))
860                 SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, cvif->id);
861
862         if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
863                 txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
864
865         if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
866                 txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
867
868         mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
869                               AR9170_TX_MAC_BACKOFF);
870         mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
871                                AR9170_TX_MAC_QOS);
872
873         no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
874         if (unlikely(no_ack))
875                 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
876
877         if (info->control.hw_key) {
878                 len += info->control.hw_key->icv_len;
879
880                 switch (info->control.hw_key->cipher) {
881                 case WLAN_CIPHER_SUITE_WEP40:
882                 case WLAN_CIPHER_SUITE_WEP104:
883                 case WLAN_CIPHER_SUITE_TKIP:
884                         mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_RC4);
885                         break;
886                 case WLAN_CIPHER_SUITE_CCMP:
887                         mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_AES);
888                         break;
889                 default:
890                         WARN_ON(1);
891                         goto err_out;
892                 }
893         }
894
895         ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
896         if (ampdu) {
897                 unsigned int density, factor;
898
899                 if (unlikely(!sta || !cvif))
900                         goto err_out;
901
902                 factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
903                 density = sta->ht_cap.ampdu_density;
904
905                 if (density) {
906                         /*
907                          * Watch out!
908                          *
909                          * Otus uses slightly different density values than
910                          * those from the 802.11n spec.
911                          */
912
913                         density = max_t(unsigned int, density + 1, 7u);
914                 }
915
916                 SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
917                         txc->s.ampdu_settings, density);
918
919                 SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
920                         txc->s.ampdu_settings, factor);
921
922                 for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
923                         txrate = &info->control.rates[i];
924                         if (txrate->idx >= 0) {
925                                 txc->s.ri[i] =
926                                         CARL9170_TX_SUPER_RI_AMPDU;
927
928                                 if (WARN_ON(!(txrate->flags &
929                                               IEEE80211_TX_RC_MCS))) {
930                                         /*
931                                          * Not sure if it's even possible
932                                          * to aggregate non-ht rates with
933                                          * this HW.
934                                          */
935                                         goto err_out;
936                                 }
937                                 continue;
938                         }
939
940                         txrate->idx = 0;
941                         txrate->count = ar->hw->max_rate_tries;
942                 }
943
944                 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
945         }
946
947         /*
948          * NOTE: For the first rate, the ERP & AMPDU flags are directly
949          * taken from mac_control. For all fallback rates, the firmware
950          * updates the mac_control flags from the rate info field.
951          */
952         for (i = 1; i < CARL9170_TX_MAX_RATES; i++) {
953                 txrate = &info->control.rates[i];
954                 if (txrate->idx < 0)
955                         break;
956
957                 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
958                         txrate->count);
959
960                 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
961                         txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
962                                 CARL9170_TX_SUPER_RI_ERP_PROT_S);
963                 else if (carl9170_tx_cts_check(ar, txrate))
964                         txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
965                                 CARL9170_TX_SUPER_RI_ERP_PROT_S);
966
967                 txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate);
968         }
969
970         txrate = &info->control.rates[0];
971         SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);
972
973         if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
974                 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
975         else if (carl9170_tx_cts_check(ar, txrate))
976                 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
977
978         txc->s.len = cpu_to_le16(skb->len);
979         txc->f.length = cpu_to_le16(len + FCS_LEN);
980         txc->f.mac_control = mac_tmp;
981         txc->f.phy_control = carl9170_tx_physet(ar, info, txrate);
982
983         arinfo = (void *)info->rate_driver_data;
984         arinfo->timeout = jiffies;
985         arinfo->ar = ar;
986         kref_init(&arinfo->ref);
987         return 0;
988
989 err_out:
990         skb_pull(skb, sizeof(*txc));
991         return -EINVAL;
992 }
993
994 static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
995 {
996         struct _carl9170_tx_superframe *super;
997
998         super = (void *) skb->data;
999         super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
1000 }
1001
1002 static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
1003 {
1004         struct _carl9170_tx_superframe *super;
1005         int tmp;
1006
1007         super = (void *) skb->data;
1008
1009         tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
1010                 CARL9170_TX_SUPER_AMPDU_DENSITY_S;
1011
1012         /*
1013          * If you haven't noticed, carl9170_tx_prepare has already filled
1014          * in all the ampdu spacing & factor parameters.
1015          * Now it's time to check whether the settings have to be
1016          * updated by the firmware, or if everything is still the same.
1017          *
1018          * There's no sane way to handle different density values with
1019          * this hardware, so we may as well just do the compare in the
1020          * driver.
1021          */
1022
1023         if (tmp != ar->current_density) {
1024                 ar->current_density = tmp;
1025                 super->s.ampdu_settings |=
1026                         CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;
1027         }
1028
1029         tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
1030                 CARL9170_TX_SUPER_AMPDU_FACTOR_S;
1031
1032         if (tmp != ar->current_factor) {
1033                 ar->current_factor = tmp;
1034                 super->s.ampdu_settings |=
1035                         CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
1036         }
1037 }
1038
1039 static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
1040                                    struct sk_buff *_src)
1041 {
1042         struct _carl9170_tx_superframe *dest, *src;
1043
1044         dest = (void *) _dest->data;
1045         src = (void *) _src->data;
1046
1047         /*
1048          * The mac80211 rate control algorithm expects that all MPDUs in
1049          * an AMPDU share the same tx vectors.
1050          * This is not really obvious right now, because the hardware
1051          * does the AMPDU setup according to its own rulebook.
1052          * Our nicely assembled, strictly monotonically increasing MPDU
1053          * chains will be broken up, mashed back together...
1054          */
1055
1056         return (dest->f.phy_control == src->f.phy_control);
1057 }
1058
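/*
 * A-MPDU scheduler: walk the active TIDs, starting at the round-robin
 * iterator, and move consecutive, in-order frames from each TID queue
 * over to the pending queue as one aggregate.
 */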
1059 static void carl9170_tx_ampdu(struct ar9170 *ar)
1060 {
1061         struct sk_buff_head agg;
1062         struct carl9170_sta_tid *tid_info;
1063         struct sk_buff *skb, *first;
1064         unsigned int i = 0, done_ampdus = 0;
1065         u16 seq, queue, tmpssn;
1066
1067         atomic_inc(&ar->tx_ampdu_scheduler);
1068         ar->tx_ampdu_schedule = false;
1069
1070         if (atomic_read(&ar->tx_ampdu_upload))
1071                 return;
1072
1073         if (!ar->tx_ampdu_list_len)
1074                 return;
1075
1076         __skb_queue_head_init(&agg);
1077
1078         rcu_read_lock();
1079         tid_info = rcu_dereference(ar->tx_ampdu_iter);
1080         if (WARN_ON_ONCE(!tid_info)) {
1081                 rcu_read_unlock();
1082                 return;
1083         }
1084
1085 retry:
1086         list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
1087                 i++;
1088
1089                 if (tid_info->state < CARL9170_TID_STATE_PROGRESS)
1090                         continue;
1091
1092                 queue = TID_TO_WME_AC(tid_info->tid);
1093
1094                 spin_lock_bh(&tid_info->lock);
1095                 if (tid_info->state != CARL9170_TID_STATE_XMIT)
1096                         goto processed;
1097
1098                 tid_info->counter++;
1099                 first = skb_peek(&tid_info->queue);
1100                 tmpssn = carl9170_get_seq(first);
1101                 seq = tid_info->snx;
1102
1103                 if (unlikely(tmpssn != seq)) {
1104                         tid_info->state = CARL9170_TID_STATE_IDLE;
1105
1106                         goto processed;
1107                 }
1108
1109                 while ((skb = skb_peek(&tid_info->queue))) {
1110                         /* strict 0, 1, ..., n - 1, n frame sequence order */
1111                         if (unlikely(carl9170_get_seq(skb) != seq))
1112                                 break;
1113
1114                         /* don't upload more than AMPDU FACTOR allows. */
1115                         if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
1116                             (tid_info->max - 1)))
1117                                 break;
1118
1119                         if (!carl9170_tx_rate_check(ar, skb, first))
1120                                 break;
1121
1122                         atomic_inc(&ar->tx_ampdu_upload);
1123                         tid_info->snx = seq = SEQ_NEXT(seq);
1124                         __skb_unlink(skb, &tid_info->queue);
1125
1126                         __skb_queue_tail(&agg, skb);
1127
1128                         if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)
1129                                 break;
1130                 }
1131
1132                 if (skb_queue_empty(&tid_info->queue) ||
1133                     carl9170_get_seq(skb_peek(&tid_info->queue)) !=
1134                     tid_info->snx) {
1135                         /*
1136                          * stop the TID if A-MPDU frames are still missing,
1137                          * or whenever the queue is empty.
1138                          */
1139
1140                         tid_info->state = CARL9170_TID_STATE_IDLE;
1141                 }
1142                 done_ampdus++;
1143
1144 processed:
1145                 spin_unlock_bh(&tid_info->lock);
1146
1147                 if (skb_queue_empty(&agg))
1148                         continue;
1149
1150                 /* apply ampdu spacing & factor settings */
1151                 carl9170_set_ampdu_params(ar, skb_peek(&agg));
1152
1153                 /* set aggregation push bit */
1154                 carl9170_set_immba(ar, skb_peek_tail(&agg));
1155
1156                 spin_lock_bh(&ar->tx_pending[queue].lock);
1157                 skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
1158                 spin_unlock_bh(&ar->tx_pending[queue].lock);
1159                 ar->tx_schedule = true;
1160         }
1161         if ((done_ampdus++ == 0) && (i++ == 0))
1162                 goto retry;
1163
1164         rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
1165         rcu_read_unlock();
1166 }
1167
1168 static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
1169                                             struct sk_buff_head *queue)
1170 {
1171         struct sk_buff *skb;
1172         struct ieee80211_tx_info *info;
1173         struct carl9170_tx_info *arinfo;
1174
1175         BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
1176
1177         spin_lock_bh(&queue->lock);
1178         skb = skb_peek(queue);
1179         if (unlikely(!skb))
1180                 goto err_unlock;
1181
1182         if (carl9170_alloc_dev_space(ar, skb))
1183                 goto err_unlock;
1184
1185         __skb_unlink(skb, queue);
1186         spin_unlock_bh(&queue->lock);
1187
1188         info = IEEE80211_SKB_CB(skb);
1189         arinfo = (void *) info->rate_driver_data;
1190
1191         arinfo->timeout = jiffies;
1192
1193         /*
1194          * increase ref count to "2".
1195          * Ref counting is the easiest way to solve the race between
1196          * the urb's completion routine (carl9170_tx_callback) and the
1197          * wlan tx status functions (carl9170_tx_status/janitor).
1198          */
1199         carl9170_tx_get_skb(skb);
1200
1201         return skb;
1202
1203 err_unlock:
1204         spin_unlock_bh(&queue->lock);
1205         return NULL;
1206 }
1207
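/*
 * Give up on a frame: fake a tx status report for its cookie so that
 * the regular completion path cleans everything up.
 */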
1208 void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
1209 {
1210         struct _carl9170_tx_superframe *super;
1211         uint8_t q = 0;
1212
1213         ar->tx_dropped++;
1214
1215         super = (void *)skb->data;
1216         SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
1217                 ar9170_qmap[carl9170_get_queue(ar, skb)]);
1218         __carl9170_tx_process_status(ar, super->s.cookie, q);
1219 }
1220
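/*
 * Main tx pump: for every hardware queue, reserve device memory for
 * the pending frames, move them over to the status queue and hand
 * them to the USB layer. Schedules the janitor if anything was sent.
 */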
1221 static void carl9170_tx(struct ar9170 *ar)
1222 {
1223         struct sk_buff *skb;
1224         unsigned int i, q;
1225         bool schedule_garbagecollector = false;
1226
1227         ar->tx_schedule = false;
1228
1229         if (unlikely(!IS_STARTED(ar)))
1230                 return;
1231
1232         carl9170_usb_handle_tx_err(ar);
1233
1234         for (i = 0; i < ar->hw->queues; i++) {
1235                 while (!skb_queue_empty(&ar->tx_pending[i])) {
1236                         skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
1237                         if (unlikely(!skb))
1238                                 break;
1239
1240                         atomic_inc(&ar->tx_total_pending);
1241
1242                         q = __carl9170_get_queue(ar, i);
1243                         /*
1244                          * NB: tx_status[i] vs. tx_status[q],
1245                          * TODO: Move into pick_skb or alloc_dev_space.
1246                          */
1247                         skb_queue_tail(&ar->tx_status[q], skb);
1248
1249                         carl9170_usb_tx(ar, skb);
1250                         schedule_garbagecollector = true;
1251                 }
1252         }
1253
1254         if (!schedule_garbagecollector)
1255                 return;
1256
1257         ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
1258                 msecs_to_jiffies(CARL9170_TX_TIMEOUT));
1259 }
1260
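/*
 * Queue an MPDU for aggregation: insert it into the TID's queue in
 * sequence order (it has to fall within the block-ack window) and
 * report whether the aggregation scheduler should run now.
 */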
1261 static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
1262         struct ieee80211_sta *sta, struct sk_buff *skb)
1263 {
1264         struct _carl9170_tx_superframe *super = (void *) skb->data;
1265         struct carl9170_sta_info *sta_info;
1266         struct carl9170_sta_tid *agg;
1267         struct sk_buff *iter;
1268         unsigned int max;
1269         u16 tid, seq, qseq, off;
1270         bool run = false;
1271
1272         tid = carl9170_get_tid(skb);
1273         seq = carl9170_get_seq(skb);
1274         sta_info = (void *) sta->drv_priv;
1275
1276         rcu_read_lock();
1277         agg = rcu_dereference(sta_info->agg[tid]);
1278         max = sta_info->ampdu_max_len;
1279
1280         if (!agg)
1281                 goto err_unlock_rcu;
1282
1283         spin_lock_bh(&agg->lock);
1284         if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))
1285                 goto err_unlock;
1286
1287         /* check if sequence is within the BA window */
1288         if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))
1289                 goto err_unlock;
1290
1291         if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))
1292                 goto err_unlock;
1293
1294         off = SEQ_DIFF(seq, agg->bsn);
1295         if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))
1296                 goto err_unlock;
1297
1298         if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
1299                 __skb_queue_tail(&agg->queue, skb);
1300                 agg->hsn = seq;
1301                 goto queued;
1302         }
1303
1304         skb_queue_reverse_walk(&agg->queue, iter) {
1305                 qseq = carl9170_get_seq(iter);
1306
1307                 if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
1308                         __skb_queue_after(&agg->queue, iter, skb);
1309                         goto queued;
1310                 }
1311         }
1312
1313         __skb_queue_head(&agg->queue, skb);
1314 queued:
1315
1316         if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
1317                 if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
1318                         agg->state = CARL9170_TID_STATE_XMIT;
1319                         run = true;
1320                 }
1321         }
1322
1323         spin_unlock_bh(&agg->lock);
1324         rcu_read_unlock();
1325
1326         return run;
1327
1328 err_unlock:
1329         spin_unlock_bh(&agg->lock);
1330
1331 err_unlock_rcu:
1332         rcu_read_unlock();
1333         super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
1334         carl9170_tx_status(ar, skb, false);
1335         ar->tx_dropped++;
1336         return false;
1337 }
1338
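/* mac80211 tx entry point */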
1339 int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1340 {
1341         struct ar9170 *ar = hw->priv;
1342         struct ieee80211_tx_info *info;
1343         struct ieee80211_sta *sta;
1344         bool run;
1345
1346         if (unlikely(!IS_STARTED(ar)))
1347                 goto err_free;
1348
1349         info = IEEE80211_SKB_CB(skb);
1350         sta = info->control.sta;
1351
1352         if (unlikely(carl9170_tx_prepare(ar, skb)))
1353                 goto err_free;
1354
1355         carl9170_tx_accounting(ar, skb);
1356         /*
1357          * From now on, one has to use carl9170_tx_status to free
1358          * all resources which are associated with the frame.
1359          */
1360
1361         if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1362                 run = carl9170_tx_ampdu_queue(ar, sta, skb);
1363                 if (run)
1364                         carl9170_tx_ampdu(ar);
1365
1366         } else {
1367                 unsigned int queue = skb_get_queue_mapping(skb);
1368
1369                 skb_queue_tail(&ar->tx_pending[queue], skb);
1370         }
1371
1372         carl9170_tx(ar);
1373         return NETDEV_TX_OK;
1374
1375 err_free:
1376         ar->tx_dropped++;
1377         dev_kfree_skb_any(skb);
1378         return NETDEV_TX_OK;
1379 }
1380
1381 void carl9170_tx_scheduler(struct ar9170 *ar)
1382 {
1383
1384         if (ar->tx_ampdu_schedule)
1385                 carl9170_tx_ampdu(ar);
1386
1387         if (ar->tx_schedule)
1388                 carl9170_tx(ar);
1389 }