/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)            /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16
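/*
 * A quick sanity check on the symbol-time macros above (illustrative):
 * for 10 OFDM symbols, SYMBOL_TIME(10) = 40 us with the full 800 ns GI,
 * while SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us with the
 * 400 ns short GI (3.6 us per symbol; the +4 rounds up when ns * 18 is
 * not a multiple of 5).
 */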
static u32 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
	{    52,  108 },	/*  8: BPSK */
	{   104,  216 },	/*  9: QPSK 1/2 */
	{   156,  324 },	/* 10: QPSK 3/4 */
	{   208,  432 },	/* 11: 16-QAM 1/2 */
	{   312,  648 },	/* 12: 16-QAM 3/4 */
	{   416,  864 },	/* 13: 64-QAM 2/3 */
	{   468,  972 },	/* 14: 64-QAM 3/4 */
	{   520, 1080 },	/* 15: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)	((_rate) & 0x80)
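/*
 * NB: rows 8-15 above are the two-stream MCS indices; each entry is
 * exactly double its single-stream counterpart in rows 0-7, which is
 * why HT_RC_2_MCS() can index this table directly in both cases.
 */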
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 * NB: must be called with txq lock held
 */

static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	list_splice_tail_init(head, &txq->axq_q);
	txq->axq_depth++;
	txq->axq_totalqueued++;
	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);

	DPRINTF(sc, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (txq->axq_link == NULL) {
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DPRINTF(sc, ATH_DBG_XMIT,
			"TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
			txq->axq_qnum, txq->axq_link,
			ito64(bf->bf_daddr), bf->bf_desc);
	}
	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);
}
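/*
 * NB: axq_link caches the address of the ds_link field of the last
 * queued descriptor, so the next call can chain a fresh buffer onto a
 * running DMA queue by patching that field rather than rewriting TXDP.
 */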
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_xmit_status *tx_status)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
	int hdrlen, padsize;

	DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
	    tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
		kfree(tx_info_priv);
		tx_info->rate_driver_data[0] = NULL;
	}

	if (tx_status->flags & ATH_TX_BAR) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		tx_status->flags &= ~ATH_TX_BAR;
	}

	if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	tx_info->status.rates[0].count = tx_status->retries + 1;

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	padsize = hdrlen & 3;
	if (padsize && hdrlen >= 24) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, hdrlen);
		skb_pull(skb, padsize);
	}

	ieee80211_tx_status(hw, skb);
}
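/*
 * Padding example (illustrative): a QoS data header is 26 bytes, so
 * padsize = 26 & 3 = 2; the transmit path inserted two pad bytes after
 * the header to 4-byte align the payload, and they are stripped above
 * before the skb is handed back to mac80211.
 */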
/* Check if it's okay to send out aggregates */

static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *tid;
	tid = ATH_AN_2_TID(an, tidno);

	if (tid->state & AGGR_ADDBA_COMPLETE ||
	    tid->state & AGGR_ADDBA_PROGRESS)
		return 1;
	else
		return 0;
}
static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
				 struct ath_beacon_config *conf)
{
	struct ieee80211_hw *hw = sc->hw;

	/* fill in beacon config data */

	conf->beacon_interval = hw->conf.beacon_int;
	conf->listen_interval = 100;
	conf->dtim_count = 1;
	conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
}
/* Calculate the Atheros packet type from the IEEE 802.11 packet header */

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}
static bool is_pae(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (ieee80211_is_nullfunc(fc) ||
		    /* Port Access Entity (IEEE 802.1X) */
		    (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
			return true;
		}
	}

	return false;
}
static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}
/* Called only when tx aggregation is enabled and HT is supported */

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* Get tidno */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/* Get seqno */

	/* For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 *
	 * If fragmentation is on, the sequence number is
	 * not overridden, since it has been
	 * incremented by the fragmentation routine.
	 *
	 * FIXME: check if the fragmentation threshold exceeds
	 * IEEE80211 max.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
				    IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
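/*
 * NB: INCR() advances tid->seq_next modulo IEEE80211_SEQ_MAX, keeping
 * the driver-assigned sequence numbers inside the 12-bit 802.11
 * sequence space that the block-ack window logic below depends on.
 */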
static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
			  struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		flags |= ATH9K_TXDESC_RTSENA;

	return flags;
}
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}
/* To complete a chain of buffers associated with a frame */

static void ath_tx_complete_buf(struct ath_softc *sc,
				struct ath_buf *bf,
				struct list_head *bf_q,
				int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ath_xmit_status tx_status;
	unsigned long flags;

	/*
	 * Set retry information.
	 * NB: Don't use the information in the descriptor, because the frame
	 * could be software retried.
	 */
	tx_status.retries = bf->bf_retries;
	tx_status.flags = 0;

	if (sendbar)
		tx_status.flags = ATH_TX_BAR;

	if (!txok) {
		tx_status.flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_status.flags |= ATH_TX_XRETRY;
	}

	/* Unmap this frame */
	pci_unmap_single(sc->pdev,
			 bf->bf_dmacontext,
			 skb->len,
			 PCI_DMA_TODEVICE);
	/* complete this frame */
	ath_tx_complete(sc, skb, &tx_status);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
/*
 * queue up a dest/ac pair for tx scheduling
 * NB: must be called with txq lock held
 */

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	/*
	 * if tid is paused, hold off
	 */
	if (tid->paused)
		return;

	/*
	 * add tid to ac at most once
	 */
	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	/*
	 * add node ac to txq at most once
	 */
	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
/* pause a tid */

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);

	tid->paused++;

	spin_unlock_bh(&txq->axq_lock);
}
/* resume a tid and schedule aggregates */

void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	/*
	 * Add this TID to scheduler and try to send out aggregates
	 */
	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}
/* Compute the number of bad frames */

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      int txok)
{
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ATH_DS_BA_SEQ(ds);
		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;

	skb = (struct sk_buff *)bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}
/* Update block ack window */

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
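/*
 * Sliding-window example (illustrative): with seq_start = 100 and a
 * completed seqno = 103, ATH_BA_INDEX() yields 3 and slot
 * (baw_head + 3) is cleared; the while loop above then slides
 * seq_start and baw_head forward in lock step past any leading slots
 * that have already completed.
 */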
/*
 * ath_pkt_duration - compute packet duration (NB: not NAV)
 *
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use the 3.6 us (short GI) rather than 4 us symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	struct ath_rate_table *rate_table = sc->cur_rate_table;
	u32 nbits, nsymbits, duration, nsymbols;
	u8 rc;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
	rc = rate_table->info[rix].ratecode;

	/* for legacy rates, use old function to compute packet duration */
	if (!IS_HT_RATE(rc))
		return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
					      rix, shortPreamble);

	/* find number of symbols: PLCP + data */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	streams = HT_RC_2_STREAMS(rc);
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
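/*
 * Worked example (illustrative numbers): a 1500-byte MPDU at MCS 15 on
 * a 40 MHz channel with full GI gives nbits = 1500 * 8 + 22 = 12022,
 * nsymbits = 1080, nsymbols = 12, so duration = SYMBOL_TIME(12) = 48 us
 * plus 40 us of training/signal fields (two streams) = 88 us.
 */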
/* Rate module function to set rate related fields in tx descriptor */

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_rate_table *rt;
	struct ath_desc *ds = bf->bf_desc;
	struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ieee80211_hdr *hdr;
	struct ieee80211_hw *hw = sc->hw;
	int i, flags, rtsctsena = 0, enable_g_protection = 0;
	u32 ctsduration = 0;
	u8 rix = 0, cix, ctsrate = 0;
	__le16 fc;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = (struct sk_buff *)bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	if (ieee80211_has_morefrags(fc) ||
	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
		rates[1].count = rates[2].count = rates[3].count = 0;
		rates[1].idx = rates[2].idx = rates[3].idx = 0;
		rates[0].count = ATH_TXMAXTRY;
	}

	/* get the cix for the lowest valid rix */
	rt = sc->cur_rate_table;
	for (i = 3; i >= 0; i--) {
		if (rates[i].count && (rates[i].idx >= 0)) {
			rix = rates[i].idx;
			break;
		}
	}

	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
	cix = rt->info[rix].ctrl_rate;

	/* All protection frames are transmitted at 2 Mb/s for 802.11g,
	 * otherwise we transmit them at 1 Mb/s */
	if (hw->conf.channel->band == IEEE80211_BAND_2GHZ &&
	    !conf_is_ht(&hw->conf))
		enable_g_protection = 1;

	/*
	 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
	 * just CTS. Note that this is only done for OFDM/HT unicast frames.
	 */
	if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK)
	    && (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
		WLAN_RC_PHY_HT(rt->info[rix].phy))) {
		if (sc->sc_protmode == PROT_M_RTSCTS)
			flags = ATH9K_TXDESC_RTSENA;
		else if (sc->sc_protmode == PROT_M_CTSONLY)
			flags = ATH9K_TXDESC_CTSENA;

		cix = rt->info[enable_g_protection].ctrl_rate;
		rtsctsena = 1;
	}

	/* For 11n, the default behavior is to enable RTS for hw retried frames.
	 * We enable the global flag here and let rate series flags determine
	 * which rates will actually use RTS.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
		/* 802.11g protection not needed, use our default behavior */
		if (!rtsctsena)
			flags = ATH9K_TXDESC_RTSENA;
	}

	/* Set protection if aggregate protection on */
	if (sc->sc_config.ath_aggr_prot &&
	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
		flags = ATH9K_TXDESC_RTSENA;
		cix = rt->info[enable_g_protection].ctrl_rate;
		rtsctsena = 1;
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
		flags &= ~(ATH9K_TXDESC_RTSENA);

	/*
	 * CTS transmit rate is derived from the transmit rate by looking in the
	 * h/w rate table. We must also factor in whether or not a short
	 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled.
	 */
	ctsrate = rt->info[cix].ratecode |
		(bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;

		series[i].Rate = rt->info[rix].ratecode |
			(bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0);

		series[i].Tries = rates[i].count;

		series[i].RateFlags = (
			(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
				ATH9K_RATESERIES_RTS_CTS : 0) |
			((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
				ATH9K_RATESERIES_2040 : 0) |
			((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
				ATH9K_RATESERIES_HALFGI : 0);

		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
			 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
			 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
			 bf_isshpreamble(bf));

		series[i].ChSel = sc->sc_tx_chainmask;

		if (rtsctsena)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
	}

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
				     ctsrate, ctsduration,
				     series, 4, flags);

	if (sc->sc_config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(ah, ds, 8192);
}
/*
 * Function to send a normal HT (non-AMPDU) frame
 * NB: must be called with txq lock held
 */
static int ath_tx_send_normal(struct ath_softc *sc,
			      struct ath_txq *txq,
			      struct ath_atx_tid *tid,
			      struct list_head *bf_head)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);

	return 0;
}
/* flush tid's software queue and send frames as non-AMPDUs */

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		ASSERT(!bf_isretried(bf));
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_send_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}
/* Completion routine of an aggregate */

static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
				      struct ath_txq *txq,
				      struct ath_buf *bf,
				      struct list_head *bf_q,
				      int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	struct ath_buf *bf_next, *bf_lastq = NULL;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
	}

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		if (txok) {
			if (ATH_DS_TX_BA(ds)) {
				/*
				 * extract starting sequence and
				 * block-ack bitmap
				 */
				seq_st = ATH_DS_BA_SEQ(ds);
				memcpy(ba,
				       ATH_DS_BA_BITMAP(ds),
				       WME_BA_BMP_SIZE >> 3);
			} else {
				memset(ba, 0, WME_BA_BMP_SIZE >> 3);

				/*
				 * AR5416 can become deaf/mute when a BA
				 * issue happens. The chip needs to be reset.
				 * But AP code may have synchronization issues
				 * when performing an internal reset in this
				 * routine. Only enable reset in STA mode
				 * for now.
				 */
				if (sc->sc_ah->ah_opmode ==
					    NL80211_IFTYPE_STATION)
					needreset = 1;
			}
		} else {
			memset(ba, 0, WME_BA_BMP_SIZE >> 3);
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
		} else if (!isaggr && txok) {
			/* transmit completion */
		} else {

			if (!(tid->state & AGGR_CLEANUP) &&
			    ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}
		/*
		 * Remove ath_buf's of this sub-frame from aggregate queue.
		 */
		if (bf_next == NULL) { /* last subframe in the aggregate */
			ASSERT(bf->bf_lastfrm == bf_last);

			/*
			 * The last descriptor of the last sub frame could be
			 * a holding descriptor for h/w. If that's the case,
			 * bf->bf_lastfrm won't be in the bf_q.
			 * Make sure we handle bf_q properly here.
			 */

			if (!list_empty(bf_q)) {
				bf_lastq = list_entry(bf_q->prev,
					struct ath_buf, list);
				list_cut_position(&bf_head,
					bf_q, &bf_lastq->list);
			} else {
				/*
				 * XXX: if the last subframe only has one
				 * descriptor, which is also being used as
				 * the holding descriptor, then the ath_buf
				 * is not in the bf_q at all.
				 */
				INIT_LIST_HEAD(&bf_head);
			}
		} else {
			ASSERT(!list_empty(bf_q));
			list_cut_position(&bf_head,
				bf_q, &bf->bf_lastfrm->list);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* complete this sub-frame */
			ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
		} else {
			/*
			 * retry the un-acked ones
			 */
			/*
			 * XXX: if the last descriptor is a holding descriptor,
			 * in order to requeue the frame to the software queue,
			 * we need to allocate a new descriptor and
			 * copy the content of the holding descriptor to it.
			 */
			if (bf->bf_next == NULL &&
			    bf_last->bf_status & ATH_BUFSTATUS_STALE) {
				struct ath_buf *tbf;

				/* allocate new descriptor */
				spin_lock_bh(&sc->tx.txbuflock);
				ASSERT(!list_empty((&sc->tx.txbuf)));
				tbf = list_first_entry(&sc->tx.txbuf,
						struct ath_buf, list);
				list_del(&tbf->list);
				spin_unlock_bh(&sc->tx.txbuflock);

				ATH_TXBUF_RESET(tbf);

				/* copy descriptor content */
				tbf->bf_mpdu = bf_last->bf_mpdu;
				tbf->bf_buf_addr = bf_last->bf_buf_addr;
				*(tbf->bf_desc) = *(bf_last->bf_desc);

				/* link it to the frame */
				if (bf_lastq) {
					bf_lastq->bf_desc->ds_link =
						tbf->bf_daddr;
					bf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						bf->bf_lastfrm->bf_desc);
				} else {
					tbf->bf_state = bf_last->bf_state;
					tbf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						tbf->bf_lastfrm->bf_desc);

					/* copy the DMA context */
					tbf->bf_dmacontext =
						bf_last->bf_dmacontext;
				}
				list_add_tail(&tbf->list, &bf_head);
			} else {
				/*
				 * Clear descriptor status words for
				 * software retry
				 */
				ath9k_hw_cleartxdesc(sc->sc_ah,
					bf->bf_lastfrm->bf_desc);
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		/* check to see if we're done with cleaning the h/w queue */
		spin_lock_bh(&txq->axq_lock);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->addba_exchangeattempts = 0;
			spin_unlock_bh(&txq->axq_lock);

			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		} else
			spin_unlock_bh(&txq->axq_lock);

		return;
	}

	/*
	 * prepend un-acked frames to the beginning of the pending frame queue
	 */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		/* Note: we _prepend_, we do _not_ append to
		 * the end of the queue! */
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (needreset)
		ath_reset(sc, false);

	return;
}
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

	tx_info_priv->update_rc = false;
	if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
		if (bf_isdata(bf)) {
			memcpy(&tx_info_priv->tx, &ds->ds_txstat,
			       sizeof(tx_info_priv->tx));
			tx_info_priv->n_frames = bf->bf_nframes;
			tx_info_priv->n_bad_frames = nbad;
			tx_info_priv->update_rc = true;
		}
	}
}
/* Process completed xmit descriptors from the specified queue */

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	int txok, nbad = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw reloads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Lets work with the next buffer now */
				bf = list_entry(bf_held->list.next,
					struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;	/* NB: last descriptor */

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf_isaggr(bf))
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			list_del(&bf_held->list);
			spin_lock_bh(&sc->tx.txbuflock);
			list_add_tail(&bf_held->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		} else {
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}

		ath_tx_rc_status(bf, ds, nbad);

		/*
		 * Complete this transmit unit
		 */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}
		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DBG_XMIT, "tx queue [%u] %x, link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(ah, txq->axq_qnum),
		txq->axq_link);
}
/* Drain only the data queues */

static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i, status, npend = 0;

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ath_tx_stopdma(sc, &sc->tx.txq[i]);
				/* The TxDMA may not really be stopped.
				 * Double check the hal tx pending count */
				npend += ath9k_hw_numtxpending(ah,
					sc->tx.txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		/* TxDMA not stopped, reset the hal */
		DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah,
				    sc->sc_ah->ah_curchan,
				    sc->tx_chan_width,
				    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing, true, &status)) {

			DPRINTF(sc, ATH_DBG_FATAL,
				"Unable to reset hardware; hal status %u\n",
				status);
		}
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}
/* Add a sub-frame to block ack window */

static void ath_tx_addto_baw(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	ASSERT(tid->tx_buf[cindex] == NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
/*
 * Function to send an A-MPDU
 * NB: must be called with txq lock held
 */
static int ath_tx_send_ampdu(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct list_head *bf_head,
			     struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_splice_tail_init(bf_head, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return 0;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);

	return 0;
}
/*
 * looks up the rate
 * returns aggr limit based on lowest of the rates
 */
static u32 ath_lookup_rate(struct ath_softc *sc,
			   struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct ath_rate_table *rate_table = sc->cur_rate_table;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_tx_info_priv *tx_info_priv;
	u32 max_4ms_framelen, frame_length;
	u16 aggr_limit, legacy = 0, maxampdu;
	int i;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	tx_info_priv =
		(struct ath_tx_info_priv *)tx_info->rate_driver_data[0];

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
				legacy = 1;
				break;
			}

			frame_length =
				rate_table->info[rates[i].idx].max_4ms_framelen;
			max_4ms_framelen = min(max_4ms_framelen, frame_length);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen,
		(u32)ATH_AMPDU_LIMIT_DEFAULT);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	maxampdu = tid->an->maxampdu;
	if (maxampdu)
		aggr_limit = min(aggr_limit, maxampdu);

	return aggr_limit;
}
/*
 * returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 * the caller should make sure that the rate is an HT rate.
 */
static int ath_compute_num_delims(struct ath_softc *sc,
				  struct ath_atx_tid *tid,
				  struct ath_buf *bf,
				  u16 frmlen)
{
	struct ath_rate_table *rt = sc->cur_rate_table;
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols, mpdudensity;
	u16 minlen;
	u8 rc, flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 */
	mpdudensity = tid->an->mpdudensity;

	/*
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	rc = rt->info[rix].ratecode;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Is frame shorter than required minimum length? */
	if (frmlen < minlen) {
		/* Get the minimum number of delimiters required. */
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
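/*
 * Worked example (illustrative, assuming ATH_AGGR_DELIM_SZ is the
 * 4-byte MPDU delimiter): an mpdudensity of 8 us at MCS 7, 40 MHz,
 * full GI gives nsymbols = 8 >> 2 = 2 and nsymbits = 540, so
 * minlen = 2 * 540 / 8 = 135 bytes; a 60-byte subframe then needs
 * mindelim = (135 - 60) / 4 = 18 delimiters.
 */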
/*
 * For aggregation from software buffer queue.
 * NB: must be called with txq lock held
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     struct ath_buf **bf_last,
					     struct aggr_rifs_param *param,
					     int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
	struct list_head bf_head;
	int rl = 0, nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	int prev_al = 0;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(list_empty(&tid->buf_q));

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/*
		 * do not step over block-ack window
		 */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes && (aggr_limit <
			(al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * do not exceed subframe limit
		 */
		if ((nframes + *prev_frames) >=
		    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		bf->bf_lastfrm->bf_desc->ds_link = 0;

		/*
		 * this packet is part of an aggregate
		 * - remove all descriptors belonging to this frame from
		 *   software queue
		 * - add it to block ack window
		 * - set up descriptors for aggregation
		 */
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_addto_baw(sc, tid, bf);

		list_for_each_entry(tbf, &bf_head, list) {
			ath9k_hw_set11n_aggr_middle(sc->sc_ah,
				tbf->bf_desc, ndelim);
		}

		/*
		 * link buffers of this frame to the aggregate
		 */
		list_splice_tail_init(&bf_head, bf_q);

		if (bf_prev) {
			bf_prev->bf_next = bf;
			bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
		}
		bf_prev = bf;

#ifdef AGGR_NOSHORT
		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
#endif
	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;
	*bf_last = bf_prev;

	return status;
#undef PADBYTES
}
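/*
 * NB on the length accounting above (illustrative): al accumulates
 * al_delta (frame length plus delimiter overhead) for every subframe,
 * while bpad carries the previous subframe's 4-byte alignment padding
 * plus ndelim * 4 bytes of density delimiters into the next iteration,
 * so aggr_limit is checked against the true on-air aggregate size.
 */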
/*
 * process pending frames possibly doing a-mpdu aggregation
 * NB: must be called with txq lock held
 */
static void ath_tx_sched_aggr(struct ath_softc *sc,
	struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;
	struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
	int prev_frames = 0;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
					  &prev_frames);

		/*
		 * no frames picked up to be aggregated; block-ack
		 * window is not open
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf_last = list_entry(bf_q.prev, struct ath_buf, list);
		bf->bf_lastbf = bf_last;

		/*
		 * if only one frame, send as non-aggregate
		 */
		if (bf->bf_nframes == 1) {
			ASSERT(bf->bf_lastfrm == bf_last);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			/*
			 * clear aggr bits for every descriptor
			 * XXX TODO: is there a way to optimize it?
			 */
			list_for_each_entry(tbf, &bf_q, list) {
				ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
			}

			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/*
		 * setup first desc with rate and aggr info
		 */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/*
		 * anchor last frame of aggregate correctly
		 */
		ASSERT(bf_lastaggr);
		ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
		tbf = bf_lastaggr;
		ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

		/* XXX: We don't enter into this loop, consider removing this */
		while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
			tbf = list_entry(tbf->list.next, struct ath_buf, list);
			ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
		}

		txq->axq_aggr_depth++;

		/*
		 * Normal aggregate, queue to hardware
		 */
		ath_tx_txqaddbuf(sc, txq, &bf_q);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
/* Called with txq lock held */

static void ath_tid_drain(struct ath_softc *sc,
			  struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

		/* update baw for software retried frame */
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/*
		 * do not indicate packets while holding txq spinlock.
		 * unlock is intentional here
		 */
		spin_unlock(&txq->axq_lock);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

		spin_lock(&txq->axq_lock);
	}

	/*
	 * TODO: For frame(s) that are in the retry state, we will reuse the
	 * sequence number(s) without setting the retry bit. The
	 * alternative is to give up on these and BAR the receiver's window
	 * forward.
	 */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}
/*
 * Drain all pending buffers
 * NB: must be called with txq lock held
 */
static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}
static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_tx_info_priv *tx_info_priv;
	int hdrlen;
	__le16 fc;

	tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
	if (unlikely(!tx_info_priv))
		return -ENOMEM;
	tx_info->rate_driver_data[0] = tx_info_priv;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	/* Frame type */

	bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);

	ieee80211_is_data(fc) ?
		(bf->bf_state.bf_type |= BUF_DATA) :
		(bf->bf_state.bf_type &= ~BUF_DATA);
	ieee80211_is_back_req(fc) ?
		(bf->bf_state.bf_type |= BUF_BAR) :
		(bf->bf_state.bf_type &= ~BUF_BAR);
	ieee80211_is_pspoll(fc) ?
		(bf->bf_state.bf_type |= BUF_PSPOLL) :
		(bf->bf_state.bf_type &= ~BUF_PSPOLL);
	(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
		(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
		(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
	(sc->hw->conf.ht.enabled && !is_pae(skb) &&
	 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
		(bf->bf_state.bf_type |= BUF_HT) :
		(bf->bf_state.bf_type &= ~BUF_HT);

	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

	/* Crypto */

	bf->bf_keytype = get_hw_crypto_keytype(skb);

	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	/* Assign seqno, tidno */

	if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	/* DMA setup */

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		DPRINTF(sc, ATH_DBG_CONFIG,
			"pci_dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;
	return 0;
}
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hal *ah = sc->sc_ah;
	int frm_type;

	frm_type = get_hw_packet_type(skb);

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	/* setup descriptor */

	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	/* Formulate first tx descriptor with tx controls */

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds);	/* first descriptor */

	bf->bf_lastfrm = bf;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (ath_aggr_query(sc, an, bf->bf_tidno)) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txctl->txq,
					   tid, &bf_head);
		}
	} else {
		bf->bf_lastbf = bf;
		bf->bf_nframes = 1;
		ath_buf_set_rate(sc, bf);
		ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}
/* Upon failure caller should free skb */
int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_buf *bf;
	int r;

	/* Check if a tx buffer is available */

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(sc, bf, skb, txctl);
	if (unlikely(r)) {
		struct ath_txq *txq = txctl->txq;

		DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed; we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (ath_txq_depth(sc, txq->axq_qnum) > 1) {
			ieee80211_stop_queue(sc->hw,
				skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		spin_lock_bh(&sc->tx.txbuflock);
		list_add_tail(&bf->list, &sc->tx.txbuf);
		spin_unlock_bh(&sc->tx.txbuflock);

		return r;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
/* Initialize TX queue and h/w */

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	int error = 0;

	do {
		spin_lock_init(&sc->tx.txbuflock);

		/* Setup tx descriptors */
		error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
					  "tx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"Failed to allocate tx descriptors: %d\n",
				error);
			break;
		}

		/* XXX allocate beacon state together with vap */
		error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
					  "beacon", ATH_BCBUF, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"Failed to allocate beacon descriptors: %d\n",
				error);
			break;
		}

	} while (0);

	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

/* Reclaim all tx queue resources */

int ath_tx_cleanup(struct ath_softc *sc)
{
	/* cleanup beacon descriptors */
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	/* cleanup tx descriptors */
	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	return 0;
}
/* Setup a h/w transmit queue */

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (qtype == ATH9K_TX_QUEUE_UAPSD)
		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	else
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
			TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"qnum %u out of range, max %u!\n",
			qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		txq->axq_linkbuf = NULL;
		sc->tx.txqsetup |= 1<<qnum;
	}
	return &sc->tx.txq[qnum];
}
/* Reclaim resources for a setup queue */

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Setup a hardware data transmit queue for the specified
 * access category. The hal may not support all requested
 * queues in which case it will return a reference to a
 * previously setup queue. We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"HAL AC %u out of range, max %zu!\n",
			haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"HAL AC %u out of range, max %zu!\n",
				haltype, ARRAY_SIZE(sc->tx.hwq_map));
			return -1;
		}
		qnum = sc->tx.hwq_map[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->beacon.beaconq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->beacon.cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}
	return qnum;
}
/* Get a transmit queue, if available */

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	int qnum;

	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
	txq = &sc->tx.txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	/* Try to avoid running out of descriptors */
	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"TX queue: %d is full, depth: %d\n",
			qnum, txq->axq_depth);
		ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}
/* Update parameters for a transmit queue */

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	ASSERT(sc->tx.txq[qnum].axq_qnum == qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
	}

	return error;
}
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;
	struct ath_beacon_config conf;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
	qi.tqi_readyTime =
		(conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
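/*
 * E.g. with a beacon interval of 100 and cabqReadytime clamped to 10,
 * tqi_readyTime becomes 100 * 10 / 100 = 10, i.e. the CAB queue stays
 * ready for 10% of each beacon period (same units as beacon_interval).
 */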
/* Deferred processing of transmit interrupt */

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	/*
	 * Process each active queue.
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}
void ath_tx_draintxq(struct ath_softc *sc,
		     struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_tasklet
	 */
	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			list_del(&bf->list);
			spin_unlock_bh(&txq->axq_lock);

			spin_lock_bh(&sc->tx.txbuflock);
			list_add_tail(&bf->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
			continue;
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_desc->ds_txstat.ts_flags =
				ATH9K_TX_SW_ABORTED;

		/* remove ath_buf's of the same mpdu from txq */
		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
/* Drain the transmit queues and reclaim resources */

void ath_draintxq(struct ath_softc *sc, bool retry_tx)
{
	/* stop beacon queue. The beacon will be freed when
	 * we go to INIT state */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
		DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n",
			ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq));
	}

	ath_drain_txdataq(sc, retry_tx);
}

u32 ath_txq_depth(struct ath_softc *sc, int qnum)
{
	return sc->tx.txq[qnum].axq_depth;
}

u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
{
	return sc->tx.txq[qnum].axq_aggr_depth;
}
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
		    (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
			txtid->addba_exchangeattempts++;
			return true;
		}
	}

	return false;
}
/* Start TX aggregation */

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->state |= AGGR_ADDBA_PROGRESS;
		ath_tx_pause_tid(sc, txtid);
	}

	return 0;
}

/* Stop tx aggregation */

int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	ath_tx_aggr_teardown(sc, an, tid);

	return 0;
}
/* Resume tx aggregation */

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}
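/*
 * E.g. a peer advertising ampdu_factor = 2 yields a block-ack window
 * of IEEE80211_MIN_AMPDU_BUF << 2 = 8 << 2 = 32 subframes.
 */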
/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 * - Pause the TID and mark cleanup in progress
 * - Discard all retry frames from the s/w queue.
 */

void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
{
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->addba_exchangeattempts = 0;
		return;
	}

	/* TID must be paused first */
	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: it's based on the assumption that a
			 * software-retried frame always stays
			 * at the head of the software queue.
			 */
			break;
		}
		list_cut_position(&bf_head,
			&txtid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	if (txtid->baw_head != txtid->baw_tail) {
		spin_unlock_bh(&txq->axq_lock);
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		txtid->addba_exchangeattempts = 0;
		spin_unlock_bh(&txq->axq_lock);
		ath_tx_flush_tid(sc, txtid);
	}
}
/*
 * Tx scheduling logic
 * NB: must be called with txq lock held
 */

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	/* nothing to schedule */
	if (list_empty(&txq->axq_acq))
		return;
	/*
	 * get the first node/ac pair on the queue
	 */
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	/*
	 * process a single tid per destination
	 */
	do {
		/* nothing to schedule */
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)    /* check next tid to keep h/w busy */
			continue;

		if ((txq->axq_depth % 2) == 0)
			ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		/* only schedule one TID at a time */
		break;
	} while (!list_empty(&ac->tid_q));

	/*
	 * schedule AC if more TIDs need processing
	 */
	if (!list_empty(&ac->tid_q)) {
		/*
		 * add dest ac to txq if not already added
		 */
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}
/* Initialize per-node transmit state */

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	/*
	 * Init per tid tx state
	 */
	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
		tid->paused    = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);

		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];

		/* ADDBA state */
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
		tid->addba_exchangeattempts = 0;
	}

	/*
	 * Init per ac tx state
	 */
	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
				ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
				ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
				ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
				ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}
/* Clean up the pending buffers for the node. */

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];

			spin_lock(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->addba_exchangeattempts = 0;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock(&txq->axq_lock);
		}
	}
}
void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
{
	int hdrlen, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		if (skb_headroom(skb) < padsize) {
			DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, hdrlen);
	}

	txctl.txq = sc->beacon.cabq;

	DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(sc, skb, &txctl) != 0) {
		DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}