/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)            /* nsymbols * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* nsymbols * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)

#define OFDM_SIFS_TIME              16
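
/*
 * Editor's note (illustrative, not part of the original driver): the
 * half-GI macros implement the 3.6 us symbol time in integer math.
 * For example, SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us,
 * i.e. ten 3.6 us symbols rounded up to whole microseconds, while
 * NUM_SYMBOLS_PER_USEC_HALFGI(36) = (36 * 5 - 4) / 18 = 9 symbols.
 * The +4/-4 rounding terms make the two directions deliberately
 * conservative rather than exact inverses of each other.
 */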
static u32 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
	{    52,  108 },	/*  8: BPSK */
	{   104,  216 },	/*  9: QPSK 1/2 */
	{   156,  324 },	/* 10: QPSK 3/4 */
	{   208,  432 },	/* 11: 16-QAM 1/2 */
	{   312,  648 },	/* 12: 16-QAM 3/4 */
	{   416,  864 },	/* 13: 64-QAM 2/3 */
	{   468,  972 },	/* 14: 64-QAM 3/4 */
	{   520, 1080 },	/* 15: 64-QAM 5/6 */
};
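
/*
 * Editor's note (illustrative, not in the original source): with long
 * GI (4 us symbols) the PHY rate falls straight out of this table,
 * e.g. for MCS 7 in a 40 MHz channel:
 *
 *	u32 kbps = bits_per_symbol[7][1] / 4 * 1000;	// 540/4 = 135 Mbps
 *
 * and 540 / 3.6 = 150 Mbps with the short (half) GI.
 */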
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 * NB: must be called with txq lock held
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */
	bf = list_first_entry(head, struct ath_buf, list);

	list_splice_tail_init(head, &txq->axq_q);
	txq->axq_totalqueued++;
	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);

	DPRINTF(sc, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (txq->axq_link == NULL) {
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DPRINTF(sc, ATH_DBG_XMIT,
			"TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
			txq->axq_qnum, txq->axq_link,
			ito64(bf->bf_daddr), bf->bf_desc);
	}
	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);
}
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_xmit_status *tx_status)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
	int hdrlen, padsize;

	DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
	    tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
		kfree(tx_info_priv);
		tx_info->rate_driver_data[0] = NULL;
	}

	if (tx_status->flags & ATH_TX_BAR) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		tx_status->flags &= ~ATH_TX_BAR;
	}

	if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	tx_info->status.rates[0].count = tx_status->retries + 1;

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	padsize = hdrlen & 3;
	if (padsize && hdrlen >= 24) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, hdrlen);
		skb_pull(skb, padsize);
	}

	ieee80211_tx_status(hw, skb);
}
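
/*
 * Editor's note (illustrative): mac80211 hands down 802.11 headers whose
 * length need not be a multiple of 4 (e.g. a 26-byte QoS data header).
 * On transmit the driver inserts hdrlen & 3 pad bytes after the header
 * so the payload is 4-byte aligned; above, the same amount is stripped
 * again (memmove the header forward by padsize, then skb_pull) before
 * the skb is returned to the stack. For hdrlen = 26 that is 2 bytes.
 */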
/* Check if it's okay to send out aggregates */

static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *tid;
	tid = ATH_AN_2_TID(an, tidno);

	if (tid->state & AGGR_ADDBA_COMPLETE ||
	    tid->state & AGGR_ADDBA_PROGRESS)
		return 1;
	else
		return 0;
}
static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
				 struct ath_beacon_config *conf)
{
	struct ieee80211_hw *hw = sc->hw;

	/* fill in beacon config data */

	conf->beacon_interval = hw->conf.beacon_int;
	conf->listen_interval = 100;
	conf->dtim_count = 1;
	conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
}

/* Calculate Atheros packet type from IEEE80211 packet header */

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}
static bool is_pae(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (ieee80211_is_nullfunc(fc) ||
		    /* Port Access Entity (IEEE 802.1X) */
		    (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
			return true;
		}
	}

	return false;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}
/* Called only when tx aggregation is enabled and HT is supported */

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/* For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 *
	 * If fragmentation is on, the sequence number is
	 * not overridden, since it has been
	 * incremented by the fragmentation routine.
	 *
	 * FIXME: check if the fragmentation threshold exceeds
	 * the aggregation limit.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
				    IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
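
/*
 * Editor's note (illustrative): INCR() advances a counter modulo its
 * second argument, so tid->seq_next wraps at IEEE80211_SEQ_MAX (4096):
 * a frame assigned seqno 4095 is followed by seqno 0. Shifting by
 * IEEE80211_SEQ_SEQ_SHIFT (4) places the number in the sequence field
 * of seq_ctrl and leaves the 4 fragment bits zero.
 */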
static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
			  struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		flags |= ATH9K_TXDESC_RTSENA;

	return flags;
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}
/* To complete a chain of buffers associated with a frame */

static void ath_tx_complete_buf(struct ath_softc *sc,
				struct ath_buf *bf,
				struct list_head *bf_q,
				int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ath_xmit_status tx_status;
	unsigned long flags;

	/*
	 * Set retry information.
	 * NB: Don't use the information in the descriptor, because the frame
	 * could be software retried.
	 */
	tx_status.retries = bf->bf_retries;
	tx_status.flags = 0;

	if (sendbar)
		tx_status.flags = ATH_TX_BAR;

	if (!txok) {
		tx_status.flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_status.flags |= ATH_TX_XRETRY;
	}

	/* Unmap this frame */
	pci_unmap_single(sc->pdev,
			 bf->bf_dmacontext,
			 skb->len,
			 PCI_DMA_TODEVICE);
	/* complete this frame */
	ath_tx_complete(sc, skb, &tx_status);

	/*
	 * Return the list of ath_buf of this mpdu to the free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
/*
 * queue up a dest/ac pair for tx scheduling
 * NB: must be called with txq lock held
 */

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	/*
	 * if tid is paused, hold off
	 */
	if (tid->paused)
		return;

	/*
	 * add tid to ac at most once
	 */
	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	/*
	 * add node ac to txq at most once
	 */
	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);
	tid->paused++;
	spin_unlock_bh(&txq->axq_lock);
}

/* resume a tid and schedule aggregate */

void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	/*
	 * Add this TID to scheduler and try to send out aggregates
	 */
	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}
/* Compute the number of bad frames */

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      int txok)
{
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ATH_DS_BA_SEQ(ds);
		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;

	skb = (struct sk_buff *)bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

/* Update block ack window */

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
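
/*
 * Editor's note (illustrative): the BAW bookkeeping above is a sliding
 * window. With seq_start = 100, completing seqno 103 gives index 3 and
 * clears that slot; seq_start only advances when the head slot empties.
 * So if seqnos 100 and 102 complete but 101 is still pending, the while
 * loop slides the window to seq_start = 101 and stops there until 101
 * completes.
 */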
/*
 * ath_pkt_dur - compute packet duration (NB: not NAV)
 *
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - when set, use the 3.6 us half-GI symbol time instead of 4 us
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	struct ath_rate_table *rate_table = sc->cur_rate_table;
	u32 nbits, nsymbits, duration, nsymbols;
	u8 rc;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
	rc = rate_table->info[rix].ratecode;

	/* for legacy rates, use old function to compute packet duration */
	if (!IS_HT_RATE(rc))
		return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
					      rix, shortPreamble);

	/* find number of symbols: PLCP + data */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	streams = HT_RC_2_STREAMS(rc);
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
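
/*
 * Editor's note (worked example, not in the original source): a 1500-byte
 * MPDU at MCS 7, 20 MHz, long GI gives nbits = 1500 * 8 + 22 = 12022 and
 * nsymbits = 260, so nsymbols = ceil(12022 / 260) = 47 and the data
 * portion lasts SYMBOL_TIME(47) = 188 us before the preamble/training
 * fields are added.
 */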
/* Rate module function to set rate related fields in tx descriptor */

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_rate_table *rt;
	struct ath_desc *ds = bf->bf_desc;
	struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ieee80211_hdr *hdr;
	struct ieee80211_hw *hw = sc->hw;
	int i, flags, rtsctsena = 0, enable_g_protection = 0;
	u32 ctsduration = 0;
	u8 rix = 0, cix, ctsrate = 0;
	__le16 fc;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = (struct sk_buff *)bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	if (ieee80211_has_morefrags(fc) ||
	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
		rates[1].count = rates[2].count = rates[3].count = 0;
		rates[1].idx = rates[2].idx = rates[3].idx = 0;
		rates[0].count = ATH_TXMAXTRY;
	}

	/* get the cix for the lowest valid rix */
	rt = sc->cur_rate_table;
	for (i = 3; i >= 0; i--) {
		if (rates[i].count && (rates[i].idx >= 0)) {
			rix = rates[i].idx;
			break;
		}
	}

	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
	cix = rt->info[rix].ctrl_rate;

	/* All protection frames are transmitted at 2 Mb/s for 802.11g,
	 * otherwise we transmit them at 1 Mb/s */
	if (hw->conf.channel->band == IEEE80211_BAND_2GHZ &&
	    !conf_is_ht(&hw->conf))
		enable_g_protection = 1;

	/*
	 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
	 * just CTS. Note that this is only done for OFDM/HT unicast frames.
	 */
	if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK)
	    && (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
		WLAN_RC_PHY_HT(rt->info[rix].phy))) {
		if (sc->sc_protmode == PROT_M_RTSCTS)
			flags = ATH9K_TXDESC_RTSENA;
		else if (sc->sc_protmode == PROT_M_CTSONLY)
			flags = ATH9K_TXDESC_CTSENA;

		cix = rt->info[enable_g_protection].ctrl_rate;
		rtsctsena = 1;
	}

	/* For 11n, the default behavior is to enable RTS for hw retried frames.
	 * We enable the global flag here and let rate series flags determine
	 * which rates will actually use RTS.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
		/* 802.11g protection not needed, use our default behavior */
		if (!rtsctsena)
			flags = ATH9K_TXDESC_RTSENA;
	}

	/* Set protection if aggregate protection on */
	if (sc->sc_config.ath_aggr_prot &&
	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
		flags = ATH9K_TXDESC_RTSENA;
		cix = rt->info[enable_g_protection].ctrl_rate;
		rtsctsena = 1;
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
		flags &= ~(ATH9K_TXDESC_RTSENA);

	/*
	 * CTS transmit rate is derived from the transmit rate by looking in the
	 * h/w rate table. We must also factor in whether or not a short
	 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
	 */
	ctsrate = rt->info[cix].ratecode |
		(bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;

		series[i].Rate = rt->info[rix].ratecode |
			(bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0);

		series[i].Tries = rates[i].count;

		series[i].RateFlags = (
			(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
				ATH9K_RATESERIES_RTS_CTS : 0) |
			((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
				ATH9K_RATESERIES_2040 : 0) |
			((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
				ATH9K_RATESERIES_HALFGI : 0);

		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
			(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
			(rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
			bf_isshpreamble(bf));

		series[i].ChSel = sc->sc_tx_chainmask;

		if (rtsctsena)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
	}

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
				     ctsrate, ctsduration,
				     series, 4, flags);

	if (sc->sc_config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(ah, ds, 8192);
}
/*
 * Function to send a normal HT (non-AMPDU) frame
 * NB: must be called with txq lock held
 */
static int ath_tx_send_normal(struct ath_softc *sc,
			      struct ath_txq *txq,
			      struct ath_atx_tid *tid,
			      struct list_head *bf_head)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);

	return 0;
}
/* flush tid's software queue and send frames as non-ampdu's */

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		ASSERT(!bf_isretried(bf));
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_send_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}
/* Completion routine of an aggregate */

static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
				      struct ath_txq *txq,
				      struct ath_buf *bf,
				      struct list_head *bf_q,
				      int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	struct ath_buf *bf_next, *bf_lastq = NULL;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
	}

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		if (txok) {
			if (ATH_DS_TX_BA(ds)) {
				/*
				 * extract starting sequence and
				 * block-ack bitmap
				 */
				seq_st = ATH_DS_BA_SEQ(ds);
				memcpy(ba,
				       ATH_DS_BA_BITMAP(ds),
				       WME_BA_BMP_SIZE >> 3);
			} else {
				memset(ba, 0, WME_BA_BMP_SIZE >> 3);

				/*
				 * AR5416 can become deaf/mute when a BA
				 * issue happens. The chip needs to be reset,
				 * but the AP code may have synchronization
				 * issues when performing an internal reset
				 * in this routine. Only enable the reset in
				 * STA mode for now.
				 */
				if (sc->sc_ah->ah_opmode ==
				    NL80211_IFTYPE_STATION)
					needreset = 1;
			}
		} else {
			memset(ba, 0, WME_BA_BMP_SIZE >> 3);
		}
	}
	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
		} else if (!isaggr && txok) {
			/* transmit completion */
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		/*
		 * Remove ath_buf's of this sub-frame from aggregate queue.
		 */
		if (bf_next == NULL) { /* last subframe in the aggregate */
			ASSERT(bf->bf_lastfrm == bf_last);

			/*
			 * The last descriptor of the last sub frame could be
			 * a holding descriptor for h/w. If that's the case,
			 * bf->bf_lastfrm won't be in the bf_q.
			 * Make sure we handle bf_q properly here.
			 */
			if (!list_empty(bf_q)) {
				bf_lastq = list_entry(bf_q->prev,
						      struct ath_buf, list);
				list_cut_position(&bf_head,
						  bf_q, &bf_lastq->list);
			} else {
				/*
				 * XXX: if the last subframe has only one
				 * descriptor, which is also being used as
				 * the holding descriptor, then the ath_buf
				 * is not in bf_q at all.
				 */
				INIT_LIST_HEAD(&bf_head);
			}
		} else {
			ASSERT(!list_empty(bf_q));
			list_cut_position(&bf_head,
					  bf_q, &bf->bf_lastfrm->list);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* complete this sub-frame */
			ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
		} else {
			/*
			 * retry the un-acked ones
			 *
			 * XXX: if the last descriptor is the holding
			 * descriptor, then in order to requeue the frame
			 * to the software queue, we need to allocate a new
			 * descriptor and copy the content of the holding
			 * descriptor into it.
			 */
			if (bf->bf_next == NULL &&
			    bf_last->bf_status & ATH_BUFSTATUS_STALE) {
				struct ath_buf *tbf;

				/* allocate new descriptor */
				spin_lock_bh(&sc->tx.txbuflock);
				ASSERT(!list_empty((&sc->tx.txbuf)));
				tbf = list_first_entry(&sc->tx.txbuf,
						       struct ath_buf, list);
				list_del(&tbf->list);
				spin_unlock_bh(&sc->tx.txbuflock);

				ATH_TXBUF_RESET(tbf);

				/* copy descriptor content */
				tbf->bf_mpdu = bf_last->bf_mpdu;
				tbf->bf_buf_addr = bf_last->bf_buf_addr;
				*(tbf->bf_desc) = *(bf_last->bf_desc);

				/* link it to the frame */
				if (bf_lastq) {
					bf_lastq->bf_desc->ds_link =
						tbf->bf_daddr;
					bf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						bf->bf_lastfrm->bf_desc);
				} else {
					tbf->bf_state = bf_last->bf_state;
					tbf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						tbf->bf_lastfrm->bf_desc);

					/* copy the DMA context */
					tbf->bf_dmacontext =
						bf_last->bf_dmacontext;
				}
				list_add_tail(&tbf->list, &bf_head);
			} else {
				/*
				 * Clear descriptor status words for
				 * software retry
				 */
				ath9k_hw_cleartxdesc(sc->sc_ah,
					bf->bf_lastfrm->bf_desc);
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}
	if (tid->state & AGGR_CLEANUP) {
		/* check to see if we're done with cleaning the h/w queue */
		spin_lock_bh(&txq->axq_lock);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->addba_exchangeattempts = 0;
			spin_unlock_bh(&txq->axq_lock);

			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		} else
			spin_unlock_bh(&txq->axq_lock);

		return;
	}

	/*
	 * prepend un-acked frames to the beginning of the pending frame queue
	 */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		/* Note: we _prepend_, we do _not_ append to
		 * the end of the queue! */
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (needreset)
		ath_reset(sc, false);
}
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

	tx_info_priv->update_rc = false;
	if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
		if (bf_isdata(bf)) {
			memcpy(&tx_info_priv->tx, &ds->ds_txstat,
			       sizeof(tx_info_priv->tx));
			tx_info_priv->n_frames = bf->bf_nframes;
			tx_info_priv->n_bad_frames = nbad;
			tx_info_priv->update_rc = true;
		}
	}
}
/* Process completed xmit descriptors from the specified queue */

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	int txok, nbad = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Lets work with the next buffer now */
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc; /* NB: last descriptor */

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf_isaggr(bf))
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			list_del(&bf_held->list);
			spin_lock_bh(&sc->tx.txbuflock);
			list_add_tail(&bf_held->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
		}
		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		} else {
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}

		ath_tx_rc_status(bf, ds, nbad);

		/*
		 * Complete this transmit unit
		 */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}
		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DBG_XMIT, "tx queue [%u] %x, link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(ah, txq->axq_qnum),
		txq->axq_link);
}

/* Drain only the data queues */

static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i, npend = 0;

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ath_tx_stopdma(sc, &sc->tx.txq[i]);
				/* The TxDMA may not really be stopped.
				 * Double check the hal tx pending count */
				npend += ath9k_hw_numtxpending(ah,
						sc->tx.txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		int r;

		/* TxDMA not stopped, reset the hal */
		DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, true);
		if (r)
			DPRINTF(sc, ATH_DBG_FATAL,
				"Unable to reset hardware; reset status %u\n",
				r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}
/* Add a sub-frame to block ack window */

static void ath_tx_addto_baw(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	ASSERT(tid->tx_buf[cindex] == NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
/*
 * Function to send an A-MPDU
 * NB: must be called with txq lock held
 */
static int ath_tx_send_ampdu(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct list_head *bf_head,
			     struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_splice_tail_init(bf_head, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return 0;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);

	return 0;
}
/*
 * returns aggr limit based on lowest of the rates
 */
static u32 ath_lookup_rate(struct ath_softc *sc,
			   struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct ath_rate_table *rate_table = sc->cur_rate_table;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_tx_info_priv *tx_info_priv;
	u32 max_4ms_framelen, frame_length;
	u16 aggr_limit, legacy = 0, maxampdu;
	int i;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	tx_info_priv =
		(struct ath_tx_info_priv *)tx_info->rate_driver_data[0];

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
				legacy = 1;
				break;
			}

			frame_length =
				rate_table->info[rates[i].idx].max_4ms_framelen;
			max_4ms_framelen = min(max_4ms_framelen, frame_length);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen,
			 (u32)ATH_AMPDU_LIMIT_DEFAULT);

	/*
	 * h/w can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	maxampdu = tid->an->maxampdu;
	if (maxampdu)
		aggr_limit = min(aggr_limit, maxampdu);

	return aggr_limit;
}
/*
 * returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 * caller should make sure that the rate is an HT rate.
 */
static int ath_compute_num_delims(struct ath_softc *sc,
				  struct ath_atx_tid *tid,
				  struct ath_buf *bf,
				  u16 frmlen)
{
	struct ath_rate_table *rt = sc->cur_rate_table;
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols, mpdudensity;
	u16 minlen;
	u8 rc, flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 */
	mpdudensity = tid->an->mpdudensity;

	/*
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	rc = rt->info[rix].ratecode;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Is frame shorter than required minimum length? */
	if (frmlen < minlen) {
		/* Get the minimum number of delimiters required. */
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
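
/*
 * Editor's note (worked example, assuming ATH_AGGR_DELIM_SZ is the
 * 4-byte A-MPDU delimiter size): for an MPDU density of 4 us at MCS 7,
 * 40 MHz, long GI: nsymbols = NUM_SYMBOLS_PER_USEC(4) = 1 and
 * nsymbits = 540, so minlen = 540 / 8 = 67 bytes. A 40-byte subframe
 * then needs mindelim = (67 - 40) / 4 = 6 extra delimiters.
 */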
/*
 * For aggregation from software buffer queue.
 * NB: must be called with txq lock held
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     struct ath_buf **bf_last,
					     struct aggr_rifs_param *param,
					     int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
	struct list_head bf_head;
	int rl = 0, nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	int prev_al = 0;

	INIT_LIST_HEAD(&bf_head);

	BUG_ON(list_empty(&tid->buf_q));

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/*
		 * do not step over block-ack window
		 */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes && (aggr_limit <
			(al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * do not exceed subframe limit
		 */
		if ((nframes + *prev_frames) >=
		    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

		bpad = PADBYTES(al_delta) + (ndelim << 2);
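
		/*
		 * Editor's note (illustrative): each subframe is padded to
		 * a 4-byte boundary before the next one starts, e.g.
		 * PADBYTES(1538) = (4 - 2) % 4 = 2, and each of the ndelim
		 * delimiters adds 4 bytes (ndelim << 2); both are charged
		 * to bpad and added to the aggregate length (al) with the
		 * following subframe.
		 */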
		bf->bf_next = NULL;
		bf->bf_lastfrm->bf_desc->ds_link = 0;

		/*
		 * this packet is part of an aggregate
		 * - remove all descriptors belonging to this frame from
		 *   the software queue
		 * - add it to block ack window
		 * - set up descriptors for aggregation
		 */
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_addto_baw(sc, tid, bf);

		list_for_each_entry(tbf, &bf_head, list) {
			ath9k_hw_set11n_aggr_middle(sc->sc_ah,
						    tbf->bf_desc, ndelim);
		}

		/*
		 * link buffers of this frame to the aggregate
		 */
		list_splice_tail_init(&bf_head, bf_q);
		nframes++;

		if (bf_prev) {
			bf_prev->bf_next = bf;
			bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
		}
		bf_prev = bf;

		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;
	*bf_last = bf;

	return status;
#undef PADBYTES
}
/*
 * process pending frames possibly doing a-mpdu aggregation
 * NB: must be called with txq lock held
 */
static void ath_tx_sched_aggr(struct ath_softc *sc,
			      struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;
	struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
	int prev_frames = 0;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
					  &prev_frames);

		/*
		 * no frames picked up to be aggregated; block-ack
		 * window is not open
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf_last = list_entry(bf_q.prev, struct ath_buf, list);
		bf->bf_lastbf = bf_last;

		/*
		 * if only one frame, send as non-aggregate
		 */
		if (bf->bf_nframes == 1) {
			ASSERT(bf->bf_lastfrm == bf_last);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			/*
			 * clear aggr bits for every descriptor
			 * XXX TODO: is there a way to optimize it?
			 */
			list_for_each_entry(tbf, &bf_q, list) {
				ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
			}

			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/*
		 * setup first desc with rate and aggr info
		 */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/*
		 * anchor last frame of aggregate correctly
		 */
		ASSERT(bf_lastaggr);
		ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
		tbf = bf_lastaggr;
		ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

		/* XXX: We don't enter into this loop, consider removing this */
		while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
			tbf = list_entry(tbf->list.next, struct ath_buf, list);
			ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
		}

		txq->axq_aggr_depth++;

		/*
		 * Normal aggregate, queue to hardware
		 */
		ath_tx_txqaddbuf(sc, txq, &bf_q);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
/* Called with txq lock held */

static void ath_tid_drain(struct ath_softc *sc,
			  struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

		/* update baw for software retried frame */
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/*
		 * Do not indicate packets while holding the txq spinlock;
		 * unlocking here is intentional.
		 */
		spin_unlock(&txq->axq_lock);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

		spin_lock(&txq->axq_lock);
	}

	/*
	 * TODO: For frame(s) that are in the retry state, we will reuse the
	 * sequence number(s) without setting the retry bit. The
	 * alternative is to give up on these and BAR the receiver's window
	 * forward.
	 */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}
/*
 * Drain all pending buffers
 * NB: must be called with txq lock held
 */
static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;

		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}
static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_tx_info_priv *tx_info_priv;
	int hdrlen;
	__le16 fc;

	tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
	if (unlikely(!tx_info_priv))
		return -ENOMEM;
	tx_info->rate_driver_data[0] = tx_info_priv;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	/* Frame type */

	bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);

	ieee80211_is_data(fc) ?
		(bf->bf_state.bf_type |= BUF_DATA) :
		(bf->bf_state.bf_type &= ~BUF_DATA);
	ieee80211_is_back_req(fc) ?
		(bf->bf_state.bf_type |= BUF_BAR) :
		(bf->bf_state.bf_type &= ~BUF_BAR);
	ieee80211_is_pspoll(fc) ?
		(bf->bf_state.bf_type |= BUF_PSPOLL) :
		(bf->bf_state.bf_type &= ~BUF_PSPOLL);
	(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
		(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
		(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
	(sc->hw->conf.ht.enabled && !is_pae(skb) &&
	 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
		(bf->bf_state.bf_type |= BUF_HT) :
		(bf->bf_state.bf_type &= ~BUF_HT);

	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

	/* Crypto */

	bf->bf_keytype = get_hw_crypto_keytype(skb);

	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	/* Assign seqno, tidno */

	if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	/* DMA setup */

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		DPRINTF(sc, ATH_DBG_CONFIG,
			"pci_dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;
	return 0;
}
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hal *ah = sc->sc_ah;
	int frm_type;

	frm_type = get_hw_packet_type(skb);

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	/* setup descriptor */

	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	/* Formulate first tx descriptor with tx controls */

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds);	/* first descriptor */

	bf->bf_lastfrm = bf;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (ath_aggr_query(sc, an, bf->bf_tidno)) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txctl->txq,
					   tid, &bf_head);
		}
	} else {
		bf->bf_lastbf = bf;
		bf->bf_nframes = 1;
		ath_buf_set_rate(sc, bf);
		ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}
/* Upon failure caller should free skb */
int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_buf *bf;
	int r;

	/* Check if a tx buffer is available */

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(sc, bf, skb, txctl);
	if (unlikely(r)) {
		struct ath_txq *txq = txctl->txq;

		DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed; we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (ath_txq_depth(sc, txq->axq_qnum) > 1) {
			ieee80211_stop_queue(sc->hw,
					     skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		spin_lock_bh(&sc->tx.txbuflock);
		list_add_tail(&bf->list, &sc->tx.txbuf);
		spin_unlock_bh(&sc->tx.txbuflock);

		return r;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
/* Initialize TX queue and h/w */

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	int error = 0;

	do {
		spin_lock_init(&sc->tx.txbuflock);

		/* Setup tx descriptors */
		error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
					  "tx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"Failed to allocate tx descriptors: %d\n",
				error);
			break;
		}

		/* XXX allocate beacon state together with vap */
		error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
					  "beacon", ATH_BCBUF, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"Failed to allocate beacon descriptors: %d\n",
				error);
			break;
		}
	} while (0);

	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}
/* Reclaim all tx queue resources */

int ath_tx_cleanup(struct ath_softc *sc)
{
	/* cleanup beacon descriptors */
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	/* cleanup tx descriptors */
	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	return 0;
}
/* Setup a h/w transmit queue */

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (qtype == ATH9K_TX_QUEUE_UAPSD)
		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	else
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
				TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"qnum %u out of range, max %u!\n",
			qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		txq->axq_linkbuf = NULL;
		sc->tx.txqsetup |= 1<<qnum;
	}
	return &sc->tx.txq[qnum];
}
/* Reclaim resources for a setup queue */

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Setup a hardware data transmit queue for the specified
 * access category. The hal may not support all requested
 * queues, in which case it will return a reference to a
 * previously setup queue. We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"HAL AC %u out of range, max %zu!\n",
			haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"HAL AC %u out of range, max %zu!\n",
				haltype, ARRAY_SIZE(sc->tx.hwq_map));
			return -1;
		}
		qnum = sc->tx.hwq_map[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->beacon.beaconq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->beacon.cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}
	return qnum;
}
/* Get a transmit queue, if available */

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	int qnum;

	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
	txq = &sc->tx.txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	/* Try to avoid running out of descriptors */
	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"TX queue: %d is full, depth: %d\n",
			qnum, txq->axq_depth);
		ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}
/* Update parameters for a transmit queue */

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	ASSERT(sc->tx.txq[qnum].axq_qnum == qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
	}

	return error;
}
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;
	struct ath_beacon_config conf;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
	qi.tqi_readyTime =
		(conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
/* Deferred processing of transmit interrupt */

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	/*
	 * Process each active queue.
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}
void ath_tx_draintxq(struct ath_softc *sc,
		     struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_tasklet
	 */
	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			list_del(&bf->list);
			spin_unlock_bh(&txq->axq_lock);

			spin_lock_bh(&sc->tx.txbuflock);
			list_add_tail(&bf->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
			continue;
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_desc->ds_txstat.ts_flags =
				ATH9K_TX_SW_ABORTED;

		/* remove ath_buf's of the same mpdu from txq */
		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
2198 void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2200 /* stop beacon queue. The beacon will be freed when
2201 * we go to INIT state */
2202 if (!(sc->sc_flags & SC_OP_INVALID)) {
2203 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2204 DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n",
2205 ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq));
2208 ath_drain_txdataq(sc, retry_tx);
2211 u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2213 return sc->tx.txq[qnum].axq_depth;
2216 u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2218 return sc->tx.txq[qnum].axq_aggr_depth;
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
		    (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
			txtid->addba_exchangeattempts++;
			return true;
		}
	}

	return false;
}

/* Start TX aggregation */

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->state |= AGGR_ADDBA_PROGRESS;
		ath_tx_pause_tid(sc, txtid);
	}

	return 0;
}

/* Stop tx aggregation */

int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	ath_tx_aggr_teardown(sc, an, tid);

	return 0;
}

/* Resume tx aggregation */

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}
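
/*
 * Editor's note (illustrative): IEEE80211_MIN_AMPDU_BUF is 8 and
 * ampdu_factor ranges 0-3, so a peer advertising factor 3 yields
 * baw_size = 8 << 3 = 64 subframes; ath_tx_form_aggr() then caps an
 * aggregate at h_baw = baw_size / 2 = 32 subframes.
 */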
/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 * - Pause the TID and mark cleanup in progress
 * - Discard all retry frames from the s/w queue.
 */

void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
{
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->addba_exchangeattempts = 0;
		return;
	}

	/* TID must be paused first */
	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: it's based on the assumption that a
			 * software-retried frame always stays at the
			 * head of the software queue.
			 */
			break;
		}
		list_cut_position(&bf_head,
				  &txtid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	if (txtid->baw_head != txtid->baw_tail) {
		spin_unlock_bh(&txq->axq_lock);
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		txtid->addba_exchangeattempts = 0;
		spin_unlock_bh(&txq->axq_lock);
		ath_tx_flush_tid(sc, txtid);
	}
}
/*
 * Tx scheduling logic
 * NB: must be called with txq lock held
 */

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	/* nothing to schedule */
	if (list_empty(&txq->axq_acq))
		return;
	/*
	 * get the first node/ac pair on the queue
	 */
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	/*
	 * process a single tid per destination
	 */
	do {
		/* nothing to schedule */
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)	/* check next tid to keep h/w busy */
			continue;

		if ((txq->axq_depth % 2) == 0)
			ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		/* only schedule one TID at a time */
		break;
	} while (!list_empty(&ac->tid_q));

	/*
	 * schedule AC if more TIDs need processing
	 */
	if (!list_empty(&ac->tid_q)) {
		/*
		 * add dest ac to txq if not already added
		 */
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}
/* Initialize per-node transmit state */

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	/*
	 * Init per tid tx state
	 */
	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
		tid->paused    = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);

		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];

		/* ADDBA state */
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
		tid->addba_exchangeattempts = 0;
	}

	/*
	 * Init per ac tx state
	 */
	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}
/* Clean up the pending buffers for the node. */

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];

			spin_lock(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->addba_exchangeattempts = 0;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock(&txq->axq_lock);
		}
	}
}
void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
{
	int hdrlen, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		if (skb_headroom(skb) < padsize) {
			DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, hdrlen);
	}

	txctl.txq = sc->beacon.cabq;

	DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(sc, skb, &txctl) != 0) {
		DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}