/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		"Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		/* TXE can still be asserted while the frame counter reads 0 */
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);
/*
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether the frame trigger level should be increased
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Caution must be taken to set the frame trigger level based on the
 * DMA request size. For example, if the DMA request size is set to
 * 128 bytes, the trigger level cannot exceed 6 * 64 = 384 bytes. This is
 * because there needs to be enough space left in the TX FIFO for the
 * requested transfer size, so the FIFO stops accepting data at
 * 512 - 128 = 384 bytes. If we set the threshold to a value beyond 6,
 * the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB; however,
 * there is a hardware issue which forces us to use 2 KB instead, so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
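
/*
 * Illustrative sketch only (not part of the driver): this restates the
 * arithmetic from the comment above, i.e. how large the trigger level may
 * safely be for a given usable FIFO size and DMA request size, both in
 * bytes. The helper name and parameters are hypothetical and are not used
 * anywhere else in ath9k.
 */
static inline u32 example_max_trig_level(u32 usable_fifo_bytes,
					 u32 dma_req_bytes)
{
	/* the trigger level is programmed in units of 64 bytes */
	return (usable_fifo_bytes - dma_req_bytes) / 64;
}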
void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int i, q;

	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < 1000; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	4000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;
	u32 tsfLow, j, wait;
	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Stopping TX DMA, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Stopping TX DMA, inactive queue: %u\n", q);
		return false;
	}

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(ATH9K_TIME_QUANTUM);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"%s: Num of pending TX Frames %d on Q %d\n",
			__func__, ath9k_hw_numtxpending(ah, q), q);

		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				    AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			ath_dbg(common, ATH_DBG_QUEUE,
				"TSF has moved while trying to set quiet time TSF: 0x%08x\n",
				tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		/* Allow the quiet mechanism to do its work */
		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = wait_time;
		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				ath_err(common,
					"Failed to stop TX DMA in 100 msec after killing last frame\n");
				break;
			}
			udelay(ATH9K_TIME_QUANTUM);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);
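
/*
 * Illustrative sketch only (not part of the driver): the cwmin/cwmax
 * handling above rounds a requested contention window up to the next value
 * of the form 2^n - 1, which is what the hardware expects. For example, a
 * requested cwmin of 20 is programmed as 31. The helper name below is
 * hypothetical.
 */
static inline u32 example_round_up_cw(u32 cw)
{
	u32 rounded = 1;

	while (rounded < cw)
		rounded = (rounded << 1) | 1;

	return rounded;
}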
bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Get TXQ properties, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Get TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < pCap->total_queues; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
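
/*
 * Minimal usage sketch only (not part of the driver): a caller asks for a
 * data queue with mostly default parameters and checks the -1 error return
 * before using the queue number. The helper name and the exact parameter
 * choices here are hypothetical.
 */
static inline int example_setup_data_queue(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = INIT_AIFS;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = INIT_CWMAX;

	/* returns the hardware queue number, or -1 if none is available */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, &qi);
}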
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;
	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}

	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);
	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for the beacon queue,
		 * but not for IBSS as we would create an imbalance
		 * in beaconing fairness for participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);

		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way.
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;

		if (ads.ds_rxstatus8 & AR_KeyMiss)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
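
/*
 * Hedged usage sketch only (not part of the driver): ath9k_hw_rxprocdesc()
 * returns -EINPROGRESS while the hardware has not yet written the RX status
 * words, so a caller normally stops walking the descriptor ring at that
 * point and retries on the next RX interrupt. The helper name below is
 * hypothetical.
 */
static inline bool example_rx_desc_done(struct ath_hw *ah, struct ath_desc *ds,
					struct ath_rx_status *rs, u64 tsf)
{
	/* false: status not yet complete, revisit this descriptor later */
	return ath9k_hw_rxprocdesc(ah, ds, rs, tsf) != -EINPROGRESS;
}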
/*
 * This can stop or re-enable RX.
 *
 * If bool is set this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
#define AH_RX_TIME_QUANTUM     100     /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	int i;

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_RX_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;
		udelay(AH_RX_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_err(common,
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW));
		return false;
	} else {
		return true;
	}

#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);
void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
			  AR_INTR_MAC_IRQ);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
			  AR_INTR_SYNC_DEFAULT);
		REG_WRITE(ah, AR_INTR_SYNC_MASK,
			  AR_INTR_SYNC_DEFAULT);
	}
	ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
{
	enum ath9k_int omask = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_enable_interrupts(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);

	/* TODO: global int Ref count */
	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}
	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	ath9k_hw_enable_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);