/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;
	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;
	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;
	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure we now have a frame with the requested number of
	 * bytes available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);
	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data, skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len,
			   DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
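/*
 * Worked example for rt2x00queue_align_frame(), assuming ALIGN_SIZE()
 * is the usual rt2x00queue.h helper that evaluates
 * ((unsigned long)(skb->data + header)) & 3: if skb->data sits at an
 * address with (address & 3) == 2, align == 2. skb_push() then claims
 * 2 bytes of headroom, memmove() slides the frame down onto the 4-byte
 * boundary, and skb_trim() drops the 2 now-stale trailing bytes.
 */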
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
			     L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}
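/*
 * Worked example for the L2 padding helpers, assuming L2PAD_SIZE() is
 * the usual rt2x00queue.h definition (-(__hdrlen) & 3): a QoS data
 * frame has a 26-byte header, so L2PAD_SIZE(26) == 2. Two pad bytes
 * are inserted between header and payload, realigning the payload to
 * a 4-byte boundary without disturbing the header alignment;
 * rt2x00queue_remove_l2pad() reverses this on completion.
 */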
static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags))
		return;

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}
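/*
 * Note on the seq_ctrl packing above: the 802.11 Sequence Control
 * field keeps the fragment number in bits 0-3 (IEEE80211_SCTL_FRAG ==
 * 0x000f) and the sequence number in bits 4-15. Adding 0x10 to
 * intf->seqno therefore advances the sequence number by exactly one,
 * while the &= mask preserves only the fragment field before the new
 * sequence number is OR'ed in.
 */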
static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;
	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;
	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;
		/*
		 * When short preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}
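/*
 * Worked example for the CCK branch above, assuming the rt2x00queue.h
 * macros GET_DURATION(size, rate) == (size * 8 * 10) / rate and
 * GET_DURATION_RES() as the matching remainder, with the bitrate in
 * 100kbps units: a 1030-byte PSDU at 11 Mbps (bitrate == 110) gives
 * duration = 82400 / 110 = 749us with residual 10. The non-zero
 * residual rounds the duration up to 750us, and since residual <= 30
 * at 11 Mbps the PLCP Length Extension bit (0x80 in SERVICE) is set
 * to disambiguate the octet count at the receiver.
 */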
static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;

	if (tx_info->control.sta) {
		txdesc->u.ht.mpdu_density =
		    tx_info->control.sta->ht_cap.ampdu_density;

		sta_priv = sta_to_rt2x00_sta(tx_info->control.sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
	}

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */
	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;
	/*
	 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
	 * mcs rate to be used
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one tx stream (>MCS7).
		 */
		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
		    ((tx_info->control.sta->ht_cap.cap &
		      IEEE80211_HT_CAP_SM_PS) >>
		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
		    WLAN_HT_CAP_SM_PS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}
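	/*
	 * Note on the mcs > 7 test above: HT MCS indices 0-7 use a
	 * single spatial stream and 8-15 use two, so mcs > 7 marks the
	 * point at which a peer in dynamic SM power save needs an
	 * RTS/CTS exchange before it can receive a multi-stream frame.
	 */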
	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}
static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);
	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}
	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}
	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}
	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}
static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}
static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the number of available entries
	 *	   in the queue drops below a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
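/*
 * Example of rule 2 above, assuming rt2x00queue_threshold() is the
 * rt2x00queue.h helper that returns true once rt2x00queue_available()
 * drops below queue->threshold: a 64-entry queue gets threshold =
 * DIV_ROUND_UP(64, 10) = 7 (see rt2x00queue_alloc_entries()), so a
 * burst frame still kicks the hardware when fewer than 7 free entries
 * remain, preventing a stall on an almost-full queue.
 */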
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);
	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;
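	/*
	 * Note: skbdesc overlays the driver_data area of the
	 * ieee80211_tx_info structure living in skb->cb (see
	 * get_skb_frame_desc() in rt2x00queue.h), which is why the rate
	 * index and flags are saved before the memset above and written
	 * back into the descriptor afterwards.
	 */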
	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;
	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}
	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(skb);
	/*
	 * This function must be called with bh disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		ret = -ENOBUFS;
		goto out;
	}
	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	skbdesc->entry = entry;
	entry->skb = skb;
	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}
int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}
int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}
bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}
	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);
	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
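/*
 * Example of the wraparound handling in rt2x00queue_for_each_entry():
 * with queue->limit == 8, index_start == 6 and index_end == 2, the
 * range has wrapped, so the callback visits entries 6 and 7 in the
 * first loop and then 0 and 1 in the second, preserving the order in
 * which the frames were queued.
 */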
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n",
			queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}
static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
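/*
 * Layout produced by rt2x00queue_alloc_entries(): kcalloc() returns a
 * single contiguous block holding all queue_entry structures followed
 * by all per-entry driver private areas. For example, with limit == 4,
 * sizeof(*entries) == 64 and priv_size == 16, entry 2's priv_data
 * points at base + 4 * 64 + 2 * 16 = base + 288.
 */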
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}
static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}
int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}
void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
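	/*
	 * Resulting array layout, for example with ops->tx_queues == 4
	 * and an ATIM queue required (data_queues == 7): index 0 is RX,
	 * indices 1-4 are the TX queues (QID_AC_VO through QID_AC_BK),
	 * index 5 is the beacon queue and index 6 the ATIM queue.
	 */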
	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}