/*
        Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00lib
        Abstract: rt2x00 queue specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
                                        struct queue_entry *entry)
{
        unsigned int frame_size;
        unsigned int reserved_size;
        struct sk_buff *skb;
        struct skb_frame_desc *skbdesc;

        /*
         * The frame size includes the descriptor size, because the
         * hardware receives the frame directly into the skbuffer.
         */
        frame_size = entry->queue->data_size + entry->queue->desc_size;

        /*
         * The payload should be aligned to a 4-byte boundary,
         * which means we need up to 3 bytes of headroom for moving
         * the frame to the correct offset.
         */
        reserved_size = 4;

        /*
         * Allocate the skbuffer.
         */
        skb = dev_alloc_skb(frame_size + reserved_size);
        if (!skb)
                return NULL;

        skb_reserve(skb, reserved_size);
        skb_put(skb, frame_size);

        /*
         * Populate skbdesc.
         */
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;

        if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
                skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
                                                  skb->data, skb->len,
                                                  DMA_FROM_DEVICE);
                skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
        }

        return skb;
}
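
/*
 * Map the frame payload for DMA towards the device and flag the
 * descriptor so rt2x00queue_unmap_skb() knows in which direction to
 * unmap it later.
 */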
void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

        skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
                                          DMA_TO_DEVICE);
        skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
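
/*
 * Undo a previous DMA mapping. Both direction flags are checked,
 * since this helper serves buffers travelling in either direction.
 */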
void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

        if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
                dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
                                 DMA_FROM_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
        }

        if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
                dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
                                 DMA_TO_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
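
/*
 * Release a frame: undo any outstanding DMA mapping first, then free
 * the buffer; dev_kfree_skb_any() makes this safe from any context.
 */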
void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
        if (!skb)
                return;

        rt2x00queue_unmap_skb(rt2x00dev, skb);
        dev_kfree_skb_any(skb);
}
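
/*
 * Translate the mac80211 TX control info in skb->cb into a
 * driver-neutral txentry_desc. Everything we need from skb->cb must
 * be extracted here, because callers will afterwards reuse the cb
 * array for the skb_frame_desc.
 */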
static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
                                             struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
        struct ieee80211_rate *rate =
            ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
        const struct rt2x00_rate *hwrate;
        unsigned int data_length;
        unsigned int duration;
        unsigned int residual;

        memset(txdesc, 0, sizeof(*txdesc));

        /*
         * Initialize information from queue.
         */
        txdesc->queue = entry->queue->qid;
        txdesc->cw_min = entry->queue->cw_min;
        txdesc->cw_max = entry->queue->cw_max;
        txdesc->aifs = entry->queue->aifs;

        /* Data length should be extended with 4 bytes for CRC. */
        data_length = entry->skb->len + 4;

        /*
         * Check whether this frame is to be acked.
         */
        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
                __set_bit(ENTRY_TXD_ACK, &txdesc->flags);

        /*
         * Check if this is an RTS/CTS frame.
         */
        if (ieee80211_is_rts(hdr->frame_control) ||
            ieee80211_is_cts(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                if (ieee80211_is_rts(hdr->frame_control))
                        __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
                else
                        __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
                if (tx_info->control.rts_cts_rate_idx >= 0)
                        rate =
                            ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
        }

        /*
         * Determine retry information.
         */
        txdesc->retry_limit = tx_info->control.retry_limit;
        if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

        /*
         * Check if more fragments are pending.
         */
        if (ieee80211_has_morefrags(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
        }

        /*
         * Beacons and probe responses require the tsf timestamp
         * to be inserted into the frame.
         */
        if (ieee80211_is_beacon(hdr->frame_control) ||
            ieee80211_is_probe_resp(hdr->frame_control))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

        /*
         * Determine with what IFS priority this frame should be sent.
         * Set ifs to IFS_SIFS when this is not the first fragment,
         * or when this fragment came after RTS/CTS.
         */
        if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
                txdesc->ifs = IFS_SIFS;
        } else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
                __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
                txdesc->ifs = IFS_BACKOFF;
        } else {
                txdesc->ifs = IFS_SIFS;
        }

        /*
         * Hardware should insert sequence counter.
         * FIXME: We insert a software sequence counter first for
         * hardware that doesn't support hardware sequence counting.
         *
         * This is wrong because beacons are not getting sequence
         * numbers assigned properly.
         *
         * A secondary problem exists for drivers that cannot toggle
         * sequence counting per-frame, since those will override the
         * sequence counter given by mac80211.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
                spin_lock(&intf->lock);

                if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
                        intf->seqno += 0x10;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

                spin_unlock(&intf->lock);

                __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
        }

        /*
         * PLCP setup.
         * Length calculation depends on OFDM/CCK rate.
         */
        hwrate = rt2x00_get_rate(rate->hw_value);
        txdesc->signal = hwrate->plcp;
        txdesc->service = 0x04;

        if (hwrate->flags & DEV_RATE_OFDM) {
                __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);

                txdesc->length_high = (data_length >> 6) & 0x3f;
                txdesc->length_low = data_length & 0x3f;
        } else {
                /*
                 * Convert length to microseconds.
                 */
                residual = get_duration_res(data_length, hwrate->bitrate);
                duration = get_duration(data_length, hwrate->bitrate);

                if (residual != 0) {
                        duration++;

                        /*
                         * Check if we need to set the Length Extension.
                         */
                        if (hwrate->bitrate == 110 && residual <= 30)
                                txdesc->service |= 0x80;
                }
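
                /*
                 * For CCK the PLCP LENGTH field holds the frame
                 * duration in microseconds. At 11 Mbps the octet
                 * count derived from that duration is ambiguous,
                 * which is what the length-extension bit in the
                 * SERVICE field resolves (IEEE 802.11b).
                 */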
                txdesc->length_high = (duration >> 8) & 0xff;
                txdesc->length_low = duration & 0xff;

                /*
                 * When preamble is enabled we should set the
                 * preamble bit for the signal.
                 */
                if (rt2x00_get_rate_preamble(rate->hw_value))
                        txdesc->signal |= 0x08;
        }
}
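
/*
 * Hand the completed descriptor to the driver and decide whether the
 * queue must be kicked into action immediately.
 */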
static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
                                            struct txentry_desc *txdesc)
{
        struct data_queue *queue = entry->queue;
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

        rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

        /*
         * All processing on the frame has been completed, which means
         * it is now ready to be dumped to userspace through debugfs.
         */
        rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

        /*
         * Check if we need to kick the queue. There are however a few rules:
         *      1) Don't kick the beacon queue.
         *      2) Don't kick unless this is the last frame in a burst.
         *         When the burst flag is set, this frame is always followed
         *         by another frame which is in some way related to it.
         *         This is true for fragments, RTS or CTS-to-self frames.
         *      3) Rule 2 can be broken when the number of available entries
         *         in the queue drops below a certain threshold.
         */
        if (entry->queue->qid == QID_BEACON)
                return;

        if (rt2x00queue_threshold(queue) ||
            !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
                rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
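
/*
 * Main TX entry point for data frames. A usage sketch (illustrative
 * only, not code from this file; see rt2x00mac.c for the real
 * caller):
 *
 *      queue = rt2x00queue_get_queue(rt2x00dev, qid);
 *      if (rt2x00queue_write_tx_frame(queue, skb))
 *              return NETDEV_TX_BUSY;
 */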
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
        struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
        struct txentry_desc txdesc;
        struct skb_frame_desc *skbdesc;

        if (unlikely(rt2x00queue_full(queue)))
                return -EINVAL;

        if (__test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
                ERROR(queue->rt2x00dev,
                      "Arrived at non-free entry in the non-full queue %d.\n"
                      "Please file bug report to %s.\n",
                      queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Copy all TX descriptor information into txdesc,
         * after that we are free to use the skb->cb array
         * for our information.
         */
        entry->skb = skb;
        rt2x00queue_create_tx_descriptor(entry, &txdesc);

        /*
         * The skb->cb array is now ours and we are free to use it.
         */
        skbdesc = get_skb_frame_desc(entry->skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;

        if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
                __clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                return -EIO;
        }

        if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
                rt2x00queue_map_txskb(queue->rt2x00dev, skb);

        __set_bit(ENTRY_DATA_PENDING, &entry->flags);

        rt2x00queue_index_inc(queue, Q_INDEX);
        rt2x00queue_write_tx_descriptor(entry, &txdesc);

        return 0;
}
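
/*
 * Refresh the beacon for a virtual interface: fetch a new beacon skb
 * from mac80211, build its TX descriptor and hand it to the driver's
 * beacon machinery.
 */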
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
                              struct ieee80211_vif *vif)
{
        struct rt2x00_intf *intf = vif_to_intf(vif);
        struct skb_frame_desc *skbdesc;
        struct txentry_desc txdesc;
        __le32 desc[16];

        if (unlikely(!intf->beacon))
                return -ENOBUFS;

        intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
        if (!intf->beacon->skb)
                return -ENOMEM;

        /*
         * Copy all TX descriptor information into txdesc,
         * after that we are free to use the skb->cb array
         * for our information.
         */
        rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

        /*
         * For the descriptor we use a local array from where the
         * driver can move it to the correct location required for
         * the hardware.
         */
        memset(desc, 0, sizeof(desc));

        /*
         * Fill in the skb descriptor.
         */
        skbdesc = get_skb_frame_desc(intf->beacon->skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->desc = desc;
        skbdesc->desc_len = intf->beacon->queue->desc_size;
        skbdesc->entry = intf->beacon;

        /*
         * Write the TX descriptor into the reserved room in front of
         * the beacon.
         */
        rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);

        /*
         * Send the beacon to the hardware.
         * Also enable beacon generation, which might have been disabled
         * by the driver during the config_beacon() callback function.
         */
        rt2x00dev->ops->lib->write_beacon(intf->beacon);
        rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);

        return 0;
}
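
/*
 * Map a queue identifier to its data_queue structure: TX qids index
 * the tx array directly, while the beacon and (optional) atim queues
 * live behind the bcn pointer.
 */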
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
                                         const enum data_queue_qid queue)
{
        int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
                return &rt2x00dev->tx[queue];

        if (!rt2x00dev->bcn)
                return NULL;

        if (queue == QID_BEACON)
                return &rt2x00dev->bcn[0];
        else if (queue == QID_ATIM && atim)
                return &rt2x00dev->bcn[1];

        return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
                                          enum queue_index index)
{
        struct queue_entry *entry;
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Entry requested from invalid index type (%d)\n", index);
                return NULL;
        }

        spin_lock_irqsave(&queue->lock, irqflags);

        entry = &queue->entries[queue->index[index]];

        spin_unlock_irqrestore(&queue->lock, irqflags);

        return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Index change on invalid index type (%d)\n", index);
                return;
        }

        spin_lock_irqsave(&queue->lock, irqflags);

        queue->index[index]++;
        if (queue->index[index] >= queue->limit)
                queue->index[index] = 0;

        if (index == Q_INDEX) {
                queue->length++;
        } else if (index == Q_INDEX_DONE) {
                queue->length--;
                queue->count++;
        }

        spin_unlock_irqrestore(&queue->lock, irqflags);
}

static void rt2x00queue_reset(struct data_queue *queue)
{
        unsigned long irqflags;

        spin_lock_irqsave(&queue->lock, irqflags);

        queue->count = 0;
        queue->length = 0;
        memset(queue->index, 0, sizeof(queue->index));

        spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue = rt2x00dev->rx;
        unsigned int i;

        rt2x00queue_reset(queue);

        if (!rt2x00dev->ops->lib->init_rxentry)
                return;

        for (i = 0; i < queue->limit; i++) {
                queue->entries[i].flags = 0;

                rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
                                                  &queue->entries[i]);
        }
}

void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        unsigned int i;

        txall_queue_for_each(rt2x00dev, queue) {
                rt2x00queue_reset(queue);

                if (!rt2x00dev->ops->lib->init_txentry)
                        continue;

                for (i = 0; i < queue->limit; i++) {
                        queue->entries[i].flags = 0;

                        rt2x00dev->ops->lib->init_txentry(rt2x00dev,
                                                          &queue->entries[i]);
                }
        }
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
                                     const struct data_queue_desc *qdesc)
{
        struct queue_entry *entries;
        unsigned int entry_size;
        unsigned int i;

        rt2x00queue_reset(queue);

        queue->limit = qdesc->entry_num;
        queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
        queue->data_size = qdesc->data_size;
        queue->desc_size = qdesc->desc_size;

        /*
         * Allocate all queue entries.
         */
        entry_size = sizeof(*entries) + qdesc->priv_size;
        entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;
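
        /*
         * The driver's per-entry private data lives in the same
         * allocation: the queue_entry array is followed by
         * queue->limit blocks of qdesc->priv_size bytes. The macro
         * below computes the address of block __index inside that
         * trailing region.
         */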
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
        ( ((char *)(__base)) + ((__limit) * (__esize)) + \
            ((__index) * (__psize)) )

        for (i = 0; i < queue->limit; i++) {
                entries[i].flags = 0;
                entries[i].queue = queue;
                entries[i].skb = NULL;
                entries[i].entry_idx = i;
                entries[i].priv_data =
                    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
                                            sizeof(*entries), qdesc->priv_size);
        }

#undef QUEUE_ENTRY_PRIV_OFFSET

        queue->entries = entries;

        return 0;
}

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
                                  struct data_queue *queue)
{
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                if (queue->entries[i].skb)
                        rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
        }
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
                                    struct data_queue *queue)
{
        struct sk_buff *skb;
        unsigned int i;

        for (i = 0; i < queue->limit; i++) {
                skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
                if (!skb)
                        return -ENOMEM;
                queue->entries[i].skb = skb;
        }

        return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
        if (status)
                goto exit;

        tx_queue_for_each(rt2x00dev, queue) {
                status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
        if (status)
                goto exit;

        if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
                status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
                                                   rt2x00dev->ops->atim);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
        if (status)
                goto exit;

        return 0;

exit:
        ERROR(rt2x00dev, "Queue entries allocation failed.\n");

        rt2x00queue_uninitialize(rt2x00dev);

        return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

        queue_for_each(rt2x00dev, queue) {
                kfree(queue->entries);
                queue->entries = NULL;
        }
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
                             struct data_queue *queue, enum data_queue_qid qid)
{
        spin_lock_init(&queue->lock);

        queue->rt2x00dev = rt2x00dev;
        queue->qid = qid;
        queue->aifs = 2;
        queue->cw_min = 5;
        queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        enum data_queue_qid qid;
        unsigned int req_atim =
            !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        /*
         * We need the following queues:
         * RX: 1
         * TX: ops->tx_queues
         * Beacon: 1
         * Atim: 1 (if required)
         */
        rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

        queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
        if (!queue) {
                ERROR(rt2x00dev, "Queue allocation failed.\n");
                return -ENOMEM;
        }

        /*
         * Initialize pointers.
         */
        rt2x00dev->rx = queue;
        rt2x00dev->tx = &queue[1];
        rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

        /*
         * Initialize queue parameters.
         * RX: qid = QID_RX
         * TX: qid = QID_AC_BE + index
         * TX: cw_min: 2^5 = 32.
         * TX: cw_max: 2^10 = 1024.
         * BCN: qid = QID_BEACON
         * ATIM: qid = QID_ATIM
         */
        rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

        qid = QID_AC_BE;
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_init(rt2x00dev, queue, qid++);

        rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
        if (req_atim)
                rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

        return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rx);
        rt2x00dev->rx = NULL;
        rt2x00dev->tx = NULL;
        rt2x00dev->bcn = NULL;
}