3 * Copyright (c) 2010 Atheros Communications Inc.
8 // Permission to use, copy, modify, and/or distribute this software for any
9 // purpose with or without fee is hereby granted, provided that the above
10 // copyright notice and this permission notice appear in all copies.
12 // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
29 #include "aggr_recv_api.h"
30 #include "aggr_rx_internal.h"
/* Forward declarations for this translation unit's helpers.
 * NOTE(review): the storage-class/return-type lines of these prototypes are
 * elided in this chunk; only the name and parameter lists are visible. */
/* Convert an 802.3 frame in osbuf to DIX (Ethernet II) framing — TODO confirm
 * against the wmi implementation. */
34 wmi_dot3_2_dix(void *osbuf);
/* Split one A-MSDU buffer into its 802.3 subframes, queueing them on rxtid->q. */
37 aggr_slice_amsdu(struct aggr_info *p_aggr, struct rxtid *rxtid, void **osbuf);
/* Reorder-timeout callback; `arg` is the aggr_info context (see aggr_init). */
40 aggr_timeout(unsigned long arg);
/* Deliver held frames for `tid`; `order` selects CONTIGUOUS_SEQNO vs ALL_SEQNO. */
43 aggr_deque_frms(struct aggr_info *p_aggr, u8 tid, u16 seq_no, u8 order);
/* Pass every buffer on `q` up through the registered rx callback. */
46 aggr_dispatch_frames(struct aggr_info *p_aggr, A_NETBUF_QUEUE_T *q);
/* Take one free netbuf from p_aggr->freeQ, refilling the pool when low. */
49 aggr_get_osbuf(struct aggr_info *p_aggr);
/*
 * aggr_init - allocate and initialize the per-device rx-aggregation context.
 *
 * `netbuf_allocator` is stored and used to (re)fill the free netbuf pool.
 * Returns the new aggr_info on success, NULL on failure; the caller owns the
 * returned context and releases it with aggr_module_destroy().
 *
 * NOTE(review): several lines are elided in this chunk (allocation-failure
 * branch, loop braces, `status` declaration); comments below describe only
 * the visible statements.
 */
52 aggr_init(ALLOC_NETBUFS netbuf_allocator)
54 struct aggr_info *p_aggr = NULL;
59 A_PRINTF("In aggr_init..\n");
62 p_aggr = A_MALLOC(sizeof(struct aggr_info));
/* This message is on the allocation-failure path (guard elided here). */
64 A_PRINTF("Failed to allocate memory for aggr_node\n");
69 /* Init timer and data structures */
70 A_MEMZERO(p_aggr, sizeof(struct aggr_info));
71 p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
/* Timer fires aggr_timeout() with p_aggr as its argument. */
72 A_INIT_TIMER(&p_aggr->timer, aggr_timeout, p_aggr);
73 p_aggr->timerScheduled = false;
74 A_NETBUF_QUEUE_INIT(&p_aggr->freeQ);
/* Pre-fill the free netbuf pool via the caller-supplied allocator. */
76 p_aggr->netbuf_allocator = netbuf_allocator;
77 p_aggr->netbuf_allocator(&p_aggr->freeQ, AGGR_NUM_OF_FREE_NETBUFS);
/* Per-TID reorder state: queue + lock per traffic identifier. */
79 for(i = 0; i < NUM_OF_TIDS; i++) {
80 rxtid = AGGR_GET_RXTID(p_aggr, i);
82 rxtid->progress = false;
83 rxtid->timerMon = false;
84 A_NETBUF_QUEUE_INIT(&rxtid->q);
85 A_MUTEX_INIT(&rxtid->lock);
89 A_PRINTF("going out of aggr_init..status %s\n",
90 (status == 0) ? "OK":"Error");
/* On failure, tear down whatever was partially initialized. */
94 aggr_module_destroy(p_aggr);
96 return ((status == 0) ? p_aggr : NULL);
99 /* utility function to clear rx hold_q for a tid */
/*
 * aggr_delete_tid_state - flush and reset one TID's reorder state.
 *
 * Delivers everything still held (ALL_SEQNO flush), clears the bookkeeping
 * flags, and frees the hold queue. Safe to call when hold_q is already NULL
 * (kfree(NULL) is a no-op). Stats for the TID are zeroed last.
 */
101 aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
104 struct rxtid_stats *stats;
106 A_ASSERT(tid < NUM_OF_TIDS && p_aggr);
108 rxtid = AGGR_GET_RXTID(p_aggr, tid);
109 stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
/* Push any held frames up before dismantling the window. */
112 aggr_deque_frms(p_aggr, tid, 0, ALL_SEQNO);
116 rxtid->progress = false;
117 rxtid->timerMon = false;
120 rxtid->hold_q_sz = 0;
123 kfree(rxtid->hold_q);
/* NULL the pointer so later code can test "aggregation enabled" safely. */
124 rxtid->hold_q = NULL;
127 A_MEMZERO(stats, sizeof(struct rxtid_stats));
/*
 * aggr_module_destroy - tear down the whole aggregation context.
 *
 * Cancels any pending reorder timer, frees every buffer held in each TID's
 * hold queue and dispatch queue, deletes the per-TID locks, drains the free
 * netbuf pool, and (in elided lines) frees p_aggr itself — TODO confirm the
 * final kfree against the full source.
 */
131 aggr_module_destroy(void *cntxt)
133 struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
136 A_PRINTF("%s(): aggr = %p\n",_A_FUNCNAME_, p_aggr);
/* Stop the timer first so aggr_timeout() cannot race the teardown below. */
140 if(p_aggr->timerScheduled) {
141 A_UNTIMEOUT(&p_aggr->timer);
142 p_aggr->timerScheduled = false;
145 for(i = 0; i < NUM_OF_TIDS; i++) {
146 rxtid = AGGR_GET_RXTID(p_aggr, i);
147 /* Free the hold q contents and hold_q*/
149 for(k = 0; k< rxtid->hold_q_sz; k++) {
150 if(rxtid->hold_q[k].osbuf) {
151 A_NETBUF_FREE(rxtid->hold_q[k].osbuf);
154 kfree(rxtid->hold_q);
156 /* Free the dispatch q contents*/
157 while(A_NETBUF_QUEUE_SIZE(&rxtid->q)) {
158 A_NETBUF_FREE(A_NETBUF_DEQUEUE(&rxtid->q));
160 if (A_IS_MUTEX_VALID(&rxtid->lock)) {
161 A_MUTEX_DELETE(&rxtid->lock);
164 /* free the freeQ and its contents*/
165 while(A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ)) {
166 A_NETBUF_FREE(A_NETBUF_DEQUEUE(&p_aggr->freeQ));
170 A_PRINTF("out aggr_module_destroy\n");
/*
 * aggr_register_rx_dispatcher - record the callback used to hand reordered
 * frames up the stack. The elided lines presumably store `dev` and `fn`
 * into p_aggr (used later by aggr_dispatch_frames) — TODO confirm.
 */
175 aggr_register_rx_dispatcher(void *cntxt, void * dev, RX_CALLBACK fn)
177 struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
179 A_ASSERT(p_aggr && fn && dev);
/*
 * aggr_process_bar - handle a Block Ack Request (BAR) for a TID.
 *
 * A BAR tells us the transmitter has moved its window; release everything
 * held up to `seq_no` regardless of holes (ALL_SEQNO).
 */
187 aggr_process_bar(void *cntxt, u8 tid, u16 seq_no)
189 struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
190 struct rxtid_stats *stats;
193 stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
196 aggr_deque_frms(p_aggr, tid, seq_no, ALL_SEQNO);
/*
 * aggr_recv_addba_req_evt - enable rx aggregation on a TID (ADDBA event).
 *
 * Validates win_sz, flushes any previous state for the TID, then allocates
 * and zeroes the hold queue sized from the negotiated window. Failure paths
 * (bad win_sz, allocation failure) are partially elided in this chunk.
 */
201 aggr_recv_addba_req_evt(void *cntxt, u8 tid, u16 seq_no, u8 win_sz)
203 struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
205 struct rxtid_stats *stats;
208 rxtid = AGGR_GET_RXTID(p_aggr, tid);
209 stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
211 A_PRINTF("%s(): win_sz = %d aggr %d\n", _A_FUNCNAME_, win_sz, rxtid->aggr);
/* Reject windows outside the supported [AGGR_WIN_SZ_MIN, AGGR_WIN_SZ_MAX]. */
212 if(win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX) {
213 A_PRINTF("win_sz %d, tid %d\n", win_sz, tid);
217 /* Just go and deliver all the frames up from this
218 * queue, as if we got DELBA and re-initialize the queue
220 aggr_delete_tid_state(p_aggr, tid);
/* Window start is the sequence number carried in the ADDBA request. */
223 rxtid->seq_next = seq_no;
224 /* create these queues, only upon receiving of ADDBA for a
225 * tid, reducing memory requirement
227 rxtid->hold_q = A_MALLOC(HOLD_Q_SZ(win_sz));
228 if((rxtid->hold_q == NULL)) {
229 A_PRINTF("Failed to allocate memory, tid = %d\n", tid);
232 A_MEMZERO(rxtid->hold_q, HOLD_Q_SZ(win_sz));
234 /* Update rxtid for the window sz */
235 rxtid->win_sz = win_sz;
236 /* hold_q_sz inicates the depth of holding q - which is
237 * a factor of win_sz. Compute once, as it will be used often
239 rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
240 /* There should be no frames on q - even when second ADDBA comes in.
241 * If aggr was previously ON on this tid, we would have cleaned up
244 if(A_NETBUF_QUEUE_SIZE(&rxtid->q) != 0) {
245 A_PRINTF("ERROR: Frames still on queue ?\n");
/*
 * aggr_recv_delba_req_evt - disable rx aggregation on a TID (DELBA event).
 * Flushes and frees the TID's reorder state via aggr_delete_tid_state().
 */
253 aggr_recv_delba_req_evt(void *cntxt, u8 tid)
255 struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
259 A_PRINTF("%s(): tid %d\n", _A_FUNCNAME_, tid);
261 rxtid = AGGR_GET_RXTID(p_aggr, tid);
264 aggr_delete_tid_state(p_aggr, tid);
/*
 * aggr_deque_frms - walk the hold queue from seq_next and deliver frames.
 *
 * `order` selects the stop condition: CONTIGUOUS_SEQNO stops at the first
 * hole; ALL_SEQNO drains irrespective of holes. A non-zero `seq_no` (BAR
 * case) bounds the walk at that sequence number instead of the window end.
 * The hold-queue walk runs under rxtid->lock; dispatch happens after unlock.
 *
 * NOTE(review): loop entry, A-MSDU branch guard, and node-clearing lines are
 * elided in this chunk.
 */
269 aggr_deque_frms(struct aggr_info *p_aggr, u8 tid, u16 seq_no, u8 order)
272 struct osbuf_hold_q *node;
273 u16 idx, idx_end, seq_end;
274 struct rxtid_stats *stats;
277 rxtid = AGGR_GET_RXTID(p_aggr, tid);
278 stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
280 /* idx is absolute location for first frame */
281 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
283 /* idx_end is typically the last possible frame in the window,
284 * but changes to 'the' seq_no, when BAR comes. If seq_no
285 * is non-zero, we will go up to that and stop.
286 * Note: last seq no in current window will occupy the same
287 * index position as index that is just previous to start.
288 * An imp point : if win_sz is 7, for seq_no space of 4095,
289 * then, there would be holes when sequence wrap around occurs.
290 * Target should judiciously choose the win_sz, based on
291 * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz
292 * 2, 4, 8, 16 win_sz works fine).
293 * We must deque from "idx" to "idx_end", including both.
295 seq_end = (seq_no) ? seq_no : rxtid->seq_next;
296 idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
298 /* Critical section begins */
299 A_MUTEX_LOCK(&rxtid->lock);
302 node = &rxtid->hold_q[idx];
/* Contiguous mode: stop at the first hole in the window. */
304 if((order == CONTIGUOUS_SEQNO) && (!node->osbuf))
307 /* chain frames and deliver frames bcos:
308 * 1. either the frames are in order and window is contiguous, OR
309 * 2. we need to deque frames, irrespective of holes
/* A-MSDU buffers are sliced into subframes directly onto rxtid->q. */
313 aggr_slice_amsdu(p_aggr, rxtid, &node->osbuf);
315 A_NETBUF_ENQUEUE(&rxtid->q, node->osbuf);
322 /* window is moving */
323 rxtid->seq_next = IEEE80211_NEXT_SEQ_NO(rxtid->seq_next);
324 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
325 } while(idx != idx_end);
326 /* Critical section ends */
327 A_MUTEX_UNLOCK(&rxtid->lock);
329 stats->num_delivered += A_NETBUF_QUEUE_SIZE(&rxtid->q);
/* Deliver outside the lock to avoid holding it through the rx callback. */
330 aggr_dispatch_frames(p_aggr, &rxtid->q);
/*
 * aggr_get_osbuf - take one netbuf from the free pool, topping the pool up
 * from the OS (via the registered allocator) when it runs below a quarter of
 * AGGR_NUM_OF_FREE_NETBUFS. Returns NULL-equivalent when the pool is empty
 * even after refill (return statement elided in this chunk).
 */
334 aggr_get_osbuf(struct aggr_info *p_aggr)
338 /* Starving for buffers? get more from OS
339 * check for low netbuffers( < 1/4 AGGR_NUM_OF_FREE_NETBUFS) :
340 * re-allocate bufs if so
341 * allocate a free buf from freeQ
343 if (A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ) < (AGGR_NUM_OF_FREE_NETBUFS >> 2)) {
344 p_aggr->netbuf_allocator(&p_aggr->freeQ, AGGR_NUM_OF_FREE_NETBUFS);
347 if (A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ)) {
348 buf = A_NETBUF_DEQUEUE(&p_aggr->freeQ);
/*
 * aggr_slice_amsdu - split an A-MSDU into individual 802.3 subframes.
 *
 * Each subframe is bounds-checked, copied into a fresh netbuf from the free
 * pool, converted from 802.3 to DIX framing, and queued on rxtid->q. The
 * original aggregate buffer is freed at the end; *osbuf is consumed.
 * Subframes are padded to a 4-byte boundary except the last one.
 */
356 aggr_slice_amsdu(struct aggr_info *p_aggr, struct rxtid *rxtid, void **osbuf)
359 u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
362 /* Frame format at this point:
363 * [DIX hdr | 802.3 | 802.3 | ... | 802.3]
365 * Strip the DIX header.
366 * Iterate through the osbuf and do:
367 * grab a free netbuf from freeQ
368 * find the start and end of a frame
369 * copy it to netbuf(Vista can do better here)
370 * convert all msdu's(802.3) frames to upper layer format - os routine
371 * -for now lets convert from 802.3 to dix
372 * enque this to dispatch q of tid
374 * free the osbuf - to OS. It's been sliced.
377 mac_hdr_len = sizeof(ATH_MAC_HDR);
/* Skip the leading DIX/MAC header; what remains is the A-MSDU payload. */
378 framep = A_NETBUF_DATA(*osbuf) + mac_hdr_len;
379 amsdu_len = A_NETBUF_LEN(*osbuf) - mac_hdr_len;
381 while(amsdu_len > mac_hdr_len) {
382 /* Begin of a 802.3 frame */
/* 802.3 typeOrLen is big-endian on the wire; it is the payload length here. */
383 payload_8023_len = A_BE2CPU16(((ATH_MAC_HDR *)framep)->typeOrLen);
384 #define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508
385 #define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46
/* Drop malformed aggregates: subframe payload must be a legal 802.3 size. */
386 if(payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN || payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
387 A_PRINTF("802.3 AMSDU frame bound check failed. len %d\n", payload_8023_len);
390 frame_8023_len = payload_8023_len + mac_hdr_len;
391 new_buf = aggr_get_osbuf(p_aggr);
392 if(new_buf == NULL) {
393 A_PRINTF("No buffer available \n");
397 memcpy(A_NETBUF_DATA(new_buf), framep, frame_8023_len);
398 A_NETBUF_PUT(new_buf, frame_8023_len);
/* Convert to DIX before handing up; on failure the copy is discarded. */
399 if (wmi_dot3_2_dix(new_buf) != 0) {
400 A_PRINTF("dot3_2_dix err..\n");
401 A_NETBUF_FREE(new_buf);
405 A_NETBUF_ENQUEUE(&rxtid->q, new_buf);
407 /* Is this the last subframe within this aggregate ? */
408 if ((amsdu_len - frame_8023_len) == 0) {
412 /* Add the length of A-MSDU subframe padding bytes -
413 * Round to nearest word.
415 frame_8023_len = ((frame_8023_len + 3) & ~3);
417 framep += frame_8023_len;
418 amsdu_len -= frame_8023_len;
/* The aggregate has been fully sliced; release the original buffer. */
421 A_NETBUF_FREE(*osbuf);
/*
 * aggr_process_recv_frm - main rx entry: reorder one frame for a TID.
 *
 * If aggregation is off for the TID (guard elided), the frame bypasses the
 * hold queue. Otherwise the sequence number is checked against the sliding
 * window [st, end]; out-of-window frames either shift the window or trigger
 * a full flush, then the frame is stored in hold_q under rxtid->lock,
 * contiguous frames are delivered, and the stall timer is managed.
 *
 * NOTE(review): numerous guard/brace lines are elided in this chunk; the
 * comments below annotate only the visible statements.
 */
426 aggr_process_recv_frm(void *cntxt, u8 tid, u16 seq_no, bool is_amsdu, void **osbuf)
428 struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
430 struct rxtid_stats *stats;
431 u16 idx, st, cur, end;
433 struct osbuf_hold_q *node;
437 A_ASSERT(tid < NUM_OF_TIDS);
439 rxtid = AGGR_GET_RXTID(p_aggr, tid);
440 stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
442 stats->num_into_aggr++;
/* Non-aggregated path (guard elided): A-MSDUs are still sliced + delivered. */
446 aggr_slice_amsdu(p_aggr, rxtid, osbuf);
448 aggr_dispatch_frames(p_aggr, &rxtid->q);
453 /* Check the incoming sequence no, if it's in the window */
454 st = rxtid->seq_next;
/* Window end wraps modulo the 12-bit 802.11 sequence space. */
456 end = (st + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO;
457 /* Log the pkt info for future analysis */
458 log = &p_aggr->pkt_log;
459 log_idx = &log->last_idx;
460 log->info[*log_idx].cur = cur;
461 log->info[*log_idx].st = st;
462 log->info[*log_idx].end = end;
463 *log_idx = IEEE80211_NEXT_SEQ_NO(*log_idx);
/* Out-of-window test, handling the wrap-around (st > end) case. */
465 if(((st < end) && (cur < st || cur > end)) ||
466 ((st > end) && (cur > end) && (cur < st))) {
467 /* the cur frame is outside the window. Since we know
468 * our target would not do this without reason it must
469 * be assumed that the window has moved for some valid reason.
470 * Therefore, we dequeue all frames and start fresh.
/* extended_end marks how far past the window a frame may land before we
 * must flush everything rather than just shift. */
474 extended_end = (end + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO;
476 if(((end < extended_end) && (cur < end || cur > extended_end)) ||
477 ((end > extended_end) && (cur > extended_end) && (cur < end))) {
478 // dequeue all frames in queue and shift window to new frame
479 aggr_deque_frms(p_aggr, tid, 0, ALL_SEQNO);
480 //set window start so that new frame is last frame in window
481 if(cur >= rxtid->hold_q_sz-1) {
482 rxtid->seq_next = cur - (rxtid->hold_q_sz-1);
/* else branch: window start wraps below zero in sequence space. */
484 rxtid->seq_next = IEEE80211_MAX_SEQ_NO - (rxtid->hold_q_sz-2 - cur);
487 // dequeue only those frames that are outside the new shifted window
488 if(cur >= rxtid->hold_q_sz-1) {
489 st = cur - (rxtid->hold_q_sz-1);
491 st = IEEE80211_MAX_SEQ_NO - (rxtid->hold_q_sz-2 - cur);
494 aggr_deque_frms(p_aggr, tid, st, ALL_SEQNO);
/* Map the (possibly adjusted) sequence number to a hold-queue slot. */
500 idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
502 /*enque the frame, in hold_q */
503 node = &rxtid->hold_q[idx];
505 A_MUTEX_LOCK(&rxtid->lock);
507 /* Is the cur frame duplicate or something beyond our
508 * window(hold_q -> which is 2x, already)?
509 * 1. Duplicate is easy - drop incoming frame.
510 * 2. Not falling in current sliding window.
511 * 2a. is the frame_seq_no preceding current tid_seq_no?
512 * -> drop the frame. perhaps sender did not get our ACK.
513 * this is taken care of above.
514 * 2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
515 * -> Taken care of it above, by moving window forward.
/* Slot already occupied (guard elided): free the stale buffer first. */
518 A_NETBUF_FREE(node->osbuf);
522 node->osbuf = *osbuf;
523 node->is_amsdu = is_amsdu;
524 node->seq_no = seq_no;
530 A_MUTEX_UNLOCK(&rxtid->lock);
/* Deliver whatever is now contiguous from the window start. */
533 aggr_deque_frms(p_aggr, tid, 0, CONTIGUOUS_SEQNO);
535 if(p_aggr->timerScheduled) {
536 rxtid->progress = true;
/* No timer running: start one if any frame is still held, so stalled
 * frames are eventually flushed by aggr_timeout(). */
538 for(idx=0 ; idx<rxtid->hold_q_sz ; idx++) {
539 if(rxtid->hold_q[idx].osbuf) {
540 /* there is a frame in the queue and no timer so
541 * start a timer to ensure that the frame doesn't remain
543 p_aggr->timerScheduled = true;
544 A_TIMEOUT_MS(&p_aggr->timer, AGGR_RX_TIMEOUT, 0);
545 rxtid->progress = false;
546 rxtid->timerMon = true;
554 * aggr_reset_state -- Called when it is deemed necessary to clear the aggregate
555 * hold Q state. Examples include when a Connect event or disconnect event is
/*
 * aggr_reset_state - flush and reset reorder state for every TID
 * (see the comment above: used on connect/disconnect transitions).
 */
559 aggr_reset_state(void *cntxt)
562 struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
566 for(tid=0 ; tid<NUM_OF_TIDS ; tid++) {
567 aggr_delete_tid_state(p_aggr, tid);
/*
 * aggr_timeout - reorder-stall timer callback.
 *
 * Pass 1: for every aggregating TID that was being monitored and made no
 * progress since the timer was armed, flush all held frames (ALL_SEQNO).
 * Pass 2: re-arm the timer if any TID still holds a frame in its hold queue.
 */
573 aggr_timeout(unsigned long arg)
576 struct aggr_info *p_aggr = (struct aggr_info *)arg;
578 struct rxtid_stats *stats;
580 * If the q for which the timer was originally started has
581 * not progressed then it is necessary to dequeue all the
582 * contained frames so that they are not held forever.
584 for(i = 0; i < NUM_OF_TIDS; i++) {
585 rxtid = AGGR_GET_RXTID(p_aggr, i);
586 stats = AGGR_GET_RXTID_STATS(p_aggr, i);
/* Skip TIDs that aren't aggregating, aren't monitored, or made progress. */
588 if(rxtid->aggr == false ||
589 rxtid->timerMon == false ||
590 rxtid->progress == true) {
593 // dequeue all frames in for this tid
594 stats->num_timeouts++;
595 A_PRINTF("TO: st %d end %d\n", rxtid->seq_next, ((rxtid->seq_next + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO));
596 aggr_deque_frms(p_aggr, i, 0, ALL_SEQNO);
599 p_aggr->timerScheduled = false;
600 // determine whether a new timer should be started.
601 for(i = 0; i < NUM_OF_TIDS; i++) {
602 rxtid = AGGR_GET_RXTID(p_aggr, i);
604 if(rxtid->aggr == true && rxtid->hold_q) {
605 for(j = 0 ; j < rxtid->hold_q_sz ; j++)
607 if(rxtid->hold_q[j].osbuf)
/* At least one frame still held: monitor this TID on the next tick. */
609 p_aggr->timerScheduled = true;
610 rxtid->timerMon = true;
611 rxtid->progress = false;
/* Loop ran to completion without finding a held frame: stop monitoring. */
616 if(j >= rxtid->hold_q_sz) {
617 rxtid->timerMon = false;
622 if(p_aggr->timerScheduled) {
624 A_TIMEOUT_MS(&p_aggr->timer, AGGR_RX_TIMEOUT, 0);
/*
 * aggr_dispatch_frames - drain `q`, handing each buffer to the rx callback
 * registered via aggr_register_rx_dispatcher (p_aggr->rx_fn / p_aggr->dev).
 */
630 aggr_dispatch_frames(struct aggr_info *p_aggr, A_NETBUF_QUEUE_T *q)
634 while((osbuf = A_NETBUF_DEQUEUE(q))) {
635 p_aggr->rx_fn(p_aggr->dev, osbuf);
/*
 * aggr_dump_stats - print per-TID counters and hand back the packet log.
 * *log_buf is set to point at the context's internal pkt_log (not a copy;
 * caller must not free it).
 */
640 aggr_dump_stats(void *cntxt, PACKET_LOG **log_buf)
642 struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
644 struct rxtid_stats *stats;
647 *log_buf = &p_aggr->pkt_log;
648 A_PRINTF("\n\n================================================\n");
649 A_PRINTF("tid: num_into_aggr, dups, oow, mpdu, amsdu, delivered, timeouts, holes, bar, seq_next\n");
650 for(i = 0; i < NUM_OF_TIDS; i++) {
651 stats = AGGR_GET_RXTID_STATS(p_aggr, i);
652 rxtid = AGGR_GET_RXTID(p_aggr, i);
653 A_PRINTF("%d: %d %d %d %d %d %d %d %d %d : %d\n", i, stats->num_into_aggr, stats->num_dups,
654 stats->num_oow, stats->num_mpdu,
655 stats->num_amsdu, stats->num_delivered, stats->num_timeouts,
656 stats->num_hole, stats->num_bar,
659 A_PRINTF("================================================\n\n");