staging: ozwpan: Return correct actual_length to userland
pandora-kernel.git: drivers/staging/ozwpan/ozpd.c
/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "oztrace.h"
#include "ozevent.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
/*------------------------------------------------------------------------------
 */
#define OZ_MAX_TX_POOL_SIZE     6
/* Maximum number of uncompleted isoc frames that can be pending in network.
 */
#define OZ_MAX_SUBMITTED_ISOC   16
/* Maximum number of uncompleted isoc frames that can be pending in Tx Queue.
 */
#define OZ_MAX_TX_QUEUE_ISOC    32
/*------------------------------------------------------------------------------
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
/*------------------------------------------------------------------------------
 * Counts the uncompleted isoc frames submitted to netcard.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
/* Application handler functions.
 */
static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
        {oz_usb_init,
        oz_usb_term,
        oz_usb_start,
        oz_usb_stop,
        oz_usb_rx,
        oz_usb_heartbeat,
        oz_usb_farewell,
        OZ_APPID_USB},

        {oz_def_app_init,
        oz_def_app_term,
        oz_def_app_start,
        oz_def_app_stop,
        oz_def_app_rx,
        0,
        0,
        OZ_APPID_UNUSED1},

        {oz_def_app_init,
        oz_def_app_term,
        oz_def_app_start,
        oz_def_app_stop,
        oz_def_app_rx,
        0,
        0,
        OZ_APPID_UNUSED2},

        {oz_cdev_init,
        oz_cdev_term,
        oz_cdev_start,
        oz_cdev_stop,
        oz_cdev_rx,
        0,
        0,
        OZ_APPID_SERIAL},
};
/*------------------------------------------------------------------------------
 * Context: process
 */
static int oz_def_app_init(void)
{
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: process
 */
static void oz_def_app_term(void)
{
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
        pd->state = state;
        oz_event_log(OZ_EVT_PD_STATE, 0, 0, 0, state);
#ifdef WANT_TRACE
        switch (state) {
        case OZ_PD_S_IDLE:
                oz_trace("PD State: OZ_PD_S_IDLE\n");
                break;
        case OZ_PD_S_CONNECTED:
                oz_trace("PD State: OZ_PD_S_CONNECTED\n");
                break;
        case OZ_PD_S_STOPPED:
                oz_trace("PD State: OZ_PD_S_STOPPED\n");
                break;
        case OZ_PD_S_SLEEP:
                oz_trace("PD State: OZ_PD_S_SLEEP\n");
                break;
        }
#endif /* WANT_TRACE */
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
        atomic_inc(&pd->ref_count);
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
        if (atomic_dec_and_test(&pd->ref_count))
                oz_pd_destroy(pd);
}
/*------------------------------------------------------------------------------
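 * Allocates and initialises a new port descriptor (PD) for the device with
 * the given MAC address.  The reference count starts at two: one reference
 * for the caller, one that is dropped later by oz_pd_stop().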
 * Context: softirq-serialized
 */
struct oz_pd *oz_pd_alloc(u8 *mac_addr)
{
        struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
        if (pd) {
                int i;
                atomic_set(&pd->ref_count, 2);
                for (i = 0; i < OZ_APPID_MAX; i++)
                        spin_lock_init(&pd->app_lock[i]);
                pd->last_rx_pkt_num = 0xffffffff;
                oz_pd_set_state(pd, OZ_PD_S_IDLE);
                pd->max_tx_size = OZ_MAX_TX_SIZE;
                memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
                if (oz_elt_buf_init(&pd->elt_buff)) {
                        kfree(pd);
                        return NULL;
                }
                spin_lock_init(&pd->tx_frame_lock);
                INIT_LIST_HEAD(&pd->tx_queue);
                INIT_LIST_HEAD(&pd->farewell_list);
                pd->last_sent_frame = &pd->tx_queue;
                spin_lock_init(&pd->stream_lock);
                INIT_LIST_HEAD(&pd->stream_list);
        }
        return pd;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
        struct list_head *e;
        struct oz_tx_frame *f;
        struct oz_isoc_stream *st;
        struct oz_farewell *fwell;
        oz_trace("Destroying PD\n");
        /* Delete any streams.
         */
        e = pd->stream_list.next;
        while (e != &pd->stream_list) {
                st = container_of(e, struct oz_isoc_stream, link);
                e = e->next;
                oz_isoc_stream_free(st);
        }
        /* Free any queued tx frames.
         */
        e = pd->tx_queue.next;
        while (e != &pd->tx_queue) {
                f = container_of(e, struct oz_tx_frame, link);
                e = e->next;
                if (f->skb != NULL)
                        kfree_skb(f->skb);
                oz_retire_frame(pd, f);
        }
        oz_elt_buf_term(&pd->elt_buff);
        /* Free any farewells.
         */
        e = pd->farewell_list.next;
        while (e != &pd->farewell_list) {
                fwell = container_of(e, struct oz_farewell, link);
                e = e->next;
                kfree(fwell);
        }
        /* Deallocate all frames in tx pool.
         */
        while (pd->tx_pool) {
                e = pd->tx_pool;
                pd->tx_pool = e->next;
                kfree(container_of(e, struct oz_tx_frame, link));
        }
        if (pd->net_dev)
                dev_put(pd->net_dev);
        kfree(pd);
}
/*------------------------------------------------------------------------------
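 * Starts (or resumes) every application service selected in the apps bit
 * mask and records it in pd->total_apps.  Returns -1 if any service fails
 * to start.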
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
        struct oz_app_if *ai;
        int rc = 0;
        oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (apps & (1<<ai->app_id)) {
                        if (ai->start(pd, resume)) {
                                rc = -1;
                                oz_trace("Unable to start service %d\n",
                                        ai->app_id);
                                break;
                        }
                        oz_polling_lock_bh();
                        pd->total_apps |= (1<<ai->app_id);
                        if (resume)
                                pd->paused_apps &= ~(1<<ai->app_id);
                        oz_polling_unlock_bh();
                }
        }
        return rc;
}
/*------------------------------------------------------------------------------
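 * Stops or pauses the application services selected in the apps bit mask,
 * updating pd->total_apps and pd->paused_apps before calling each stop
 * handler.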
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
        struct oz_app_if *ai;
        oz_trace("oz_services_stop(0x%x) pause(%d)\n", apps, pause);
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (apps & (1<<ai->app_id)) {
                        oz_polling_lock_bh();
                        if (pause) {
                                pd->paused_apps |= (1<<ai->app_id);
                        } else {
                                pd->total_apps &= ~(1<<ai->app_id);
                                pd->paused_apps &= ~(1<<ai->app_id);
                        }
                        oz_polling_unlock_bh();
                        ai->stop(pd, pause);
                }
        }
}
/*------------------------------------------------------------------------------
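 * Calls the heartbeat handler of each selected application and requests
 * another heartbeat if any handler still has work pending.  In ISOC
 * "anytime" mode it also pushes out up to eight isoc frames.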
 * Context: softirq
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
        struct oz_app_if *ai;
        int more = 0;
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (ai->heartbeat && (apps & (1<<ai->app_id))) {
                        if (ai->heartbeat(pd))
                                more = 1;
                }
        }
        if (more)
                oz_pd_request_heartbeat(pd);
        if (pd->mode & OZ_F_ISOC_ANYTIME) {
                int count = 8;
                while (count-- && (oz_send_isoc_frame(pd) >= 0))
                        ;
        }
}
/*------------------------------------------------------------------------------
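 * Stops all running services, marks the PD as stopped, removes it from the
 * PD list and drops the reference taken when the PD was created.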
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
        u16 stop_apps = 0;
        oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
        oz_pd_indicate_farewells(pd);
        oz_polling_lock_bh();
        stop_apps = pd->total_apps;
        pd->total_apps = 0;
        pd->paused_apps = 0;
        oz_polling_unlock_bh();
        oz_services_stop(pd, stop_apps, 0);
        oz_polling_lock_bh();
        oz_pd_set_state(pd, OZ_PD_S_STOPPED);
        /* Remove from PD list.*/
        list_del(&pd->link);
        oz_polling_unlock_bh();
        oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
        oz_timer_delete(pd, 0);
        oz_pd_put(pd);
}
/*------------------------------------------------------------------------------
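 * Puts the PD to sleep if a keep-alive interval is set, pausing its services
 * and arming the stop timer; otherwise stops the PD completely.  Returns
 * non-zero if the PD was stopped.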
 * Context: softirq
 */
int oz_pd_sleep(struct oz_pd *pd)
{
        int do_stop = 0;
        u16 stop_apps = 0;
        oz_polling_lock_bh();
        if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
                oz_polling_unlock_bh();
                return 0;
        }
        if (pd->keep_alive_j && pd->session_id) {
                oz_pd_set_state(pd, OZ_PD_S_SLEEP);
                pd->pulse_time_j = jiffies + pd->keep_alive_j;
                oz_trace("Sleep Now %lu until %lu\n",
                        jiffies, pd->pulse_time_j);
        } else {
                do_stop = 1;
        }
        stop_apps = pd->total_apps;
        oz_polling_unlock_bh();
        if (do_stop) {
                oz_pd_stop(pd);
        } else {
                oz_services_stop(pd, stop_apps, 1);
                oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
        }
        return do_stop;
}
/*------------------------------------------------------------------------------
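 * Gets a TX frame descriptor, reusing one from the per-PD pool when possible
 * and falling back to kmalloc otherwise.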
 * Context: softirq
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
        struct oz_tx_frame *f = 0;
        spin_lock_bh(&pd->tx_frame_lock);
        if (pd->tx_pool) {
                f = container_of(pd->tx_pool, struct oz_tx_frame, link);
                pd->tx_pool = pd->tx_pool->next;
                pd->tx_pool_count--;
        }
        spin_unlock_bh(&pd->tx_frame_lock);
        if (f == 0)
                f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
        if (f) {
                f->total_size = sizeof(struct oz_hdr);
                INIT_LIST_HEAD(&f->link);
                INIT_LIST_HEAD(&f->elt_list);
        }
        return f;
}
/*------------------------------------------------------------------------------
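 * Removes a queued isoc frame from the TX queue and returns its descriptor
 * to the pool (or frees it when the pool is full).  Called with
 * tx_frame_lock held.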
 * Context: softirq or process
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
        pd->nb_queued_isoc_frames--;
        list_del_init(&f->link);
        if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
                f->link.next = pd->tx_pool;
                pd->tx_pool = &f->link;
                pd->tx_pool_count++;
        } else {
                kfree(f);
        }
        oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
                                                pd->nb_queued_isoc_frames);
}
/*------------------------------------------------------------------------------
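 * Returns a TX frame descriptor to the per-PD pool, or frees it if the pool
 * already holds OZ_MAX_TX_POOL_SIZE entries.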
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
        spin_lock_bh(&pd->tx_frame_lock);
        if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
                f->link.next = pd->tx_pool;
                pd->tx_pool = &f->link;
                pd->tx_pool_count++;
                f = 0;
        }
        spin_unlock_bh(&pd->tx_frame_lock);
        if (f)
                kfree(f);
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
void oz_set_more_bit(struct sk_buff *skb)
{
        struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
        oz_hdr->control |= OZ_F_MORE_DATA;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
        struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
        oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}
/*------------------------------------------------------------------------------
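 * Builds a new data frame from buffered elements (or an empty frame when
 * requested) and appends it to the PD's TX queue.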
 * Context: softirq
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
        struct oz_tx_frame *f;
        if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
                return -1;
        if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
                return -1;
        if (!empty && !oz_are_elts_available(&pd->elt_buff))
                return -1;
        f = oz_tx_frame_alloc(pd);
        if (f == 0)
                return -1;
        f->skb = NULL;
        f->hdr.control =
                (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
        ++pd->last_tx_pkt_num;
        put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
        if (empty == 0) {
                oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
                        pd->max_tx_size, &f->elt_list);
        }
        spin_lock(&pd->tx_frame_lock);
        list_add_tail(&f->link, &pd->tx_queue);
        pd->nb_queued_frames++;
        spin_unlock(&pd->tx_frame_lock);
        return 0;
}
/*------------------------------------------------------------------------------
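 * Allocates an skb for the frame, fills in the link-layer and oz headers and
 * copies in the frame's elements, ready for transmission.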
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
        struct sk_buff *skb = 0;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct list_head *e;
        /* Allocate skb with enough space for the lower layers as well
         * as the space we need.
         */
        skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
        if (skb == 0)
                return 0;
        /* Reserve the head room for lower layers.
         */
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->dev = dev;
        skb->protocol = htons(OZ_ETHERTYPE);
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                dev->dev_addr, skb->len) < 0)
                goto fail;
        /* Push the tail to the end of the area we are going to copy to.
         */
        oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
        f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
        memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
        /* Copy the elements into the frame body.
         */
        elt = (struct oz_elt *)(oz_hdr+1);
        for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
                struct oz_elt_info *ei;
                ei = container_of(e, struct oz_elt_info, link);
                memcpy(elt, ei->data, ei->length);
                elt = oz_next_elt(elt);
        }
        return skb;
fail:
        kfree_skb(skb);
        return 0;
}
/*------------------------------------------------------------------------------
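 * Runs the completion callback of every element carried by the frame, frees
 * the element buffers and returns the frame descriptor to the pool.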
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
        struct list_head *e;
        struct oz_elt_info *ei;
        e = f->elt_list.next;
        while (e != &f->elt_list) {
                ei = container_of(e, struct oz_elt_info, link);
                e = e->next;
                list_del_init(&ei->link);
                if (ei->callback)
                        ei->callback(pd, ei->context);
                spin_lock_bh(&pd->elt_buff.lock);
                oz_elt_info_free(&pd->elt_buff, ei);
                spin_unlock_bh(&pd->elt_buff.lock);
        }
        oz_tx_frame_free(pd, f);
        if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
                oz_trim_elt_pool(&pd->elt_buff);
}
/*------------------------------------------------------------------------------
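 * Transmits the next frame in the TX queue that has not yet been sent.
 * Queued isoc frames are submitted directly (subject to the submitted-isoc
 * limit); ordinary frames are built into an skb first.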
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
        struct sk_buff *skb;
        struct oz_tx_frame *f;
        struct list_head *e;
        spin_lock(&pd->tx_frame_lock);
        e = pd->last_sent_frame->next;
        if (e == &pd->tx_queue) {
                spin_unlock(&pd->tx_frame_lock);
                return -1;
        }
        f = container_of(e, struct oz_tx_frame, link);

        if (f->skb != NULL) {
                skb = f->skb;
                oz_tx_isoc_free(pd, f);
                spin_unlock(&pd->tx_frame_lock);
                if (more_data)
                        oz_set_more_bit(skb);
                oz_set_last_pkt_nb(pd, skb);
                if ((int)atomic_read(&g_submitted_isoc) <
                                                        OZ_MAX_SUBMITTED_ISOC) {
                        if (dev_queue_xmit(skb) < 0) {
                                oz_trace2(OZ_TRACE_TX_FRAMES,
                                                "Dropping ISOC Frame\n");
                                oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
                                return -1;
                        }
                        atomic_inc(&g_submitted_isoc);
                        oz_trace2(OZ_TRACE_TX_FRAMES,
                                        "Sending ISOC Frame, nb_isoc= %d\n",
                                                pd->nb_queued_isoc_frames);
                        return 0;
                } else {
                        kfree_skb(skb);
                        oz_trace2(OZ_TRACE_TX_FRAMES, "Dropping ISOC Frame>\n");
                        oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
                        return -1;
                }
        }

        pd->last_sent_frame = e;
        skb = oz_build_frame(pd, f);
        spin_unlock(&pd->tx_frame_lock);
        oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
        if (skb) {
                if (more_data)
                        oz_set_more_bit(skb);
                oz_event_log(OZ_EVT_TX_FRAME,
                        0,
                        (((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
                        0, f->hdr.pkt_num);
                if (dev_queue_xmit(skb) < 0)
                        return -1;
        }
        return 0;
}
/*------------------------------------------------------------------------------
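 * Converts any buffered elements into frames and transmits as much of the
 * backlog as the current isoc mode allows; if there is nothing to send, an
 * empty frame is generated so the device still gets a response.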
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
        while (oz_prepare_frame(pd, 0) >= 0)
                backlog++;

        switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {

                case OZ_F_ISOC_NO_ELTS: {
                        backlog += pd->nb_queued_isoc_frames;
                        if (backlog <= 0)
                                goto out;
                        if (backlog > OZ_MAX_SUBMITTED_ISOC)
                                backlog = OZ_MAX_SUBMITTED_ISOC;
                        break;
                }
                case OZ_NO_ELTS_ANYTIME: {
                        if ((backlog <= 0) && (pd->isoc_sent == 0))
                                goto out;
                        break;
                }
                default: {
                        if (backlog <= 0)
                                goto out;
                        break;
                }
        }
        while (backlog--) {
                if (oz_send_next_queued_frame(pd, backlog) < 0)
                        break;
        }
        return;

out:    oz_prepare_frame(pd, 1);
        oz_send_next_queued_frame(pd, 0);
}
/*------------------------------------------------------------------------------
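 * Builds and transmits a standalone isoc frame from any buffered isoc
 * elements; used when the device supports ISOC "anytime" transmission.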
 * Context: softirq
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
        struct sk_buff *skb = 0;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct list_head *e;
        struct list_head list;
        int total_size = sizeof(struct oz_hdr);
        INIT_LIST_HEAD(&list);

        oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
                pd->max_tx_size, &list);
        if (list.next == &list)
                return 0;
        skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
        if (skb == 0) {
                oz_trace("Cannot alloc skb\n");
                oz_elt_info_free_chain(&pd->elt_buff, &list);
                return -1;
        }
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->dev = dev;
        skb->protocol = htons(OZ_ETHERTYPE);
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                dev->dev_addr, skb->len) < 0) {
                kfree_skb(skb);
                return -1;
        }
        oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
        oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
        oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
        elt = (struct oz_elt *)(oz_hdr+1);

        for (e = list.next; e != &list; e = e->next) {
                struct oz_elt_info *ei;
                ei = container_of(e, struct oz_elt_info, link);
                memcpy(elt, ei->data, ei->length);
                elt = oz_next_elt(elt);
        }
        oz_event_log(OZ_EVT_TX_ISOC, 0, 0, 0, 0);
        dev_queue_xmit(skb);
        oz_elt_info_free_chain(&pd->elt_buff, &list);
        return 0;
}
/*------------------------------------------------------------------------------
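 * Releases every frame in the TX queue that the given last packet number
 * acknowledges, completing their elements and recycling the descriptors.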
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
        struct list_head *e;
        struct oz_tx_frame *f;
        struct list_head *first = 0;
        struct list_head *last = 0;
        u8 diff;
        u32 pkt_num;

        spin_lock(&pd->tx_frame_lock);
        e = pd->tx_queue.next;
        while (e != &pd->tx_queue) {
                f = container_of(e, struct oz_tx_frame, link);
                pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
                diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
                if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
                        break;
                oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
                                                 pkt_num, pd->nb_queued_frames);
                if (first == 0)
                        first = e;
                last = e;
                e = e->next;
                pd->nb_queued_frames--;
        }
        if (first) {
                last->next->prev = &pd->tx_queue;
                pd->tx_queue.next = last->next;
                last->next = 0;
        }
        pd->last_sent_frame = &pd->tx_queue;
        spin_unlock(&pd->tx_frame_lock);
        while (first) {
                f = container_of(first, struct oz_tx_frame, link);
                first = first->next;
                oz_retire_frame(pd, f);
        }
}
/*------------------------------------------------------------------------------
 * Precondition: stream_lock must be held.
 * Context: softirq
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
        struct list_head *e;
        struct oz_isoc_stream *st;
        list_for_each(e, &pd->stream_list) {
                st = container_of(e, struct oz_isoc_stream, link);
                if (st->ep_num == ep_num)
                        return st;
        }
        return 0;
}
/*------------------------------------------------------------------------------
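 * Creates an isoc stream for the endpoint unless one already exists.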
 * Context: softirq
 */
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
        struct oz_isoc_stream *st =
                kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
        if (!st)
                return -ENOMEM;
        st->ep_num = ep_num;
        spin_lock_bh(&pd->stream_lock);
        if (!pd_stream_find(pd, ep_num)) {
                list_add(&st->link, &pd->stream_list);
                st = 0;
        }
        spin_unlock_bh(&pd->stream_lock);
        if (st)
                kfree(st);
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
        if (st->skb)
                kfree_skb(st->skb);
        kfree(st);
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
        struct oz_isoc_stream *st;
        spin_lock_bh(&pd->stream_lock);
        st = pd_stream_find(pd, ep_num);
        if (st)
                list_del(&st->link);
        spin_unlock_bh(&pd->stream_lock);
        if (st)
                oz_isoc_stream_free(st);
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
        atomic_dec(&g_submitted_isoc);
        oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
                0, skb, 0);
}
/*------------------------------------------------------------------------------
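 * Accumulates isoc data units for an endpoint into a single frame and, once
 * pd->ms_per_isoc units have been collected, queues the frame for
 * transmission (or transmits it immediately in ISOC "anytime" mode).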
 * Context: softirq
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
{
        struct net_device *dev = pd->net_dev;
        struct oz_isoc_stream *st;
        u8 nb_units = 0;
        struct sk_buff *skb = 0;
        struct oz_hdr *oz_hdr = 0;
        int size = 0;
        spin_lock_bh(&pd->stream_lock);
        st = pd_stream_find(pd, ep_num);
        if (st) {
                skb = st->skb;
                st->skb = 0;
                nb_units = st->nb_units;
                st->nb_units = 0;
                oz_hdr = st->oz_hdr;
                size = st->size;
        }
        spin_unlock_bh(&pd->stream_lock);
        if (!st)
                return 0;
        if (!skb) {
                /* Allocate enough space for max size frame. */
                skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
                                GFP_ATOMIC);
                if (skb == 0)
                        return 0;
                /* Reserve the head room for lower layers. */
                skb_reserve(skb, LL_RESERVED_SPACE(dev));
                skb_reset_network_header(skb);
                skb->dev = dev;
                skb->protocol = htons(OZ_ETHERTYPE);
                /* For audio packet set priority to AC_VO */
                skb->priority = 0x7;
                size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
                oz_hdr = (struct oz_hdr *)skb_put(skb, size);
        }
        memcpy(skb_put(skb, len), data, len);
        size += len;
        if (++nb_units < pd->ms_per_isoc) {
                spin_lock_bh(&pd->stream_lock);
                st->skb = skb;
                st->nb_units = nb_units;
                st->oz_hdr = oz_hdr;
                st->size = size;
                spin_unlock_bh(&pd->stream_lock);
        } else {
                struct oz_hdr oz;
                struct oz_isoc_large iso;
                spin_lock_bh(&pd->stream_lock);
                iso.frame_number = st->frame_num;
                st->frame_num += nb_units;
                spin_unlock_bh(&pd->stream_lock);
                oz.control =
                        (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
                oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
                oz.pkt_num = 0;
                iso.endpoint = ep_num;
                iso.format = OZ_DATA_F_ISOC_LARGE;
                iso.ms_data = nb_units;
                memcpy(oz_hdr, &oz, sizeof(oz));
                memcpy(oz_hdr+1, &iso, sizeof(iso));
                if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                                dev->dev_addr, skb->len) < 0)
                        goto out;

                skb->destructor = oz_isoc_destructor;
                /*Queue for Xmit if mode is not ANYTIME*/
                if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
                        struct oz_tx_frame *isoc_unit = NULL;
                        int nb = pd->nb_queued_isoc_frames;
                        if (nb >= OZ_MAX_TX_QUEUE_ISOC) {
                                oz_trace2(OZ_TRACE_TX_FRAMES,
                                                "Dropping ISOC Unit nb= %d\n",
                                                                        nb);
                                goto out;
                        }
                        isoc_unit = oz_tx_frame_alloc(pd);
                        if (isoc_unit == NULL)
                                goto out;
                        isoc_unit->hdr = oz;
                        isoc_unit->skb = skb;
                        spin_lock_bh(&pd->tx_frame_lock);
                        list_add_tail(&isoc_unit->link, &pd->tx_queue);
                        pd->nb_queued_isoc_frames++;
                        spin_unlock_bh(&pd->tx_frame_lock);
                        oz_trace2(OZ_TRACE_TX_FRAMES,
                        "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
                        pd->nb_queued_isoc_frames, pd->nb_queued_frames);
                        oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
                                        skb, atomic_read(&g_submitted_isoc));
                        return 0;
                }

                /*In ANYTIME mode Xmit unit immediately*/
                if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
                        atomic_inc(&g_submitted_isoc);
                        oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
                                        skb, atomic_read(&g_submitted_isoc));
                        if (dev_queue_xmit(skb) < 0) {
                                oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
                                return -1;
                        } else
                                return 0;
                }

out:    oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
        kfree_skb(skb);
        return -1;

        }
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: process
 */
void oz_apps_init(void)
{
        int i;
        for (i = 0; i < OZ_APPID_MAX; i++)
                if (g_app_if[i].init)
                        g_app_if[i].init();
}
/*------------------------------------------------------------------------------
 * Context: process
 */
void oz_apps_term(void)
{
        int i;
        /* Terminate all the apps. */
        for (i = 0; i < OZ_APPID_MAX; i++)
                if (g_app_if[i].term)
                        g_app_if[i].term();
}
/*------------------------------------------------------------------------------
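 * Dispatches a received element to the rx handler of the application it is
 * addressed to.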
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
        struct oz_app_if *ai;
        if (app_id == 0 || app_id > OZ_APPID_MAX)
                return;
        ai = &g_app_if[app_id-1];
        ai->rx(pd, elt);
}
/*------------------------------------------------------------------------------
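 * Delivers any queued farewell reports to the USB service's farewell
 * handler and frees them.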
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
        struct oz_farewell *f;
        struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
        while (1) {
                oz_polling_lock_bh();
                if (list_empty(&pd->farewell_list)) {
                        oz_polling_unlock_bh();
                        break;
                }
                f = list_first_entry(&pd->farewell_list,
                                struct oz_farewell, link);
                list_del(&f->link);
                oz_polling_unlock_bh();
                if (ai->farewell)
                        ai->farewell(pd, f->ep_num, f->report, f->len);
                kfree(f);
        }
}