1 /* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
 */
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/timer.h>
9 #include <linux/sched.h>
10 #include <linux/netdevice.h>
11 #include <linux/errno.h>
12 #include <linux/ieee80211.h>
14 #include "ozprotocol.h"
22 #include <asm/unaligned.h>
23 #include <linux/uaccess.h>
24 #include <net/psnap.h>
25 /*------------------------------------------------------------------------------
 * NOTE(review): the body of this banner comment is missing from this
 * extract; closing marker added so the constants below remain code.
 */
/* Connection-completion result codes (not referenced in this extract;
 * presumably reported to other layers — verify against callers). */
27 #define OZ_CF_CONN_SUCCESS 1
28 #define OZ_CF_CONN_FAILURE 2
33 /* States of the timer.
 */
35 #define OZ_TIMER_IDLE 0		/* g_timer is not armed */
36 #define OZ_TIMER_SET 1		/* g_timer is armed for g_cur_timer */
37 #define OZ_TIMER_IN_HANDLER 2	/* oz_protocol_timer() is running */
/* Maximum number of recycled struct oz_timer entries kept on g_timer_pool. */
39 #define OZ_MAX_TIMER_POOL_SIZE 16
41 /*------------------------------------------------------------------------------
 * Struct definitions.
 * NOTE(review): the "struct oz_binding {" / "struct oz_timer {" header lines
 * and their closing braces appear to be missing from this extract — the
 * fields below are fragments; verify against the complete file.
 */
44 struct packet_type ptype;	/* receive hook registered via dev_add_pack() */
45 char name[OZ_MAX_BINDING_LEN];	/* bound net-device name (empty = all devices) */
46 struct oz_binding *next;	/* next entry in the g_binding singly-linked list */
50 struct list_head link;	/* position in g_timer_list (or g_timer_pool reuse) */
52 unsigned long due_time;	/* expiry time, in jiffies */
55 /*------------------------------------------------------------------------------
56 * Static external variables.
 */
58 static DEFINE_SPINLOCK(g_polling_lock);	/* guards g_pd_list, g_apps and the timer lists */
59 static LIST_HEAD(g_pd_list);	/* all known peripheral devices (PDs) */
60 static struct oz_binding *g_binding ;	/* head of the singly-linked binding list */
61 static DEFINE_SPINLOCK(g_binding_lock);	/* guards g_binding */
62 static struct sk_buff_head g_rx_queue;	/* received frames pending processing */
63 static u8 g_session_id;	/* last session id handed out (0 is skipped) */
64 static u16 g_apps = 0x1;	/* bitmask of enabled applications; bit 0 always set */
65 static int g_processing_rx;	/* rx-drain-in-progress flag (checked in oz_pkt_recv) */
66 static struct timer_list g_timer;	/* single kernel timer multiplexing all oz_timers */
67 static struct oz_timer *g_cur_timer;	/* entry g_timer is currently armed for */
68 static struct list_head *g_timer_pool;	/* singly-linked free-list of recycled oz_timers */
69 static int g_timer_pool_count;	/* entries on g_timer_pool (capped at OZ_MAX_TIMER_POOL_SIZE) */
70 static int g_timer_state = OZ_TIMER_IDLE;	/* one of the OZ_TIMER_* states above */
71 static LIST_HEAD(g_timer_list);	/* pending timers, kept sorted by due_time */
72 /*------------------------------------------------------------------------------
 * Forward declaration: (re)arms g_timer from the head of g_timer_list.
 */
74 static void oz_protocol_timer_start(void);
75 /*------------------------------------------------------------------------------
76 * Context: softirq-serialized
 *
 * Advance the global session-id counter and produce a value that is neither
 * zero nor equal to @exclude; wrap-around past zero is skipped explicitly.
 * NOTE(review): braces, the wrap-around assignments and the return
 * statement are missing from this extract — verify against the full file.
 */
78 static u8 oz_get_new_session_id(u8 exclude)
80 if (++g_session_id == 0)
82 if (g_session_id == exclude) {
83 if (++g_session_id == 0)
88 /*------------------------------------------------------------------------------
89 * Context: softirq-serialized
 *
 * Build an OZ_ELT_CONNECT_RSP element reporting @status and transmit it to
 * @pd on the net device the PD was seen on.
 * NOTE(review): lines are missing from this extract (declarations of
 * skb/elt, the allocation-failure and dev_hard_header error paths, and the
 * final transmit call) — verify against the complete file.
 */
91 static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
94 struct net_device *dev = pd->net_dev;
95 struct oz_hdr *oz_hdr;
97 struct oz_elt_connect_rsp *body;
98 int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
99 sizeof(struct oz_elt_connect_rsp);
/* Reserve link-layer headroom, then lay out header + element + body
 * contiguously in the skb. */
100 skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
103 skb_reserve(skb, LL_RESERVED_SPACE(dev));
104 skb_reset_network_header(skb);
105 oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
106 elt = (struct oz_elt *)(oz_hdr+1);
107 body = (struct oz_elt_connect_rsp *)(elt+1);
109 skb->protocol = htons(OZ_ETHERTYPE);
110 /* Fill in device header */
111 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
112 dev->dev_addr, skb->len) < 0) {
/* Fixed Ozmo header: protocol version, packet number 0. */
116 oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
117 oz_hdr->last_pkt_num = 0;
118 put_unaligned(0, &oz_hdr->pkt_num);
119 oz_event_log(OZ_EVT_CONNECT_RSP, 0, 0, 0, 0);
120 elt->type = OZ_ELT_CONNECT_RSP;
121 elt->length = sizeof(struct oz_elt_connect_rsp);
122 memset(body, 0, sizeof(struct oz_elt_connect_rsp));
123 body->status = status;
125 body->mode = pd->mode;
126 body->session_id = pd->session_id;
/* apps field is little-endian on the wire and may be unaligned. */
127 put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
129 oz_trace("TX: OZ_ELT_CONNECT_RSP %d", status);
133 /*------------------------------------------------------------------------------
134 * Context: softirq-serialized
 *
 * Decode the connect-request keep-alive byte — low bits are a count
 * (OZ_KALIVE_VALUE_MASK), high bits select the unit — and store the result
 * in pd->keep_alive_j in jiffies (0 = no keep-alive).
 * NOTE(review): several case labels and break statements are missing from
 * this extract — verify against the complete file.
 */
136 static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
138 unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;
140 switch (kalive & OZ_KALIVE_TYPE_MASK) {
141 case OZ_KALIVE_SPECIAL:
/* "Special" keep-alive: the count is scaled in units of 20 days (in ms). */
143 oz_ms_to_jiffies(keep_alive * 1000*60*60*24*20);
146 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000);
149 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60);
151 case OZ_KALIVE_HOURS:
152 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60*60);
155 pd->keep_alive_j = 0;
157 oz_trace("Keepalive = %lu jiffies\n", pd->keep_alive_j);
159 /*------------------------------------------------------------------------------
160 * Context: softirq-serialized
 *
 * Store the presleep timeout, given in units of 100 ms, in jiffies in
 * pd->presleep_j.  A zero value presumably falls back to the default
 * OZ_PRESLEEP_TOUT_J — the guarding if/else lines are missing from this
 * extract; verify against the complete file.
 */
162 static void pd_set_presleep(struct oz_pd *pd, u8 presleep)
165 pd->presleep_j = oz_ms_to_jiffies(presleep*100);
167 pd->presleep_j = OZ_PRESLEEP_TOUT_J;
168 oz_trace("Presleep time = %lu jiffies\n", pd->presleep_j);
170 /*------------------------------------------------------------------------------
171 * Context: softirq-serialized
 *
 * Handle an OZ_ELT_CONNECT_REQ element received from @pd_addr on @net_dev.
 * Reuses @cur_pd when the PD is already known (otherwise allocates one and
 * inserts it into g_pd_list), negotiates the application set and session
 * id, starts/stops/resumes the agreed services and transmits a connect
 * response.  Returns the PD in use.
 * NOTE(review): many lines (braces, else branches, error cleanup and the
 * return statement) are missing from this extract — verify against the
 * complete file.
 */
173 static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
174 u8 *pd_addr, struct net_device *net_dev)
177 struct oz_elt_connect_req *body =
178 (struct oz_elt_connect_req *)(elt+1);
179 u8 rsp_status = OZ_STATUS_SUCCESS;
181 u16 new_apps = g_apps;
182 struct net_device *old_net_dev = 0;
183 struct oz_pd *free_pd = 0;
186 spin_lock_bh(&g_polling_lock);
188 struct oz_pd *pd2 = 0;
/* New PD: allocate and insert unless another CPU raced us in. */
190 pd = oz_pd_alloc(pd_addr);
193 pd->last_rx_time_j = jiffies;
194 spin_lock_bh(&g_polling_lock);
195 list_for_each(e, &g_pd_list) {
196 pd2 = container_of(e, struct oz_pd, link);
197 if (memcmp(pd2->mac_addr, pd_addr, ETH_ALEN) == 0) {
204 list_add_tail(&pd->link, &g_pd_list);
207 spin_unlock_bh(&g_polling_lock);
/* The PD may have moved to a different net device since last seen. */
210 if (pd->net_dev != net_dev) {
211 old_net_dev = pd->net_dev;
213 pd->net_dev = net_dev;
/* Copy the negotiated parameters from the request body into the PD. */
215 oz_trace("Host vendor: %d\n", body->host_vendor);
216 pd->max_tx_size = OZ_MAX_TX_SIZE;
217 pd->mode = body->mode;
218 pd->pd_info = body->pd_info;
219 if (pd->mode & OZ_F_ISOC_NO_ELTS) {
220 pd->ms_per_isoc = body->ms_per_isoc;
221 if (!pd->ms_per_isoc)
224 if (body->max_len_div16)
225 pd->max_tx_size = ((u16)body->max_len_div16)<<4;
226 oz_trace("Max frame:%u Ms per isoc:%u\n",
227 pd->max_tx_size, pd->ms_per_isoc);
228 pd->max_stream_buffering = 3*1024;
229 pd->timeout_time_j = jiffies + OZ_CONNECTION_TOUT_J;
230 pd->pulse_period_j = OZ_QUANTUM_J;
231 pd_set_presleep(pd, body->presleep);
232 pd_set_keepalive(pd, body->keep_alive);
/* Intersect the locally enabled apps with the set the PD requests. */
234 new_apps &= le16_to_cpu(get_unaligned(&body->apps));
/* Session-id negotiation; a mismatch rejects the session and bit 0
 * (resume) is cleared where resume is not permitted. */
235 if ((new_apps & 0x1) && (body->session_id)) {
236 if (pd->session_id) {
237 if (pd->session_id != body->session_id) {
238 rsp_status = OZ_STATUS_SESSION_MISMATCH;
242 new_apps &= ~0x1; /* Resume not permitted */
244 oz_get_new_session_id(body->session_id);
247 if (pd->session_id && !body->session_id) {
248 rsp_status = OZ_STATUS_SESSION_TEARDOWN;
251 new_apps &= ~0x1; /* Resume not permitted */
253 oz_get_new_session_id(body->session_id);
/* Compute which services to start, stop and resume (bit 0 excluded),
 * then apply them outside the polling lock. */
257 if (rsp_status == OZ_STATUS_SUCCESS) {
258 u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
259 u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
260 u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
261 spin_unlock_bh(&g_polling_lock);
262 oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
263 oz_timer_delete(pd, OZ_TIMER_STOP);
264 oz_trace("new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
265 new_apps, pd->total_apps, pd->paused_apps);
267 if (oz_services_start(pd, start_apps, 0))
268 rsp_status = OZ_STATUS_TOO_MANY_PDS;
271 if (oz_services_start(pd, resume_apps, 1))
272 rsp_status = OZ_STATUS_TOO_MANY_PDS;
274 oz_services_stop(pd, stop_apps, 0);
275 oz_pd_request_heartbeat(pd);
277 spin_unlock_bh(&g_polling_lock);
279 oz_send_conn_rsp(pd, rsp_status);
280 if (rsp_status != OZ_STATUS_SUCCESS) {
/* Release the device replaced above and any PD scheduled for freeing. */
287 dev_put(old_net_dev);
289 oz_pd_destroy(free_pd);
292 /*------------------------------------------------------------------------------
293 * Context: softirq-serialized
 *
 * Record a farewell report of @len bytes for endpoint @ep_num on @pd so it
 * can be delivered later.  An existing report with the same (ep_num, index)
 * is presumably replaced — the unlink/free lines are missing from this
 * extract; verify against the complete file.
 */
295 static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
298 struct oz_farewell *f;
299 struct oz_farewell *f2;
/* "len - 1": struct oz_farewell presumably ends in a 1-byte report array
 * — TODO confirm against the struct definition. */
301 f = kmalloc(sizeof(struct oz_farewell) + len - 1, GFP_ATOMIC);
306 memcpy(f->report, report, len);
307 oz_trace("RX: Adding farewell report\n");
308 spin_lock(&g_polling_lock);
309 list_for_each_entry(f2, &pd->farewell_list, link) {
310 if ((f2->ep_num == ep_num) && (f2->index == index)) {
316 list_add_tail(&f->link, &pd->farewell_list);
317 spin_unlock(&g_polling_lock);
321 /*------------------------------------------------------------------------------
322 * Context: softirq-serialized
 *
 * Core receive path.  Validates the Ozmo protocol header, looks up the PD
 * for the source MAC address (refreshing its rx timestamp and presleep
 * timeout), handles ack/trigger semantics for triggered-mode PDs, then
 * walks the element chain and dispatches each element by type.
 * NOTE(review): lines are missing throughout this extract (local variable
 * declarations, the duplicate-frame branch, break statements and the final
 * skb free) — verify against the complete file.
 */
324 static void oz_rx_frame(struct sk_buff *skb)
330 struct oz_pd *pd = 0;
331 struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
335 oz_event_log(OZ_EVT_RX_PROCESS, 0,
336 (((u16)oz_hdr->control)<<8)|oz_hdr->last_pkt_num,
338 oz_trace2(OZ_TRACE_RX_FRAMES,
339 "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
340 oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
/* Source MAC sits just past the destination in the link-layer header. */
341 mac_hdr = skb_mac_header(skb);
342 src_addr = &mac_hdr[ETH_ALEN] ;
345 /* Check the version field */
346 if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
347 oz_trace("Incorrect protocol version: %d\n",
348 oz_get_prot_ver(oz_hdr->control));
/* Packet number is little-endian and potentially unaligned. */
352 pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));
354 pd = oz_pd_find(src_addr);
/* Known PD: refresh activity time and re-arm the presleep timeout. */
356 pd->last_rx_time_j = jiffies;
357 oz_timer_add(pd, OZ_TIMER_TOUT,
358 pd->last_rx_time_j + pd->presleep_j, 1);
359 if (pkt_num != pd->last_rx_pkt_num) {
360 pd->last_rx_pkt_num = pkt_num;
363 oz_trace("Duplicate frame\n");
/* Triggered-mode PDs only transmit when triggered: retire acked frames
 * and, if an ack was requested, flush the queued backlog. */
367 if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
368 oz_trace2(OZ_TRACE_RX_FRAMES, "Received TRIGGER Frame\n");
369 pd->last_sent_frame = &pd->tx_queue;
370 if (oz_hdr->control & OZ_F_ACK) {
371 /* Retire completed frames */
372 oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
374 if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
375 (pd->state == OZ_PD_S_CONNECTED)) {
376 int backlog = pd->nb_queued_frames;
377 pd->trigger_pkt_num = pkt_num;
378 /* Send queued frames */
379 oz_send_queued_frames(pd, backlog);
/* Walk the element (TLV) chain that follows the protocol header. */
383 length -= sizeof(struct oz_hdr);
384 elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));
386 while (length >= sizeof(struct oz_elt)) {
387 length -= sizeof(struct oz_elt) + elt->length;
391 case OZ_ELT_CONNECT_REQ:
392 oz_event_log(OZ_EVT_CONNECT_REQ, 0, 0, 0, 0);
393 oz_trace("RX: OZ_ELT_CONNECT_REQ\n");
394 pd = oz_connect_req(pd, elt, src_addr, skb->dev);
396 case OZ_ELT_DISCONNECT:
397 oz_trace("RX: OZ_ELT_DISCONNECT\n");
401 case OZ_ELT_UPDATE_PARAM_REQ: {
402 struct oz_elt_update_param *body =
403 (struct oz_elt_update_param *)(elt + 1);
404 oz_trace("RX: OZ_ELT_UPDATE_PARAM_REQ\n");
405 if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
406 spin_lock(&g_polling_lock);
407 pd_set_keepalive(pd, body->keepalive);
408 pd_set_presleep(pd, body->presleep);
409 spin_unlock(&g_polling_lock);
413 case OZ_ELT_FAREWELL_REQ: {
414 struct oz_elt_farewell *body =
415 (struct oz_elt_farewell *)(elt + 1);
416 oz_trace("RX: OZ_ELT_FAREWELL_REQ\n");
417 oz_add_farewell(pd, body->ep_num,
418 body->index, body->report,
419 elt->length + 1 - sizeof(*body));
422 case OZ_ELT_APP_DATA:
423 if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
424 struct oz_app_hdr *app_hdr =
425 (struct oz_app_hdr *)(elt+1);
428 oz_handle_app_elt(pd, app_hdr->app_id, elt);
432 oz_trace("RX: Unknown elt %02x\n", elt->type);
434 elt = oz_next_elt(elt);
441 /*------------------------------------------------------------------------------
 * Module teardown: stop the protocol timer, unhook every binding from the
 * packet layer, stop all PDs and free the pooled timer entries.
 * NOTE(review): loop headers, frees and unlink statements are missing from
 * this extract — verify against the complete file.
 */
444 void oz_protocol_term(void)
446 struct list_head *chain = 0;
447 del_timer_sync(&g_timer);
448 /* Walk the list of bindings and remove each one.
 */
450 spin_lock_bh(&g_binding_lock);
452 struct oz_binding *b = g_binding;
/* Unregister outside the lock; drop the device reference if bound. */
454 spin_unlock_bh(&g_binding_lock);
455 dev_remove_pack(&b->ptype);
457 dev_put(b->ptype.dev);
459 spin_lock_bh(&g_binding_lock);
461 spin_unlock_bh(&g_binding_lock);
462 /* Walk the list of PDs and stop each one. This causes the PD to be
463 * removed from the list so we can just pull each one from the head
 */
466 spin_lock_bh(&g_polling_lock);
467 while (!list_empty(&g_pd_list)) {
469 list_first_entry(&g_pd_list, struct oz_pd, link);
471 spin_unlock_bh(&g_polling_lock);
/* Detach the whole timer free-list under the lock, then free it. */
474 spin_lock_bh(&g_polling_lock);
476 chain = g_timer_pool;
478 spin_unlock_bh(&g_polling_lock);
480 struct oz_timer *t = container_of(chain, struct oz_timer, link);
484 oz_trace("Protocol stopped\n");
486 /*------------------------------------------------------------------------------
 * Dispatch an expired PD timer by @type.  Only the heartbeat case is
 * visible in this extract; the other cases (e.g. timeout/stop) are missing
 * — verify against the complete file.
 */
489 static void oz_pd_handle_timer(struct oz_pd *pd, int type)
498 case OZ_TIMER_HEARTBEAT: {
/* Clear the request flag under the lock; only heartbeat the apps of a
 * connected PD. */
500 spin_lock_bh(&g_polling_lock);
501 pd->heartbeat_requested = 0;
502 if (pd->state & OZ_PD_S_CONNECTED)
503 apps = pd->total_apps;
504 spin_unlock_bh(&g_polling_lock);
506 oz_pd_heartbeat(pd, apps);
511 /*------------------------------------------------------------------------------
 * Kernel-timer callback: runs each timer whose due_time has passed,
 * recycling expired oz_timer entries into g_timer_pool, then re-arms the
 * timer for the next pending entry.
 * NOTE(review): lines are missing from this extract (the g_cur_timer test,
 * loop structure and PD refcount handling) — verify against the complete
 * file.
 */
514 static void oz_protocol_timer(unsigned long arg)
519 spin_lock_bh(&g_polling_lock);
521 /* This happens if we remove the current timer but can't stop
522 * the timer from firing. In this case just get out.
 */
524 oz_event_log(OZ_EVT_TIMER, 0, 0, 0, 0);
525 spin_unlock_bh(&g_polling_lock);
528 g_timer_state = OZ_TIMER_IN_HANDLER;
/* Run the expired timer's handler outside the lock. */
532 spin_unlock_bh(&g_polling_lock);
535 oz_event_log(OZ_EVT_TIMER, 0, t->type, 0, 0);
536 oz_pd_handle_timer(pd, t->type);
537 spin_lock_bh(&g_polling_lock);
538 if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
/* Recycle the expired entry onto the singly-linked free pool. */
539 t->link.next = g_timer_pool;
540 g_timer_pool = &t->link;
541 g_timer_pool_count++;
/* Keep draining while the head of the list is already due. */
544 if (!list_empty(&g_timer_list)) {
545 t2 = container_of(g_timer_list.next,
546 struct oz_timer, link);
547 if (time_before_eq(t2->due_time, jiffies))
554 spin_unlock_bh(&g_polling_lock);
560 g_timer_state = OZ_TIMER_IDLE;
561 oz_protocol_timer_start();
563 /*------------------------------------------------------------------------------
 * Arm (or re-arm) the single kernel timer for the earliest entry on
 * g_timer_list.  If the timer is already set, mod_timer() is used;
 * otherwise g_timer's fields are programmed directly (the add_timer call
 * and assignment lines appear to be missing from this extract).
 */
566 static void oz_protocol_timer_start(void)
568 spin_lock_bh(&g_polling_lock);
569 if (!list_empty(&g_timer_list)) {
571 container_of(g_timer_list.next, struct oz_timer, link);
572 if (g_timer_state == OZ_TIMER_SET) {
573 oz_event_log(OZ_EVT_TIMER_CTRL, 3,
574 (u16)g_cur_timer->type, 0,
575 (unsigned)g_cur_timer->due_time);
576 mod_timer(&g_timer, g_cur_timer->due_time);
578 oz_event_log(OZ_EVT_TIMER_CTRL, 4,
579 (u16)g_cur_timer->type, 0,
580 (unsigned)g_cur_timer->due_time);
581 g_timer.expires = g_cur_timer->due_time;
582 g_timer.function = oz_protocol_timer;
586 g_timer_state = OZ_TIMER_SET;
588 oz_trace("No queued timers\n");
590 spin_unlock_bh(&g_polling_lock);
592 /*------------------------------------------------------------------------------
593 * Context: softirq or process
 *
 * Schedule (or reschedule) a timer of @type for @pd at @due_time, keeping
 * g_timer_list sorted by due_time.  Entries are taken from the
 * g_timer_pool free-list when available, otherwise kmalloc'd.
 * NOTE(review): branch bodies, the remaining parameter and the restart
 * logic are partially missing from this extract — verify against the
 * complete file.
 */
595 void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
599 struct oz_timer *t = 0;
600 int restart_needed = 0;
601 oz_event_log(OZ_EVT_TIMER_CTRL, 1, (u16)type, 0, (unsigned)due_time);
602 spin_lock(&g_polling_lock);
/* Reuse an existing (pd, type) entry if one is already queued. */
604 list_for_each(e, &g_timer_list) {
605 t = container_of(e, struct oz_timer, link);
606 if ((t->pd == pd) && (t->type == type)) {
607 if (g_cur_timer == t) {
/* Prefer a recycled pool entry before falling back to kmalloc. */
619 t = container_of(g_timer_pool, struct oz_timer, link);
620 g_timer_pool = g_timer_pool->next;
621 g_timer_pool_count--;
623 t = kmalloc(sizeof(struct oz_timer), GFP_ATOMIC);
633 t->due_time = due_time;
/* Insert in due_time order; note if we displace the armed timer. */
634 list_for_each(e, &g_timer_list) {
635 t2 = container_of(e, struct oz_timer, link);
636 if (time_before(due_time, t2->due_time)) {
637 if (t2 == g_cur_timer) {
644 list_add_tail(&t->link, e);
646 if (g_timer_state == OZ_TIMER_IDLE)
648 else if (g_timer_state == OZ_TIMER_IN_HANDLER)
650 spin_unlock(&g_polling_lock);
652 oz_protocol_timer_start();
654 /*------------------------------------------------------------------------------
 * Context: softirq or process
 *
 * Remove every queued timer for @pd matching @type (type 0 removes all).
 * Removed entries are returned to the free pool while it has room; the
 * excess is collected on a local chain and kfree'd after the lock is
 * dropped.
 * NOTE(review): the list unlink, PD refcount drops and parts of the kfree
 * loop are missing from this extract — verify against the complete file.
 */
657 void oz_timer_delete(struct oz_pd *pd, int type)
659 struct list_head *chain = 0;
662 int restart_needed = 0;
664 oz_event_log(OZ_EVT_TIMER_CTRL, 2, (u16)type, 0, 0);
665 spin_lock(&g_polling_lock);
666 list_for_each_entry_safe(t, n, &g_timer_list, link) {
667 if ((t->pd == pd) && ((type == 0) || (t->type == type))) {
668 if (g_cur_timer == t) {
675 if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
676 t->link.next = g_timer_pool;
677 g_timer_pool = &t->link;
678 g_timer_pool_count++;
/* Pool full: defer this entry to the local chain for kfree below. */
680 t->link.next = chain;
687 if (g_timer_state == OZ_TIMER_IN_HANDLER)
689 else if (restart_needed)
690 g_timer_state = OZ_TIMER_IDLE;
691 spin_unlock(&g_polling_lock);
693 oz_protocol_timer_start();
/* Free the entries that did not fit in the pool, outside the lock. */
697 t = container_of(chain, struct oz_timer, link);
702 /*------------------------------------------------------------------------------
 * Context: softirq or process
 *
 * Arm a heartbeat timer for @pd at the next pulse-period boundary, unless
 * a heartbeat has already been requested and is still pending.
 */
705 void oz_pd_request_heartbeat(struct oz_pd *pd)
707 unsigned long now = jiffies;
709 spin_lock(&g_polling_lock);
/* A heartbeat is already scheduled — nothing to do. */
710 if (pd->heartbeat_requested) {
711 spin_unlock(&g_polling_lock);
/* Round now up to the next multiple of the pulse period. */
714 if (pd->pulse_period_j)
715 t = ((now / pd->pulse_period_j) + 1) * pd->pulse_period_j;
718 pd->heartbeat_requested = 1;
719 spin_unlock(&g_polling_lock);
720 oz_timer_add(pd, OZ_TIMER_HEARTBEAT, t, 0);
722 /*------------------------------------------------------------------------------
 * Context: softirq or process
 *
 * Look up the PD with the given MAC address.  On a hit the PD's reference
 * count is raised before the lock is released, so the caller owns a
 * reference it must later drop.  Presumably returns NULL on a miss — the
 * return statements are missing from this extract.
 */
725 struct oz_pd *oz_pd_find(u8 *mac_addr)
729 spin_lock_bh(&g_polling_lock);
730 list_for_each(e, &g_pd_list) {
731 pd = container_of(e, struct oz_pd, link);
732 if (memcmp(pd->mac_addr, mac_addr, ETH_ALEN) == 0) {
733 atomic_inc(&pd->ref_count);
734 spin_unlock_bh(&g_polling_lock);
738 spin_unlock_bh(&g_polling_lock);
741 /*------------------------------------------------------------------------------
 * Set or clear application @app_id's bit in the global g_apps enable mask,
 * under the polling lock.  Ids above OZ_APPID_MAX are ignored.
 */
744 void oz_app_enable(int app_id, int enable)
746 if (app_id <= OZ_APPID_MAX) {
747 spin_lock_bh(&g_polling_lock);
749 g_apps |= (1<<app_id);
751 g_apps &= ~(1<<app_id);
752 spin_unlock_bh(&g_polling_lock);
755 /*------------------------------------------------------------------------------
 * packet_type receive hook registered in oz_binding_add().  Serializes
 * frame processing: if another CPU is already draining g_rx_queue
 * (g_processing_rx set) the skb is queued for it; otherwise this CPU
 * drains the queue until it is empty.
 * NOTE(review): the statements that set/clear g_processing_rx, the call
 * into oz_rx_frame() and the return statements are missing from this
 * extract — verify against the complete file.
 */
758 static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
759 struct packet_type *pt, struct net_device *orig_dev)
761 oz_event_log(OZ_EVT_RX_FRAME, 0, 0, 0, 0);
/* Get a private copy if the skb is shared with another receiver. */
762 skb = skb_share_check(skb, GFP_ATOMIC);
765 spin_lock_bh(&g_rx_queue.lock);
766 if (g_processing_rx) {
767 /* We already hold the lock so use __ variant.
 */
769 __skb_queue_head(&g_rx_queue, skb);
770 spin_unlock_bh(&g_rx_queue.lock);
775 spin_unlock_bh(&g_rx_queue.lock);
/* Drain loop: take the next queued frame, or stop when empty. */
777 spin_lock_bh(&g_rx_queue.lock);
778 if (skb_queue_empty(&g_rx_queue)) {
780 spin_unlock_bh(&g_rx_queue.lock);
783 /* We already hold the lock so use __ variant.
 */
785 skb = __skb_dequeue(&g_rx_queue);
790 /*------------------------------------------------------------------------------
 * Bind the protocol to a network device by name (empty name = all devices)
 * and register the Ozmo ethertype handler with dev_add_pack().
 * NOTE(review): allocation-failure handling and the error-cleanup path for
 * a missing netdev are not visible in this extract — verify against the
 * complete file.
 */
793 void oz_binding_add(char *net_dev)
795 struct oz_binding *binding;
797 binding = kmalloc(sizeof(struct oz_binding), GFP_KERNEL);
799 binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
800 binding->ptype.func = oz_pkt_recv;
801 memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
802 if (net_dev && *net_dev) {
803 oz_trace("Adding binding: %s\n", net_dev);
/* Takes a reference on the device; released in oz_binding_remove(). */
805 dev_get_by_name(&init_net, net_dev);
806 if (binding->ptype.dev == 0) {
807 oz_trace("Netdev %s not found\n", net_dev);
812 oz_trace("Binding to all netcards\n");
/* NULL ptype.dev makes dev_add_pack() match every device. */
813 binding->ptype.dev = 0;
816 dev_add_pack(&binding->ptype);
/* Push onto the head of the global singly-linked binding list. */
817 spin_lock_bh(&g_binding_lock);
818 binding->next = g_binding;
820 spin_unlock_bh(&g_binding_lock);
824 /*------------------------------------------------------------------------------
 * Compare two binding names over at most OZ_MAX_BINDING_LEN bytes;
 * presumably returns non-zero on a match — the loop body and return
 * statements are missing from this extract, so verify against the full
 * file.
 */
827 static int compare_binding_name(char *s1, char *s2)
830 for (i = 0; i < OZ_MAX_BINDING_LEN; i++) {
839 /*------------------------------------------------------------------------------
 * Move every PD bound to @net_dev onto a private list under the polling
 * lock, then stop each one with the lock dropped.
 * NOTE(review): the local list-head declaration and the per-PD stop/put
 * calls are missing from this extract — verify against the complete file.
 */
842 static void pd_stop_all_for_device(struct net_device *net_dev)
848 spin_lock_bh(&g_polling_lock);
849 list_for_each_entry_safe(pd, n, &g_pd_list, link) {
850 if (pd->net_dev == net_dev) {
851 list_move(&pd->link, &h);
855 spin_unlock_bh(&g_polling_lock);
856 while (!list_empty(&h)) {
857 pd = list_first_entry(&h, struct oz_pd, link);
862 /*------------------------------------------------------------------------------
 * Unlink the binding for @net_dev from the global list, unregister its
 * packet handler and stop any PDs that were using the bound device.
 * NOTE(review): the list-walk loop header, the "found" flag and the final
 * kfree are missing from this extract — verify against the complete file.
 */
865 void oz_binding_remove(char *net_dev)
867 struct oz_binding *binding = 0;
868 struct oz_binding **link;
869 oz_trace("Removing binding: %s\n", net_dev);
870 spin_lock_bh(&g_binding_lock);
874 if (compare_binding_name(binding->name, net_dev)) {
875 oz_trace("Binding '%s' found\n", net_dev);
/* Unlink via the indirect pointer so head removal needs no special case. */
876 *link = binding->next;
880 binding = binding->next;
883 spin_unlock_bh(&g_binding_lock);
/* Unregister outside the lock; drop the netdev reference taken at add. */
885 dev_remove_pack(&binding->ptype);
886 if (binding->ptype.dev) {
887 dev_put(binding->ptype.dev);
888 pd_stop_all_for_device(binding->ptype.dev);
893 /*------------------------------------------------------------------------------
 * Copy the next comma-separated device name from @s into @dname (at most
 * @max_size-1 characters) and return the advanced source pointer.  The
 * copy, terminator and separator-skip lines are missing from this extract
 * — verify against the complete file.
 */
896 static char *oz_get_next_device_name(char *s, char *dname, int max_size)
900 while (*s && (*s != ',') && max_size > 1) {
907 /*------------------------------------------------------------------------------
 * Module init: set up the rx queue and the protocol timer, and create a
 * binding for each comma-separated device name in @devs ("*" binds all
 * devices).
 * NOTE(review): the binding loop body and the return value are missing
 * from this extract — verify against the complete file.
 */
910 int oz_protocol_init(char *devs)
912 skb_queue_head_init(&g_rx_queue);
913 if (devs && (devs[0] == '*')) {
918 devs = oz_get_next_device_name(devs, d, sizeof(d));
923 init_timer(&g_timer);
926 /*------------------------------------------------------------------------------
 * Copy up to @max_count PD MAC addresses into @addr under the polling
 * lock; presumably returns the number copied — the count declaration and
 * return statement are missing from this extract.
 */
929 int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
934 spin_lock_bh(&g_polling_lock);
935 list_for_each(e, &g_pd_list) {
936 if (count >= max_count)
938 pd = container_of(e, struct oz_pd, link);
939 memcpy(&addr[count++], pd->mac_addr, ETH_ALEN);
941 spin_unlock_bh(&g_polling_lock);
944 /*------------------------------------------------------------------------------
946 void oz_polling_lock_bh(void)
948 spin_lock_bh(&g_polling_lock);
950 /*------------------------------------------------------------------------------
952 void oz_polling_unlock_bh(void)
954 spin_unlock_bh(&g_polling_lock);