net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
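/* The caller is placed on hdev->req_wait_q and sleeps until hci_req_complete()
 * or hci_req_cancel() updates req_status and wakes the queue, or until the
 * timeout expires. Callers serialize through the request lock; see
 * hci_request() below. */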
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

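/* Locked wrapper around __hci_request(); this is the path the ioctl
 * helpers below (HCISETAUTH, HCISETSCAN, ...) go through. */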
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;
                skb_queue_tail(&hdev->cmd_q, skb);
                hci_sched_cmd(hdev);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        {
                struct hci_cp_set_event_flt cp;
                cp.flt_type  = HCI_FLT_CLEAR_ALL;
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
        }

        /* Page timeout ~20 secs (0x8000 slots * 0.625 ms = 20.48 s) */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms = 20 s) */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

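        /* ir.length is in units of 1.28 seconds (the Bluetooth inquiry time
         * unit); budgeting 2000 ms of jiffies per unit leaves headroom for
         * the request to complete. */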
        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer of 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and copy it
         * to user space afterwards.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETLINKPOL:
                hdev->link_policy = (__u16) dr.dev_opt;
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }
        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = hdev->type;
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < 3; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        init_MUTEX(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

        hci_unregister_sysfs(hdev);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < 3; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])
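/* Packet type values (ACL data 2, SCO data 3, event 4) index reassembly[0..2],
 * hence the '- 2'; command packets (type 1) are never reassembled here. */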

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                struct sk_buff *skb = __reassembly(hdev, type);
                struct { int expect; } *scb;
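                /* scb is an overlay on skb->cb[]: 'expect' tracks how many
                 * bytes of this frame are still missing. */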
                int len = 0;

                if (!skb) {
                        /* Start of the frame */

                        switch (type) {
                        case HCI_EVENT_PKT:
                                if (count >= HCI_EVENT_HDR_SIZE) {
                                        struct hci_event_hdr *h = data;
                                        len = HCI_EVENT_HDR_SIZE + h->plen;
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_ACLDATA_PKT:
                                if (count >= HCI_ACL_HDR_SIZE) {
                                        struct hci_acl_hdr *h = data;
                                        len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_SCODATA_PKT:
                                if (count >= HCI_SCO_HDR_SIZE) {
                                        struct hci_sco_hdr *h = data;
                                        len = HCI_SCO_HDR_SIZE + h->dlen;
                                } else
                                        return -EILSEQ;
                                break;
                        }

                        skb = bt_skb_alloc(len, GFP_ATOMIC);
                        if (!skb) {
                                BT_ERR("%s no memory for packet", hdev->name);
                                return -ENOMEM;
                        }

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = type;

                        __reassembly(hdev, type) = skb;

                        scb = (void *) skb->cb;
                        scb->expect = len;
                } else {
                        /* Continuation */

                        scb = (void *) skb->cb;
                        len = scb->expect;
                }

                len = min(len, count);

                memcpy(skb_put(skb, len), data, len);

                scb->expect -= len;

                if (scb->expect == 0) {
                        /* Complete frame */

                        __reassembly(hdev, type) = NULL;

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);
                }

                count -= len; data += len;
        }

        return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
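/* Commands are not handed to the driver directly: they are queued on cmd_q
 * and drained by hci_cmd_task(), which respects the controller's command
 * flow control via hdev->cmd_cnt. */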
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf));
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
        skb_queue_tail(&hdev->cmd_q, skb);
        hci_sched_cmd(hdev);

        return 0;
}

/* Get data from the previously sent command */
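/* Typically used by the event handling code to look at the parameters of
 * the command that a Command Complete event answers. Returns NULL if the
 * last sent command does not match the given opcode. */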
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf)))
                return NULL;

        BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
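/* The 16-bit 'handle' field of the ACL header carries the 12-bit connection
 * handle plus the packet boundary and broadcast flags, packed together by
 * hci_handle_pack(). */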
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
        skb_queue_tail(&conn->data_q, skb);
        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
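/* Pick the ready connection of the given type with the fewest packets in
 * flight, and grant it a quota of the controller's free buffers divided
 * evenly among all ready connections (at least one). */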
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || c->state != BT_CONNECTED
                                || skb_queue_empty(&c->data_q))
                        continue;
                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
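                        /* 0x13: Remote User Terminated Connection */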
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                if (hdev->sent_cmd)
                        kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        hci_sched_cmd(hdev);
                }
        }
}