net/bluetooth/hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/interrupt.h>
41 #include <linux/notifier.h>
42 #include <linux/rfkill.h>
43 #include <net/sock.h>
44
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
48
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51
52 static void hci_cmd_task(unsigned long arg);
53 static void hci_rx_task(unsigned long arg);
54 static void hci_tx_task(unsigned long arg);
55 static void hci_notify(struct hci_dev *hdev, int event);
56
57 static DEFINE_RWLOCK(hci_task_lock);
58
59 /* HCI device list */
60 LIST_HEAD(hci_dev_list);
61 DEFINE_RWLOCK(hci_dev_list_lock);
62
63 /* HCI callback list */
64 LIST_HEAD(hci_cb_list);
65 DEFINE_RWLOCK(hci_cb_list_lock);
66
67 /* HCI protocols */
68 #define HCI_MAX_PROTO   2
69 struct hci_proto *hci_proto[HCI_MAX_PROTO];
70
71 /* HCI notifiers list */
72 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
73
74 /* ---- HCI notifications ---- */
75
76 int hci_register_notifier(struct notifier_block *nb)
77 {
78         return atomic_notifier_chain_register(&hci_notifier, nb);
79 }
80
81 int hci_unregister_notifier(struct notifier_block *nb)
82 {
83         return atomic_notifier_chain_unregister(&hci_notifier, nb);
84 }
85
86 static void hci_notify(struct hci_dev *hdev, int event)
87 {
88         atomic_notifier_call_chain(&hci_notifier, event, hdev);
89 }
90
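/* Illustrative only, not part of this file: a minimal sketch of how a
 * kernel module might hook these notifications. The example_* names are
 * hypothetical; hci_register_notifier() and the HCI_DEV_* events used
 * below are the real interface.
 */
#if 0
static int example_notify(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct hci_dev *hdev = ptr;

        if (event == HCI_DEV_UP)
                BT_DBG("%s came up", hdev->name);

        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_notify,
};

/* hci_register_notifier(&example_nb);   ...
   hci_unregister_notifier(&example_nb); */
#endif
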
91 /* ---- HCI requests ---- */
92
93 void hci_req_complete(struct hci_dev *hdev, int result)
94 {
95         BT_DBG("%s result 0x%2.2x", hdev->name, result);
96
97         if (hdev->req_status == HCI_REQ_PEND) {
98                 hdev->req_result = result;
99                 hdev->req_status = HCI_REQ_DONE;
100                 wake_up_interruptible(&hdev->req_wait_q);
101         }
102 }
103
104 static void hci_req_cancel(struct hci_dev *hdev, int err)
105 {
106         BT_DBG("%s err 0x%2.2x", hdev->name, err);
107
108         if (hdev->req_status == HCI_REQ_PEND) {
109                 hdev->req_result = err;
110                 hdev->req_status = HCI_REQ_CANCELED;
111                 wake_up_interruptible(&hdev->req_wait_q);
112         }
113 }
114
115 /* Execute request and wait for completion. */
116 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
117                                 unsigned long opt, __u32 timeout)
118 {
119         DECLARE_WAITQUEUE(wait, current);
120         int err = 0;
121
122         BT_DBG("%s start", hdev->name);
123
124         hdev->req_status = HCI_REQ_PEND;
125
126         add_wait_queue(&hdev->req_wait_q, &wait);
127         set_current_state(TASK_INTERRUPTIBLE);
128
129         req(hdev, opt);
130         schedule_timeout(timeout);
131
132         remove_wait_queue(&hdev->req_wait_q, &wait);
133
134         if (signal_pending(current))
135                 return -EINTR;
136
137         switch (hdev->req_status) {
138         case HCI_REQ_DONE:
139                 err = -bt_err(hdev->req_result);
140                 break;
141
142         case HCI_REQ_CANCELED:
143                 err = -hdev->req_result;
144                 break;
145
146         default:
147                 err = -ETIMEDOUT;
148                 break;
149         }
150
151         hdev->req_status = hdev->req_result = 0;
152
153         BT_DBG("%s end: err %d", hdev->name, err);
154
155         return err;
156 }
157
158 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
159                                 unsigned long opt, __u32 timeout)
160 {
161         int ret;
162
163         if (!test_bit(HCI_UP, &hdev->flags))
164                 return -ENETDOWN;
165
166         /* Serialize all requests */
167         hci_req_lock(hdev);
168         ret = __hci_request(hdev, req, opt, timeout);
169         hci_req_unlock(hdev);
170
171         return ret;
172 }
173
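/* Illustrative only: the pattern used by the ioctl helpers below. The
 * caller hands one of the hci_*_req callbacks plus an opaque argument to
 * hci_request(), which fires the request and sleeps until
 * hci_req_complete() is called from the event handler or the timeout
 * expires. For example, enabling page and inquiry scan (a sketch,
 * assuming hdev is already held):
 */
#if 0
        err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
#endif
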
174 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
175 {
176         BT_DBG("%s %ld", hdev->name, opt);
177
178         /* Reset device */
179         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
180 }
181
182 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
183 {
184         struct sk_buff *skb;
185         __le16 param;
186         __u8 flt_type;
187
188         BT_DBG("%s %ld", hdev->name, opt);
189
190         /* Driver initialization */
191
192         /* Special commands */
193         while ((skb = skb_dequeue(&hdev->driver_init))) {
194                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
195                 skb->dev = (void *) hdev;
196                 skb_queue_tail(&hdev->cmd_q, skb);
197                 hci_sched_cmd(hdev);
198         }
199         skb_queue_purge(&hdev->driver_init);
200
201         /* Mandatory initialization */
202
203         /* Reset */
204         if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
205                 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
206
207         /* Read Local Supported Features */
208         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209
210         /* Read Local Version */
211         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212
213         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
214         hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
215
216 #if 0
217         /* Host buffer size */
218         {
219                 struct hci_cp_host_buffer_size cp;
220                 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
221                 cp.sco_mtu = HCI_MAX_SCO_SIZE;
222                 cp.acl_max_pkt = cpu_to_le16(0xffff);
223                 cp.sco_max_pkt = cpu_to_le16(0xffff);
224                 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
225         }
226 #endif
227
228         /* Read BD Address */
229         hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
230
231         /* Read Class of Device */
232         hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
233
234         /* Read Local Name */
235         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
236
237         /* Read Voice Setting */
238         hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
239
240         /* Optional initialization */
241
242         /* Clear Event Filters */
243         flt_type = HCI_FLT_CLEAR_ALL;
244         hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
245
246         /* Page timeout ~20 secs */
247         param = cpu_to_le16(0x8000);
248         hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
249
250         /* Connection accept timeout ~20 secs */
251         param = cpu_to_le16(0x7d00);
252         hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
253 }
254
255 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
256 {
257         __u8 scan = opt;
258
259         BT_DBG("%s %x", hdev->name, scan);
260
261         /* Inquiry and Page scans */
262         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
263 }
264
265 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
266 {
267         __u8 auth = opt;
268
269         BT_DBG("%s %x", hdev->name, auth);
270
271         /* Authentication */
272         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
273 }
274
275 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
276 {
277         __u8 encrypt = opt;
278
279         BT_DBG("%s %x", hdev->name, encrypt);
280
281         /* Encryption */
282         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
283 }
284
285 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
286 {
287         __le16 policy = cpu_to_le16(opt);
288
289         BT_DBG("%s %x", hdev->name, policy);
290
291         /* Default link policy */
292         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
293 }
294
295 /* Get HCI device by index.
296  * Device is held on return. */
297 struct hci_dev *hci_dev_get(int index)
298 {
299         struct hci_dev *hdev = NULL;
300         struct list_head *p;
301
302         BT_DBG("%d", index);
303
304         if (index < 0)
305                 return NULL;
306
307         read_lock(&hci_dev_list_lock);
308         list_for_each(p, &hci_dev_list) {
309                 struct hci_dev *d = list_entry(p, struct hci_dev, list);
310                 if (d->id == index) {
311                         hdev = hci_dev_hold(d);
312                         break;
313                 }
314         }
315         read_unlock(&hci_dev_list_lock);
316         return hdev;
317 }
318
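/* Illustrative only: every successful hci_dev_get() must be balanced by
 * hci_dev_put(), as all the ioctl helpers below do. Minimal sketch:
 */
#if 0
        struct hci_dev *hdev = hci_dev_get(0);

        if (hdev) {
                /* ... use hdev ... */
                hci_dev_put(hdev);
        }
#endif
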
319 /* ---- Inquiry support ---- */
320 static void inquiry_cache_flush(struct hci_dev *hdev)
321 {
322         struct inquiry_cache *cache = &hdev->inq_cache;
323         struct inquiry_entry *next  = cache->list, *e;
324
325         BT_DBG("cache %p", cache);
326
327         cache->list = NULL;
328         while ((e = next)) {
329                 next = e->next;
330                 kfree(e);
331         }
332 }
333
334 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
335 {
336         struct inquiry_cache *cache = &hdev->inq_cache;
337         struct inquiry_entry *e;
338
339         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
340
341         for (e = cache->list; e; e = e->next)
342                 if (!bacmp(&e->data.bdaddr, bdaddr))
343                         break;
344         return e;
345 }
346
347 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
348 {
349         struct inquiry_cache *cache = &hdev->inq_cache;
350         struct inquiry_entry *e;
351
352         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
353
354         if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
355                 /* Entry not in the cache. Add new one. */
356                 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
357                         return;
358                 e->next     = cache->list;
359                 cache->list = e;
360         }
361
362         memcpy(&e->data, data, sizeof(*data));
363         e->timestamp = jiffies;
364         cache->timestamp = jiffies;
365 }
366
367 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
368 {
369         struct inquiry_cache *cache = &hdev->inq_cache;
370         struct inquiry_info *info = (struct inquiry_info *) buf;
371         struct inquiry_entry *e;
372         int copied = 0;
373
374         for (e = cache->list; e && copied < num; e = e->next, copied++) {
375                 struct inquiry_data *data = &e->data;
376                 bacpy(&info->bdaddr, &data->bdaddr);
377                 info->pscan_rep_mode    = data->pscan_rep_mode;
378                 info->pscan_period_mode = data->pscan_period_mode;
379                 info->pscan_mode        = data->pscan_mode;
380                 memcpy(info->dev_class, data->dev_class, 3);
381                 info->clock_offset      = data->clock_offset;
382                 info++;
383         }
384
385         BT_DBG("cache %p, copied %d", cache, copied);
386         return copied;
387 }
388
389 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
390 {
391         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
392         struct hci_cp_inquiry cp;
393
394         BT_DBG("%s", hdev->name);
395
396         if (test_bit(HCI_INQUIRY, &hdev->flags))
397                 return;
398
399         /* Start Inquiry */
400         memcpy(&cp.lap, &ir->lap, 3);
401         cp.length  = ir->length;
402         cp.num_rsp = ir->num_rsp;
403         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
404 }
405
406 int hci_inquiry(void __user *arg)
407 {
408         __u8 __user *ptr = arg;
409         struct hci_inquiry_req ir;
410         struct hci_dev *hdev;
411         int err = 0, do_inquiry = 0, max_rsp;
412         long timeo;
413         __u8 *buf;
414
415         if (copy_from_user(&ir, ptr, sizeof(ir)))
416                 return -EFAULT;
417
418         if (!(hdev = hci_dev_get(ir.dev_id)))
419                 return -ENODEV;
420
421         hci_dev_lock_bh(hdev);
422         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
423                                         inquiry_cache_empty(hdev) ||
424                                         ir.flags & IREQ_CACHE_FLUSH) {
425                 inquiry_cache_flush(hdev);
426                 do_inquiry = 1;
427         }
428         hci_dev_unlock_bh(hdev);
429
430         timeo = ir.length * msecs_to_jiffies(2000);
431         if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
432                 goto done;
433
434         /* For an unlimited number of responses, use a buffer with 255 entries */
435         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
436
437         /* cache_dump can't sleep, so allocate a temporary buffer here and
438          * copy it to user space afterwards.
439          */
440         if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
441                 err = -ENOMEM;
442                 goto done;
443         }
444
445         hci_dev_lock_bh(hdev);
446         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
447         hci_dev_unlock_bh(hdev);
448
449         BT_DBG("num_rsp %d", ir.num_rsp);
450
451         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
452                 ptr += sizeof(ir);
453                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
454                                         ir.num_rsp))
455                         err = -EFAULT;
456         } else
457                 err = -EFAULT;
458
459         kfree(buf);
460
461 done:
462         hci_dev_put(hdev);
463         return err;
464 }
465
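/* Illustrative only: the matching user-space call. HCIINQUIRY expects a
 * struct hci_inquiry_req immediately followed by room for the inquiry_info
 * entries it reports back (types and constants from the user-space
 * <bluetooth/bluetooth.h> and <bluetooth/hci.h> headers):
 */
#if 0
        uint8_t buf[sizeof(struct hci_inquiry_req) + 255 * sizeof(inquiry_info)];
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
        int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

        memset(buf, 0, sizeof(buf));
        ir->dev_id  = 0;                        /* hci0 */
        ir->flags   = IREQ_CACHE_FLUSH;
        ir->lap[0]  = 0x33;                     /* GIAC 0x9e8b33 */
        ir->lap[1]  = 0x8b;
        ir->lap[2]  = 0x9e;
        ir->length  = 8;                        /* 8 * 1.28 s */
        ir->num_rsp = 0;                        /* unlimited, i.e. 255 */

        if (ioctl(sk, HCIINQUIRY, buf) == 0)
                /* ir->num_rsp inquiry_info entries now follow *ir in buf */;
#endif
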
466 /* ---- HCI ioctl helpers ---- */
467
468 int hci_dev_open(__u16 dev)
469 {
470         struct hci_dev *hdev;
471         int ret = 0;
472
473         if (!(hdev = hci_dev_get(dev)))
474                 return -ENODEV;
475
476         BT_DBG("%s %p", hdev->name, hdev);
477
478         hci_req_lock(hdev);
479
480         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
481                 ret = -ERFKILL;
482                 goto done;
483         }
484
485         if (test_bit(HCI_UP, &hdev->flags)) {
486                 ret = -EALREADY;
487                 goto done;
488         }
489
490         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
491                 set_bit(HCI_RAW, &hdev->flags);
492
493         if (hdev->open(hdev)) {
494                 ret = -EIO;
495                 goto done;
496         }
497
498         if (!test_bit(HCI_RAW, &hdev->flags)) {
499                 atomic_set(&hdev->cmd_cnt, 1);
500                 set_bit(HCI_INIT, &hdev->flags);
501
502                 //__hci_request(hdev, hci_reset_req, 0, HZ);
503                 ret = __hci_request(hdev, hci_init_req, 0,
504                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
505
506                 clear_bit(HCI_INIT, &hdev->flags);
507         }
508
509         if (!ret) {
510                 hci_dev_hold(hdev);
511                 set_bit(HCI_UP, &hdev->flags);
512                 hci_notify(hdev, HCI_DEV_UP);
513         } else {
514                 /* Init failed, cleanup */
515                 tasklet_kill(&hdev->rx_task);
516                 tasklet_kill(&hdev->tx_task);
517                 tasklet_kill(&hdev->cmd_task);
518
519                 skb_queue_purge(&hdev->cmd_q);
520                 skb_queue_purge(&hdev->rx_q);
521
522                 if (hdev->flush)
523                         hdev->flush(hdev);
524
525                 if (hdev->sent_cmd) {
526                         kfree_skb(hdev->sent_cmd);
527                         hdev->sent_cmd = NULL;
528                 }
529
530                 hdev->close(hdev);
531                 hdev->flags = 0;
532         }
533
534 done:
535         hci_req_unlock(hdev);
536         hci_dev_put(hdev);
537         return ret;
538 }
539
540 static int hci_dev_do_close(struct hci_dev *hdev)
541 {
542         BT_DBG("%s %p", hdev->name, hdev);
543
544         hci_req_cancel(hdev, ENODEV);
545         hci_req_lock(hdev);
546
547         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
548                 hci_req_unlock(hdev);
549                 return 0;
550         }
551
552         /* Kill RX and TX tasks */
553         tasklet_kill(&hdev->rx_task);
554         tasklet_kill(&hdev->tx_task);
555
556         hci_dev_lock_bh(hdev);
557         inquiry_cache_flush(hdev);
558         hci_conn_hash_flush(hdev);
559         hci_dev_unlock_bh(hdev);
560
561         hci_notify(hdev, HCI_DEV_DOWN);
562
563         if (hdev->flush)
564                 hdev->flush(hdev);
565
566         /* Reset device */
567         skb_queue_purge(&hdev->cmd_q);
568         atomic_set(&hdev->cmd_cnt, 1);
569         if (!test_bit(HCI_RAW, &hdev->flags)) {
570                 set_bit(HCI_INIT, &hdev->flags);
571                 __hci_request(hdev, hci_reset_req, 0,
572                                         msecs_to_jiffies(250));
573                 clear_bit(HCI_INIT, &hdev->flags);
574         }
575
576         /* Kill cmd task */
577         tasklet_kill(&hdev->cmd_task);
578
579         /* Drop queues */
580         skb_queue_purge(&hdev->rx_q);
581         skb_queue_purge(&hdev->cmd_q);
582         skb_queue_purge(&hdev->raw_q);
583
584         /* Drop last sent command */
585         if (hdev->sent_cmd) {
586                 kfree_skb(hdev->sent_cmd);
587                 hdev->sent_cmd = NULL;
588         }
589
590         /* After this point our queues are empty
591          * and no tasks are scheduled. */
592         hdev->close(hdev);
593
594         /* Clear flags */
595         hdev->flags = 0;
596
597         hci_req_unlock(hdev);
598
599         hci_dev_put(hdev);
600         return 0;
601 }
602
603 int hci_dev_close(__u16 dev)
604 {
605         struct hci_dev *hdev;
606         int err;
607
608         if (!(hdev = hci_dev_get(dev)))
609                 return -ENODEV;
610         err = hci_dev_do_close(hdev);
611         hci_dev_put(hdev);
612         return err;
613 }
614
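/* Illustrative only: user space reaches hci_dev_open()/hci_dev_close()
 * through the HCIDEVUP/HCIDEVDOWN ioctls on a raw HCI socket:
 */
#if 0
        int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

        ioctl(sk, HCIDEVUP, 0);                 /* bring hci0 up */
        ioctl(sk, HCIDEVDOWN, 0);               /* and down again */
#endif
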
615 int hci_dev_reset(__u16 dev)
616 {
617         struct hci_dev *hdev;
618         int ret = 0;
619
620         if (!(hdev = hci_dev_get(dev)))
621                 return -ENODEV;
622
623         hci_req_lock(hdev);
624         tasklet_disable(&hdev->tx_task);
625
626         if (!test_bit(HCI_UP, &hdev->flags))
627                 goto done;
628
629         /* Drop queues */
630         skb_queue_purge(&hdev->rx_q);
631         skb_queue_purge(&hdev->cmd_q);
632
633         hci_dev_lock_bh(hdev);
634         inquiry_cache_flush(hdev);
635         hci_conn_hash_flush(hdev);
636         hci_dev_unlock_bh(hdev);
637
638         if (hdev->flush)
639                 hdev->flush(hdev);
640
641         atomic_set(&hdev->cmd_cnt, 1);
642         hdev->acl_cnt = 0; hdev->sco_cnt = 0;
643
644         if (!test_bit(HCI_RAW, &hdev->flags))
645                 ret = __hci_request(hdev, hci_reset_req, 0,
646                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
647
648 done:
649         tasklet_enable(&hdev->tx_task);
650         hci_req_unlock(hdev);
651         hci_dev_put(hdev);
652         return ret;
653 }
654
655 int hci_dev_reset_stat(__u16 dev)
656 {
657         struct hci_dev *hdev;
658         int ret = 0;
659
660         if (!(hdev = hci_dev_get(dev)))
661                 return -ENODEV;
662
663         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
664
665         hci_dev_put(hdev);
666
667         return ret;
668 }
669
670 int hci_dev_cmd(unsigned int cmd, void __user *arg)
671 {
672         struct hci_dev *hdev;
673         struct hci_dev_req dr;
674         int err = 0;
675
676         if (copy_from_user(&dr, arg, sizeof(dr)))
677                 return -EFAULT;
678
679         if (!(hdev = hci_dev_get(dr.dev_id)))
680                 return -ENODEV;
681
682         switch (cmd) {
683         case HCISETAUTH:
684                 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
685                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
686                 break;
687
688         case HCISETENCRYPT:
689                 if (!lmp_encrypt_capable(hdev)) {
690                         err = -EOPNOTSUPP;
691                         break;
692                 }
693
694                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
695                         /* Auth must be enabled first */
696                         err = hci_request(hdev, hci_auth_req, dr.dev_opt,
697                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
698                         if (err)
699                                 break;
700                 }
701
702                 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
703                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
704                 break;
705
706         case HCISETSCAN:
707                 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
708                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
709                 break;
710
711         case HCISETLINKPOL:
712                 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
713                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
714                 break;
715
716         case HCISETLINKMODE:
717                 hdev->link_mode = ((__u16) dr.dev_opt) &
718                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
719                 break;
720
721         case HCISETPTYPE:
722                 hdev->pkt_type = (__u16) dr.dev_opt;
723                 break;
724
725         case HCISETACLMTU:
726                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
727                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
728                 break;
729
730         case HCISETSCOMTU:
731                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
732                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
733                 break;
734
735         default:
736                 err = -EINVAL;
737                 break;
738         }
739
740         hci_dev_put(hdev);
741         return err;
742 }
743
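/* Illustrative only: these commands take a struct hci_dev_req. Making hci0
 * both connectable and discoverable, for example (sk being a raw HCI
 * socket as in the earlier sketches):
 */
#if 0
        struct hci_dev_req dr = {
                .dev_id  = 0,
                .dev_opt = SCAN_PAGE | SCAN_INQUIRY,
        };

        ioctl(sk, HCISETSCAN, (unsigned long) &dr);
#endif
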
744 int hci_get_dev_list(void __user *arg)
745 {
746         struct hci_dev_list_req *dl;
747         struct hci_dev_req *dr;
748         struct list_head *p;
749         int n = 0, size, err;
750         __u16 dev_num;
751
752         if (get_user(dev_num, (__u16 __user *) arg))
753                 return -EFAULT;
754
755         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
756                 return -EINVAL;
757
758         size = sizeof(*dl) + dev_num * sizeof(*dr);
759
760         if (!(dl = kzalloc(size, GFP_KERNEL)))
761                 return -ENOMEM;
762
763         dr = dl->dev_req;
764
765         read_lock_bh(&hci_dev_list_lock);
766         list_for_each(p, &hci_dev_list) {
767                 struct hci_dev *hdev;
768                 hdev = list_entry(p, struct hci_dev, list);
769                 (dr + n)->dev_id  = hdev->id;
770                 (dr + n)->dev_opt = hdev->flags;
771                 if (++n >= dev_num)
772                         break;
773         }
774         read_unlock_bh(&hci_dev_list_lock);
775
776         dl->dev_num = n;
777         size = sizeof(*dl) + n * sizeof(*dr);
778
779         err = copy_to_user(arg, dl, size);
780         kfree(dl);
781
782         return err ? -EFAULT : 0;
783 }
784
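/* Illustrative only: the matching user-space call allocates the same
 * variable-length layout this function fills in:
 */
#if 0
        struct hci_dev_list_req *dl;
        int i;

        dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
        dl->dev_num = HCI_MAX_DEV;

        if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
                for (i = 0; i < dl->dev_num; i++)
                        printf("hci%d flags 0x%x\n",
                                dl->dev_req[i].dev_id, dl->dev_req[i].dev_opt);

        free(dl);
#endif
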
785 int hci_get_dev_info(void __user *arg)
786 {
787         struct hci_dev *hdev;
788         struct hci_dev_info di;
789         int err = 0;
790
791         if (copy_from_user(&di, arg, sizeof(di)))
792                 return -EFAULT;
793
794         if (!(hdev = hci_dev_get(di.dev_id)))
795                 return -ENODEV;
796
797         strcpy(di.name, hdev->name);
798         di.bdaddr   = hdev->bdaddr;
799         di.type     = hdev->type;
800         di.flags    = hdev->flags;
801         di.pkt_type = hdev->pkt_type;
802         di.acl_mtu  = hdev->acl_mtu;
803         di.acl_pkts = hdev->acl_pkts;
804         di.sco_mtu  = hdev->sco_mtu;
805         di.sco_pkts = hdev->sco_pkts;
806         di.link_policy = hdev->link_policy;
807         di.link_mode   = hdev->link_mode;
808
809         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
810         memcpy(&di.features, &hdev->features, sizeof(di.features));
811
812         if (copy_to_user(arg, &di, sizeof(di)))
813                 err = -EFAULT;
814
815         hci_dev_put(hdev);
816
817         return err;
818 }
819
820 /* ---- Interface to HCI drivers ---- */
821
822 static int hci_rfkill_set_block(void *data, bool blocked)
823 {
824         struct hci_dev *hdev = data;
825
826         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
827
828         if (!blocked)
829                 return 0;
830
831         hci_dev_do_close(hdev);
832
833         return 0;
834 }
835
836 static const struct rfkill_ops hci_rfkill_ops = {
837         .set_block = hci_rfkill_set_block,
838 };
839
840 /* Alloc HCI device */
841 struct hci_dev *hci_alloc_dev(void)
842 {
843         struct hci_dev *hdev;
844
845         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
846         if (!hdev)
847                 return NULL;
848
849         skb_queue_head_init(&hdev->driver_init);
850
851         return hdev;
852 }
853 EXPORT_SYMBOL(hci_alloc_dev);
854
855 /* Free HCI device */
856 void hci_free_dev(struct hci_dev *hdev)
857 {
858         skb_queue_purge(&hdev->driver_init);
859
860         /* will free via device release */
861         put_device(&hdev->dev);
862 }
863 EXPORT_SYMBOL(hci_free_dev);
864
865 /* Register HCI device */
866 int hci_register_dev(struct hci_dev *hdev)
867 {
868         struct list_head *head = &hci_dev_list, *p;
869         int i, id = 0;
870
871         BT_DBG("%p name %s type %d owner %p", hdev, hdev->name,
872                                                 hdev->type, hdev->owner);
873
874         if (!hdev->open || !hdev->close || !hdev->destruct)
875                 return -EINVAL;
876
877         write_lock_bh(&hci_dev_list_lock);
878
879         /* Find first available device id */
880         list_for_each(p, &hci_dev_list) {
881                 if (list_entry(p, struct hci_dev, list)->id != id)
882                         break;
883                 head = p; id++;
884         }
885
886         sprintf(hdev->name, "hci%d", id);
887         hdev->id = id;
888         list_add(&hdev->list, head);
889
890         atomic_set(&hdev->refcnt, 1);
891         spin_lock_init(&hdev->lock);
892
893         hdev->flags = 0;
894         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
895         hdev->esco_type = (ESCO_HV1);
896         hdev->link_mode = (HCI_LM_ACCEPT);
897
898         hdev->idle_timeout = 0;
899         hdev->sniff_max_interval = 800;
900         hdev->sniff_min_interval = 80;
901
902         tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
903         tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
904         tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
905
906         skb_queue_head_init(&hdev->rx_q);
907         skb_queue_head_init(&hdev->cmd_q);
908         skb_queue_head_init(&hdev->raw_q);
909
910         for (i = 0; i < 3; i++)
911                 hdev->reassembly[i] = NULL;
912
913         init_waitqueue_head(&hdev->req_wait_q);
914         mutex_init(&hdev->req_lock);
915
916         inquiry_cache_init(hdev);
917
918         hci_conn_hash_init(hdev);
919
920         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
921
922         atomic_set(&hdev->promisc, 0);
923
924         write_unlock_bh(&hci_dev_list_lock);
925
926         hci_register_sysfs(hdev);
927
928         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
929                                 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
930         if (hdev->rfkill) {
931                 if (rfkill_register(hdev->rfkill) < 0) {
932                         rfkill_destroy(hdev->rfkill);
933                         hdev->rfkill = NULL;
934                 }
935         }
936
937         hci_notify(hdev, HCI_DEV_REG);
938
939         return id;
940 }
941 EXPORT_SYMBOL(hci_register_dev);
942
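/* Illustrative only: the minimal driver-side pattern, loosely following
 * drivers/bluetooth/hci_vhci.c. The example_* callbacks are hypothetical;
 * open, close, send and destruct are the hooks this core requires.
 */
#if 0
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct sk_buff *skb)
{
        /* hand the frame to the transport (USB, UART, ...) */
        kfree_skb(skb);
        return 0;
}

static void example_destruct(struct hci_dev *hdev)
{
}

static int example_probe(void)
{
        struct hci_dev *hdev = hci_alloc_dev();

        if (!hdev)
                return -ENOMEM;

        hdev->type     = HCI_VIRTUAL;
        hdev->owner    = THIS_MODULE;
        hdev->open     = example_open;
        hdev->close    = example_close;
        hdev->send     = example_send;
        hdev->destruct = example_destruct;

        if (hci_register_dev(hdev) < 0) {
                hci_free_dev(hdev);
                return -EBUSY;
        }

        return 0;
}
#endif
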
943 /* Unregister HCI device */
944 int hci_unregister_dev(struct hci_dev *hdev)
945 {
946         int i;
947
948         BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
949
950         write_lock_bh(&hci_dev_list_lock);
951         list_del(&hdev->list);
952         write_unlock_bh(&hci_dev_list_lock);
953
954         hci_dev_do_close(hdev);
955
956         for (i = 0; i < 3; i++)
957                 kfree_skb(hdev->reassembly[i]);
958
959         hci_notify(hdev, HCI_DEV_UNREG);
960
961         if (hdev->rfkill) {
962                 rfkill_unregister(hdev->rfkill);
963                 rfkill_destroy(hdev->rfkill);
964         }
965
966         hci_unregister_sysfs(hdev);
967
968         __hci_dev_put(hdev);
969
970         return 0;
971 }
972 EXPORT_SYMBOL(hci_unregister_dev);
973
974 /* Suspend HCI device */
975 int hci_suspend_dev(struct hci_dev *hdev)
976 {
977         hci_notify(hdev, HCI_DEV_SUSPEND);
978         return 0;
979 }
980 EXPORT_SYMBOL(hci_suspend_dev);
981
982 /* Resume HCI device */
983 int hci_resume_dev(struct hci_dev *hdev)
984 {
985         hci_notify(hdev, HCI_DEV_RESUME);
986         return 0;
987 }
988 EXPORT_SYMBOL(hci_resume_dev);
989
990 /* Receive frame from HCI drivers */
991 int hci_recv_frame(struct sk_buff *skb)
992 {
993         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
994         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
995                                 && !test_bit(HCI_INIT, &hdev->flags))) {
996                 kfree_skb(skb);
997                 return -ENXIO;
998         }
999
1000         /* Incoming skb */
1001         bt_cb(skb)->incoming = 1;
1002
1003         /* Time stamp */
1004         __net_timestamp(skb);
1005
1006         /* Queue frame for rx task */
1007         skb_queue_tail(&hdev->rx_q, skb);
1008         hci_sched_rx(hdev);
1009         return 0;
1010 }
1011 EXPORT_SYMBOL(hci_recv_frame);
1012
1013 /* Receive packet type fragment */
1014 #define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2]) /* ACL/SCO/event (0x02-0x04) -> slots 0-2 */
1015
1016 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1017 {
1018         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1019                 return -EILSEQ;
1020
1021         while (count) {
1022                 struct sk_buff *skb = __reassembly(hdev, type);
1023                 struct { int expect; } *scb;
1024                 int len = 0;
1025
1026                 if (!skb) {
1027                         /* Start of the frame */
1028
1029                         switch (type) {
1030                         case HCI_EVENT_PKT:
1031                                 if (count >= HCI_EVENT_HDR_SIZE) {
1032                                         struct hci_event_hdr *h = data;
1033                                         len = HCI_EVENT_HDR_SIZE + h->plen;
1034                                 } else
1035                                         return -EILSEQ;
1036                                 break;
1037
1038                         case HCI_ACLDATA_PKT:
1039                                 if (count >= HCI_ACL_HDR_SIZE) {
1040                                         struct hci_acl_hdr *h = data;
1041                                         len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
1042                                 } else
1043                                         return -EILSEQ;
1044                                 break;
1045
1046                         case HCI_SCODATA_PKT:
1047                                 if (count >= HCI_SCO_HDR_SIZE) {
1048                                         struct hci_sco_hdr *h = data;
1049                                         len = HCI_SCO_HDR_SIZE + h->dlen;
1050                                 } else
1051                                         return -EILSEQ;
1052                                 break;
1053                         }
1054
1055                         skb = bt_skb_alloc(len, GFP_ATOMIC);
1056                         if (!skb) {
1057                                 BT_ERR("%s no memory for packet", hdev->name);
1058                                 return -ENOMEM;
1059                         }
1060
1061                         skb->dev = (void *) hdev;
1062                         bt_cb(skb)->pkt_type = type;
1063
1064                         __reassembly(hdev, type) = skb;
1065
1066                         scb = (void *) skb->cb;
1067                         scb->expect = len;
1068                 } else {
1069                         /* Continuation */
1070
1071                         scb = (void *) skb->cb;
1072                         len = scb->expect;
1073                 }
1074
1075                 len = min(len, count);
1076
1077                 memcpy(skb_put(skb, len), data, len);
1078
1079                 scb->expect -= len;
1080
1081                 if (scb->expect == 0) {
1082                         /* Complete frame */
1083
1084                         __reassembly(hdev, type) = NULL;
1085
1086                         bt_cb(skb)->pkt_type = type;
1087                         hci_recv_frame(skb);
1088                 }
1089
1090                 count -= len; data += len;
1091         }
1092
1093         return 0;
1094 }
1095 EXPORT_SYMBOL(hci_recv_fragment);
1096
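/* Illustrative only: a UART-style driver that has already consumed the H4
 * packet-type indicator byte can feed the remaining bytes here in
 * arbitrary chunks (pkt_indicator, rx_buf and rx_len are hypothetical
 * driver state):
 */
#if 0
        switch (pkt_indicator) {
        case HCI_EVENT_PKT:
        case HCI_ACLDATA_PKT:
        case HCI_SCODATA_PKT:
                hci_recv_fragment(hdev, pkt_indicator, rx_buf, rx_len);
                break;
        }
#endif
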
1097 /* ---- Interface to upper protocols ---- */
1098
1099 /* Register/Unregister protocols.
1100  * hci_task_lock is used to ensure that no tasks are running. */
1101 int hci_register_proto(struct hci_proto *hp)
1102 {
1103         int err = 0;
1104
1105         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1106
1107         if (hp->id >= HCI_MAX_PROTO)
1108                 return -EINVAL;
1109
1110         write_lock_bh(&hci_task_lock);
1111
1112         if (!hci_proto[hp->id])
1113                 hci_proto[hp->id] = hp;
1114         else
1115                 err = -EEXIST;
1116
1117         write_unlock_bh(&hci_task_lock);
1118
1119         return err;
1120 }
1121 EXPORT_SYMBOL(hci_register_proto);
1122
1123 int hci_unregister_proto(struct hci_proto *hp)
1124 {
1125         int err = 0;
1126
1127         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1128
1129         if (hp->id >= HCI_MAX_PROTO)
1130                 return -EINVAL;
1131
1132         write_lock_bh(&hci_task_lock);
1133
1134         if (hci_proto[hp->id])
1135                 hci_proto[hp->id] = NULL;
1136         else
1137                 err = -ENOENT;
1138
1139         write_unlock_bh(&hci_task_lock);
1140
1141         return err;
1142 }
1143 EXPORT_SYMBOL(hci_unregister_proto);
1144
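/* Illustrative only: how an upper protocol wires itself in (compare the
 * L2CAP module of this era); only fields this file dereferences are shown,
 * and example_recv_acldata is a hypothetical handler.
 */
#if 0
static struct hci_proto example_proto = {
        .name         = "EXAMPLE",
        .id           = HCI_PROTO_L2CAP,
        .recv_acldata = example_recv_acldata,
};

/* hci_register_proto(&example_proto); */
#endif
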
1145 int hci_register_cb(struct hci_cb *cb)
1146 {
1147         BT_DBG("%p name %s", cb, cb->name);
1148
1149         write_lock_bh(&hci_cb_list_lock);
1150         list_add(&cb->list, &hci_cb_list);
1151         write_unlock_bh(&hci_cb_list_lock);
1152
1153         return 0;
1154 }
1155 EXPORT_SYMBOL(hci_register_cb);
1156
1157 int hci_unregister_cb(struct hci_cb *cb)
1158 {
1159         BT_DBG("%p name %s", cb, cb->name);
1160
1161         write_lock_bh(&hci_cb_list_lock);
1162         list_del(&cb->list);
1163         write_unlock_bh(&hci_cb_list_lock);
1164
1165         return 0;
1166 }
1167 EXPORT_SYMBOL(hci_unregister_cb);
1168
1169 static int hci_send_frame(struct sk_buff *skb)
1170 {
1171         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1172
1173         if (!hdev) {
1174                 kfree_skb(skb);
1175                 return -ENODEV;
1176         }
1177
1178         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1179
1180         if (atomic_read(&hdev->promisc)) {
1181                 /* Time stamp */
1182                 __net_timestamp(skb);
1183
1184                 hci_send_to_sock(hdev, skb);
1185         }
1186
1187         /* Get rid of the skb owner prior to sending to the driver. */
1188         skb_orphan(skb);
1189
1190         return hdev->send(skb);
1191 }
1192
1193 /* Send HCI command */
1194 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1195 {
1196         int len = HCI_COMMAND_HDR_SIZE + plen;
1197         struct hci_command_hdr *hdr;
1198         struct sk_buff *skb;
1199
1200         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1201
1202         skb = bt_skb_alloc(len, GFP_ATOMIC);
1203         if (!skb) {
1204                 BT_ERR("%s no memory for command", hdev->name);
1205                 return -ENOMEM;
1206         }
1207
1208         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1209         hdr->opcode = cpu_to_le16(opcode);
1210         hdr->plen   = plen;
1211
1212         if (plen)
1213                 memcpy(skb_put(skb, plen), param, plen);
1214
1215         BT_DBG("skb len %d", skb->len);
1216
1217         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1218         skb->dev = (void *) hdev;
1219         skb_queue_tail(&hdev->cmd_q, skb);
1220         hci_sched_cmd(hdev);
1221
1222         return 0;
1223 }
1224
1225 /* Get data from the previously sent command */
1226 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1227 {
1228         struct hci_command_hdr *hdr;
1229
1230         if (!hdev->sent_cmd)
1231                 return NULL;
1232
1233         hdr = (void *) hdev->sent_cmd->data;
1234
1235         if (hdr->opcode != cpu_to_le16(opcode))
1236                 return NULL;
1237
1238         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1239
1240         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1241 }
1242
1243 /* Send ACL data */
1244 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1245 {
1246         struct hci_acl_hdr *hdr;
1247         int len = skb->len;
1248
1249         skb_push(skb, HCI_ACL_HDR_SIZE);
1250         skb_reset_transport_header(skb);
1251         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1252         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1253         hdr->dlen   = cpu_to_le16(len);
1254 }
1255
1256 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1257 {
1258         struct hci_dev *hdev = conn->hdev;
1259         struct sk_buff *list;
1260
1261         BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1262
1263         skb->dev = (void *) hdev;
1264         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1265         hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1266
1267         if (!(list = skb_shinfo(skb)->frag_list)) {
1268                 /* Non fragmented */
1269                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1270
1271                 skb_queue_tail(&conn->data_q, skb);
1272         } else {
1273                 /* Fragmented */
1274                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1275
1276                 skb_shinfo(skb)->frag_list = NULL;
1277
1278                 /* Queue all fragments atomically */
1279                 spin_lock_bh(&conn->data_q.lock);
1280
1281                 __skb_queue_tail(&conn->data_q, skb);
1282                 do {
1283                         skb = list; list = list->next;
1284
1285                         skb->dev = (void *) hdev;
1286                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1287                         hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1288
1289                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1290
1291                         __skb_queue_tail(&conn->data_q, skb);
1292                 } while (list);
1293
1294                 spin_unlock_bh(&conn->data_q.lock);
1295         }
1296
1297         hci_sched_tx(hdev);
1298         return 0;
1299 }
1300 EXPORT_SYMBOL(hci_send_acl);
1301
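/* Illustrative only: an upper layer sends a fragmented PDU by chaining the
 * continuation skbs on skb_shinfo(skb)->frag_list and calling
 * hci_send_acl(conn, skb, 0); the code above then tags the head ACL_START
 * and every fragment ACL_CONT before queuing them atomically.
 */
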
1302 /* Send SCO data */
1303 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1304 {
1305         struct hci_dev *hdev = conn->hdev;
1306         struct hci_sco_hdr hdr;
1307
1308         BT_DBG("%s len %d", hdev->name, skb->len);
1309
1310         if (skb->len > hdev->sco_mtu) {
1311                 kfree_skb(skb);
1312                 return -EINVAL;
1313         }
1314
1315         hdr.handle = cpu_to_le16(conn->handle);
1316         hdr.dlen   = skb->len;
1317
1318         skb_push(skb, HCI_SCO_HDR_SIZE);
1319         skb_reset_transport_header(skb);
1320         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1321
1322         skb->dev = (void *) hdev;
1323         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1324         skb_queue_tail(&conn->data_q, skb);
1325         hci_sched_tx(hdev);
1326         return 0;
1327 }
1328 EXPORT_SYMBOL(hci_send_sco);
1329
1330 /* ---- HCI TX task (outgoing data) ---- */
1331
1332 /* HCI Connection scheduler */
1333 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1334 {
1335         struct hci_conn_hash *h = &hdev->conn_hash;
1336         struct hci_conn *conn = NULL;
1337         int num = 0, min = ~0;
1338         struct list_head *p;
1339
1340         /* We don't have to lock device here. Connections are always
1341          * added and removed with TX task disabled. */
1342         list_for_each(p, &h->list) {
1343                 struct hci_conn *c;
1344                 c = list_entry(p, struct hci_conn, list);
1345
1346                 if (c->type != type || skb_queue_empty(&c->data_q))
1347                         continue;
1348
1349                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1350                         continue;
1351
1352                 num++;
1353
1354                 if (c->sent < min) {
1355                         min  = c->sent;
1356                         conn = c;
1357                 }
1358         }
1359
1360         if (conn) {
1361                 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1362                 int q = cnt / num;
1363                 *quote = q ? q : 1;
1364         } else
1365                 *quote = 0;
1366
1367         BT_DBG("conn %p quote %d", conn, *quote);
1368         return conn;
1369 }
1370
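/* Illustrative only, a worked example of the quota above: with
 * hdev->acl_cnt = 8 free controller buffers shared by num = 3 busy ACL
 * connections, the least-loaded connection gets quote = 8 / 3 = 2 packets
 * this round; a single connection would get all 8, and a zero quotient is
 * rounded up to 1 so progress is always made.
 */
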
1371 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1372 {
1373         struct hci_conn_hash *h = &hdev->conn_hash;
1374         struct list_head *p;
1375         struct hci_conn  *c;
1376
1377         BT_ERR("%s ACL tx timeout", hdev->name);
1378
1379         /* Kill stalled connections */
1380         list_for_each(p, &h->list) {
1381                 c = list_entry(p, struct hci_conn, list);
1382                 if (c->type == ACL_LINK && c->sent) {
1383                         BT_ERR("%s killing stalled ACL connection %s",
1384                                 hdev->name, batostr(&c->dst));
1385                         hci_acl_disconn(c, 0x13); /* Remote User Terminated Connection */
1386                 }
1387         }
1388 }
1389
1390 static inline void hci_sched_acl(struct hci_dev *hdev)
1391 {
1392         struct hci_conn *conn;
1393         struct sk_buff *skb;
1394         int quote;
1395
1396         BT_DBG("%s", hdev->name);
1397
1398         if (!test_bit(HCI_RAW, &hdev->flags)) {
1399                 /* ACL tx timeout must be longer than maximum
1400                  * link supervision timeout (40.9 seconds) */
1401                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1402                         hci_acl_tx_to(hdev);
1403         }
1404
1405         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1406                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1407                         BT_DBG("skb %p len %d", skb, skb->len);
1408
1409                         hci_conn_enter_active_mode(conn);
1410
1411                         hci_send_frame(skb);
1412                         hdev->acl_last_tx = jiffies;
1413
1414                         hdev->acl_cnt--;
1415                         conn->sent++;
1416                 }
1417         }
1418 }
1419
1420 /* Schedule SCO */
1421 static inline void hci_sched_sco(struct hci_dev *hdev)
1422 {
1423         struct hci_conn *conn;
1424         struct sk_buff *skb;
1425         int quote;
1426
1427         BT_DBG("%s", hdev->name);
1428
1429         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1430                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1431                         BT_DBG("skb %p len %d", skb, skb->len);
1432                         hci_send_frame(skb);
1433
1434                         conn->sent++;
1435                         if (conn->sent == ~0)
1436                                 conn->sent = 0;
1437                 }
1438         }
1439 }
1440
1441 static inline void hci_sched_esco(struct hci_dev *hdev)
1442 {
1443         struct hci_conn *conn;
1444         struct sk_buff *skb;
1445         int quote;
1446
1447         BT_DBG("%s", hdev->name);
1448
1449         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1450                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1451                         BT_DBG("skb %p len %d", skb, skb->len);
1452                         hci_send_frame(skb);
1453
1454                         conn->sent++;
1455                         if (conn->sent == ~0)
1456                                 conn->sent = 0;
1457                 }
1458         }
1459 }
1460
1461 static void hci_tx_task(unsigned long arg)
1462 {
1463         struct hci_dev *hdev = (struct hci_dev *) arg;
1464         struct sk_buff *skb;
1465
1466         read_lock(&hci_task_lock);
1467
1468         BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1469
1470         /* Schedule queues and send stuff to HCI driver */
1471
1472         hci_sched_acl(hdev);
1473
1474         hci_sched_sco(hdev);
1475
1476         hci_sched_esco(hdev);
1477
1478         /* Send next queued raw (unknown type) packet */
1479         while ((skb = skb_dequeue(&hdev->raw_q)))
1480                 hci_send_frame(skb);
1481
1482         read_unlock(&hci_task_lock);
1483 }
1484
1485 /* ----- HCI RX task (incoming data processing) ----- */
1486
1487 /* ACL data packet */
1488 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1489 {
1490         struct hci_acl_hdr *hdr = (void *) skb->data;
1491         struct hci_conn *conn;
1492         __u16 handle, flags;
1493
1494         skb_pull(skb, HCI_ACL_HDR_SIZE);
1495
1496         handle = __le16_to_cpu(hdr->handle);
1497         flags  = hci_flags(handle);
1498         handle = hci_handle(handle);
1499
1500         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1501
1502         hdev->stat.acl_rx++;
1503
1504         hci_dev_lock(hdev);
1505         conn = hci_conn_hash_lookup_handle(hdev, handle);
1506         hci_dev_unlock(hdev);
1507
1508         if (conn) {
1509                 register struct hci_proto *hp;
1510
1511                 hci_conn_enter_active_mode(conn);
1512
1513                 /* Send to upper protocol */
1514                 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1515                         hp->recv_acldata(conn, skb, flags);
1516                         return;
1517                 }
1518         } else {
1519                 BT_ERR("%s ACL packet for unknown connection handle %d",
1520                         hdev->name, handle);
1521         }
1522
1523         kfree_skb(skb);
1524 }
1525
1526 /* SCO data packet */
1527 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1528 {
1529         struct hci_sco_hdr *hdr = (void *) skb->data;
1530         struct hci_conn *conn;
1531         __u16 handle;
1532
1533         skb_pull(skb, HCI_SCO_HDR_SIZE);
1534
1535         handle = __le16_to_cpu(hdr->handle);
1536
1537         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1538
1539         hdev->stat.sco_rx++;
1540
1541         hci_dev_lock(hdev);
1542         conn = hci_conn_hash_lookup_handle(hdev, handle);
1543         hci_dev_unlock(hdev);
1544
1545         if (conn) {
1546                 register struct hci_proto *hp;
1547
1548                 /* Send to upper protocol */
1549                 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1550                         hp->recv_scodata(conn, skb);
1551                         return;
1552                 }
1553         } else {
1554                 BT_ERR("%s SCO packet for unknown connection handle %d",
1555                         hdev->name, handle);
1556         }
1557
1558         kfree_skb(skb);
1559 }
1560
1561 static void hci_rx_task(unsigned long arg)
1562 {
1563         struct hci_dev *hdev = (struct hci_dev *) arg;
1564         struct sk_buff *skb;
1565
1566         BT_DBG("%s", hdev->name);
1567
1568         read_lock(&hci_task_lock);
1569
1570         while ((skb = skb_dequeue(&hdev->rx_q))) {
1571                 if (atomic_read(&hdev->promisc)) {
1572                         /* Send copy to the sockets */
1573                         hci_send_to_sock(hdev, skb);
1574                 }
1575
1576                 if (test_bit(HCI_RAW, &hdev->flags)) {
1577                         kfree_skb(skb);
1578                         continue;
1579                 }
1580
1581                 if (test_bit(HCI_INIT, &hdev->flags)) {
1582                         /* Don't process data packets in this state. */
1583                         switch (bt_cb(skb)->pkt_type) {
1584                         case HCI_ACLDATA_PKT:
1585                         case HCI_SCODATA_PKT:
1586                                 kfree_skb(skb);
1587                                 continue;
1588                         }
1589                 }
1590
1591                 /* Process frame */
1592                 switch (bt_cb(skb)->pkt_type) {
1593                 case HCI_EVENT_PKT:
1594                         hci_event_packet(hdev, skb);
1595                         break;
1596
1597                 case HCI_ACLDATA_PKT:
1598                         BT_DBG("%s ACL data packet", hdev->name);
1599                         hci_acldata_packet(hdev, skb);
1600                         break;
1601
1602                 case HCI_SCODATA_PKT:
1603                         BT_DBG("%s SCO data packet", hdev->name);
1604                         hci_scodata_packet(hdev, skb);
1605                         break;
1606
1607                 default:
1608                         kfree_skb(skb);
1609                         break;
1610                 }
1611         }
1612
1613         read_unlock(&hci_task_lock);
1614 }
1615
1616 static void hci_cmd_task(unsigned long arg)
1617 {
1618         struct hci_dev *hdev = (struct hci_dev *) arg;
1619         struct sk_buff *skb;
1620
1621         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1622
1623         if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1624                 BT_ERR("%s command tx timeout", hdev->name);
1625                 atomic_set(&hdev->cmd_cnt, 1);
1626         }
1627
1628         /* Send queued commands */
1629         if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1630                 kfree_skb(hdev->sent_cmd);
1631
1632                 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1633                         atomic_dec(&hdev->cmd_cnt);
1634                         hci_send_frame(skb);
1635                         hdev->cmd_last_tx = jiffies;
1636                 } else {
1637                         skb_queue_head(&hdev->cmd_q, skb);
1638                         hci_sched_cmd(hdev);
1639                 }
1640         }
1641 }