/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
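
/*
 * The request helpers below (hci_reset_req, hci_init_req, ...) all follow
 * a single pattern: hci_request() marks the request pending, the callback
 * queues one or more HCI commands, and the caller sleeps until
 * hci_req_complete() fires from event processing or the timeout expires.
 * A caller would drive them roughly like this (sketch, mirroring the
 * HCISETSCAN path in hci_dev_cmd() below):
 *
 *      err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                              msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */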

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
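
/*
 * The two timeout parameters above are expressed in baseband slots of
 * 0.625 ms, which is where the "~20 secs" comments come from:
 *
 *      page timeout:        0x8000 = 32768 slots * 0.625 ms ~= 20.5 s
 *      conn accept timeout: 0x7d00 = 32000 slots * 0.625 ms  = 20.0 s
 */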

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %lx", hdev->name, opt);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
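
/*
 * Every successful hci_dev_get() must be balanced by hci_dev_put() once
 * the caller is done with the device; the ioctl helpers below all follow
 * this hold/put pattern:
 *
 *      if (!(hdev = hci_dev_get(dev_id)))
 *              return -ENODEV;
 *      ...
 *      hci_dev_put(hdev);
 */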

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses we use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so we allocate a temporary buffer
         * and then copy it to user space.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
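
/*
 * Note on the timeout above: the HCI Inquiry "length" parameter is given
 * in units of 1.28 s, so waiting ir.length * 2000 ms leaves the controller
 * a comfortable margin (e.g. length 8 means the inquiry itself runs
 * ~10.24 s while the request waits up to 16 s before timing out).
 */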

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}
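
/*
 * Shutdown ordering in hci_dev_do_close() matters: the RX/TX tasklets are
 * killed before the final HCI_OP_RESET is issued so nothing re-queues data,
 * the cmd tasklet is killed only after the reset request has had a chance
 * to drain, and hdev->close() runs last, once every queue is empty.
 */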

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
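
/*
 * For HCISETACLMTU/HCISETSCOMTU the 32-bit dev_opt packs two 16-bit halves;
 * given the pointer arithmetic above, on a little-endian machine userspace
 * would fill it roughly like this (illustrative sketch only):
 *
 *      dr.dev_opt = ((__u32) acl_mtu << 16) | acl_pkts;
 */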

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kzalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = hdev->type;
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s type %d owner %p", hdev, hdev->name,
                                                hdev->type, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < 3; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
EXPORT_SYMBOL(hci_register_dev);
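
/*
 * A transport driver typically pairs these exports as follows (minimal
 * sketch; my_open/my_close/my_send/my_destruct are hypothetical driver
 * callbacks, not part of this file):
 *
 *      hdev = hci_alloc_dev();
 *      hdev->open     = my_open;
 *      hdev->close    = my_close;
 *      hdev->send     = my_send;
 *      hdev->destruct = my_destruct;
 *      if (hci_register_dev(hdev) < 0)
 *              hci_free_dev(hdev);
 */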

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < 3; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                struct sk_buff *skb = __reassembly(hdev, type);
                struct { int expect; } *scb;
                int len = 0;

                if (!skb) {
                        /* Start of the frame */

                        switch (type) {
                        case HCI_EVENT_PKT:
                                if (count >= HCI_EVENT_HDR_SIZE) {
                                        struct hci_event_hdr *h = data;
                                        len = HCI_EVENT_HDR_SIZE + h->plen;
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_ACLDATA_PKT:
                                if (count >= HCI_ACL_HDR_SIZE) {
                                        struct hci_acl_hdr *h = data;
                                        len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_SCODATA_PKT:
                                if (count >= HCI_SCO_HDR_SIZE) {
                                        struct hci_sco_hdr *h = data;
                                        len = HCI_SCO_HDR_SIZE + h->dlen;
                                } else
                                        return -EILSEQ;
                                break;
                        }

                        skb = bt_skb_alloc(len, GFP_ATOMIC);
                        if (!skb) {
                                BT_ERR("%s no memory for packet", hdev->name);
                                return -ENOMEM;
                        }

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = type;

                        __reassembly(hdev, type) = skb;

                        scb = (void *) skb->cb;
                        scb->expect = len;
                } else {
                        /* Continuation */

                        scb = (void *) skb->cb;
                        len = scb->expect;
                }

                len = min(len, count);

                memcpy(skb_put(skb, len), data, len);

                scb->expect -= len;

                if (scb->expect == 0) {
                        /* Complete frame */

                        __reassembly(hdev, type) = NULL;

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);
                }

                count -= len; data += len;
        }

        return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
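
/*
 * hci_recv_fragment() lets byte-stream transports (e.g. UART-based drivers)
 * feed partial packets: the first chunk must contain at least the packet
 * header so the full length can be computed, and one in-progress skb is
 * kept per packet type until "expect" drops to zero. A driver that has
 * received n bytes of ACL data would call (sketch):
 *
 *      hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, n);
 */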

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}
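
/*
 * The command packet built above matches the HCI wire format: a 2-byte
 * little-endian opcode, a 1-byte parameter length, then plen bytes of
 * parameters. Commands are not sent directly; hci_cmd_task() transmits
 * them one at a time, gated by the cmd_cnt credit the controller grants.
 */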

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);

        return 0;
}
EXPORT_SYMBOL(hci_send_acl);
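
/*
 * Each ACL fragment carries the 12-bit connection handle plus packet
 * boundary flags packed into one 16-bit field by hci_handle_pack(): the
 * head of a frame goes out with ACL_START and every fragment taken from
 * the frag_list follows with ACL_CONT, so the controller can tell where
 * one higher-layer frame ends and the next begins.
 */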

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);

        return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
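
/*
 * The scheduler picks the eligible connection with the fewest in-flight
 * packets and hands it an even share of the remaining controller credits.
 * For example, with hdev->acl_cnt == 8 and two busy ACL connections, each
 * pass gives the least-loaded connection a quote of 8 / 2 = 4 packets;
 * with more connections than credits the quote bottoms out at 1.
 */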

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}
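
/*
 * The reason 0x13 passed to hci_acl_disconn() above is the HCI status code
 * "Remote User Terminated Connection", the conventional reason for tearing
 * down a link from the host side.
 */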

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}