net/bluetooth/hci_core.c (compat-wireless-2010-03-10, pandora-wifi.git)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
#include <linux/rfkill.h>
#else
#include <linux/rfkill_backport.h>
#endif

#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
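
/*
 * Usage sketch (illustrative, not from this file): a component interested
 * in HCI_DEV_* events registers a standard notifier_block; my_hci_notify
 * and my_hci_nb are hypothetical names.
 *
 *      static int my_hci_notify(struct notifier_block *nb,
 *                               unsigned long event, void *ptr)
 *      {
 *              struct hci_dev *hdev = ptr;
 *
 *              if (event == HCI_DEV_REG)
 *                      BT_DBG("%s registered", hdev->name);
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_hci_nb = {
 *              .notifier_call = my_hci_notify,
 *      };
 *
 *      hci_register_notifier(&my_hci_nb);
 */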

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
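
/*
 * Usage sketch: a caller builds the request in a small helper (such as
 * hci_scan_req() below) and blocks here until hci_req_complete() fires
 * or the timeout expires; values below are illustrative.
 *
 *      err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                              msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */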

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}
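
/*
 * Sizing note (sketch): the caller must supply room for num entries,
 * i.e. num * sizeof(struct inquiry_info) bytes; hci_inquiry() below
 * allocates exactly that before dumping the cache.
 */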

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * then copy it to user space.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
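
/*
 * Userspace view (sketch): this backs the HCIINQUIRY ioctl on an HCI
 * socket. The caller passes a hci_inquiry_req immediately followed by
 * room for the inquiry_info results; dd and the values below are
 * illustrative (0x9e8b33 is the general inquiry access code).
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } req = {
 *              .ir = {
 *                      .dev_id  = 0,
 *                      .lap     = { 0x33, 0x8b, 0x9e },
 *                      .length  = 8,
 *                      .num_rsp = 255,
 *              },
 *      };
 *
 *      if (ioctl(dd, HCIINQUIRY, &req) < 0)
 *              perror("HCIINQUIRY");
 */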

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

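        /* dev_opt packs two __u16 values: the packet count in its first
         * half-word and the MTU in the second (host byte order). */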
        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kzalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < 3; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
EXPORT_SYMBOL(hci_register_dev);
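
/*
 * Registration sketch for a transport driver: ->open, ->close and
 * ->destruct are mandatory (checked above) and ->send is what
 * hci_send_frame() invokes. my_open/my_close/my_send/my_destruct are
 * hypothetical driver callbacks.
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->open     = my_open;
 *      hdev->close    = my_close;
 *      hdev->send     = my_send;
 *      hdev->destruct = my_destruct;
 *
 *      if (hci_register_dev(hdev) < 0)
 *              hci_free_dev(hdev);
 */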

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < 3; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
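
/*
 * Delivery sketch for drivers that receive complete frames (hypothetical
 * rx_skb): tag the owning device and the packet type, then hand the skb
 * to the core, which queues it for the RX tasklet.
 *
 *      rx_skb->dev = (void *) hdev;
 *      bt_cb(rx_skb)->pkt_type = HCI_EVENT_PKT;
 *      hci_recv_frame(rx_skb);
 */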

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                struct sk_buff *skb = __reassembly(hdev, type);
                struct { int expect; } *scb;
                int len = 0;

                if (!skb) {
                        /* Start of the frame */

                        switch (type) {
                        case HCI_EVENT_PKT:
                                if (count >= HCI_EVENT_HDR_SIZE) {
                                        struct hci_event_hdr *h = data;
                                        len = HCI_EVENT_HDR_SIZE + h->plen;
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_ACLDATA_PKT:
                                if (count >= HCI_ACL_HDR_SIZE) {
                                        struct hci_acl_hdr *h = data;
                                        len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_SCODATA_PKT:
                                if (count >= HCI_SCO_HDR_SIZE) {
                                        struct hci_sco_hdr *h = data;
                                        len = HCI_SCO_HDR_SIZE + h->dlen;
                                } else
                                        return -EILSEQ;
                                break;
                        }

                        skb = bt_skb_alloc(len, GFP_ATOMIC);
                        if (!skb) {
                                BT_ERR("%s no memory for packet", hdev->name);
                                return -ENOMEM;
                        }

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = type;

                        __reassembly(hdev, type) = skb;

                        scb = (void *) skb->cb;
                        scb->expect = len;
                } else {
                        /* Continuation */

                        scb = (void *) skb->cb;
                        len = scb->expect;
                }

                len = min(len, count);

                memcpy(skb_put(skb, len), data, len);

                scb->expect -= len;

                if (scb->expect == 0) {
                        /* Complete frame */

                        __reassembly(hdev, type) = NULL;

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);
                }

                count -= len; data += len;
        }

        return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
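
/*
 * Usage sketch: a UART-style driver feeds raw bytes as they arrive,
 * prefixed with the packet type it parsed from the transport
 * (hypothetical buf/len):
 *
 *      err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *      if (err < 0)
 *              BT_ERR("frame reassembly failed");
 *
 * Partial frames are parked in hdev->reassembly[] until the byte count
 * announced in the packet header has been accumulated.
 */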

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}
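
/*
 * Parameter sketch (taken from hci_init_req() above): commands with a
 * payload pass a little-endian parameter buffer and its length, e.g.
 *
 *      __le16 param = cpu_to_le16(0x7d00);
 *
 *      hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
 */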

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
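        /* hci_handle_pack() stores the 12-bit connection handle in the
         * low bits and the packet boundary/broadcast flags (ACL_START,
         * ACL_CONT here) in the top four bits. */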
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);

        return 0;
}
EXPORT_SYMBOL(hci_send_acl);
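
/*
 * Fragmentation note: callers such as L2CAP may hand in an skb whose
 * frag_list carries continuation fragments; the head is tagged
 * ACL_START and every fragment ACL_CONT, so the controller can
 * reassemble the original PDU.
 */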

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);

        return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
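
/*
 * Scheduling example: the connection with the fewest in-flight packets
 * wins, and the free controller slots are split across all busy
 * connections. With acl_cnt == 8 and three ACL connections queuing
 * data, the winner may send 8 / 3 = 2 packets this pass (minimum 1).
 */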

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}