1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/module.h>
28 #include <linux/kmod.h>
29
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/skbuff.h>
39 #include <linux/interrupt.h>
40 #include <linux/notifier.h>
41 #include <net/sock.h>
42
43 #include <asm/system.h>
44 #include <asm/uaccess.h>
45 #include <asm/unaligned.h>
46
47 #include <net/bluetooth/bluetooth.h>
48 #include <net/bluetooth/hci_core.h>
49
50 #ifndef CONFIG_BT_HCI_CORE_DEBUG
51 #undef  BT_DBG
52 #define BT_DBG(D...)
53 #endif
54
55 static void hci_cmd_task(unsigned long arg);
56 static void hci_rx_task(unsigned long arg);
57 static void hci_tx_task(unsigned long arg);
58 static void hci_notify(struct hci_dev *hdev, int event);
59
60 static DEFINE_RWLOCK(hci_task_lock);
61
62 /* HCI device list */
63 LIST_HEAD(hci_dev_list);
64 DEFINE_RWLOCK(hci_dev_list_lock);
65
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list);
68 DEFINE_RWLOCK(hci_cb_list_lock);
69
70 /* HCI protocols */
71 #define HCI_MAX_PROTO   2
72 struct hci_proto *hci_proto[HCI_MAX_PROTO];
73
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
76
77 /* ---- HCI notifications ---- */
78
79 int hci_register_notifier(struct notifier_block *nb)
80 {
81         return atomic_notifier_chain_register(&hci_notifier, nb);
82 }
83
84 int hci_unregister_notifier(struct notifier_block *nb)
85 {
86         return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 }
88
89 static void hci_notify(struct hci_dev *hdev, int event)
90 {
91         atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 }
93
94 /* ---- HCI requests ---- */
95
96 void hci_req_complete(struct hci_dev *hdev, int result)
97 {
98         BT_DBG("%s result 0x%2.2x", hdev->name, result);
99
100         if (hdev->req_status == HCI_REQ_PEND) {
101                 hdev->req_result = result;
102                 hdev->req_status = HCI_REQ_DONE;
103                 wake_up_interruptible(&hdev->req_wait_q);
104         }
105 }
106
107 static void hci_req_cancel(struct hci_dev *hdev, int err)
108 {
109         BT_DBG("%s err 0x%2.2x", hdev->name, err);
110
111         if (hdev->req_status == HCI_REQ_PEND) {
112                 hdev->req_result = err;
113                 hdev->req_status = HCI_REQ_CANCELED;
114                 wake_up_interruptible(&hdev->req_wait_q);
115         }
116 }
117
118 /* Execute request and wait for completion. */
119 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
120                                 unsigned long opt, __u32 timeout)
121 {
122         DECLARE_WAITQUEUE(wait, current);
123         int err = 0;
124
125         BT_DBG("%s start", hdev->name);
126
127         hdev->req_status = HCI_REQ_PEND;
128
129         add_wait_queue(&hdev->req_wait_q, &wait);
130         set_current_state(TASK_INTERRUPTIBLE);
131
132         req(hdev, opt);
133         schedule_timeout(timeout);
134
135         remove_wait_queue(&hdev->req_wait_q, &wait);
136
137         if (signal_pending(current))
138                 return -EINTR;
139
140         switch (hdev->req_status) {
141         case HCI_REQ_DONE:
142                 err = -bt_err(hdev->req_result);
143                 break;
144
145         case HCI_REQ_CANCELED:
146                 err = -hdev->req_result;
147                 break;
148
149         default:
150                 err = -ETIMEDOUT;
151                 break;
152         }
153
154         hdev->req_status = hdev->req_result = 0;
155
156         BT_DBG("%s end: err %d", hdev->name, err);
157
158         return err;
159 }
160
161 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
162                                 unsigned long opt, __u32 timeout)
163 {
164         int ret;
165
166         /* Serialize all requests */
167         hci_req_lock(hdev);
168         ret = __hci_request(hdev, req, opt, timeout);
169         hci_req_unlock(hdev);
170
171         return ret;
172 }
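
/*
 * Typical usage (a sketch, not taken from this file; my_scan_req is a
 * hypothetical callback modelled on hci_scan_req below): a request callback
 * only queues HCI commands, and the request helpers wait for completion.
 *
 *	static void my_scan_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_request(hdev, my_scan_req, SCAN_PAGE,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * hci_req_complete() (invoked from the event handling code) stores the
 * controller status in req_result, marks the request HCI_REQ_DONE and wakes
 * req_wait_q; __hci_request() then maps that status to an errno via bt_err().
 */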
173
174 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
175 {
176         BT_DBG("%s %ld", hdev->name, opt);
177
178         /* Reset device */
179         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
180 }
181
182 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
183 {
184         struct sk_buff *skb;
185         __le16 param;
186
187         BT_DBG("%s %ld", hdev->name, opt);
188
189         /* Driver initialization */
190
191         /* Special commands */
192         while ((skb = skb_dequeue(&hdev->driver_init))) {
193                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
194                 skb->dev = (void *) hdev;
195                 skb_queue_tail(&hdev->cmd_q, skb);
196                 hci_sched_cmd(hdev);
197         }
198         skb_queue_purge(&hdev->driver_init);
199
200         /* Mandatory initialization */
201
202         /* Reset */
203         if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
204                         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
205
206         /* Read Local Supported Features */
207         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
208
209         /* Read Local Version */
210         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL);
211
212         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
214
215 #if 0
216         /* Host buffer size */
217         {
218                 struct hci_cp_host_buffer_size cp;
219                 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
220                 cp.sco_mtu = HCI_MAX_SCO_SIZE;
221                 cp.acl_max_pkt = cpu_to_le16(0xffff);
222                 cp.sco_max_pkt = cpu_to_le16(0xffff);
223                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
224         }
225 #endif
226
227         /* Read BD Address */
228         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);
229
230         /* Read Voice Setting */
231         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);
232
233         /* Optional initialization */
234
235         /* Clear Event Filters */
236         {
237                 struct hci_cp_set_event_flt cp;
238                 cp.flt_type  = HCI_FLT_CLEAR_ALL;
239                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
240         }
241
242         /* Page timeout ~20 secs */
243         param = cpu_to_le16(0x8000);
244         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);
245
246         /* Connection accept timeout ~20 secs */
247         param = cpu_to_le16(0x7d00);
248         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
249 }
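
/*
 * Note: the two timeouts written above are in baseband slots of 0.625 ms,
 * so 0x8000 (32768 slots) is about 20.5 seconds and 0x7d00 (32000 slots) is
 * exactly 20 seconds, matching the "~20 secs" comments.
 */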
250
251 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
252 {
253         __u8 scan = opt;
254
255         BT_DBG("%s %x", hdev->name, scan);
256
257         /* Inquiry and Page scans */
258         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
259 }
260
261 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
262 {
263         __u8 auth = opt;
264
265         BT_DBG("%s %x", hdev->name, auth);
266
267         /* Authentication */
268         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
269 }
270
271 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
272 {
273         __u8 encrypt = opt;
274
275         BT_DBG("%s %x", hdev->name, encrypt);
276
277         /* Encryption */
278         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
279 }
280
281 /* Get HCI device by index.
282  * Device is held on return. */
283 struct hci_dev *hci_dev_get(int index)
284 {
285         struct hci_dev *hdev = NULL;
286         struct list_head *p;
287
288         BT_DBG("%d", index);
289
290         if (index < 0)
291                 return NULL;
292
293         read_lock(&hci_dev_list_lock);
294         list_for_each(p, &hci_dev_list) {
295                 struct hci_dev *d = list_entry(p, struct hci_dev, list);
296                 if (d->id == index) {
297                         hdev = hci_dev_hold(d);
298                         break;
299                 }
300         }
301         read_unlock(&hci_dev_list_lock);
302         return hdev;
303 }
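
/*
 * Callers of hci_dev_get() own a reference on the returned device and must
 * release it with hci_dev_put() when they are done (see hci_inquiry() and
 * the ioctl helpers below).
 */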
304
305 /* ---- Inquiry support ---- */
306 static void inquiry_cache_flush(struct hci_dev *hdev)
307 {
308         struct inquiry_cache *cache = &hdev->inq_cache;
309         struct inquiry_entry *next  = cache->list, *e;
310
311         BT_DBG("cache %p", cache);
312
313         cache->list = NULL;
314         while ((e = next)) {
315                 next = e->next;
316                 kfree(e);
317         }
318 }
319
320 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
321 {
322         struct inquiry_cache *cache = &hdev->inq_cache;
323         struct inquiry_entry *e;
324
325         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
326
327         for (e = cache->list; e; e = e->next)
328                 if (!bacmp(&e->data.bdaddr, bdaddr))
329                         break;
330         return e;
331 }
332
333 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
334 {
335         struct inquiry_cache *cache = &hdev->inq_cache;
336         struct inquiry_entry *e;
337
338         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
339
340         if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
341                 /* Entry not in the cache. Add new one. */
342                 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
343                         return;
344                 e->next     = cache->list;
345                 cache->list = e;
346         }
347
348         memcpy(&e->data, data, sizeof(*data));
349         e->timestamp = jiffies;
350         cache->timestamp = jiffies;
351 }
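
/*
 * The inquiry cache is a simple singly linked list: lookups are linear,
 * new entries are pushed at the head, and entries are only freed by
 * inquiry_cache_flush(). Callers are expected to hold the device lock
 * (hci_dev_lock/hci_dev_lock_bh) around these helpers.
 */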
352
353 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
354 {
355         struct inquiry_cache *cache = &hdev->inq_cache;
356         struct inquiry_info *info = (struct inquiry_info *) buf;
357         struct inquiry_entry *e;
358         int copied = 0;
359
360         for (e = cache->list; e && copied < num; e = e->next, copied++) {
361                 struct inquiry_data *data = &e->data;
362                 bacpy(&info->bdaddr, &data->bdaddr);
363                 info->pscan_rep_mode    = data->pscan_rep_mode;
364                 info->pscan_period_mode = data->pscan_period_mode;
365                 info->pscan_mode        = data->pscan_mode;
366                 memcpy(info->dev_class, data->dev_class, 3);
367                 info->clock_offset      = data->clock_offset;
368                 info++;
369         }
370
371         BT_DBG("cache %p, copied %d", cache, copied);
372         return copied;
373 }
374
375 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
376 {
377         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
378         struct hci_cp_inquiry cp;
379
380         BT_DBG("%s", hdev->name);
381
382         if (test_bit(HCI_INQUIRY, &hdev->flags))
383                 return;
384
385         /* Start Inquiry */
386         memcpy(&cp.lap, &ir->lap, 3);
387         cp.length  = ir->length;
388         cp.num_rsp = ir->num_rsp;
389         hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
390 }
391
392 int hci_inquiry(void __user *arg)
393 {
394         __u8 __user *ptr = arg;
395         struct hci_inquiry_req ir;
396         struct hci_dev *hdev;
397         int err = 0, do_inquiry = 0, max_rsp;
398         long timeo;
399         __u8 *buf;
400
401         if (copy_from_user(&ir, ptr, sizeof(ir)))
402                 return -EFAULT;
403
404         if (!(hdev = hci_dev_get(ir.dev_id)))
405                 return -ENODEV;
406
407         hci_dev_lock_bh(hdev);
408         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
409                                         inquiry_cache_empty(hdev) ||
410                                         ir.flags & IREQ_CACHE_FLUSH) {
411                 inquiry_cache_flush(hdev);
412                 do_inquiry = 1;
413         }
414         hci_dev_unlock_bh(hdev);
415
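        /* ir.length is in units of 1.28 seconds per the HCI specification;
         * using 2000 ms per unit here leaves headroom for the inquiry
         * request to complete before the wait below times out. */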
416         timeo = ir.length * msecs_to_jiffies(2000);
417         if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
418                 goto done;
419
420         /* for an unlimited number of responses use a buffer with 255 entries */
421         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
422
423         /* cache_dump can't sleep. Therefore we allocate a temporary buffer
424          * and then copy it to user space.
425          */
426         if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
427                 err = -ENOMEM;
428                 goto done;
429         }
430
431         hci_dev_lock_bh(hdev);
432         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
433         hci_dev_unlock_bh(hdev);
434
435         BT_DBG("num_rsp %d", ir.num_rsp);
436
437         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
438                 ptr += sizeof(ir);
439                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
440                                         ir.num_rsp))
441                         err = -EFAULT;
442         } else
443                 err = -EFAULT;
444
445         kfree(buf);
446
447 done:
448         hci_dev_put(hdev);
449         return err;
450 }
451
452 /* ---- HCI ioctl helpers ---- */
453
454 int hci_dev_open(__u16 dev)
455 {
456         struct hci_dev *hdev;
457         int ret = 0;
458
459         if (!(hdev = hci_dev_get(dev)))
460                 return -ENODEV;
461
462         BT_DBG("%s %p", hdev->name, hdev);
463
464         hci_req_lock(hdev);
465
466         if (test_bit(HCI_UP, &hdev->flags)) {
467                 ret = -EALREADY;
468                 goto done;
469         }
470
471         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
472                 set_bit(HCI_RAW, &hdev->flags);
473
474         if (hdev->open(hdev)) {
475                 ret = -EIO;
476                 goto done;
477         }
478
479         if (!test_bit(HCI_RAW, &hdev->flags)) {
480                 atomic_set(&hdev->cmd_cnt, 1);
481                 set_bit(HCI_INIT, &hdev->flags);
482
483                 //__hci_request(hdev, hci_reset_req, 0, HZ);
484                 ret = __hci_request(hdev, hci_init_req, 0,
485                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
486
487                 clear_bit(HCI_INIT, &hdev->flags);
488         }
489
490         if (!ret) {
491                 hci_dev_hold(hdev);
492                 set_bit(HCI_UP, &hdev->flags);
493                 hci_notify(hdev, HCI_DEV_UP);
494         } else {
495                 /* Init failed, cleanup */
496                 tasklet_kill(&hdev->rx_task);
497                 tasklet_kill(&hdev->tx_task);
498                 tasklet_kill(&hdev->cmd_task);
499
500                 skb_queue_purge(&hdev->cmd_q);
501                 skb_queue_purge(&hdev->rx_q);
502
503                 if (hdev->flush)
504                         hdev->flush(hdev);
505
506                 if (hdev->sent_cmd) {
507                         kfree_skb(hdev->sent_cmd);
508                         hdev->sent_cmd = NULL;
509                 }
510
511                 hdev->close(hdev);
512                 hdev->flags = 0;
513         }
514
515 done:
516         hci_req_unlock(hdev);
517         hci_dev_put(hdev);
518         return ret;
519 }
520
521 static int hci_dev_do_close(struct hci_dev *hdev)
522 {
523         BT_DBG("%s %p", hdev->name, hdev);
524
525         hci_req_cancel(hdev, ENODEV);
526         hci_req_lock(hdev);
527
528         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
529                 hci_req_unlock(hdev);
530                 return 0;
531         }
532
533         /* Kill RX and TX tasks */
534         tasklet_kill(&hdev->rx_task);
535         tasklet_kill(&hdev->tx_task);
536
537         hci_dev_lock_bh(hdev);
538         inquiry_cache_flush(hdev);
539         hci_conn_hash_flush(hdev);
540         hci_dev_unlock_bh(hdev);
541
542         hci_notify(hdev, HCI_DEV_DOWN);
543
544         if (hdev->flush)
545                 hdev->flush(hdev);
546
547         /* Reset device */
548         skb_queue_purge(&hdev->cmd_q);
549         atomic_set(&hdev->cmd_cnt, 1);
550         if (!test_bit(HCI_RAW, &hdev->flags)) {
551                 set_bit(HCI_INIT, &hdev->flags);
552                 __hci_request(hdev, hci_reset_req, 0,
553                                         msecs_to_jiffies(250));
554                 clear_bit(HCI_INIT, &hdev->flags);
555         }
556
557         /* Kill cmd task */
558         tasklet_kill(&hdev->cmd_task);
559
560         /* Drop queues */
561         skb_queue_purge(&hdev->rx_q);
562         skb_queue_purge(&hdev->cmd_q);
563         skb_queue_purge(&hdev->raw_q);
564
565         /* Drop last sent command */
566         if (hdev->sent_cmd) {
567                 kfree_skb(hdev->sent_cmd);
568                 hdev->sent_cmd = NULL;
569         }
570
571         /* After this point our queues are empty
572          * and no tasks are scheduled. */
573         hdev->close(hdev);
574
575         /* Clear flags */
576         hdev->flags = 0;
577
578         hci_req_unlock(hdev);
579
580         hci_dev_put(hdev);
581         return 0;
582 }
583
584 int hci_dev_close(__u16 dev)
585 {
586         struct hci_dev *hdev;
587         int err;
588
589         if (!(hdev = hci_dev_get(dev)))
590                 return -ENODEV;
591         err = hci_dev_do_close(hdev);
592         hci_dev_put(hdev);
593         return err;
594 }
595
596 int hci_dev_reset(__u16 dev)
597 {
598         struct hci_dev *hdev;
599         int ret = 0;
600
601         if (!(hdev = hci_dev_get(dev)))
602                 return -ENODEV;
603
604         hci_req_lock(hdev);
605         tasklet_disable(&hdev->tx_task);
606
607         if (!test_bit(HCI_UP, &hdev->flags))
608                 goto done;
609
610         /* Drop queues */
611         skb_queue_purge(&hdev->rx_q);
612         skb_queue_purge(&hdev->cmd_q);
613
614         hci_dev_lock_bh(hdev);
615         inquiry_cache_flush(hdev);
616         hci_conn_hash_flush(hdev);
617         hci_dev_unlock_bh(hdev);
618
619         if (hdev->flush)
620                 hdev->flush(hdev);
621
622         atomic_set(&hdev->cmd_cnt, 1);
623         hdev->acl_cnt = 0; hdev->sco_cnt = 0;
624
625         if (!test_bit(HCI_RAW, &hdev->flags))
626                 ret = __hci_request(hdev, hci_reset_req, 0,
627                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
628
629 done:
630         tasklet_enable(&hdev->tx_task);
631         hci_req_unlock(hdev);
632         hci_dev_put(hdev);
633         return ret;
634 }
635
636 int hci_dev_reset_stat(__u16 dev)
637 {
638         struct hci_dev *hdev;
639         int ret = 0;
640
641         if (!(hdev = hci_dev_get(dev)))
642                 return -ENODEV;
643
644         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
645
646         hci_dev_put(hdev);
647
648         return ret;
649 }
650
651 int hci_dev_cmd(unsigned int cmd, void __user *arg)
652 {
653         struct hci_dev *hdev;
654         struct hci_dev_req dr;
655         int err = 0;
656
657         if (copy_from_user(&dr, arg, sizeof(dr)))
658                 return -EFAULT;
659
660         if (!(hdev = hci_dev_get(dr.dev_id)))
661                 return -ENODEV;
662
663         switch (cmd) {
664         case HCISETAUTH:
665                 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
666                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
667                 break;
668
669         case HCISETENCRYPT:
670                 if (!lmp_encrypt_capable(hdev)) {
671                         err = -EOPNOTSUPP;
672                         break;
673                 }
674
675                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
676                         /* Auth must be enabled first */
677                         err = hci_request(hdev, hci_auth_req, dr.dev_opt,
678                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
679                         if (err)
680                                 break;
681                 }
682
683                 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
684                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
685                 break;
686
687         case HCISETSCAN:
688                 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
689                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
690                 break;
691
692         case HCISETPTYPE:
693                 hdev->pkt_type = (__u16) dr.dev_opt;
694                 break;
695
696         case HCISETLINKPOL:
697                 hdev->link_policy = (__u16) dr.dev_opt;
698                 break;
699
700         case HCISETLINKMODE:
701                 hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
702                 break;
703
704         case HCISETACLMTU:
705                 hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
706                 hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
707                 break;
708
709         case HCISETSCOMTU:
710                 hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
711                 hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
712                 break;
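        /* For HCISETACLMTU/HCISETSCOMTU the 32-bit dev_opt is treated as two
         * consecutive 16-bit fields: the first holds the packet count and the
         * second the MTU, which is the layout userspace is expected to use. */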
713
714         default:
715                 err = -EINVAL;
716                 break;
717         }
718         hci_dev_put(hdev);
719         return err;
720 }
721
722 int hci_get_dev_list(void __user *arg)
723 {
724         struct hci_dev_list_req *dl;
725         struct hci_dev_req *dr;
726         struct list_head *p;
727         int n = 0, size, err;
728         __u16 dev_num;
729
730         if (get_user(dev_num, (__u16 __user *) arg))
731                 return -EFAULT;
732
733         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
734                 return -EINVAL;
735
736         size = sizeof(*dl) + dev_num * sizeof(*dr);
737
738         if (!(dl = kmalloc(size, GFP_KERNEL)))
739                 return -ENOMEM;
740
741         dr = dl->dev_req;
742
743         read_lock_bh(&hci_dev_list_lock);
744         list_for_each(p, &hci_dev_list) {
745                 struct hci_dev *hdev;
746                 hdev = list_entry(p, struct hci_dev, list);
747                 (dr + n)->dev_id  = hdev->id;
748                 (dr + n)->dev_opt = hdev->flags;
749                 if (++n >= dev_num)
750                         break;
751         }
752         read_unlock_bh(&hci_dev_list_lock);
753
754         dl->dev_num = n;
755         size = sizeof(*dl) + n * sizeof(*dr);
756
757         err = copy_to_user(arg, dl, size);
758         kfree(dl);
759
760         return err ? -EFAULT : 0;
761 }
762
763 int hci_get_dev_info(void __user *arg)
764 {
765         struct hci_dev *hdev;
766         struct hci_dev_info di;
767         int err = 0;
768
769         if (copy_from_user(&di, arg, sizeof(di)))
770                 return -EFAULT;
771
772         if (!(hdev = hci_dev_get(di.dev_id)))
773                 return -ENODEV;
774
775         strcpy(di.name, hdev->name);
776         di.bdaddr   = hdev->bdaddr;
777         di.type     = hdev->type;
778         di.flags    = hdev->flags;
779         di.pkt_type = hdev->pkt_type;
780         di.acl_mtu  = hdev->acl_mtu;
781         di.acl_pkts = hdev->acl_pkts;
782         di.sco_mtu  = hdev->sco_mtu;
783         di.sco_pkts = hdev->sco_pkts;
784         di.link_policy = hdev->link_policy;
785         di.link_mode   = hdev->link_mode;
786
787         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
788         memcpy(&di.features, &hdev->features, sizeof(di.features));
789
790         if (copy_to_user(arg, &di, sizeof(di)))
791                 err = -EFAULT;
792
793         hci_dev_put(hdev);
794
795         return err;
796 }
797
798 /* ---- Interface to HCI drivers ---- */
799
800 /* Alloc HCI device */
801 struct hci_dev *hci_alloc_dev(void)
802 {
803         struct hci_dev *hdev;
804
805         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
806         if (!hdev)
807                 return NULL;
808
809         skb_queue_head_init(&hdev->driver_init);
810
811         return hdev;
812 }
813 EXPORT_SYMBOL(hci_alloc_dev);
814
815 /* Free HCI device */
816 void hci_free_dev(struct hci_dev *hdev)
817 {
818         skb_queue_purge(&hdev->driver_init);
819
820         /* will free via device release */
821         put_device(&hdev->dev);
822 }
823 EXPORT_SYMBOL(hci_free_dev);
824
825 /* Register HCI device */
826 int hci_register_dev(struct hci_dev *hdev)
827 {
828         struct list_head *head = &hci_dev_list, *p;
829         int i, id = 0;
830
831         BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
832
833         if (!hdev->open || !hdev->close || !hdev->destruct)
834                 return -EINVAL;
835
836         write_lock_bh(&hci_dev_list_lock);
837
838         /* Find first available device id */
839         list_for_each(p, &hci_dev_list) {
840                 if (list_entry(p, struct hci_dev, list)->id != id)
841                         break;
842                 head = p; id++;
843         }
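        /* 'id' is now the lowest unused index and 'head' points at the node
         * after which the new device must be inserted to keep the list
         * sorted by id. */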
844
845         sprintf(hdev->name, "hci%d", id);
846         hdev->id = id;
847         list_add(&hdev->list, head);
848
849         atomic_set(&hdev->refcnt, 1);
850         spin_lock_init(&hdev->lock);
851
852         hdev->flags = 0;
853         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
854         hdev->esco_type = (ESCO_HV1);
855         hdev->link_mode = (HCI_LM_ACCEPT);
856
857         hdev->idle_timeout = 0;
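        /* The sniff intervals below are in 0.625 ms baseband slots
         * (800 slots = 500 ms, 80 slots = 50 ms). */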
858         hdev->sniff_max_interval = 800;
859         hdev->sniff_min_interval = 80;
860
861         tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
862         tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
863         tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
864
865         skb_queue_head_init(&hdev->rx_q);
866         skb_queue_head_init(&hdev->cmd_q);
867         skb_queue_head_init(&hdev->raw_q);
868
869         for (i = 0; i < 3; i++)
870                 hdev->reassembly[i] = NULL;
871
872         init_waitqueue_head(&hdev->req_wait_q);
873         init_MUTEX(&hdev->req_lock);
874
875         inquiry_cache_init(hdev);
876
877         hci_conn_hash_init(hdev);
878
879         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
880
881         atomic_set(&hdev->promisc, 0);
882
883         write_unlock_bh(&hci_dev_list_lock);
884
885         hci_register_sysfs(hdev);
886
887         hci_notify(hdev, HCI_DEV_REG);
888
889         return id;
890 }
891 EXPORT_SYMBOL(hci_register_dev);
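
/*
 * Driver-side registration sketch (hypothetical my_* callbacks, not part of
 * this file): allocate the device, fill in the mandatory callbacks checked
 * above (open, close, destruct) plus send, then register it.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->type     = HCI_USB;
 *	hdev->owner    = THIS_MODULE;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */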
892
893 /* Unregister HCI device */
894 int hci_unregister_dev(struct hci_dev *hdev)
895 {
896         int i;
897
898         BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
899
900         hci_unregister_sysfs(hdev);
901
902         write_lock_bh(&hci_dev_list_lock);
903         list_del(&hdev->list);
904         write_unlock_bh(&hci_dev_list_lock);
905
906         hci_dev_do_close(hdev);
907
908         for (i = 0; i < 3; i++)
909                 kfree_skb(hdev->reassembly[i]);
910
911         hci_notify(hdev, HCI_DEV_UNREG);
912
913         __hci_dev_put(hdev);
914
915         return 0;
916 }
917 EXPORT_SYMBOL(hci_unregister_dev);
918
919 /* Suspend HCI device */
920 int hci_suspend_dev(struct hci_dev *hdev)
921 {
922         hci_notify(hdev, HCI_DEV_SUSPEND);
923         return 0;
924 }
925 EXPORT_SYMBOL(hci_suspend_dev);
926
927 /* Resume HCI device */
928 int hci_resume_dev(struct hci_dev *hdev)
929 {
930         hci_notify(hdev, HCI_DEV_RESUME);
931         return 0;
932 }
933 EXPORT_SYMBOL(hci_resume_dev);
934
935 /* Receive packet type fragment */
936 #define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])
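/* Packet type indicators are HCI_ACLDATA_PKT (0x02), HCI_SCODATA_PKT (0x03)
 * and HCI_EVENT_PKT (0x04), so "type - 2" maps them onto reassembly[0..2]. */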
937
938 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
939 {
940         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
941                 return -EILSEQ;
942
943         while (count) {
944                 struct sk_buff *skb = __reassembly(hdev, type);
945                 struct { int expect; } *scb;
946                 int len = 0;
947
948                 if (!skb) {
949                         /* Start of the frame */
950
951                         switch (type) {
952                         case HCI_EVENT_PKT:
953                                 if (count >= HCI_EVENT_HDR_SIZE) {
954                                         struct hci_event_hdr *h = data;
955                                         len = HCI_EVENT_HDR_SIZE + h->plen;
956                                 } else
957                                         return -EILSEQ;
958                                 break;
959
960                         case HCI_ACLDATA_PKT:
961                                 if (count >= HCI_ACL_HDR_SIZE) {
962                                         struct hci_acl_hdr *h = data;
963                                         len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
964                                 } else
965                                         return -EILSEQ;
966                                 break;
967
968                         case HCI_SCODATA_PKT:
969                                 if (count >= HCI_SCO_HDR_SIZE) {
970                                         struct hci_sco_hdr *h = data;
971                                         len = HCI_SCO_HDR_SIZE + h->dlen;
972                                 } else
973                                         return -EILSEQ;
974                                 break;
975                         }
976
977                         skb = bt_skb_alloc(len, GFP_ATOMIC);
978                         if (!skb) {
979                                 BT_ERR("%s no memory for packet", hdev->name);
980                                 return -ENOMEM;
981                         }
982
983                         skb->dev = (void *) hdev;
984                         bt_cb(skb)->pkt_type = type;
985
986                         __reassembly(hdev, type) = skb;
987
988                         scb = (void *) skb->cb;
989                         scb->expect = len;
990                 } else {
991                         /* Continuation */
992
993                         scb = (void *) skb->cb;
994                         len = scb->expect;
995                 }
996
997                 len = min(len, count);
998
999                 memcpy(skb_put(skb, len), data, len);
1000
1001                 scb->expect -= len;
1002
1003                 if (scb->expect == 0) {
1004                         /* Complete frame */
1005
1006                         __reassembly(hdev, type) = NULL;
1007
1008                         bt_cb(skb)->pkt_type = type;
1009                         hci_recv_frame(skb);
1010                 }
1011
1012                 count -= len; data += len;
1013         }
1014
1015         return 0;
1016 }
1017 EXPORT_SYMBOL(hci_recv_fragment);
1018
1019 /* ---- Interface to upper protocols ---- */
1020
1021 /* Register/Unregister protocols.
1022  * hci_task_lock is used to ensure that no tasks are running. */
1023 int hci_register_proto(struct hci_proto *hp)
1024 {
1025         int err = 0;
1026
1027         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1028
1029         if (hp->id >= HCI_MAX_PROTO)
1030                 return -EINVAL;
1031
1032         write_lock_bh(&hci_task_lock);
1033
1034         if (!hci_proto[hp->id])
1035                 hci_proto[hp->id] = hp;
1036         else
1037                 err = -EEXIST;
1038
1039         write_unlock_bh(&hci_task_lock);
1040
1041         return err;
1042 }
1043 EXPORT_SYMBOL(hci_register_proto);
1044
1045 int hci_unregister_proto(struct hci_proto *hp)
1046 {
1047         int err = 0;
1048
1049         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1050
1051         if (hp->id >= HCI_MAX_PROTO)
1052                 return -EINVAL;
1053
1054         write_lock_bh(&hci_task_lock);
1055
1056         if (hci_proto[hp->id])
1057                 hci_proto[hp->id] = NULL;
1058         else
1059                 err = -ENOENT;
1060
1061         write_unlock_bh(&hci_task_lock);
1062
1063         return err;
1064 }
1065 EXPORT_SYMBOL(hci_unregister_proto);
1066
1067 int hci_register_cb(struct hci_cb *cb)
1068 {
1069         BT_DBG("%p name %s", cb, cb->name);
1070
1071         write_lock_bh(&hci_cb_list_lock);
1072         list_add(&cb->list, &hci_cb_list);
1073         write_unlock_bh(&hci_cb_list_lock);
1074
1075         return 0;
1076 }
1077 EXPORT_SYMBOL(hci_register_cb);
1078
1079 int hci_unregister_cb(struct hci_cb *cb)
1080 {
1081         BT_DBG("%p name %s", cb, cb->name);
1082
1083         write_lock_bh(&hci_cb_list_lock);
1084         list_del(&cb->list);
1085         write_unlock_bh(&hci_cb_list_lock);
1086
1087         return 0;
1088 }
1089 EXPORT_SYMBOL(hci_unregister_cb);
1090
1091 static int hci_send_frame(struct sk_buff *skb)
1092 {
1093         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1094
1095         if (!hdev) {
1096                 kfree_skb(skb);
1097                 return -ENODEV;
1098         }
1099
1100         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1101
1102         if (atomic_read(&hdev->promisc)) {
1103                 /* Time stamp */
1104                 __net_timestamp(skb);
1105
1106                 hci_send_to_sock(hdev, skb);
1107         }
1108
1109         /* Get rid of the skb owner prior to sending to the driver. */
1110         skb_orphan(skb);
1111
1112         return hdev->send(skb);
1113 }
1114
1115 /* Send HCI command */
1116 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
1117 {
1118         int len = HCI_COMMAND_HDR_SIZE + plen;
1119         struct hci_command_hdr *hdr;
1120         struct sk_buff *skb;
1121
1122         BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
1123
1124         skb = bt_skb_alloc(len, GFP_ATOMIC);
1125         if (!skb) {
1126                 BT_ERR("%s no memory for command", hdev->name);
1127                 return -ENOMEM;
1128         }
1129
1130         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1131         hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf));
1132         hdr->plen   = plen;
1133
1134         if (plen)
1135                 memcpy(skb_put(skb, plen), param, plen);
1136
1137         BT_DBG("skb len %d", skb->len);
1138
1139         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1140         skb->dev = (void *) hdev;
1141         skb_queue_tail(&hdev->cmd_q, skb);
1142         hci_sched_cmd(hdev);
1143
1144         return 0;
1145 }
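
/*
 * hci_opcode_pack() builds the 16-bit command opcode with the OCF in the low
 * 10 bits and the OGF in the upper 6 bits. Note that hci_send_cmd() only
 * queues the command; hci_cmd_task() hands it to the driver once cmd_cnt
 * shows the controller can accept another command.
 */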
1146
1147 /* Get data from the previously sent command */
1148 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
1149 {
1150         struct hci_command_hdr *hdr;
1151
1152         if (!hdev->sent_cmd)
1153                 return NULL;
1154
1155         hdr = (void *) hdev->sent_cmd->data;
1156
1157         if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf)))
1158                 return NULL;
1159
1160         BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
1161
1162         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1163 }
1164
1165 /* Send ACL data */
1166 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1167 {
1168         struct hci_acl_hdr *hdr;
1169         int len = skb->len;
1170
1171         skb_push(skb, HCI_ACL_HDR_SIZE);
1172         skb_reset_transport_header(skb);
1173         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1174         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1175         hdr->dlen   = cpu_to_le16(len);
1176 }
1177
1178 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1179 {
1180         struct hci_dev *hdev = conn->hdev;
1181         struct sk_buff *list;
1182
1183         BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1184
1185         skb->dev = (void *) hdev;
1186         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1187         hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1188
1189         if (!(list = skb_shinfo(skb)->frag_list)) {
1190                 /* Non-fragmented */
1191                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1192
1193                 skb_queue_tail(&conn->data_q, skb);
1194         } else {
1195                 /* Fragmented */
1196                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1197
1198                 skb_shinfo(skb)->frag_list = NULL;
1199
1200                 /* Queue all fragments atomically */
1201                 spin_lock_bh(&conn->data_q.lock);
1202
1203                 __skb_queue_tail(&conn->data_q, skb);
1204                 do {
1205                         skb = list; list = list->next;
1206
1207                         skb->dev = (void *) hdev;
1208                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1209                         hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1210
1211                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1212
1213                         __skb_queue_tail(&conn->data_q, skb);
1214                 } while (list);
1215
1216                 spin_unlock_bh(&conn->data_q.lock);
1217         }
1218
1219         hci_sched_tx(hdev);
1220         return 0;
1221 }
1222 EXPORT_SYMBOL(hci_send_acl);
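
/*
 * Callers that need fragmentation (e.g. L2CAP) pass the continuation
 * fragments on skb_shinfo(skb)->frag_list; the first fragment is tagged
 * ACL_START and the rest ACL_CONT, and the whole frame is queued under the
 * data_q lock so the TX task never sees a partially queued frame.
 */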
1223
1224 /* Send SCO data */
1225 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1226 {
1227         struct hci_dev *hdev = conn->hdev;
1228         struct hci_sco_hdr hdr;
1229
1230         BT_DBG("%s len %d", hdev->name, skb->len);
1231
1232         if (skb->len > hdev->sco_mtu) {
1233                 kfree_skb(skb);
1234                 return -EINVAL;
1235         }
1236
1237         hdr.handle = cpu_to_le16(conn->handle);
1238         hdr.dlen   = skb->len;
1239
1240         skb_push(skb, HCI_SCO_HDR_SIZE);
1241         skb_reset_transport_header(skb);
1242         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1243
1244         skb->dev = (void *) hdev;
1245         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1246         skb_queue_tail(&conn->data_q, skb);
1247         hci_sched_tx(hdev);
1248         return 0;
1249 }
1250 EXPORT_SYMBOL(hci_send_sco);
1251
1252 /* ---- HCI TX task (outgoing data) ---- */
1253
1254 /* HCI Connection scheduler */
1255 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1256 {
1257         struct hci_conn_hash *h = &hdev->conn_hash;
1258         struct hci_conn *conn = NULL;
1259         int num = 0, min = ~0;
1260         struct list_head *p;
1261
1262         /* We don't have to lock the device here. Connections are always
1263          * added and removed with the TX task disabled. */
1264         list_for_each(p, &h->list) {
1265                 struct hci_conn *c;
1266                 c = list_entry(p, struct hci_conn, list);
1267
1268                 if (c->type != type || c->state != BT_CONNECTED
1269                                 || skb_queue_empty(&c->data_q))
1270                         continue;
1271                 num++;
1272
1273                 if (c->sent < min) {
1274                         min  = c->sent;
1275                         conn = c;
1276                 }
1277         }
1278
1279         if (conn) {
1280                 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1281                 int q = cnt / num;
1282                 *quote = q ? q : 1;
1283         } else
1284                 *quote = 0;
1285
1286         BT_DBG("conn %p quote %d", conn, *quote);
1287         return conn;
1288 }
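
/*
 * hci_low_sent() implements a simple fair scheduler: among connections of
 * the requested type that have queued data, it picks the one with the
 * fewest packets in flight and grants it a quota of the free controller
 * buffers divided by the number of ready connections (at least one).
 */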
1289
1290 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1291 {
1292         struct hci_conn_hash *h = &hdev->conn_hash;
1293         struct list_head *p;
1294         struct hci_conn  *c;
1295
1296         BT_ERR("%s ACL tx timeout", hdev->name);
1297
1298         /* Kill stalled connections */
1299         list_for_each(p, &h->list) {
1300                 c = list_entry(p, struct hci_conn, list);
1301                 if (c->type == ACL_LINK && c->sent) {
1302                         BT_ERR("%s killing stalled ACL connection %s",
1303                                 hdev->name, batostr(&c->dst));
1304                         hci_acl_disconn(c, 0x13);
1305                 }
1306         }
1307 }
1308
1309 static inline void hci_sched_acl(struct hci_dev *hdev)
1310 {
1311         struct hci_conn *conn;
1312         struct sk_buff *skb;
1313         int quote;
1314
1315         BT_DBG("%s", hdev->name);
1316
1317         if (!test_bit(HCI_RAW, &hdev->flags)) {
1318                 /* ACL tx timeout must be longer than maximum
1319                  * link supervision timeout (40.9 seconds) */
1320                 if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
1321                         hci_acl_tx_to(hdev);
1322         }
1323
1324         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1325                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1326                         BT_DBG("skb %p len %d", skb, skb->len);
1327
1328                         hci_conn_enter_active_mode(conn);
1329
1330                         hci_send_frame(skb);
1331                         hdev->acl_last_tx = jiffies;
1332
1333                         hdev->acl_cnt--;
1334                         conn->sent++;
1335                 }
1336         }
1337 }
1338
1339 /* Schedule SCO */
1340 static inline void hci_sched_sco(struct hci_dev *hdev)
1341 {
1342         struct hci_conn *conn;
1343         struct sk_buff *skb;
1344         int quote;
1345
1346         BT_DBG("%s", hdev->name);
1347
1348         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1349                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1350                         BT_DBG("skb %p len %d", skb, skb->len);
1351                         hci_send_frame(skb);
1352
1353                         conn->sent++;
1354                         if (conn->sent == ~0)
1355                                 conn->sent = 0;
1356                 }
1357         }
1358 }
1359
1360 static void hci_tx_task(unsigned long arg)
1361 {
1362         struct hci_dev *hdev = (struct hci_dev *) arg;
1363         struct sk_buff *skb;
1364
1365         read_lock(&hci_task_lock);
1366
1367         BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1368
1369         /* Schedule queues and send stuff to HCI driver */
1370
1371         hci_sched_acl(hdev);
1372
1373         hci_sched_sco(hdev);
1374
1375         /* Send next queued raw (unknown type) packet */
1376         /* Send any queued raw (unknown type) packets */
1377                 hci_send_frame(skb);
1378
1379         read_unlock(&hci_task_lock);
1380 }
1381
1382 /* ----- HCI RX task (incoming data processing) ----- */
1383
1384 /* ACL data packet */
1385 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1386 {
1387         struct hci_acl_hdr *hdr = (void *) skb->data;
1388         struct hci_conn *conn;
1389         __u16 handle, flags;
1390
1391         skb_pull(skb, HCI_ACL_HDR_SIZE);
1392
1393         handle = __le16_to_cpu(hdr->handle);
1394         flags  = hci_flags(handle);
1395         handle = hci_handle(handle);
1396
1397         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1398
1399         hdev->stat.acl_rx++;
1400
1401         hci_dev_lock(hdev);
1402         conn = hci_conn_hash_lookup_handle(hdev, handle);
1403         hci_dev_unlock(hdev);
1404
1405         if (conn) {
1406                 register struct hci_proto *hp;
1407
1408                 hci_conn_enter_active_mode(conn);
1409
1410                 /* Send to upper protocol */
1411                 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1412                         hp->recv_acldata(conn, skb, flags);
1413                         return;
1414                 }
1415         } else {
1416                 BT_ERR("%s ACL packet for unknown connection handle %d",
1417                         hdev->name, handle);
1418         }
1419
1420         kfree_skb(skb);
1421 }
1422
1423 /* SCO data packet */
1424 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1425 {
1426         struct hci_sco_hdr *hdr = (void *) skb->data;
1427         struct hci_conn *conn;
1428         __u16 handle;
1429
1430         skb_pull(skb, HCI_SCO_HDR_SIZE);
1431
1432         handle = __le16_to_cpu(hdr->handle);
1433
1434         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1435
1436         hdev->stat.sco_rx++;
1437
1438         hci_dev_lock(hdev);
1439         conn = hci_conn_hash_lookup_handle(hdev, handle);
1440         hci_dev_unlock(hdev);
1441
1442         if (conn) {
1443                 register struct hci_proto *hp;
1444
1445                 /* Send to upper protocol */
1446                 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1447                         hp->recv_scodata(conn, skb);
1448                         return;
1449                 }
1450         } else {
1451                 BT_ERR("%s SCO packet for unknown connection handle %d",
1452                         hdev->name, handle);
1453         }
1454
1455         kfree_skb(skb);
1456 }
1457
1458 static void hci_rx_task(unsigned long arg)
1459 {
1460         struct hci_dev *hdev = (struct hci_dev *) arg;
1461         struct sk_buff *skb;
1462
1463         BT_DBG("%s", hdev->name);
1464
1465         read_lock(&hci_task_lock);
1466
1467         while ((skb = skb_dequeue(&hdev->rx_q))) {
1468                 if (atomic_read(&hdev->promisc)) {
1469                         /* Send copy to the sockets */
1470                         hci_send_to_sock(hdev, skb);
1471                 }
1472
1473                 if (test_bit(HCI_RAW, &hdev->flags)) {
1474                         kfree_skb(skb);
1475                         continue;
1476                 }
1477
1478                 if (test_bit(HCI_INIT, &hdev->flags)) {
1479                         /* Don't process data packets in this state. */
1480                         switch (bt_cb(skb)->pkt_type) {
1481                         case HCI_ACLDATA_PKT:
1482                         case HCI_SCODATA_PKT:
1483                                 kfree_skb(skb);
1484                                 continue;
1485                         }
1486                 }
1487
1488                 /* Process frame */
1489                 switch (bt_cb(skb)->pkt_type) {
1490                 case HCI_EVENT_PKT:
1491                         hci_event_packet(hdev, skb);
1492                         break;
1493
1494                 case HCI_ACLDATA_PKT:
1495                         BT_DBG("%s ACL data packet", hdev->name);
1496                         hci_acldata_packet(hdev, skb);
1497                         break;
1498
1499                 case HCI_SCODATA_PKT:
1500                         BT_DBG("%s SCO data packet", hdev->name);
1501                         hci_scodata_packet(hdev, skb);
1502                         break;
1503
1504                 default:
1505                         kfree_skb(skb);
1506                         break;
1507                 }
1508         }
1509
1510         read_unlock(&hci_task_lock);
1511 }
1512
1513 static void hci_cmd_task(unsigned long arg)
1514 {
1515         struct hci_dev *hdev = (struct hci_dev *) arg;
1516         struct sk_buff *skb;
1517
1518         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1519
1520         if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
1521                 BT_ERR("%s command tx timeout", hdev->name);
1522                 atomic_set(&hdev->cmd_cnt, 1);
1523         }
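
        /* cmd_cnt is our command flow control: it is decremented below when a
         * command is handed to the driver and is restored by the Command
         * Complete/Command Status event handlers (see hci_event.c), so
         * normally only one command is outstanding at a time. */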
1524
1525         /* Send queued commands */
1526         if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1527                 if (hdev->sent_cmd)
1528                         kfree_skb(hdev->sent_cmd);
1529
1530                 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1531                         atomic_dec(&hdev->cmd_cnt);
1532                         hci_send_frame(skb);
1533                         hdev->cmd_last_tx = jiffies;
1534                 } else {
1535                         skb_queue_head(&hdev->cmd_q, skb);
1536                         hci_sched_cmd(hdev);
1537                 }
1538         }
1539 }