/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
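
/* A hedged usage sketch of the helpers above: they implement a simple
 * synchronous request pattern. The caller's req() callback queues HCI
 * commands, the caller sleeps on req_wait_q, and hci_req_complete() on
 * the event path wakes it (or the schedule_timeout() expires). The
 * ioctl helpers later in this file use it like so:
 *
 *      err = hci_request(hdev, hci_auth_req, dr.dev_opt,
 *                              msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * hci_req_lock() serializes requests, so at most one request can be
 * outstanding per device at any time.
 */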

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
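
/* Note: hci_init_req() only queues the commands above; it does not wait
 * for them. While HCI_INIT is set, hci_send_cmd() records each queued
 * opcode in hdev->init_last_cmd, and hci_req_complete() ignores every
 * completion except the one matching that opcode, so the waiter in
 * __hci_request() wakes only once the whole init sequence has finished.
 */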

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (!ie) {
                /* Entry not in the cache. Add new one. */
                ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
                if (!ie)
                        return;

                ie->next = cache->list;
                cache->list = ie;
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and then
         * copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
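
/* From userspace this path is driven by the HCIINQUIRY ioctl on a raw
 * HCI socket. A hedged, hypothetical sketch of a caller (dd is an open
 * HCI socket descriptor; error handling elided):
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              inquiry_info info[255];
 *      } buf = {
 *              .ir = {
 *                      .dev_id = 0,
 *                      .length = 8,
 *                      .lap    = { 0x33, 0x8b, 0x9e }, // GIAC
 *              },
 *      };
 *      ioctl(dd, HCIINQUIRY, &buf);
 *
 * The request struct is followed in memory by room for the inquiry_info
 * entries that hci_inquiry() copies back after the cache dump.
 */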

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->flags))
                        mgmt_powered(hdev->id, 1);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        mgmt_powered(hdev->id, 0);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;

                hdev = list_entry(p, struct hci_dev, list);

                hci_del_off_timer(hdev);

                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_del_off_timer(hdev);

        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* Will be freed via the device release callback */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
                mod_timer(&hdev->off_timer,
                                jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

        BT_DBG("%s", hdev->name);

        hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
        struct hci_dev *hdev = (struct hci_dev *) data;

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);

        queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);
        del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct list_head *p;

        list_for_each(p, &hdev->link_keys) {
                struct link_key *k;

                k = list_entry(p, struct link_key, list);

                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;
        }

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
                                                u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->type = type;
        key->pin_len = pin_len;

        if (new_key)
                mgmt_new_key(hdev->id, key, old_key_type);

        if (type == 0x06)
                key->type = old_key_type;

        return 0;
}
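
/* Note on the 0x06 special case above: link key type 0x06 is "Changed
 * Combination Key" in the HCI specification, which only indicates that
 * an existing key was changed. Keeping the previously stored key type
 * preserves the security properties of the original pairing.
 */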

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        clear_bit(HCI_RESET, &hdev->flags);
        tasklet_schedule(&hdev->cmd_task);
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);

        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->power_off, hci_power_off);
        setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->flags);
        set_bit(HCI_SETUP, &hdev->flags);
        queue_work(hdev->workqueue, &hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
                                        !test_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_removed(hdev->id);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        hci_del_off_timer(hdev);

        destroy_workqueue(hdev->workqueue);

        hci_dev_lock_bh(hdev);
        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_dev_unlock_bh(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}
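
/* hci_reassembly() consumes at most one complete packet per call and
 * returns the number of input bytes it did not consume (or a negative
 * errno). Callers therefore loop, advancing their data pointer by
 * (count - rem) and retrying with the remainder until the buffer is
 * drained, as the two wrappers below do.
 */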

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count,
                                                type - 1, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data,
                                        count, STREAM_REASSEMBLY, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
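
/* hci_recv_fragment() serves transports that already know each chunk's
 * packet type (e.g. USB, where events and ACL data arrive on separate
 * endpoints), while hci_recv_stream_fragment() serves byte-stream
 * transports such as UART H4, where every frame is prefixed with a
 * one-byte packet type indicator that is peeled off above.
 */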

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb, NULL);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        if (test_bit(HCI_INIT, &hdev->flags))
                hdev->init_last_cmd = opcode;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags);

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non-fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
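
/* Each fragment on the frag_list gets its own ACL header: the head skb
 * keeps the caller's flags (normally ACL_START), while every
 * continuation fragment is rewritten to ACL_CONT so the controller can
 * stitch the L2CAP PDU back together. Queueing under data_q.lock keeps
 * the fragment run contiguous even if another sender races on the same
 * connection.
 */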

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt, q;

                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
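
/* The scheduler picks, per link type, the connection with the fewest
 * packets in flight (c->sent) and grants it a quota of the remaining
 * controller buffer credits divided by the number of backlogged
 * connections, so every active connection makes progress on each pass
 * of the TX task.
 */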
1733
1734 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1735 {
1736         struct hci_conn_hash *h = &hdev->conn_hash;
1737         struct list_head *p;
1738         struct hci_conn  *c;
1739
1740         BT_ERR("%s link tx timeout", hdev->name);
1741
1742         /* Kill stalled connections */
1743         list_for_each(p, &h->list) {
1744                 c = list_entry(p, struct hci_conn, list);
1745                 if (c->type == type && c->sent) {
1746                         BT_ERR("%s killing stalled connection %s",
1747                                 hdev->name, batostr(&c->dst));
1748                         hci_acl_disconn(c, 0x13);
1749                 }
1750         }
1751 }
1752
1753 static inline void hci_sched_acl(struct hci_dev *hdev)
1754 {
1755         struct hci_conn *conn;
1756         struct sk_buff *skb;
1757         int quote;
1758
1759         BT_DBG("%s", hdev->name);
1760
1761         if (!test_bit(HCI_RAW, &hdev->flags)) {
1762                 /* ACL tx timeout must be longer than maximum
1763                  * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, ACL_LINK);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
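                        /* Nothing on this path ever decrements conn->sent
                         * for SCO, so wrap it manually before it overflows */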
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

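/* Schedule eSCO */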
static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote, cnt;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                                time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

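        /* Controllers without dedicated LE buffers (le_pkts == 0) share
         * the ACL buffer pool, so account against acl_cnt in that case */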
        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_send_frame(skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        conn->sent++;
                }
        }
        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
                hdev->sco_cnt, hdev->le_cnt);

        /* Schedule queues and send pending data to the HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        hci_sched_le(hdev);

        /* Drain queued raw (unknown type) packets */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

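/* For reference, outgoing frames reach this task the same way hci_send_sco()
 * queues them above; a minimal sketch for ACL (hci_queue_acl() here is
 * hypothetical, shown only to illustrate the queue/tasklet pattern):
 *
 *      static void hci_queue_acl(struct hci_conn *conn, struct sk_buff *skb)
 *      {
 *              struct hci_dev *hdev = conn->hdev;
 *
 *              skb->dev = (void *) hdev;
 *              bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
 *              skb_queue_tail(&conn->data_q, skb);
 *              tasklet_schedule(&hdev->tx_task);
 *      }
 */
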
/* ---- HCI RX task (incoming data processing) ---- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

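        /* The 16-bit handle field packs a 12-bit connection handle with the
         * packet boundary/broadcast flags in the top bits; e.g. a raw value
         * of 0x2001 splits into handle 0x001 and flags 0x2 (ACL_START). */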
        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                hp = hci_proto[HCI_PROTO_L2CAP];
                if (hp && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                struct hci_proto *hp;

                /* Send to upper protocol */
                hp = hci_proto[HCI_PROTO_SCO];
                if (hp && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send a copy to the sockets */
                        hci_send_to_sock(hdev, skb, NULL);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

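/* For reference, drivers feed this task by queueing received frames on
 * hdev->rx_q and scheduling rx_task; a minimal sketch of that driver-side
 * step (illustrative only, based on the queue/tasklet pairing used
 * throughout this file):
 *
 *      skb->dev = (void *) hdev;
 *      skb_queue_tail(&hdev->rx_q, skb);
 *      tasklet_schedule(&hdev->rx_task);
 */
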
static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        /* Send the next queued command if the controller can accept one */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        mod_timer(&hdev->cmd_timer,
                                  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}
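
/* For reference, commands enter cmd_q via hci_send_cmd() earlier in this
 * file; a minimal caller-side sketch (the opcode and parameter are
 * illustrative, not tied to any particular code path):
 *
 *      __u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *      hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * hci_send_cmd() queues the skb on hdev->cmd_q and schedules this task;
 * cmd_cnt is replenished when the matching Command Complete or Command
 * Status event arrives, which reschedules the task for the next command. */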