/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
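
/*
 * Example: how a driver could observe device events via this notifier
 * chain. A minimal sketch; the names below are hypothetical and the
 * block is not part of the original file.
 */
#if 0
static int example_hci_notifier(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct hci_dev *hdev = ptr;

        if (event == HCI_DEV_UP)
                BT_DBG("%s is up", hdev->name);

        return NOTIFY_DONE;
}

static struct notifier_block example_hci_nb = {
        .notifier_call = example_hci_notifier,
};

/* hci_register_notifier(&example_hci_nb); */
#endif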

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
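
/*
 * Example: the request pattern used throughout this file. A callback
 * only queues commands; hci_request() runs it and sleeps until
 * hci_req_complete() fires or the timeout expires. The opcode and
 * value here are illustrative, mirroring hci_scan_req() below.
 */
#if 0
static void example_voice_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 setting = cpu_to_le16(opt);

        hci_send_cmd(hdev, HCI_OP_WRITE_VOICE_SETTING, 2, &setting);
}

/* err = hci_request(hdev, example_voice_req, 0x0060,
                        msecs_to_jiffies(HCI_INIT_TIMEOUT)); */
#endif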

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
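
/*
 * Example: the hold/put discipline around hci_dev_get(). A sketch;
 * every successful get must be balanced by hci_dev_put(), as the
 * ioctl helpers below do. The function name is hypothetical.
 */
#if 0
static void example_get_put(void)
{
        struct hci_dev *hdev = hci_dev_get(0);

        if (hdev) {
                BT_DBG("%s held", hdev->name);
                hci_dev_put(hdev);
        }
}
#endif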

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (!ie) {
                /* Entry not in the cache. Add new one. */
                ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
                if (!ie)
                        return;

                ie->next = cache->list;
                cache->list = ie;
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * copy it to user space afterwards.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
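
/*
 * Example: how user space drives this path through the HCIINQUIRY
 * ioctl on an HCI socket. A sketch, not kernel code; socket setup
 * and error handling are omitted, and the fd name is hypothetical.
 */
#if 0
static int example_user_inquiry(int hci_sock_fd)
{
        struct {
                struct hci_inquiry_req ir;
                struct inquiry_info info[255];
        } buf = {
                .ir = {
                        .dev_id  = 0,
                        .flags   = IREQ_CACHE_FLUSH,
                        .lap     = { 0x33, 0x8b, 0x9e }, /* GIAC */
                        .length  = 8,   /* 8 * 1.28s inquiry window */
                        .num_rsp = 0,   /* unlimited, capped at 255 above */
                },
        };

        return ioctl(hci_sock_fd, HCIINQUIRY, &buf);
}
#endif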

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->flags))
                        mgmt_powered(hdev->id, 1);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        mgmt_powered(hdev->id, 0);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;

                hdev = list_entry(p, struct hci_dev, list);

                hci_del_off_timer(hdev);

                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_del_off_timer(hdev);

        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will be freed by the device release function */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
                mod_timer(&hdev->off_timer,
                                jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

        BT_DBG("%s", hdev->name);

        hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
        struct hci_dev *hdev = (struct hci_dev *) data;

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);

        queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);
        del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct list_head *p;

        list_for_each(p, &hdev->link_keys) {
                struct link_key *k;

                k = list_entry(p, struct link_key, list);

                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;
        }

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
                                                u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->type = type;
        key->pin_len = pin_len;

        if (new_key)
                mgmt_new_key(hdev->id, key, old_key_type);

        if (type == 0x06)
                key->type = old_key_type;

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);

        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->power_off, hci_power_off);
        setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->flags);
        set_bit(HCI_SETUP, &hdev->flags);
        queue_work(hdev->workqueue, &hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
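
/*
 * Example: minimal transport-driver registration, supplying the
 * mandatory open/close/destruct hooks checked above plus send. All
 * names here are hypothetical; real drivers (e.g. btusb) follow this
 * shape.
 */
#if 0
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }
static void example_destruct(struct hci_dev *hdev) { }

static int example_send(struct sk_buff *skb)
{
        /* push skb to the transport here */
        kfree_skb(skb);
        return 0;
}

static int example_probe(void)
{
        struct hci_dev *hdev = hci_alloc_dev();

        if (!hdev)
                return -ENOMEM;

        hdev->bus      = HCI_USB;
        hdev->open     = example_open;
        hdev->close    = example_close;
        hdev->send     = example_send;
        hdev->destruct = example_destruct;

        if (hci_register_dev(hdev) < 0) {
                hci_free_dev(hdev);
                return -ENODEV;
        }

        return 0;
}
#endif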

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
                                        !test_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_removed(hdev->id);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        destroy_workqueue(hdev->workqueue);

        hci_dev_lock_bh(hdev);
        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_dev_unlock_bh(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
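
/*
 * Example: a transport driver handing a complete packet to the core.
 * A sketch; the function name and buffer handling are illustrative.
 */
#if 0
static int example_deliver(struct hci_dev *hdev, void *buf, int len, __u8 type)
{
        struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

        if (!skb)
                return -ENOMEM;

        memcpy(skb_put(skb, len), buf, len);
        bt_cb(skb)->pkt_type = type;
        skb->dev = (void *) hdev;

        return hci_recv_frame(skb);
}
#endif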

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}
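
/*
 * Worked example (illustrative): an event packet with a 2-byte header
 * and a 4-byte payload arriving as two 3-byte chunks. The first call
 * copies the header, learns expect = 4 from h->plen, copies one
 * payload byte and returns 0 (all input consumed, frame incomplete).
 * The second call copies the last three bytes, expect hits 0 and the
 * completed skb is handed to hci_recv_frame(); 0 is returned since no
 * input belonging to a following frame is left over.
 */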

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count,
                                                type - 1, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data,
                                        count, STREAM_REASSEMBLY, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
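
/*
 * Example: how a protocol layer plugs into hci_proto[], as the L2CAP
 * and SCO modules do. A sketch with hypothetical names; a protocol
 * only needs to set the callbacks it actually implements.
 */
#if 0
static struct hci_proto example_proto = {
        .name         = "example",
        .id           = HCI_PROTO_L2CAP,
        .recv_acldata = NULL,   /* set to the protocol's ACL handler */
};

/* hci_register_proto(&example_proto);
   ...
   hci_unregister_proto(&example_proto); */
#endif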

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb, NULL);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        if (test_bit(HCI_INIT, &hdev->flags))
                hdev->init_last_cmd = opcode;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}
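
/*
 * Example: queueing a command from protocol code. This mirrors the
 * calls in hci_init_req() above; the function name and scan value
 * are illustrative.
 */
#if 0
static void example_enable_scans(struct hci_dev *hdev)
{
        __u8 scan = SCAN_INQUIRY | SCAN_PAGE;

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
#endif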

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags);

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
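
/*
 * Quota example (illustrative): with hdev->acl_cnt == 8 free
 * controller slots and num == 3 eligible ACL connections, the
 * least-recently-served connection gets q = 8 / 3 = 2 packets this
 * pass; when q rounds to 0 the quote degrades to 1 so no connection
 * is ever starved.
 */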

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                hp = hci_proto[HCI_PROTO_L2CAP];
                if (hp && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                hp = hci_proto[HCI_PROTO_SCO];
                if (hp && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb, NULL);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}