/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
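
/*
 * Illustrative sketch (comment only, not compiled): a req callback is
 * expected to do nothing but queue HCI commands; __hci_request() then
 * sleeps until hci_req_complete() fires from the event path or the
 * timeout expires.  A hypothetical single-command request could look
 * like this (my_scan_req is a placeholder name):
 *
 *      static void my_scan_req(struct hci_dev *hdev, unsigned long opt)
 *      {
 *              __u8 scan = opt;
 *              hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *      }
 *
 *      err = hci_request(hdev, my_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                              msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */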

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (!ie) {
                /* Entry not in the cache. Add new one. */
                ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
                if (!ie)
                        return;

                ie->next = cache->list;
                cache->list = ie;
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
}
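
/*
 * Note: the inquiry cache is a plain unsorted singly linked list keyed
 * by bdaddr, so lookups are O(n); that is fine for the handful of
 * devices a typical inquiry returns.  Entries are only ever freed in
 * bulk by inquiry_cache_flush(), which hci_inquiry() below calls when
 * the cache has gone stale or IREQ_CACHE_FLUSH was requested.
 */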

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses we use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
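
/*
 * Sketch of the user space side (comment only; assumes the usual BlueZ
 * definitions from hci.h): hci_inquiry() above backs the HCIINQUIRY
 * ioctl on an HCI socket, with the inquiry_info array expected directly
 * after struct hci_inquiry_req in the same buffer:
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } buf = { 0 };
 *
 *      buf.ir.dev_id  = 0;                     // hci0
 *      buf.ir.length  = 8;                     // 8 * 1.28s inquiry
 *      buf.ir.num_rsp = 0;                     // unlimited -> 255
 *      memcpy(buf.ir.lap, "\x33\x8b\x9e", 3);  // GIAC
 *      ioctl(sock_fd, HCIINQUIRY, &buf);
 */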

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->flags))
                        mgmt_powered(hdev->id, 1);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        /* Stop timer, it might be running */
        del_timer_sync(&hdev->cmd_timer);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        mgmt_powered(hdev->id, 0);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;

                hdev = list_entry(p, struct hci_dev, list);

                hci_del_off_timer(hdev);

                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_del_off_timer(hdev);

        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
                mod_timer(&hdev->off_timer,
                                jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

        BT_DBG("%s", hdev->name);

        hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
        struct hci_dev *hdev = (struct hci_dev *) data;

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);

        queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);
        del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct list_head *p;

        list_for_each(p, &hdev->link_keys) {
                struct link_key *k;

                k = list_entry(p, struct link_key, list);

                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;
        }

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
                                                u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->type = type;
        key->pin_len = pin_len;

        if (new_key)
                mgmt_new_key(hdev->id, key, old_key_type);

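        /* Key type 0x06 (Changed Combination Key, per the Bluetooth link
         * key type table) carries no information about how the original
         * key was generated, so keep the old type. */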
        if (type == 0x06)
                key->type = old_key_type;

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        clear_bit(HCI_RESET, &hdev->flags);
        tasklet_schedule(&hdev->cmd_task);
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);

        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->power_off, hci_power_off);
        setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->flags);
        set_bit(HCI_SETUP, &hdev->flags);
        queue_work(hdev->workqueue, &hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
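
/*
 * Illustration (comment only, not compiled): the minimal registration
 * dance a transport driver goes through, loosely modeled on drivers
 * such as hci_vhci; the my_* callbacks are placeholders:
 *
 *      hdev = hci_alloc_dev();
 *      hdev->bus      = HCI_VIRTUAL;
 *      hdev->open     = my_open;       // invoked from hci_dev_open()
 *      hdev->close    = my_close;
 *      hdev->send     = my_send;       // consumes skbs from hci_send_frame()
 *      hdev->destruct = my_destruct;
 *      if (hci_register_dev(hdev) < 0)
 *              hci_free_dev(hdev);
 */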

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
                                        !test_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_removed(hdev->id);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        hci_del_off_timer(hdev);

        destroy_workqueue(hdev->workqueue);

        hci_dev_lock_bh(hdev);
        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_dev_unlock_bh(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count,
                                                type - 1, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
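
/*
 * Illustration (comment only): a bus driver normally calls
 * hci_recv_fragment() from its RX path with however many bytes just
 * arrived; packet boundaries are recovered here rather than in the
 * driver.  In a hypothetical my_rx_complete():
 *
 *      if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
 *              BT_ERR("%s frame reassembly failed", hdev->name);
 */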

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data,
                                        count, STREAM_REASSEMBLY, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb, NULL);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        if (test_bit(HCI_INIT, &hdev->flags))
                hdev->init_last_cmd = opcode;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}
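
/*
 * Note: hci_send_cmd() only queues the command.  Actual transmission
 * happens in hci_cmd_task(), which is gated by hdev->cmd_cnt so that a
 * single command is in flight until the controller acks it with a
 * Command Complete/Command Status event; hci_cmd_timer() above unwedges
 * the queue if that ack never arrives.
 */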

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags);

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt, q;

                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
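
/*
 * Worked example of the quota math above: with hdev->acl_cnt == 8 free
 * controller buffers and num == 3 eligible ACL connections, the
 * connection with the fewest frames in flight (smallest c->sent) gets a
 * quote of 8 / 3 = 2 frames this pass, and never less than 1, so a busy
 * link cannot starve the others.
 */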
1734
1735 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1736 {
1737         struct hci_conn_hash *h = &hdev->conn_hash;
1738         struct list_head *p;
1739         struct hci_conn  *c;
1740
1741         BT_ERR("%s link tx timeout", hdev->name);
1742
1743         /* Kill stalled connections */
1744         list_for_each(p, &h->list) {
1745                 c = list_entry(p, struct hci_conn, list);
1746                 if (c->type == type && c->sent) {
1747                         BT_ERR("%s killing stalled connection %s",
1748                                 hdev->name, batostr(&c->dst));
1749                         hci_acl_disconn(c, 0x13);
1750                 }
1751         }
1752 }
1753
1754 static inline void hci_sched_acl(struct hci_dev *hdev)
1755 {
1756         struct hci_conn *conn;
1757         struct sk_buff *skb;
1758         int quote;
1759
1760         BT_DBG("%s", hdev->name);
1761
1762         if (!test_bit(HCI_RAW, &hdev->flags)) {
1763                 /* ACL tx timeout must be longer than maximum
1764                  * link supervision timeout (40.9 seconds) */
1765                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1766                         hci_link_tx_to(hdev, ACL_LINK);
1767         }
1768
1769         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1770                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1771                         BT_DBG("skb %p len %d", skb, skb->len);
1772
1773                         hci_conn_enter_active_mode(conn);
1774
1775                         hci_send_frame(skb);
1776                         hdev->acl_last_tx = jiffies;
1777
1778                         hdev->acl_cnt--;
1779                         conn->sent++;
1780                 }
1781         }
1782 }
1783
1784 /* Schedule SCO */
1785 static inline void hci_sched_sco(struct hci_dev *hdev)
1786 {
1787         struct hci_conn *conn;
1788         struct sk_buff *skb;
1789         int quote;
1790
1791         BT_DBG("%s", hdev->name);
1792
1793         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1794                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1795                         BT_DBG("skb %p len %d", skb, skb->len);
1796                         hci_send_frame(skb);
1797
1798                         conn->sent++;
1799                         if (conn->sent == ~0)
1800                                 conn->sent = 0;
1801                 }
1802         }
1803 }
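/* Note: unlike the ACL path, hdev->sco_cnt is not decremented here, so SCO
 * traffic is paced only by the per-round quote. conn->sent serves purely as
 * the scheduler's fairness counter and is wrapped back to 0 before it can
 * reach ~0, where it could no longer win the "least sent" comparison in
 * hci_low_sent(). hci_sched_esco() below follows the same pattern. */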
1804
1805 static inline void hci_sched_esco(struct hci_dev *hdev)
1806 {
1807         struct hci_conn *conn;
1808         struct sk_buff *skb;
1809         int quote;
1810
1811         BT_DBG("%s", hdev->name);
1812
1813         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1814                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1815                         BT_DBG("skb %p len %d", skb, skb->len);
1816                         hci_send_frame(skb);
1817
1818                         conn->sent++;
1819                         if (conn->sent == ~0)
1820                                 conn->sent = 0;
1821                 }
1822         }
1823 }
1824
1825 static inline void hci_sched_le(struct hci_dev *hdev)
1826 {
1827         struct hci_conn *conn;
1828         struct sk_buff *skb;
1829         int quote, cnt;
1830
1831         BT_DBG("%s", hdev->name);
1832
1833         if (!test_bit(HCI_RAW, &hdev->flags)) {
1834                 /* LE tx timeout must be longer than the maximum
1835                  * link supervision timeout (40.9 seconds) */
1836                 if (!hdev->le_cnt && hdev->le_pkts &&
1837                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
1838                         hci_link_tx_to(hdev, LE_LINK);
1839         }
1840
1841         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1842         while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1843                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1844                         BT_DBG("skb %p len %d", skb, skb->len);
1845
1846                         hci_send_frame(skb);
1847                         hdev->le_last_tx = jiffies;
1848
1849                         cnt--;
1850                         conn->sent++;
1851                 }
1852         }
1853         if (hdev->le_pkts)
1854                 hdev->le_cnt = cnt;
1855         else
1856                 hdev->acl_cnt = cnt;
1857 }
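/* Note: controllers without a dedicated LE buffer pool advertise
 * le_pkts == 0, in which case LE traffic borrows ACL credits. That is why
 * the leftover count is written back to either le_cnt or acl_cnt above,
 * and why hci_low_sent() falls back to acl_cnt for LE_LINK as well. */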
1858
1859 static void hci_tx_task(unsigned long arg)
1860 {
1861         struct hci_dev *hdev = (struct hci_dev *) arg;
1862         struct sk_buff *skb;
1863
1864         read_lock(&hci_task_lock);
1865
1866         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1867                 hdev->sco_cnt, hdev->le_cnt);
1868
1869         /* Schedule queues and send stuff to HCI driver */
1870
1871         hci_sched_acl(hdev);
1872
1873         hci_sched_sco(hdev);
1874
1875         hci_sched_esco(hdev);
1876
1877         hci_sched_le(hdev);
1878
1879         /* Send next queued raw (unknown type) packet */
1880         while ((skb = skb_dequeue(&hdev->raw_q)))
1881                 hci_send_frame(skb);
1882
1883         read_unlock(&hci_task_lock);
1884 }
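/* Note: hci_tx_task() drains the queues in a fixed priority order (ACL,
 * then SCO/eSCO, then LE, then raw). Both the TX and RX tasklets take
 * hci_task_lock for reading, so they may run concurrently with each other;
 * the write side is taken only when protocol handlers are registered or
 * removed, which keeps hci_proto[] stable while frames are dispatched. */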
1885
1886 /* ----- HCI RX task (incoming data processing) ----- */
1887
1888 /* ACL data packet */
1889 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1890 {
1891         struct hci_acl_hdr *hdr = (void *) skb->data;
1892         struct hci_conn *conn;
1893         __u16 handle, flags;
1894
1895         skb_pull(skb, HCI_ACL_HDR_SIZE);
1896
1897         handle = __le16_to_cpu(hdr->handle);
1898         flags  = hci_flags(handle);
1899         handle = hci_handle(handle);
1900
1901         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1902
1903         hdev->stat.acl_rx++;
1904
1905         hci_dev_lock(hdev);
1906         conn = hci_conn_hash_lookup_handle(hdev, handle);
1907         hci_dev_unlock(hdev);
1908
1909         if (conn) {
1910                 register struct hci_proto *hp;
1911
1912                 hci_conn_enter_active_mode(conn);
1913
1914                 /* Send to upper protocol */
1915                 hp = hci_proto[HCI_PROTO_L2CAP];
1916                 if (hp && hp->recv_acldata) {
1917                         hp->recv_acldata(conn, skb, flags);
1918                         return;
1919                 }
1920         } else {
1921                 BT_ERR("%s ACL packet for unknown connection handle %d",
1922                         hdev->name, handle);
1923         }
1924
1925         kfree_skb(skb);
1926 }
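/* Note: the 16-bit handle field of the ACL header packs a 12-bit connection
 * handle with 4 bits of packet boundary/broadcast flags, which is what the
 * hci_handle()/hci_flags() helpers above split apart. For example, a raw
 * little-endian value of 0x2001 decodes to handle 0x001 with flags 0x2
 * (ACL_START, the first fragment of a new L2CAP frame). */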
1927
1928 /* SCO data packet */
1929 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1930 {
1931         struct hci_sco_hdr *hdr = (void *) skb->data;
1932         struct hci_conn *conn;
1933         __u16 handle;
1934
1935         skb_pull(skb, HCI_SCO_HDR_SIZE);
1936
1937         handle = __le16_to_cpu(hdr->handle);
1938
1939         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1940
1941         hdev->stat.sco_rx++;
1942
1943         hci_dev_lock(hdev);
1944         conn = hci_conn_hash_lookup_handle(hdev, handle);
1945         hci_dev_unlock(hdev);
1946
1947         if (conn) {
1948                 register struct hci_proto *hp;
1949
1950                 /* Send to upper protocol */
1951                 hp = hci_proto[HCI_PROTO_SCO];
1952                 if (hp && hp->recv_scodata) {
1953                         hp->recv_scodata(conn, skb);
1954                         return;
1955                 }
1956         } else {
1957                 BT_ERR("%s SCO packet for unknown connection handle %d",
1958                         hdev->name, handle);
1959         }
1960
1961         kfree_skb(skb);
1962 }
1963
1964 static void hci_rx_task(unsigned long arg)
1965 {
1966         struct hci_dev *hdev = (struct hci_dev *) arg;
1967         struct sk_buff *skb;
1968
1969         BT_DBG("%s", hdev->name);
1970
1971         read_lock(&hci_task_lock);
1972
1973         while ((skb = skb_dequeue(&hdev->rx_q))) {
1974                 if (atomic_read(&hdev->promisc)) {
1975                         /* Send a copy to the sockets */
1976                         hci_send_to_sock(hdev, skb, NULL);
1977                 }
1978
1979                 if (test_bit(HCI_RAW, &hdev->flags)) {
1980                         kfree_skb(skb);
1981                         continue;
1982                 }
1983
1984                 if (test_bit(HCI_INIT, &hdev->flags)) {
1985                         /* Don't process data packets in this state. */
1986                         switch (bt_cb(skb)->pkt_type) {
1987                         case HCI_ACLDATA_PKT:
1988                         case HCI_SCODATA_PKT:
1989                                 kfree_skb(skb);
1990                                 continue;
1991                         }
1992                 }
1993
1994                 /* Process frame */
1995                 switch (bt_cb(skb)->pkt_type) {
1996                 case HCI_EVENT_PKT:
1997                         hci_event_packet(hdev, skb);
1998                         break;
1999
2000                 case HCI_ACLDATA_PKT:
2001                         BT_DBG("%s ACL data packet", hdev->name);
2002                         hci_acldata_packet(hdev, skb);
2003                         break;
2004
2005                 case HCI_SCODATA_PKT:
2006                         BT_DBG("%s SCO data packet", hdev->name);
2007                         hci_scodata_packet(hdev, skb);
2008                         break;
2009
2010                 default:
2011                         kfree_skb(skb);
2012                         break;
2013                 }
2014         }
2015
2016         read_unlock(&hci_task_lock);
2017 }
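/* Note: the dispatch order above matters. A promiscuous copy goes to the
 * sockets before any filtering, raw-mode devices bypass the core entirely,
 * and during HCI_INIT only event packets are processed, so that stray data
 * frames cannot confuse the initialization sequence. */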
2018
2019 static void hci_cmd_task(unsigned long arg)
2020 {
2021         struct hci_dev *hdev = (struct hci_dev *) arg;
2022         struct sk_buff *skb;
2023
2024         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2025
2026         /* Send the next queued command, if the controller has a credit */
2027         if (atomic_read(&hdev->cmd_cnt)) {
2028                 skb = skb_dequeue(&hdev->cmd_q);
2029                 if (!skb)
2030                         return;
2031
2032                 kfree_skb(hdev->sent_cmd);
2033
2034                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2035                 if (hdev->sent_cmd) {
2036                         atomic_dec(&hdev->cmd_cnt);
2037                         hci_send_frame(skb);
2038                         mod_timer(&hdev->cmd_timer,
2039                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2040                 } else {
2041                         skb_queue_head(&hdev->cmd_q, skb);
2042                         tasklet_schedule(&hdev->cmd_task);
2043                 }
2044         }
2045 }
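/* Note: hdev->cmd_cnt holds the command credits granted by the controller
 * through Command Complete/Status events, so at most that many commands may
 * be outstanding. The skb is cloned into hdev->sent_cmd before transmission
 * so the opcode can later be matched against the completion event; if the
 * GFP_ATOMIC clone fails, the command is pushed back onto cmd_q and the
 * tasklet is rescheduled to retry. cmd_timer arms the watchdog that fires
 * if the controller never answers. */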