/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */
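/* Per-socket state: the bound device, packet filter, channel, flag bits,
 * monitor cookie and task name live in hci_pinfo, which wraps the common
 * bt_sock and is reached through the hci_pi() cast below.
 */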
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u8              cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
	__u16             mtu;
};
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
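/* hci_test_bit() indexes an array of 32-bit words: nr >> 5 picks the word
 * and nr & 31 the bit within it. For example, HCI_EV_CMD_COMPLETE (0x0e)
 * tests bit 14 of the first event_mask word.
 */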
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
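/* Worked example: HCI_Read_Local_Name is OGF 0x03, OCF 0x0014 (opcode
 * 0x0c14). The sendmsg() path below tests bit 20 (OCF 0x14) of
 * ocf_mask[3][0] = 0xaab00200, which is set, so an unprivileged raw
 * socket may issue this command without CAP_NET_RAW.
 */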
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
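/* For delivery to raw sockets below, a single private copy of the frame
 * (with the packet-type byte pushed back in front) is created lazily on
 * the first matching socket and then cloned per receiver, so the payload
 * is copied at most once regardless of the number of listeners.
 */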
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
{
	struct scm_creds *creds;

	if (!sk || WARN_ON(!skb))
		return;

	creds = &bt_cb(skb)->creds;

	/* Check if peer credentials is set */
	if (!sk->sk_peer_pid) {
		/* Check if parent peer credentials is set */
		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
			sk = bt_sk(sk)->parent;
		else
			return;
	}

	/* Check if scm_creds already set */
	if (creds->pid == pid_vnr(sk->sk_peer_pid))
		return;

	memset(creds, 0, sizeof(*creds));

	creds->pid = pid_vnr(sk->sk_peer_pid);
	if (sk->sk_peer_cred) {
		creds->uid = sk->sk_peer_cred->uid;
		creds->gid = sk->sk_peer_cred->gid;
	}
}
static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb)
		return NULL;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_sock_copy_creds(skb->sk, nskb);

	return nskb;
}
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = hci_skb_clone(skb);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
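/* Monitor frames are prefixed with a 6-byte hci_mon_hdr (little-endian
 * opcode, controller index and payload length); this is the stream format
 * that tools such as btmon consume from HCI_CHANNEL_MONITOR.
 */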
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	hci_sock_copy_creds(skb->sk, skb_copy);

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));
		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	hci_sock_copy_creds(sk, skb);

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered since all the cleanup will have already
			 * been complete and hdev will get released when we put
			 * it.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will only ever be generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
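/* Userspace sketch (illustrative only, not part of this file): opening a
 * raw HCI socket and binding it to controller hci0, matching the
 * HCI_CHANNEL_RAW path handled above.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,			// hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *		perror("bind");
 */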
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	memset(&scm, 0, sizeof(scm));
	scm.creds = bt_cb(skb)->creds;

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	scm_recv(sock, msg, &scm, flags);

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = 0;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
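/* Layout of a valid logging frame, as enforced above (illustrative):
 *
 *	hci_mon_hdr (6 B) | priority (1 B) | ident_len (1 B) |
 *	ident + NUL (ident_len may be 0) | message + NUL
 *
 * e.g. a priority of 6, ident "bluetoothd" plus NUL, then the NUL
 * terminated message text.
 */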
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
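/* Note on the command path above: vendor-specific commands (OGF 0x3f) are
 * queued on hdev->raw_q and bypass the serialized hdev->cmd_q that is used
 * for standard HCI commands.
 */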
static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       len);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * commands and events.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
			   kern);
	if (!sk)
		return -ENOMEM;

	sock->state = SS_UNCONNECTED;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}