2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Copyright (C) 2010 Google Inc.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static struct workqueue_struct *_busy_wq;
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
103 /* Find channel with given SCID.
104 * Returns locked socket */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 read_unlock(&conn->chan_lock);
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 struct l2cap_chan *c;
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
128 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
136 read_unlock(&conn->chan_lock);
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
142 u16 cid = L2CAP_CID_DYN_START;
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
152 static struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
154 struct l2cap_chan *chan;
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
165 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
167 struct sock *sk = chan->sk;
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
170 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
172 conn->disc_reason = 0x13;
174 l2cap_pi(sk)->conn = conn;
176 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
177 if (conn->hcon->type == LE_LINK) {
179 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
180 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
181 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
185 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
187 } else if (sk->sk_type == SOCK_DGRAM) {
188 /* Connectionless socket */
189 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
190 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
191 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
195 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
196 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
201 list_add(&chan->list, &conn->chan_l);
205 * Must be called on the locked socket. */
206 void l2cap_chan_del(struct l2cap_chan *chan, int err)
208 struct sock *sk = chan->sk;
209 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
210 struct sock *parent = bt_sk(sk)->parent;
212 l2cap_sock_clear_timer(sk);
214 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
217 /* Delete from channel list */
218 write_lock_bh(&conn->chan_lock);
219 list_del(&chan->list);
220 write_unlock_bh(&conn->chan_lock);
223 l2cap_pi(sk)->conn = NULL;
224 hci_conn_put(conn->hcon);
227 sk->sk_state = BT_CLOSED;
228 sock_set_flag(sk, SOCK_ZAPPED);
234 bt_accept_unlink(sk);
235 parent->sk_data_ready(parent, 0);
237 sk->sk_state_change(sk);
239 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE &&
240 l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE))
243 skb_queue_purge(&chan->tx_q);
245 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
246 struct srej_list *l, *tmp;
248 del_timer(&chan->retrans_timer);
249 del_timer(&chan->monitor_timer);
250 del_timer(&chan->ack_timer);
252 skb_queue_purge(&chan->srej_q);
253 skb_queue_purge(&chan->busy_q);
255 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
265 static inline u8 l2cap_get_auth_type(struct sock *sk)
267 if (sk->sk_type == SOCK_RAW) {
268 switch (l2cap_pi(sk)->sec_level) {
269 case BT_SECURITY_HIGH:
270 return HCI_AT_DEDICATED_BONDING_MITM;
271 case BT_SECURITY_MEDIUM:
272 return HCI_AT_DEDICATED_BONDING;
274 return HCI_AT_NO_BONDING;
276 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
278 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
280 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
281 return HCI_AT_NO_BONDING_MITM;
283 return HCI_AT_NO_BONDING;
285 switch (l2cap_pi(sk)->sec_level) {
286 case BT_SECURITY_HIGH:
287 return HCI_AT_GENERAL_BONDING_MITM;
288 case BT_SECURITY_MEDIUM:
289 return HCI_AT_GENERAL_BONDING;
291 return HCI_AT_NO_BONDING;
296 /* Service level security */
297 static inline int l2cap_check_security(struct sock *sk)
299 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
302 auth_type = l2cap_get_auth_type(sk);
304 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
308 u8 l2cap_get_ident(struct l2cap_conn *conn)
312 /* Get next available identificator.
313 * 1 - 128 are used by kernel.
314 * 129 - 199 are reserved.
315 * 200 - 254 are used by utilities like l2ping, etc.
318 spin_lock_bh(&conn->lock);
320 if (++conn->tx_ident > 128)
325 spin_unlock_bh(&conn->lock);
330 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
332 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
335 BT_DBG("code 0x%2.2x", code);
340 if (lmp_no_flush_capable(conn->hcon->hdev))
341 flags = ACL_START_NO_FLUSH;
345 hci_send_acl(conn->hcon, skb, flags);
348 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
351 struct l2cap_hdr *lh;
352 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
353 struct l2cap_conn *conn = pi->conn;
354 struct sock *sk = (struct sock *)pi;
355 int count, hlen = L2CAP_HDR_SIZE + 2;
358 if (sk->sk_state != BT_CONNECTED)
361 if (pi->fcs == L2CAP_FCS_CRC16)
364 BT_DBG("chan %p, control 0x%2.2x", chan, control);
366 count = min_t(unsigned int, conn->mtu, hlen);
367 control |= L2CAP_CTRL_FRAME_TYPE;
369 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
370 control |= L2CAP_CTRL_FINAL;
371 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
374 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
375 control |= L2CAP_CTRL_POLL;
376 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
379 skb = bt_skb_alloc(count, GFP_ATOMIC);
383 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
384 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
385 lh->cid = cpu_to_le16(pi->dcid);
386 put_unaligned_le16(control, skb_put(skb, 2));
388 if (pi->fcs == L2CAP_FCS_CRC16) {
389 u16 fcs = crc16(0, (u8 *)lh, count - 2);
390 put_unaligned_le16(fcs, skb_put(skb, 2));
393 if (lmp_no_flush_capable(conn->hcon->hdev))
394 flags = ACL_START_NO_FLUSH;
398 hci_send_acl(pi->conn->hcon, skb, flags);
401 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
403 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
404 control |= L2CAP_SUPER_RCV_NOT_READY;
405 chan->conn_state |= L2CAP_CONN_RNR_SENT;
407 control |= L2CAP_SUPER_RCV_READY;
409 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
411 l2cap_send_sframe(chan, control);
414 static inline int __l2cap_no_conn_pending(struct sock *sk)
416 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
419 static void l2cap_do_start(struct l2cap_chan *chan)
421 struct sock *sk = chan->sk;
422 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
424 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
425 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
428 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
429 struct l2cap_conn_req req;
430 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
431 req.psm = l2cap_pi(sk)->psm;
433 chan->ident = l2cap_get_ident(conn);
434 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
436 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
440 struct l2cap_info_req req;
441 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
443 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
444 conn->info_ident = l2cap_get_ident(conn);
446 mod_timer(&conn->info_timer, jiffies +
447 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
449 l2cap_send_cmd(conn, conn->info_ident,
450 L2CAP_INFO_REQ, sizeof(req), &req);
454 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
456 u32 local_feat_mask = l2cap_feat_mask;
458 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
461 case L2CAP_MODE_ERTM:
462 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
463 case L2CAP_MODE_STREAMING:
464 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
470 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
473 struct l2cap_disconn_req req;
480 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
481 del_timer(&chan->retrans_timer);
482 del_timer(&chan->monitor_timer);
483 del_timer(&chan->ack_timer);
486 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
487 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
488 l2cap_send_cmd(conn, l2cap_get_ident(conn),
489 L2CAP_DISCONN_REQ, sizeof(req), &req);
491 sk->sk_state = BT_DISCONN;
495 /* ---- L2CAP connections ---- */
496 static void l2cap_conn_start(struct l2cap_conn *conn)
498 struct l2cap_chan *chan, *tmp;
500 BT_DBG("conn %p", conn);
502 read_lock(&conn->chan_lock);
504 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
505 struct sock *sk = chan->sk;
509 if (sk->sk_type != SOCK_SEQPACKET &&
510 sk->sk_type != SOCK_STREAM) {
515 if (sk->sk_state == BT_CONNECT) {
516 struct l2cap_conn_req req;
518 if (!l2cap_check_security(sk) ||
519 !__l2cap_no_conn_pending(sk)) {
524 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
526 && l2cap_pi(sk)->conf_state &
527 L2CAP_CONF_STATE2_DEVICE) {
528 /* __l2cap_sock_close() calls list_del(chan)
529 * so release the lock */
530 read_unlock_bh(&conn->chan_lock);
531 __l2cap_sock_close(sk, ECONNRESET);
532 read_lock_bh(&conn->chan_lock);
537 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
538 req.psm = l2cap_pi(sk)->psm;
540 chan->ident = l2cap_get_ident(conn);
541 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
543 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
546 } else if (sk->sk_state == BT_CONNECT2) {
547 struct l2cap_conn_rsp rsp;
549 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
550 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
552 if (l2cap_check_security(sk)) {
553 if (bt_sk(sk)->defer_setup) {
554 struct sock *parent = bt_sk(sk)->parent;
555 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
556 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
557 parent->sk_data_ready(parent, 0);
560 sk->sk_state = BT_CONFIG;
561 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
562 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
569 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
572 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
573 rsp.result != L2CAP_CR_SUCCESS) {
578 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
579 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
580 l2cap_build_conf_req(chan, buf), buf);
581 chan->num_conf_req++;
587 read_unlock(&conn->chan_lock);
590 /* Find socket with cid and source bdaddr.
591 * Returns closest match, locked.
593 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
595 struct sock *sk = NULL, *sk1 = NULL;
596 struct hlist_node *node;
598 read_lock(&l2cap_sk_list.lock);
600 sk_for_each(sk, node, &l2cap_sk_list.head) {
601 if (state && sk->sk_state != state)
604 if (l2cap_pi(sk)->scid == cid) {
606 if (!bacmp(&bt_sk(sk)->src, src))
610 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
615 read_unlock(&l2cap_sk_list.lock);
617 return node ? sk : sk1;
620 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
622 struct sock *parent, *sk;
623 struct l2cap_chan *chan;
627 /* Check if we have socket listening on cid */
628 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
633 bh_lock_sock(parent);
635 /* Check for backlog size */
636 if (sk_acceptq_is_full(parent)) {
637 BT_DBG("backlog full %d", parent->sk_ack_backlog);
641 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
645 chan = l2cap_chan_alloc(sk);
651 write_lock_bh(&conn->chan_lock);
653 hci_conn_hold(conn->hcon);
655 l2cap_sock_init(sk, parent);
657 bacpy(&bt_sk(sk)->src, conn->src);
658 bacpy(&bt_sk(sk)->dst, conn->dst);
660 bt_accept_enqueue(parent, sk);
662 __l2cap_chan_add(conn, chan);
664 l2cap_pi(sk)->chan = chan;
666 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
668 sk->sk_state = BT_CONNECTED;
669 parent->sk_data_ready(parent, 0);
671 write_unlock_bh(&conn->chan_lock);
674 bh_unlock_sock(parent);
677 static void l2cap_conn_ready(struct l2cap_conn *conn)
679 struct l2cap_chan *chan;
681 BT_DBG("conn %p", conn);
683 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
684 l2cap_le_conn_ready(conn);
686 read_lock(&conn->chan_lock);
688 list_for_each_entry(chan, &conn->chan_l, list) {
689 struct sock *sk = chan->sk;
693 if (conn->hcon->type == LE_LINK) {
694 l2cap_sock_clear_timer(sk);
695 sk->sk_state = BT_CONNECTED;
696 sk->sk_state_change(sk);
699 if (sk->sk_type != SOCK_SEQPACKET &&
700 sk->sk_type != SOCK_STREAM) {
701 l2cap_sock_clear_timer(sk);
702 sk->sk_state = BT_CONNECTED;
703 sk->sk_state_change(sk);
704 } else if (sk->sk_state == BT_CONNECT)
705 l2cap_do_start(chan);
710 read_unlock(&conn->chan_lock);
713 /* Notify sockets that we cannot guaranty reliability anymore */
714 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
716 struct l2cap_chan *chan;
718 BT_DBG("conn %p", conn);
720 read_lock(&conn->chan_lock);
722 list_for_each_entry(chan, &conn->chan_l, list) {
723 struct sock *sk = chan->sk;
725 if (l2cap_pi(sk)->force_reliable)
729 read_unlock(&conn->chan_lock);
732 static void l2cap_info_timeout(unsigned long arg)
734 struct l2cap_conn *conn = (void *) arg;
736 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
737 conn->info_ident = 0;
739 l2cap_conn_start(conn);
742 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
744 struct l2cap_conn *conn = hcon->l2cap_data;
749 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
753 hcon->l2cap_data = conn;
756 BT_DBG("hcon %p conn %p", hcon, conn);
758 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
759 conn->mtu = hcon->hdev->le_mtu;
761 conn->mtu = hcon->hdev->acl_mtu;
763 conn->src = &hcon->hdev->bdaddr;
764 conn->dst = &hcon->dst;
768 spin_lock_init(&conn->lock);
769 rwlock_init(&conn->chan_lock);
771 INIT_LIST_HEAD(&conn->chan_l);
773 if (hcon->type != LE_LINK)
774 setup_timer(&conn->info_timer, l2cap_info_timeout,
775 (unsigned long) conn);
777 conn->disc_reason = 0x13;
782 static void l2cap_conn_del(struct hci_conn *hcon, int err)
784 struct l2cap_conn *conn = hcon->l2cap_data;
785 struct l2cap_chan *chan, *l;
791 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
793 kfree_skb(conn->rx_skb);
796 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
799 l2cap_chan_del(chan, err);
804 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
805 del_timer_sync(&conn->info_timer);
807 hcon->l2cap_data = NULL;
811 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
813 write_lock_bh(&conn->chan_lock);
814 __l2cap_chan_add(conn, chan);
815 write_unlock_bh(&conn->chan_lock);
818 /* ---- Socket interface ---- */
820 /* Find socket with psm and source bdaddr.
821 * Returns closest match.
823 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
825 struct sock *sk = NULL, *sk1 = NULL;
826 struct hlist_node *node;
828 read_lock(&l2cap_sk_list.lock);
830 sk_for_each(sk, node, &l2cap_sk_list.head) {
831 if (state && sk->sk_state != state)
834 if (l2cap_pi(sk)->psm == psm) {
836 if (!bacmp(&bt_sk(sk)->src, src))
840 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
845 read_unlock(&l2cap_sk_list.lock);
847 return node ? sk : sk1;
850 int l2cap_do_connect(struct sock *sk)
852 bdaddr_t *src = &bt_sk(sk)->src;
853 bdaddr_t *dst = &bt_sk(sk)->dst;
854 struct l2cap_conn *conn;
855 struct l2cap_chan *chan;
856 struct hci_conn *hcon;
857 struct hci_dev *hdev;
861 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
864 hdev = hci_get_route(dst, src);
866 return -EHOSTUNREACH;
868 hci_dev_lock_bh(hdev);
870 auth_type = l2cap_get_auth_type(sk);
872 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
873 hcon = hci_connect(hdev, LE_LINK, dst,
874 l2cap_pi(sk)->sec_level, auth_type);
876 hcon = hci_connect(hdev, ACL_LINK, dst,
877 l2cap_pi(sk)->sec_level, auth_type);
884 conn = l2cap_conn_add(hcon, 0);
891 chan = l2cap_chan_alloc(sk);
898 /* Update source addr of the socket */
899 bacpy(src, conn->src);
901 l2cap_chan_add(conn, chan);
903 l2cap_pi(sk)->chan = chan;
905 sk->sk_state = BT_CONNECT;
906 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
908 if (hcon->state == BT_CONNECTED) {
909 if (sk->sk_type != SOCK_SEQPACKET &&
910 sk->sk_type != SOCK_STREAM) {
911 l2cap_sock_clear_timer(sk);
912 if (l2cap_check_security(sk))
913 sk->sk_state = BT_CONNECTED;
915 l2cap_do_start(chan);
921 hci_dev_unlock_bh(hdev);
926 int __l2cap_wait_ack(struct sock *sk)
928 DECLARE_WAITQUEUE(wait, current);
932 add_wait_queue(sk_sleep(sk), &wait);
933 while ((l2cap_pi(sk)->chan->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
934 set_current_state(TASK_INTERRUPTIBLE);
939 if (signal_pending(current)) {
940 err = sock_intr_errno(timeo);
945 timeo = schedule_timeout(timeo);
948 err = sock_error(sk);
952 set_current_state(TASK_RUNNING);
953 remove_wait_queue(sk_sleep(sk), &wait);
957 static void l2cap_monitor_timeout(unsigned long arg)
959 struct l2cap_chan *chan = (void *) arg;
960 struct sock *sk = chan->sk;
962 BT_DBG("chan %p", chan);
965 if (chan->retry_count >= chan->remote_max_tx) {
966 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, ECONNABORTED);
972 __mod_monitor_timer();
974 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
978 static void l2cap_retrans_timeout(unsigned long arg)
980 struct l2cap_chan *chan = (void *) arg;
981 struct sock *sk = chan->sk;
983 BT_DBG("chan %p", chan);
986 chan->retry_count = 1;
987 __mod_monitor_timer();
989 chan->conn_state |= L2CAP_CONN_WAIT_F;
991 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
995 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
999 while ((skb = skb_peek(&chan->tx_q)) &&
1000 chan->unacked_frames) {
1001 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1004 skb = skb_dequeue(&chan->tx_q);
1007 chan->unacked_frames--;
1010 if (!chan->unacked_frames)
1011 del_timer(&chan->retrans_timer);
1014 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1016 struct l2cap_pinfo *pi = l2cap_pi(sk);
1017 struct hci_conn *hcon = pi->conn->hcon;
1020 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1022 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1023 flags = ACL_START_NO_FLUSH;
1027 hci_send_acl(hcon, skb, flags);
1030 void l2cap_streaming_send(struct l2cap_chan *chan)
1032 struct sock *sk = chan->sk;
1033 struct sk_buff *skb;
1034 struct l2cap_pinfo *pi = l2cap_pi(sk);
1037 while ((skb = skb_dequeue(&chan->tx_q))) {
1038 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1039 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1040 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1042 if (pi->fcs == L2CAP_FCS_CRC16) {
1043 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1044 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1047 l2cap_do_send(sk, skb);
1049 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1053 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1055 struct sock *sk = chan->sk;
1056 struct l2cap_pinfo *pi = l2cap_pi(sk);
1057 struct sk_buff *skb, *tx_skb;
1060 skb = skb_peek(&chan->tx_q);
1065 if (bt_cb(skb)->tx_seq == tx_seq)
1068 if (skb_queue_is_last(&chan->tx_q, skb))
1071 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1073 if (chan->remote_max_tx &&
1074 bt_cb(skb)->retries == chan->remote_max_tx) {
1075 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1079 tx_skb = skb_clone(skb, GFP_ATOMIC);
1080 bt_cb(skb)->retries++;
1081 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1082 control &= L2CAP_CTRL_SAR;
1084 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1085 control |= L2CAP_CTRL_FINAL;
1086 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1089 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1090 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1092 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1094 if (pi->fcs == L2CAP_FCS_CRC16) {
1095 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1096 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1099 l2cap_do_send(sk, tx_skb);
1102 int l2cap_ertm_send(struct l2cap_chan *chan)
1104 struct sk_buff *skb, *tx_skb;
1105 struct sock *sk = chan->sk;
1106 struct l2cap_pinfo *pi = l2cap_pi(sk);
1110 if (sk->sk_state != BT_CONNECTED)
1113 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1115 if (chan->remote_max_tx &&
1116 bt_cb(skb)->retries == chan->remote_max_tx) {
1117 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1121 tx_skb = skb_clone(skb, GFP_ATOMIC);
1123 bt_cb(skb)->retries++;
1125 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1126 control &= L2CAP_CTRL_SAR;
1128 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1129 control |= L2CAP_CTRL_FINAL;
1130 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1132 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1133 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1134 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1137 if (pi->fcs == L2CAP_FCS_CRC16) {
1138 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1139 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1142 l2cap_do_send(sk, tx_skb);
1144 __mod_retrans_timer();
1146 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1147 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1149 if (bt_cb(skb)->retries == 1)
1150 chan->unacked_frames++;
1152 chan->frames_sent++;
1154 if (skb_queue_is_last(&chan->tx_q, skb))
1155 chan->tx_send_head = NULL;
1157 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1165 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1169 if (!skb_queue_empty(&chan->tx_q))
1170 chan->tx_send_head = chan->tx_q.next;
1172 chan->next_tx_seq = chan->expected_ack_seq;
1173 ret = l2cap_ertm_send(chan);
1177 static void l2cap_send_ack(struct l2cap_chan *chan)
1181 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1183 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1184 control |= L2CAP_SUPER_RCV_NOT_READY;
1185 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1186 l2cap_send_sframe(chan, control);
1190 if (l2cap_ertm_send(chan) > 0)
1193 control |= L2CAP_SUPER_RCV_READY;
1194 l2cap_send_sframe(chan, control);
1197 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1199 struct srej_list *tail;
1202 control = L2CAP_SUPER_SELECT_REJECT;
1203 control |= L2CAP_CTRL_FINAL;
1205 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1206 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1208 l2cap_send_sframe(chan, control);
1211 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1213 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1214 struct sk_buff **frag;
1217 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1223 /* Continuation fragments (no L2CAP header) */
1224 frag = &skb_shinfo(skb)->frag_list;
1226 count = min_t(unsigned int, conn->mtu, len);
1228 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1231 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1237 frag = &(*frag)->next;
1243 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1245 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1246 struct sk_buff *skb;
1247 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1248 struct l2cap_hdr *lh;
1250 BT_DBG("sk %p len %d", sk, (int)len);
1252 count = min_t(unsigned int, (conn->mtu - hlen), len);
1253 skb = bt_skb_send_alloc(sk, count + hlen,
1254 msg->msg_flags & MSG_DONTWAIT, &err);
1256 return ERR_PTR(err);
1258 /* Create L2CAP header */
1259 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1260 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1261 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1262 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1264 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1265 if (unlikely(err < 0)) {
1267 return ERR_PTR(err);
1272 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1274 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1275 struct sk_buff *skb;
1276 int err, count, hlen = L2CAP_HDR_SIZE;
1277 struct l2cap_hdr *lh;
1279 BT_DBG("sk %p len %d", sk, (int)len);
1281 count = min_t(unsigned int, (conn->mtu - hlen), len);
1282 skb = bt_skb_send_alloc(sk, count + hlen,
1283 msg->msg_flags & MSG_DONTWAIT, &err);
1285 return ERR_PTR(err);
1287 /* Create L2CAP header */
1288 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1289 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1290 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1292 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1293 if (unlikely(err < 0)) {
1295 return ERR_PTR(err);
1300 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1302 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1303 struct sk_buff *skb;
1304 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1305 struct l2cap_hdr *lh;
1307 BT_DBG("sk %p len %d", sk, (int)len);
1310 return ERR_PTR(-ENOTCONN);
1315 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1318 count = min_t(unsigned int, (conn->mtu - hlen), len);
1319 skb = bt_skb_send_alloc(sk, count + hlen,
1320 msg->msg_flags & MSG_DONTWAIT, &err);
1322 return ERR_PTR(err);
1324 /* Create L2CAP header */
1325 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1326 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1327 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1328 put_unaligned_le16(control, skb_put(skb, 2));
1330 put_unaligned_le16(sdulen, skb_put(skb, 2));
1332 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1333 if (unlikely(err < 0)) {
1335 return ERR_PTR(err);
1338 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1339 put_unaligned_le16(0, skb_put(skb, 2));
1341 bt_cb(skb)->retries = 0;
1345 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1347 struct sock *sk = chan->sk;
1348 struct sk_buff *skb;
1349 struct sk_buff_head sar_queue;
1353 skb_queue_head_init(&sar_queue);
1354 control = L2CAP_SDU_START;
1355 skb = l2cap_create_iframe_pdu(sk, msg, chan->remote_mps, control, len);
1357 return PTR_ERR(skb);
1359 __skb_queue_tail(&sar_queue, skb);
1360 len -= chan->remote_mps;
1361 size += chan->remote_mps;
1366 if (len > chan->remote_mps) {
1367 control = L2CAP_SDU_CONTINUE;
1368 buflen = chan->remote_mps;
1370 control = L2CAP_SDU_END;
1374 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1376 skb_queue_purge(&sar_queue);
1377 return PTR_ERR(skb);
1380 __skb_queue_tail(&sar_queue, skb);
1384 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1385 if (chan->tx_send_head == NULL)
1386 chan->tx_send_head = sar_queue.next;
1391 static void l2cap_chan_ready(struct sock *sk)
1393 struct sock *parent = bt_sk(sk)->parent;
1395 BT_DBG("sk %p, parent %p", sk, parent);
1397 l2cap_pi(sk)->conf_state = 0;
1398 l2cap_sock_clear_timer(sk);
1401 /* Outgoing channel.
1402 * Wake up socket sleeping on connect.
1404 sk->sk_state = BT_CONNECTED;
1405 sk->sk_state_change(sk);
1407 /* Incoming channel.
1408 * Wake up socket sleeping on accept.
1410 parent->sk_data_ready(parent, 0);
1414 /* Copy frame to all raw sockets on that connection */
1415 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1417 struct sk_buff *nskb;
1418 struct l2cap_chan *chan;
1420 BT_DBG("conn %p", conn);
1422 read_lock(&conn->chan_lock);
1423 list_for_each_entry(chan, &conn->chan_l, list) {
1424 struct sock *sk = chan->sk;
1425 if (sk->sk_type != SOCK_RAW)
1428 /* Don't send frame to the socket it came from */
1431 nskb = skb_clone(skb, GFP_ATOMIC);
1435 if (sock_queue_rcv_skb(sk, nskb))
1438 read_unlock(&conn->chan_lock);
1441 /* ---- L2CAP signalling commands ---- */
1442 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1443 u8 code, u8 ident, u16 dlen, void *data)
1445 struct sk_buff *skb, **frag;
1446 struct l2cap_cmd_hdr *cmd;
1447 struct l2cap_hdr *lh;
1450 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1451 conn, code, ident, dlen);
1453 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1454 count = min_t(unsigned int, conn->mtu, len);
1456 skb = bt_skb_alloc(count, GFP_ATOMIC);
1460 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1461 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1463 if (conn->hcon->type == LE_LINK)
1464 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1466 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1468 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1471 cmd->len = cpu_to_le16(dlen);
1474 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1475 memcpy(skb_put(skb, count), data, count);
1481 /* Continuation fragments (no L2CAP header) */
1482 frag = &skb_shinfo(skb)->frag_list;
1484 count = min_t(unsigned int, conn->mtu, len);
1486 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1490 memcpy(skb_put(*frag, count), data, count);
1495 frag = &(*frag)->next;
1505 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1507 struct l2cap_conf_opt *opt = *ptr;
1510 len = L2CAP_CONF_OPT_SIZE + opt->len;
1518 *val = *((u8 *) opt->val);
1522 *val = get_unaligned_le16(opt->val);
1526 *val = get_unaligned_le32(opt->val);
1530 *val = (unsigned long) opt->val;
1534 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1538 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1540 struct l2cap_conf_opt *opt = *ptr;
1542 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1549 *((u8 *) opt->val) = val;
1553 put_unaligned_le16(val, opt->val);
1557 put_unaligned_le32(val, opt->val);
1561 memcpy(opt->val, (void *) val, len);
1565 *ptr += L2CAP_CONF_OPT_SIZE + len;
1568 static void l2cap_ack_timeout(unsigned long arg)
1570 struct l2cap_chan *chan = (void *) arg;
1572 bh_lock_sock(chan->sk);
1573 l2cap_send_ack(chan);
1574 bh_unlock_sock(chan->sk);
1577 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1579 struct sock *sk = chan->sk;
1581 chan->expected_ack_seq = 0;
1582 chan->unacked_frames = 0;
1583 chan->buffer_seq = 0;
1584 chan->num_acked = 0;
1585 chan->frames_sent = 0;
1587 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1588 (unsigned long) chan);
1589 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1590 (unsigned long) chan);
1591 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1593 skb_queue_head_init(&chan->srej_q);
1594 skb_queue_head_init(&chan->busy_q);
1596 INIT_LIST_HEAD(&chan->srej_l);
1598 INIT_WORK(&chan->busy_work, l2cap_busy_work);
1600 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1603 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1606 case L2CAP_MODE_STREAMING:
1607 case L2CAP_MODE_ERTM:
1608 if (l2cap_mode_supported(mode, remote_feat_mask))
1612 return L2CAP_MODE_BASIC;
1616 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1618 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1619 struct l2cap_conf_req *req = data;
1620 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
1621 void *ptr = req->data;
1623 BT_DBG("chan %p", chan);
1625 if (chan->num_conf_req || chan->num_conf_rsp)
1629 case L2CAP_MODE_STREAMING:
1630 case L2CAP_MODE_ERTM:
1631 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
1636 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
1641 if (pi->imtu != L2CAP_DEFAULT_MTU)
1642 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1645 case L2CAP_MODE_BASIC:
1646 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1647 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1650 rfc.mode = L2CAP_MODE_BASIC;
1652 rfc.max_transmit = 0;
1653 rfc.retrans_timeout = 0;
1654 rfc.monitor_timeout = 0;
1655 rfc.max_pdu_size = 0;
1657 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1658 (unsigned long) &rfc);
1661 case L2CAP_MODE_ERTM:
1662 rfc.mode = L2CAP_MODE_ERTM;
1663 rfc.txwin_size = pi->tx_win;
1664 rfc.max_transmit = pi->max_tx;
1665 rfc.retrans_timeout = 0;
1666 rfc.monitor_timeout = 0;
1667 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1668 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1669 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1671 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1672 (unsigned long) &rfc);
1674 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1677 if (pi->fcs == L2CAP_FCS_NONE ||
1678 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1679 pi->fcs = L2CAP_FCS_NONE;
1680 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1684 case L2CAP_MODE_STREAMING:
1685 rfc.mode = L2CAP_MODE_STREAMING;
1687 rfc.max_transmit = 0;
1688 rfc.retrans_timeout = 0;
1689 rfc.monitor_timeout = 0;
1690 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1691 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1692 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1694 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1695 (unsigned long) &rfc);
1697 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1700 if (pi->fcs == L2CAP_FCS_NONE ||
1701 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1702 pi->fcs = L2CAP_FCS_NONE;
1703 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1708 req->dcid = cpu_to_le16(pi->dcid);
1709 req->flags = cpu_to_le16(0);
1714 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1716 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1717 struct l2cap_conf_rsp *rsp = data;
1718 void *ptr = rsp->data;
1719 void *req = chan->conf_req;
1720 int len = chan->conf_len;
1721 int type, hint, olen;
1723 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1724 u16 mtu = L2CAP_DEFAULT_MTU;
1725 u16 result = L2CAP_CONF_SUCCESS;
1727 BT_DBG("chan %p", chan);
1729 while (len >= L2CAP_CONF_OPT_SIZE) {
1730 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1732 hint = type & L2CAP_CONF_HINT;
1733 type &= L2CAP_CONF_MASK;
1736 case L2CAP_CONF_MTU:
1740 case L2CAP_CONF_FLUSH_TO:
1744 case L2CAP_CONF_QOS:
1747 case L2CAP_CONF_RFC:
1748 if (olen == sizeof(rfc))
1749 memcpy(&rfc, (void *) val, olen);
1752 case L2CAP_CONF_FCS:
1753 if (val == L2CAP_FCS_NONE)
1754 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1762 result = L2CAP_CONF_UNKNOWN;
1763 *((u8 *) ptr++) = type;
1768 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1772 case L2CAP_MODE_STREAMING:
1773 case L2CAP_MODE_ERTM:
1774 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1775 pi->mode = l2cap_select_mode(rfc.mode,
1776 pi->conn->feat_mask);
1780 if (pi->mode != rfc.mode)
1781 return -ECONNREFUSED;
1787 if (pi->mode != rfc.mode) {
1788 result = L2CAP_CONF_UNACCEPT;
1789 rfc.mode = pi->mode;
1791 if (chan->num_conf_rsp == 1)
1792 return -ECONNREFUSED;
1794 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1795 sizeof(rfc), (unsigned long) &rfc);
1799 if (result == L2CAP_CONF_SUCCESS) {
1800 /* Configure output options and let the other side know
1801 * which ones we don't like. */
1803 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1804 result = L2CAP_CONF_UNACCEPT;
1807 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1809 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1812 case L2CAP_MODE_BASIC:
1813 pi->fcs = L2CAP_FCS_NONE;
1814 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1817 case L2CAP_MODE_ERTM:
1818 chan->remote_tx_win = rfc.txwin_size;
1819 chan->remote_max_tx = rfc.max_transmit;
1821 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1822 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1824 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1826 rfc.retrans_timeout =
1827 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1828 rfc.monitor_timeout =
1829 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1831 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1833 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1834 sizeof(rfc), (unsigned long) &rfc);
1838 case L2CAP_MODE_STREAMING:
1839 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1840 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1842 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1844 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1846 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1847 sizeof(rfc), (unsigned long) &rfc);
1852 result = L2CAP_CONF_UNACCEPT;
1854 memset(&rfc, 0, sizeof(rfc));
1855 rfc.mode = pi->mode;
1858 if (result == L2CAP_CONF_SUCCESS)
1859 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1861 rsp->scid = cpu_to_le16(pi->dcid);
1862 rsp->result = cpu_to_le16(result);
1863 rsp->flags = cpu_to_le16(0x0000);
1868 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1870 struct l2cap_pinfo *pi = l2cap_pi(sk);
1871 struct l2cap_conf_req *req = data;
1872 void *ptr = req->data;
1875 struct l2cap_conf_rfc rfc;
1877 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1879 while (len >= L2CAP_CONF_OPT_SIZE) {
1880 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1883 case L2CAP_CONF_MTU:
1884 if (val < L2CAP_DEFAULT_MIN_MTU) {
1885 *result = L2CAP_CONF_UNACCEPT;
1886 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1889 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1892 case L2CAP_CONF_FLUSH_TO:
1894 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1898 case L2CAP_CONF_RFC:
1899 if (olen == sizeof(rfc))
1900 memcpy(&rfc, (void *)val, olen);
1902 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1903 rfc.mode != pi->mode)
1904 return -ECONNREFUSED;
1908 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1909 sizeof(rfc), (unsigned long) &rfc);
1914 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1915 return -ECONNREFUSED;
1917 pi->mode = rfc.mode;
1919 if (*result == L2CAP_CONF_SUCCESS) {
1921 case L2CAP_MODE_ERTM:
1922 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1923 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1924 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1926 case L2CAP_MODE_STREAMING:
1927 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1931 req->dcid = cpu_to_le16(pi->dcid);
1932 req->flags = cpu_to_le16(0x0000);
1937 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1939 struct l2cap_conf_rsp *rsp = data;
1940 void *ptr = rsp->data;
1942 BT_DBG("sk %p", sk);
1944 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1945 rsp->result = cpu_to_le16(result);
1946 rsp->flags = cpu_to_le16(flags);
1951 void __l2cap_connect_rsp_defer(struct sock *sk)
1953 struct l2cap_conn_rsp rsp;
1954 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1955 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1958 sk->sk_state = BT_CONFIG;
1960 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1961 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1962 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1963 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1964 l2cap_send_cmd(conn, chan->ident,
1965 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1967 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
1970 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1971 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1972 l2cap_build_conf_req(chan, buf), buf);
1973 chan->num_conf_req++;
1976 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1978 struct l2cap_pinfo *pi = l2cap_pi(sk);
1981 struct l2cap_conf_rfc rfc;
1983 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1985 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1988 while (len >= L2CAP_CONF_OPT_SIZE) {
1989 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1992 case L2CAP_CONF_RFC:
1993 if (olen == sizeof(rfc))
1994 memcpy(&rfc, (void *)val, olen);
2001 case L2CAP_MODE_ERTM:
2002 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2003 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2004 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2006 case L2CAP_MODE_STREAMING:
2007 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2011 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2013 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2015 if (rej->reason != 0x0000)
2018 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2019 cmd->ident == conn->info_ident) {
2020 del_timer(&conn->info_timer);
2022 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2023 conn->info_ident = 0;
2025 l2cap_conn_start(conn);
2031 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2033 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2034 struct l2cap_conn_rsp rsp;
2035 struct l2cap_chan *chan = NULL;
2036 struct sock *parent, *sk = NULL;
2037 int result, status = L2CAP_CS_NO_INFO;
2039 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2040 __le16 psm = req->psm;
2042 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2044 /* Check if we have socket listening on psm */
2045 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2047 result = L2CAP_CR_BAD_PSM;
2051 bh_lock_sock(parent);
2053 /* Check if the ACL is secure enough (if not SDP) */
2054 if (psm != cpu_to_le16(0x0001) &&
2055 !hci_conn_check_link_mode(conn->hcon)) {
2056 conn->disc_reason = 0x05;
2057 result = L2CAP_CR_SEC_BLOCK;
2061 result = L2CAP_CR_NO_MEM;
2063 /* Check for backlog size */
2064 if (sk_acceptq_is_full(parent)) {
2065 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2069 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2073 chan = l2cap_chan_alloc(sk);
2075 l2cap_sock_kill(sk);
2079 write_lock_bh(&conn->chan_lock);
2081 /* Check if we already have channel with that dcid */
2082 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2083 write_unlock_bh(&conn->chan_lock);
2084 sock_set_flag(sk, SOCK_ZAPPED);
2085 l2cap_sock_kill(sk);
2089 hci_conn_hold(conn->hcon);
2091 l2cap_sock_init(sk, parent);
2092 bacpy(&bt_sk(sk)->src, conn->src);
2093 bacpy(&bt_sk(sk)->dst, conn->dst);
2094 l2cap_pi(sk)->psm = psm;
2095 l2cap_pi(sk)->dcid = scid;
2097 bt_accept_enqueue(parent, sk);
2099 __l2cap_chan_add(conn, chan);
2101 l2cap_pi(sk)->chan = chan;
2103 dcid = l2cap_pi(sk)->scid;
2105 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2107 chan->ident = cmd->ident;
2109 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2110 if (l2cap_check_security(sk)) {
2111 if (bt_sk(sk)->defer_setup) {
2112 sk->sk_state = BT_CONNECT2;
2113 result = L2CAP_CR_PEND;
2114 status = L2CAP_CS_AUTHOR_PEND;
2115 parent->sk_data_ready(parent, 0);
2117 sk->sk_state = BT_CONFIG;
2118 result = L2CAP_CR_SUCCESS;
2119 status = L2CAP_CS_NO_INFO;
2122 sk->sk_state = BT_CONNECT2;
2123 result = L2CAP_CR_PEND;
2124 status = L2CAP_CS_AUTHEN_PEND;
2127 sk->sk_state = BT_CONNECT2;
2128 result = L2CAP_CR_PEND;
2129 status = L2CAP_CS_NO_INFO;
2132 write_unlock_bh(&conn->chan_lock);
2135 bh_unlock_sock(parent);
2138 rsp.scid = cpu_to_le16(scid);
2139 rsp.dcid = cpu_to_le16(dcid);
2140 rsp.result = cpu_to_le16(result);
2141 rsp.status = cpu_to_le16(status);
2142 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2144 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2145 struct l2cap_info_req info;
2146 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2148 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2149 conn->info_ident = l2cap_get_ident(conn);
2151 mod_timer(&conn->info_timer, jiffies +
2152 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2154 l2cap_send_cmd(conn, conn->info_ident,
2155 L2CAP_INFO_REQ, sizeof(info), &info);
2158 if (chan && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2159 result == L2CAP_CR_SUCCESS) {
2161 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2162 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2163 l2cap_build_conf_req(chan, buf), buf);
2164 chan->num_conf_req++;
2170 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2172 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2173 u16 scid, dcid, result, status;
2174 struct l2cap_chan *chan;
2178 scid = __le16_to_cpu(rsp->scid);
2179 dcid = __le16_to_cpu(rsp->dcid);
2180 result = __le16_to_cpu(rsp->result);
2181 status = __le16_to_cpu(rsp->status);
2183 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2186 chan = l2cap_get_chan_by_scid(conn, scid);
2190 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2198 case L2CAP_CR_SUCCESS:
2199 sk->sk_state = BT_CONFIG;
2201 l2cap_pi(sk)->dcid = dcid;
2202 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2204 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
2207 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2209 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2210 l2cap_build_conf_req(chan, req), req);
2211 chan->num_conf_req++;
2215 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2219 /* don't delete l2cap channel if sk is owned by user */
2220 if (sock_owned_by_user(sk)) {
2221 sk->sk_state = BT_DISCONN;
2222 l2cap_sock_clear_timer(sk);
2223 l2cap_sock_set_timer(sk, HZ / 5);
2227 l2cap_chan_del(chan, ECONNREFUSED);
2235 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2237 /* FCS is enabled only in ERTM or streaming mode, if one or both
2240 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2241 pi->fcs = L2CAP_FCS_NONE;
2242 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2243 pi->fcs = L2CAP_FCS_CRC16;
2246 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2248 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2251 struct l2cap_chan *chan;
2255 dcid = __le16_to_cpu(req->dcid);
2256 flags = __le16_to_cpu(req->flags);
2258 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2260 chan = l2cap_get_chan_by_scid(conn, dcid);
2266 if (sk->sk_state != BT_CONFIG) {
2267 struct l2cap_cmd_rej rej;
2269 rej.reason = cpu_to_le16(0x0002);
2270 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2275 /* Reject if config buffer is too small. */
2276 len = cmd_len - sizeof(*req);
2277 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2278 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2279 l2cap_build_conf_rsp(sk, rsp,
2280 L2CAP_CONF_REJECT, flags), rsp);
2285 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2286 chan->conf_len += len;
2288 if (flags & 0x0001) {
2289 /* Incomplete config. Send empty response. */
2290 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2291 l2cap_build_conf_rsp(sk, rsp,
2292 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2296 /* Complete config. */
2297 len = l2cap_parse_conf_req(chan, rsp);
2299 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2303 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2304 chan->num_conf_rsp++;
2306 /* Reset config buffer. */
2309 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2312 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2313 set_default_fcs(l2cap_pi(sk));
2315 sk->sk_state = BT_CONNECTED;
2317 chan->next_tx_seq = 0;
2318 chan->expected_tx_seq = 0;
2319 skb_queue_head_init(&chan->tx_q);
2320 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2321 l2cap_ertm_init(chan);
2323 l2cap_chan_ready(sk);
2327 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2329 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2330 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2331 l2cap_build_conf_req(chan, buf), buf);
2332 chan->num_conf_req++;
2340 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2342 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2343 u16 scid, flags, result;
2344 struct l2cap_chan *chan;
2346 int len = cmd->len - sizeof(*rsp);
2348 scid = __le16_to_cpu(rsp->scid);
2349 flags = __le16_to_cpu(rsp->flags);
2350 result = __le16_to_cpu(rsp->result);
2352 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2353 scid, flags, result);
2355 chan = l2cap_get_chan_by_scid(conn, scid);
2362 case L2CAP_CONF_SUCCESS:
2363 l2cap_conf_rfc_get(sk, rsp->data, len);
2366 case L2CAP_CONF_UNACCEPT:
2367 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2370 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2371 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2375 /* throw out any old stored conf requests */
2376 result = L2CAP_CONF_SUCCESS;
2377 len = l2cap_parse_conf_rsp(sk, rsp->data,
2380 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2384 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2385 L2CAP_CONF_REQ, len, req);
2386 chan->num_conf_req++;
2387 if (result != L2CAP_CONF_SUCCESS)
2393 sk->sk_err = ECONNRESET;
2394 l2cap_sock_set_timer(sk, HZ * 5);
2395 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2402 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2404 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2405 set_default_fcs(l2cap_pi(sk));
2407 sk->sk_state = BT_CONNECTED;
2408 chan->next_tx_seq = 0;
2409 chan->expected_tx_seq = 0;
2410 skb_queue_head_init(&chan->tx_q);
2411 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2412 l2cap_ertm_init(chan);
2414 l2cap_chan_ready(sk);
2422 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2424 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2425 struct l2cap_disconn_rsp rsp;
2427 struct l2cap_chan *chan;
2430 scid = __le16_to_cpu(req->scid);
2431 dcid = __le16_to_cpu(req->dcid);
2433 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2435 chan = l2cap_get_chan_by_scid(conn, dcid);
2441 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2442 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2443 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2445 sk->sk_shutdown = SHUTDOWN_MASK;
2447 /* don't delete l2cap channel if sk is owned by user */
2448 if (sock_owned_by_user(sk)) {
2449 sk->sk_state = BT_DISCONN;
2450 l2cap_sock_clear_timer(sk);
2451 l2cap_sock_set_timer(sk, HZ / 5);
2456 l2cap_chan_del(chan, ECONNRESET);
2459 l2cap_sock_kill(sk);
2463 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2465 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2467 struct l2cap_chan *chan;
2470 scid = __le16_to_cpu(rsp->scid);
2471 dcid = __le16_to_cpu(rsp->dcid);
2473 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2475 chan = l2cap_get_chan_by_scid(conn, scid);
2481 /* don't delete l2cap channel if sk is owned by user */
2482 if (sock_owned_by_user(sk)) {
2483 sk->sk_state = BT_DISCONN;
2484 l2cap_sock_clear_timer(sk);
2485 l2cap_sock_set_timer(sk, HZ / 5);
2490 l2cap_chan_del(chan, 0);
2493 l2cap_sock_kill(sk);
2497 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2499 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2502 type = __le16_to_cpu(req->type);
2504 BT_DBG("type 0x%4.4x", type);
2506 if (type == L2CAP_IT_FEAT_MASK) {
2508 u32 feat_mask = l2cap_feat_mask;
2509 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2510 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2511 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2513 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2515 put_unaligned_le32(feat_mask, rsp->data);
2516 l2cap_send_cmd(conn, cmd->ident,
2517 L2CAP_INFO_RSP, sizeof(buf), buf);
2518 } else if (type == L2CAP_IT_FIXED_CHAN) {
2520 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2521 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2522 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2523 memcpy(buf + 4, l2cap_fixed_chan, 8);
2524 l2cap_send_cmd(conn, cmd->ident,
2525 L2CAP_INFO_RSP, sizeof(buf), buf);
2527 struct l2cap_info_rsp rsp;
2528 rsp.type = cpu_to_le16(type);
2529 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2530 l2cap_send_cmd(conn, cmd->ident,
2531 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2537 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2539 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2542 type = __le16_to_cpu(rsp->type);
2543 result = __le16_to_cpu(rsp->result);
2545 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2547 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2548 if (cmd->ident != conn->info_ident ||
2549 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2552 del_timer(&conn->info_timer);
2554 if (result != L2CAP_IR_SUCCESS) {
2555 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2556 conn->info_ident = 0;
2558 l2cap_conn_start(conn);
2563 if (type == L2CAP_IT_FEAT_MASK) {
2564 conn->feat_mask = get_unaligned_le32(rsp->data);
2566 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2567 struct l2cap_info_req req;
2568 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2570 conn->info_ident = l2cap_get_ident(conn);
2572 l2cap_send_cmd(conn, conn->info_ident,
2573 L2CAP_INFO_REQ, sizeof(req), &req);
2575 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2576 conn->info_ident = 0;
2578 l2cap_conn_start(conn);
2580 } else if (type == L2CAP_IT_FIXED_CHAN) {
2581 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2582 conn->info_ident = 0;
2584 l2cap_conn_start(conn);
2590 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2595 if (min > max || min < 6 || max > 3200)
2598 if (to_multiplier < 10 || to_multiplier > 3200)
2601 if (max >= to_multiplier * 8)
2604 max_latency = (to_multiplier * 8 / max) - 1;
2605 if (latency > 499 || latency > max_latency)
2611 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2612 struct l2cap_cmd_hdr *cmd, u8 *data)
2614 struct hci_conn *hcon = conn->hcon;
2615 struct l2cap_conn_param_update_req *req;
2616 struct l2cap_conn_param_update_rsp rsp;
2617 u16 min, max, latency, to_multiplier, cmd_len;
2620 if (!(hcon->link_mode & HCI_LM_MASTER))
2623 cmd_len = __le16_to_cpu(cmd->len);
2624 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2627 req = (struct l2cap_conn_param_update_req *) data;
2628 min = __le16_to_cpu(req->min);
2629 max = __le16_to_cpu(req->max);
2630 latency = __le16_to_cpu(req->latency);
2631 to_multiplier = __le16_to_cpu(req->to_multiplier);
2633 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2634 min, max, latency, to_multiplier);
2636 memset(&rsp, 0, sizeof(rsp));
2638 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2640 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2642 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2644 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2648 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2653 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2654 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2658 switch (cmd->code) {
2659 case L2CAP_COMMAND_REJ:
2660 l2cap_command_rej(conn, cmd, data);
2663 case L2CAP_CONN_REQ:
2664 err = l2cap_connect_req(conn, cmd, data);
2667 case L2CAP_CONN_RSP:
2668 err = l2cap_connect_rsp(conn, cmd, data);
2671 case L2CAP_CONF_REQ:
2672 err = l2cap_config_req(conn, cmd, cmd_len, data);
2675 case L2CAP_CONF_RSP:
2676 err = l2cap_config_rsp(conn, cmd, data);
2679 case L2CAP_DISCONN_REQ:
2680 err = l2cap_disconnect_req(conn, cmd, data);
2683 case L2CAP_DISCONN_RSP:
2684 err = l2cap_disconnect_rsp(conn, cmd, data);
2687 case L2CAP_ECHO_REQ:
2688 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2691 case L2CAP_ECHO_RSP:
2694 case L2CAP_INFO_REQ:
2695 err = l2cap_information_req(conn, cmd, data);
2698 case L2CAP_INFO_RSP:
2699 err = l2cap_information_rsp(conn, cmd, data);
2703 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2711 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2712 struct l2cap_cmd_hdr *cmd, u8 *data)
2714 switch (cmd->code) {
2715 case L2CAP_COMMAND_REJ:
2718 case L2CAP_CONN_PARAM_UPDATE_REQ:
2719 return l2cap_conn_param_update_req(conn, cmd, data);
2721 case L2CAP_CONN_PARAM_UPDATE_RSP:
2725 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2730 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2731 struct sk_buff *skb)
2733 u8 *data = skb->data;
2735 struct l2cap_cmd_hdr cmd;
2738 l2cap_raw_recv(conn, skb);
2740 while (len >= L2CAP_CMD_HDR_SIZE) {
2742 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2743 data += L2CAP_CMD_HDR_SIZE;
2744 len -= L2CAP_CMD_HDR_SIZE;
2746 cmd_len = le16_to_cpu(cmd.len);
2748 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
2750 if (cmd_len > len || !cmd.ident) {
2751 BT_DBG("corrupted command");
2755 if (conn->hcon->type == LE_LINK)
2756 err = l2cap_le_sig_cmd(conn, &cmd, data);
2758 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2761 struct l2cap_cmd_rej rej;
2763 BT_ERR("Wrong link type (%d)", err);
2765 /* FIXME: Map err to a valid reason */
2766 rej.reason = cpu_to_le16(0);
2767 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2777 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2779 u16 our_fcs, rcv_fcs;
2780 int hdr_size = L2CAP_HDR_SIZE + 2;
2782 if (pi->fcs == L2CAP_FCS_CRC16) {
2783 skb_trim(skb, skb->len - 2);
2784 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2785 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2787 if (our_fcs != rcv_fcs)
2793 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2797 chan->frames_sent = 0;
2799 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2801 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2802 control |= L2CAP_SUPER_RCV_NOT_READY;
2803 l2cap_send_sframe(chan, control);
2804 chan->conn_state |= L2CAP_CONN_RNR_SENT;
2807 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
2808 l2cap_retransmit_frames(chan);
2810 l2cap_ertm_send(chan);
2812 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2813 chan->frames_sent == 0) {
2814 control |= L2CAP_SUPER_RCV_READY;
2815 l2cap_send_sframe(chan, control);
2819 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
2821 struct sk_buff *next_skb;
2822 int tx_seq_offset, next_tx_seq_offset;
2824 bt_cb(skb)->tx_seq = tx_seq;
2825 bt_cb(skb)->sar = sar;
2827 next_skb = skb_peek(&chan->srej_q);
2829 __skb_queue_tail(&chan->srej_q, skb);
2833 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
2834 if (tx_seq_offset < 0)
2835 tx_seq_offset += 64;
2838 if (bt_cb(next_skb)->tx_seq == tx_seq)
2841 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2842 chan->buffer_seq) % 64;
2843 if (next_tx_seq_offset < 0)
2844 next_tx_seq_offset += 64;
2846 if (next_tx_seq_offset > tx_seq_offset) {
2847 __skb_queue_before(&chan->srej_q, next_skb, skb);
2851 if (skb_queue_is_last(&chan->srej_q, next_skb))
2854 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
2856 __skb_queue_tail(&chan->srej_q, skb);
2861 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
2863 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2864 struct sk_buff *_skb;
2867 switch (control & L2CAP_CTRL_SAR) {
2868 case L2CAP_SDU_UNSEGMENTED:
2869 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2872 err = sock_queue_rcv_skb(chan->sk, skb);
2878 case L2CAP_SDU_START:
2879 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2882 chan->sdu_len = get_unaligned_le16(skb->data);
2884 if (chan->sdu_len > pi->imtu)
2887 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
2891 /* pull sdu_len bytes only after alloc, because of Local Busy
2892 * condition we have to be sure that this will be executed
2893 * only once, i.e., when alloc does not fail */
2896 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2898 chan->conn_state |= L2CAP_CONN_SAR_SDU;
2899 chan->partial_sdu_len = skb->len;
2902 case L2CAP_SDU_CONTINUE:
2903 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
2909 chan->partial_sdu_len += skb->len;
2910 if (chan->partial_sdu_len > chan->sdu_len)
2913 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2918 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
2924 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
2925 chan->partial_sdu_len += skb->len;
2927 if (chan->partial_sdu_len > pi->imtu)
2930 if (chan->partial_sdu_len != chan->sdu_len)
2933 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2936 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
2938 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2942 err = sock_queue_rcv_skb(chan->sk, _skb);
2945 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2949 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2950 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
2952 kfree_skb(chan->sdu);
2960 kfree_skb(chan->sdu);
2964 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
2969 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
2971 struct sk_buff *skb;
2975 while ((skb = skb_dequeue(&chan->busy_q))) {
2976 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2977 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
2979 skb_queue_head(&chan->busy_q, skb);
2983 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
2986 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
2989 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2990 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2991 l2cap_send_sframe(chan, control);
2992 chan->retry_count = 1;
2994 del_timer(&chan->retrans_timer);
2995 __mod_monitor_timer();
2997 chan->conn_state |= L2CAP_CONN_WAIT_F;
3000 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3001 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3003 BT_DBG("chan %p, Exit local busy", chan);
3008 static void l2cap_busy_work(struct work_struct *work)
3010 DECLARE_WAITQUEUE(wait, current);
3011 struct l2cap_chan *chan =
3012 container_of(work, struct l2cap_chan, busy_work);
3013 struct sock *sk = chan->sk;
3014 int n_tries = 0, timeo = HZ/5, err;
3015 struct sk_buff *skb;
3019 add_wait_queue(sk_sleep(sk), &wait);
3020 while ((skb = skb_peek(&chan->busy_q))) {
3021 set_current_state(TASK_INTERRUPTIBLE);
3023 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3025 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, EBUSY);
3032 if (signal_pending(current)) {
3033 err = sock_intr_errno(timeo);
3038 timeo = schedule_timeout(timeo);
3041 err = sock_error(sk);
3045 if (l2cap_try_push_rx_skb(chan) == 0)
3049 set_current_state(TASK_RUNNING);
3050 remove_wait_queue(sk_sleep(sk), &wait);
3055 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3059 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3060 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3061 __skb_queue_tail(&chan->busy_q, skb);
3062 return l2cap_try_push_rx_skb(chan);
3067 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3069 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3073 /* Busy Condition */
3074 BT_DBG("chan %p, Enter local busy", chan);
3076 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3077 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3078 __skb_queue_tail(&chan->busy_q, skb);
3080 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3081 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3082 l2cap_send_sframe(chan, sctrl);
3084 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3086 del_timer(&chan->ack_timer);
3088 queue_work(_busy_wq, &chan->busy_work);
3093 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3095 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3096 struct sk_buff *_skb;
3100 * TODO: We have to notify the userland if some data is lost with the
3104 switch (control & L2CAP_CTRL_SAR) {
3105 case L2CAP_SDU_UNSEGMENTED:
3106 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3107 kfree_skb(chan->sdu);
3111 err = sock_queue_rcv_skb(chan->sk, skb);
3117 case L2CAP_SDU_START:
3118 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3119 kfree_skb(chan->sdu);
3123 chan->sdu_len = get_unaligned_le16(skb->data);
3126 if (chan->sdu_len > pi->imtu) {
3131 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3137 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3139 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3140 chan->partial_sdu_len = skb->len;
3144 case L2CAP_SDU_CONTINUE:
3145 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3148 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3150 chan->partial_sdu_len += skb->len;
3151 if (chan->partial_sdu_len > chan->sdu_len)
3152 kfree_skb(chan->sdu);
3159 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3162 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3164 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3165 chan->partial_sdu_len += skb->len;
3167 if (chan->partial_sdu_len > pi->imtu)
3170 if (chan->partial_sdu_len == chan->sdu_len) {
3171 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3172 err = sock_queue_rcv_skb(chan->sk, _skb);
3179 kfree_skb(chan->sdu);
3187 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3189 struct sk_buff *skb;
3192 while ((skb = skb_peek(&chan->srej_q))) {
3193 if (bt_cb(skb)->tx_seq != tx_seq)
3196 skb = skb_dequeue(&chan->srej_q);
3197 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3198 l2cap_ertm_reassembly_sdu(chan, skb, control);
3199 chan->buffer_seq_srej =
3200 (chan->buffer_seq_srej + 1) % 64;
3201 tx_seq = (tx_seq + 1) % 64;
3205 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3207 struct srej_list *l, *tmp;
3210 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3211 if (l->tx_seq == tx_seq) {
3216 control = L2CAP_SUPER_SELECT_REJECT;
3217 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3218 l2cap_send_sframe(chan, control);
3220 list_add_tail(&l->list, &chan->srej_l);
3224 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3226 struct srej_list *new;
3229 while (tx_seq != chan->expected_tx_seq) {
3230 control = L2CAP_SUPER_SELECT_REJECT;
3231 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3232 l2cap_send_sframe(chan, control);
3234 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3235 new->tx_seq = chan->expected_tx_seq;
3236 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3237 list_add_tail(&new->list, &chan->srej_l);
3239 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3242 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3244 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3245 u8 tx_seq = __get_txseq(rx_control);
3246 u8 req_seq = __get_reqseq(rx_control);
3247 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3248 int tx_seq_offset, expected_tx_seq_offset;
3249 int num_to_ack = (pi->tx_win/6) + 1;
3252 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3253 tx_seq, rx_control);
3255 if (L2CAP_CTRL_FINAL & rx_control &&
3256 chan->conn_state & L2CAP_CONN_WAIT_F) {
3257 del_timer(&chan->monitor_timer);
3258 if (chan->unacked_frames > 0)
3259 __mod_retrans_timer();
3260 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3263 chan->expected_ack_seq = req_seq;
3264 l2cap_drop_acked_frames(chan);
3266 if (tx_seq == chan->expected_tx_seq)
3269 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3270 if (tx_seq_offset < 0)
3271 tx_seq_offset += 64;
3273 /* invalid tx_seq */
3274 if (tx_seq_offset >= pi->tx_win) {
3275 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3279 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3282 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3283 struct srej_list *first;
3285 first = list_first_entry(&chan->srej_l,
3286 struct srej_list, list);
3287 if (tx_seq == first->tx_seq) {
3288 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3289 l2cap_check_srej_gap(chan, tx_seq);
3291 list_del(&first->list);
3294 if (list_empty(&chan->srej_l)) {
3295 chan->buffer_seq = chan->buffer_seq_srej;
3296 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3297 l2cap_send_ack(chan);
3298 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3301 struct srej_list *l;
3303 /* duplicated tx_seq */
3304 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3307 list_for_each_entry(l, &chan->srej_l, list) {
3308 if (l->tx_seq == tx_seq) {
3309 l2cap_resend_srejframe(chan, tx_seq);
3313 l2cap_send_srejframe(chan, tx_seq);
3316 expected_tx_seq_offset =
3317 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3318 if (expected_tx_seq_offset < 0)
3319 expected_tx_seq_offset += 64;
3321 /* duplicated tx_seq */
3322 if (tx_seq_offset < expected_tx_seq_offset)
3325 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3327 BT_DBG("chan %p, Enter SREJ", chan);
3329 INIT_LIST_HEAD(&chan->srej_l);
3330 chan->buffer_seq_srej = chan->buffer_seq;
3332 __skb_queue_head_init(&chan->srej_q);
3333 __skb_queue_head_init(&chan->busy_q);
3334 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3336 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3338 l2cap_send_srejframe(chan, tx_seq);
3340 del_timer(&chan->ack_timer);
3345 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3347 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3348 bt_cb(skb)->tx_seq = tx_seq;
3349 bt_cb(skb)->sar = sar;
3350 __skb_queue_tail(&chan->srej_q, skb);
3354 err = l2cap_push_rx_skb(chan, skb, rx_control);
3358 if (rx_control & L2CAP_CTRL_FINAL) {
3359 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3360 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3362 l2cap_retransmit_frames(chan);
3367 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3368 if (chan->num_acked == num_to_ack - 1)
3369 l2cap_send_ack(chan);
3378 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3380 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3383 chan->expected_ack_seq = __get_reqseq(rx_control);
3384 l2cap_drop_acked_frames(chan);
3386 if (rx_control & L2CAP_CTRL_POLL) {
3387 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3388 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3389 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3390 (chan->unacked_frames > 0))
3391 __mod_retrans_timer();
3393 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3394 l2cap_send_srejtail(chan);
3396 l2cap_send_i_or_rr_or_rnr(chan);
3399 } else if (rx_control & L2CAP_CTRL_FINAL) {
3400 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3402 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3403 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3405 l2cap_retransmit_frames(chan);
3408 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3409 (chan->unacked_frames > 0))
3410 __mod_retrans_timer();
3412 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3413 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3414 l2cap_send_ack(chan);
3416 l2cap_ertm_send(chan);
3420 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3422 u8 tx_seq = __get_reqseq(rx_control);
3424 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3426 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3428 chan->expected_ack_seq = tx_seq;
3429 l2cap_drop_acked_frames(chan);
3431 if (rx_control & L2CAP_CTRL_FINAL) {
3432 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3433 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3435 l2cap_retransmit_frames(chan);
3437 l2cap_retransmit_frames(chan);
3439 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3440 chan->conn_state |= L2CAP_CONN_REJ_ACT;
3443 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3445 u8 tx_seq = __get_reqseq(rx_control);
3447 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3449 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3451 if (rx_control & L2CAP_CTRL_POLL) {
3452 chan->expected_ack_seq = tx_seq;
3453 l2cap_drop_acked_frames(chan);
3455 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3456 l2cap_retransmit_one_frame(chan, tx_seq);
3458 l2cap_ertm_send(chan);
3460 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3461 chan->srej_save_reqseq = tx_seq;
3462 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3464 } else if (rx_control & L2CAP_CTRL_FINAL) {
3465 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3466 chan->srej_save_reqseq == tx_seq)
3467 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3469 l2cap_retransmit_one_frame(chan, tx_seq);
3471 l2cap_retransmit_one_frame(chan, tx_seq);
3472 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3473 chan->srej_save_reqseq = tx_seq;
3474 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3479 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3481 u8 tx_seq = __get_reqseq(rx_control);
3483 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3485 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3486 chan->expected_ack_seq = tx_seq;
3487 l2cap_drop_acked_frames(chan);
3489 if (rx_control & L2CAP_CTRL_POLL)
3490 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3492 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
3493 del_timer(&chan->retrans_timer);
3494 if (rx_control & L2CAP_CTRL_POLL)
3495 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3499 if (rx_control & L2CAP_CTRL_POLL)
3500 l2cap_send_srejtail(chan);
3502 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
3505 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3507 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3509 if (L2CAP_CTRL_FINAL & rx_control &&
3510 chan->conn_state & L2CAP_CONN_WAIT_F) {
3511 del_timer(&chan->monitor_timer);
3512 if (chan->unacked_frames > 0)
3513 __mod_retrans_timer();
3514 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3517 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3518 case L2CAP_SUPER_RCV_READY:
3519 l2cap_data_channel_rrframe(chan, rx_control);
3522 case L2CAP_SUPER_REJECT:
3523 l2cap_data_channel_rejframe(chan, rx_control);
3526 case L2CAP_SUPER_SELECT_REJECT:
3527 l2cap_data_channel_srejframe(chan, rx_control);
3530 case L2CAP_SUPER_RCV_NOT_READY:
3531 l2cap_data_channel_rnrframe(chan, rx_control);
3539 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3541 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3542 struct l2cap_pinfo *pi = l2cap_pi(sk);
3545 int len, next_tx_seq_offset, req_seq_offset;
3547 control = get_unaligned_le16(skb->data);
3552 * We can just drop the corrupted I-frame here.
3553 * Receiver will miss it and start proper recovery
3554 * procedures and ask retransmission.
3556 if (l2cap_check_fcs(pi, skb))
3559 if (__is_sar_start(control) && __is_iframe(control))
3562 if (pi->fcs == L2CAP_FCS_CRC16)
3565 if (len > pi->mps) {
3566 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3570 req_seq = __get_reqseq(control);
3571 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3572 if (req_seq_offset < 0)
3573 req_seq_offset += 64;
3575 next_tx_seq_offset =
3576 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3577 if (next_tx_seq_offset < 0)
3578 next_tx_seq_offset += 64;
3580 /* check for invalid req-seq */
3581 if (req_seq_offset > next_tx_seq_offset) {
3582 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3586 if (__is_iframe(control)) {
3588 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3592 l2cap_data_channel_iframe(chan, control, skb);
3596 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3600 l2cap_data_channel_sframe(chan, control, skb);
3610 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3612 struct l2cap_chan *chan;
3613 struct sock *sk = NULL;
3614 struct l2cap_pinfo *pi;
3619 chan = l2cap_get_chan_by_scid(conn, cid);
3621 BT_DBG("unknown cid 0x%4.4x", cid);
3628 BT_DBG("chan %p, len %d", chan, skb->len);
3630 if (sk->sk_state != BT_CONNECTED)
3634 case L2CAP_MODE_BASIC:
3635 /* If socket recv buffers overflows we drop data here
3636 * which is *bad* because L2CAP has to be reliable.
3637 * But we don't have any other choice. L2CAP doesn't
3638 * provide flow control mechanism. */
3640 if (pi->imtu < skb->len)
3643 if (!sock_queue_rcv_skb(sk, skb))
3647 case L2CAP_MODE_ERTM:
3648 if (!sock_owned_by_user(sk)) {
3649 l2cap_ertm_data_rcv(sk, skb);
3651 if (sk_add_backlog(sk, skb))
3657 case L2CAP_MODE_STREAMING:
3658 control = get_unaligned_le16(skb->data);
3662 if (l2cap_check_fcs(pi, skb))
3665 if (__is_sar_start(control))
3668 if (pi->fcs == L2CAP_FCS_CRC16)
3671 if (len > pi->mps || len < 0 || __is_sframe(control))
3674 tx_seq = __get_txseq(control);
3676 if (chan->expected_tx_seq == tx_seq)
3677 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3679 chan->expected_tx_seq = (tx_seq + 1) % 64;
3681 l2cap_streaming_reassembly_sdu(chan, skb, control);
3686 BT_DBG("chan %p: bad mode 0x%2.2x", chan, pi->mode);
3700 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3704 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3710 BT_DBG("sk %p, len %d", sk, skb->len);
3712 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3715 if (l2cap_pi(sk)->imtu < skb->len)
3718 if (!sock_queue_rcv_skb(sk, skb))
3730 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3734 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
3740 BT_DBG("sk %p, len %d", sk, skb->len);
3742 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3745 if (l2cap_pi(sk)->imtu < skb->len)
3748 if (!sock_queue_rcv_skb(sk, skb))
3760 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3762 struct l2cap_hdr *lh = (void *) skb->data;
3766 skb_pull(skb, L2CAP_HDR_SIZE);
3767 cid = __le16_to_cpu(lh->cid);
3768 len = __le16_to_cpu(lh->len);
3770 if (len != skb->len) {
3775 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3778 case L2CAP_CID_LE_SIGNALING:
3779 case L2CAP_CID_SIGNALING:
3780 l2cap_sig_channel(conn, skb);
3783 case L2CAP_CID_CONN_LESS:
3784 psm = get_unaligned_le16(skb->data);
3786 l2cap_conless_channel(conn, psm, skb);
3789 case L2CAP_CID_LE_DATA:
3790 l2cap_att_channel(conn, cid, skb);
3794 l2cap_data_channel(conn, cid, skb);
3799 /* ---- L2CAP interface with lower layer (HCI) ---- */
3801 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3803 int exact = 0, lm1 = 0, lm2 = 0;
3804 register struct sock *sk;
3805 struct hlist_node *node;
3807 if (type != ACL_LINK)
3810 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3812 /* Find listening sockets and check their link_mode */
3813 read_lock(&l2cap_sk_list.lock);
3814 sk_for_each(sk, node, &l2cap_sk_list.head) {
3815 if (sk->sk_state != BT_LISTEN)
3818 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3819 lm1 |= HCI_LM_ACCEPT;
3820 if (l2cap_pi(sk)->role_switch)
3821 lm1 |= HCI_LM_MASTER;
3823 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3824 lm2 |= HCI_LM_ACCEPT;
3825 if (l2cap_pi(sk)->role_switch)
3826 lm2 |= HCI_LM_MASTER;
3829 read_unlock(&l2cap_sk_list.lock);
3831 return exact ? lm1 : lm2;
3834 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3836 struct l2cap_conn *conn;
3838 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3840 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3844 conn = l2cap_conn_add(hcon, status);
3846 l2cap_conn_ready(conn);
3848 l2cap_conn_del(hcon, bt_err(status));
3853 static int l2cap_disconn_ind(struct hci_conn *hcon)
3855 struct l2cap_conn *conn = hcon->l2cap_data;
3857 BT_DBG("hcon %p", hcon);
3859 if (hcon->type != ACL_LINK || !conn)
3862 return conn->disc_reason;
3865 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3867 BT_DBG("hcon %p reason %d", hcon, reason);
3869 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3872 l2cap_conn_del(hcon, bt_err(reason));
3877 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3879 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3882 if (encrypt == 0x00) {
3883 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3884 l2cap_sock_clear_timer(sk);
3885 l2cap_sock_set_timer(sk, HZ * 5);
3886 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3887 __l2cap_sock_close(sk, ECONNREFUSED);
3889 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3890 l2cap_sock_clear_timer(sk);
3894 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3896 struct l2cap_conn *conn = hcon->l2cap_data;
3897 struct l2cap_chan *chan;
3902 BT_DBG("conn %p", conn);
3904 read_lock(&conn->chan_lock);
3906 list_for_each_entry(chan, &conn->chan_l, list) {
3907 struct sock *sk = chan->sk;
3911 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3916 if (!status && (sk->sk_state == BT_CONNECTED ||
3917 sk->sk_state == BT_CONFIG)) {
3918 l2cap_check_encryption(sk, encrypt);
3923 if (sk->sk_state == BT_CONNECT) {
3925 struct l2cap_conn_req req;
3926 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3927 req.psm = l2cap_pi(sk)->psm;
3929 chan->ident = l2cap_get_ident(conn);
3930 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3932 l2cap_send_cmd(conn, chan->ident,
3933 L2CAP_CONN_REQ, sizeof(req), &req);
3935 l2cap_sock_clear_timer(sk);
3936 l2cap_sock_set_timer(sk, HZ / 10);
3938 } else if (sk->sk_state == BT_CONNECT2) {
3939 struct l2cap_conn_rsp rsp;
3943 sk->sk_state = BT_CONFIG;
3944 result = L2CAP_CR_SUCCESS;
3946 sk->sk_state = BT_DISCONN;
3947 l2cap_sock_set_timer(sk, HZ / 10);
3948 result = L2CAP_CR_SEC_BLOCK;
3951 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3952 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3953 rsp.result = cpu_to_le16(result);
3954 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3955 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
3962 read_unlock(&conn->chan_lock);
3967 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3969 struct l2cap_conn *conn = hcon->l2cap_data;
3972 conn = l2cap_conn_add(hcon, 0);
3977 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3979 if (!(flags & ACL_CONT)) {
3980 struct l2cap_hdr *hdr;
3981 struct l2cap_chan *chan;
3986 BT_ERR("Unexpected start frame (len %d)", skb->len);
3987 kfree_skb(conn->rx_skb);
3988 conn->rx_skb = NULL;
3990 l2cap_conn_unreliable(conn, ECOMM);
3993 /* Start fragment always begin with Basic L2CAP header */
3994 if (skb->len < L2CAP_HDR_SIZE) {
3995 BT_ERR("Frame is too short (len %d)", skb->len);
3996 l2cap_conn_unreliable(conn, ECOMM);
4000 hdr = (struct l2cap_hdr *) skb->data;
4001 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4002 cid = __le16_to_cpu(hdr->cid);
4004 if (len == skb->len) {
4005 /* Complete frame received */
4006 l2cap_recv_frame(conn, skb);
4010 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4012 if (skb->len > len) {
4013 BT_ERR("Frame is too long (len %d, expected len %d)",
4015 l2cap_conn_unreliable(conn, ECOMM);
4019 chan = l2cap_get_chan_by_scid(conn, cid);
4021 if (chan && chan->sk) {
4022 struct sock *sk = chan->sk;
4024 if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
4025 BT_ERR("Frame exceeding recv MTU (len %d, "
4027 l2cap_pi(sk)->imtu);
4029 l2cap_conn_unreliable(conn, ECOMM);
4035 /* Allocate skb for the complete frame (with header) */
4036 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4040 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4042 conn->rx_len = len - skb->len;
4044 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4046 if (!conn->rx_len) {
4047 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4048 l2cap_conn_unreliable(conn, ECOMM);
4052 if (skb->len > conn->rx_len) {
4053 BT_ERR("Fragment is too long (len %d, expected %d)",
4054 skb->len, conn->rx_len);
4055 kfree_skb(conn->rx_skb);
4056 conn->rx_skb = NULL;
4058 l2cap_conn_unreliable(conn, ECOMM);
4062 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4064 conn->rx_len -= skb->len;
4066 if (!conn->rx_len) {
4067 /* Complete frame received */
4068 l2cap_recv_frame(conn, conn->rx_skb);
4069 conn->rx_skb = NULL;
4078 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4081 struct hlist_node *node;
4083 read_lock_bh(&l2cap_sk_list.lock);
4085 sk_for_each(sk, node, &l2cap_sk_list.head) {
4086 struct l2cap_pinfo *pi = l2cap_pi(sk);
4088 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4089 batostr(&bt_sk(sk)->src),
4090 batostr(&bt_sk(sk)->dst),
4091 sk->sk_state, __le16_to_cpu(pi->psm),
4093 pi->imtu, pi->omtu, pi->sec_level,
4097 read_unlock_bh(&l2cap_sk_list.lock);
4102 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4104 return single_open(file, l2cap_debugfs_show, inode->i_private);
4107 static const struct file_operations l2cap_debugfs_fops = {
4108 .open = l2cap_debugfs_open,
4110 .llseek = seq_lseek,
4111 .release = single_release,
4114 static struct dentry *l2cap_debugfs;
4116 static struct hci_proto l2cap_hci_proto = {
4118 .id = HCI_PROTO_L2CAP,
4119 .connect_ind = l2cap_connect_ind,
4120 .connect_cfm = l2cap_connect_cfm,
4121 .disconn_ind = l2cap_disconn_ind,
4122 .disconn_cfm = l2cap_disconn_cfm,
4123 .security_cfm = l2cap_security_cfm,
4124 .recv_acldata = l2cap_recv_acldata
4127 int __init l2cap_init(void)
4131 err = l2cap_init_sockets();
4135 _busy_wq = create_singlethread_workqueue("l2cap");
4141 err = hci_register_proto(&l2cap_hci_proto);
4143 BT_ERR("L2CAP protocol registration failed");
4144 bt_sock_unregister(BTPROTO_L2CAP);
4149 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4150 bt_debugfs, NULL, &l2cap_debugfs_fops);
4152 BT_ERR("Failed to create L2CAP debug file");
4158 destroy_workqueue(_busy_wq);
4159 l2cap_cleanup_sockets();
4163 void l2cap_exit(void)
4165 debugfs_remove(l2cap_debugfs);
4167 flush_workqueue(_busy_wq);
4168 destroy_workqueue(_busy_wq);
4170 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4171 BT_ERR("L2CAP protocol unregistration failed");
4173 l2cap_cleanup_sockets();
4176 module_param(disable_ertm, bool, 0644);
4177 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");