/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>
#include <linux/filter.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#define LE_FLOWCTL_MAX_CREDITS 65535

bool disable_ertm;
bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
static void l2cap_retrans_timeout(struct work_struct *work);
static void l2cap_monitor_timeout(struct work_struct *work);
static void l2cap_ack_timeout(struct work_struct *work);
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type == LE_LINK) {
		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}

static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}

/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
/* Find channel with given SCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
			continue;

		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}

static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
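/* Worked example of the scheme implemented below: l2cap_seq_list_init()
 * with size 10 rounds the allocation up to 16 (mask = 15). Appending
 * 3, 9 and then 5 yields head = 3, tail = 5 and
 *
 *	list[3 & 15] = 9, list[9 & 15] = 5, list[5 & 15] = L2CAP_SEQ_LIST_TAIL
 *
 * so l2cap_seq_list_contains() only has to test list[seq & mask] against
 * L2CAP_SEQ_LIST_CLEAR, and l2cap_seq_list_pop() follows list[head & mask]
 * to reach the next element.
 */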
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}

struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);

static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}

struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}

void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}
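/* Worked example: with imtu = 512, mps = 64 and rx_avail == -1 (receive
 * buffer space unknown), 512 / 64 + 1 = 9 credits are issued. Once
 * rx_avail is known to be 200 with a 50-byte partial SDU pending,
 * DIV_ROUND_UP(200 - 50, 64) = 3 credits are issued instead.
 */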
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}

static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}

void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}

void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);

static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}

static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}

void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->chan_lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->chan_lock);
}
EXPORT_SYMBOL_GPL(l2cap_chan_list);
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}

static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					break;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}

/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}

static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}

static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}

static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}

static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}

static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return true;

	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}

void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}

static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan;
	struct pid *pid;
	int count;
};

static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}

static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm = chan->psm;
	data.pdu.req.mtu = cpu_to_le16(chan->imtu);
	data.pdu.req.mps = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0] = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}

static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}

static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}

static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
{
	/* The minimum encryption key size needs to be enforced by the
	 * host stack before establishing any L2CAP connections. The
	 * specification in theory allows a minimum of 1, but to align
	 * BR/EDR and LE transports, a minimum of 7 is chosen.
	 *
	 * This check might also be called for unencrypted connections
	 * that have no key size requirements. Ensure that the link is
	 * actually encrypted before enforcing a key size.
	 */
	int min_key_size = hcon->hdev->min_enc_key_size;

	/* On FIPS security level, key size must be 16 bytes */
	if (hcon->sec_level == BT_SECURITY_FIPS)
		return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
			hcon->enc_key_size == 16);

	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
		hcon->enc_key_size >= min_key_size);
}
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}

static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}

static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}

static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}

static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
/* External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration, and the ->remove callback is called
 * during unregistration.
 * An l2cap_user object is unregistered either explicitly or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);

void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
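/* Minimal usage sketch (hypothetical caller, not part of this file): a
 * module that already owns a conn reference via l2cap_conn_get() could
 * hook a connection as shown below; between ->probe and ->remove,
 * conn->hcon and conn->hchan remain valid. Note that user->list must be
 * an initialized empty list head, since l2cap_register_user() rejects a
 * non-empty one:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	INIT_LIST_HEAD(&my_user.list);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 */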
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}

static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}

static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}

struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);

void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}

static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		 * read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}

static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
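/* Worked example: with ack_win = 8 the mul/div-free computation above
 * evaluates threshold = (8 + (8 << 1)) >> 2 = 6, i.e. 3/4 of the window,
 * so an RR acknowledgement goes out once 6 received frames are unacked.
 */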
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}

static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
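/* Worked example: a 2000-byte SDU with an effective pdu_len of 672 is
 * queued as three I-frames: L2CAP_SAR_START (whose payload is preceded by
 * the 2-byte SDU length field of 2000), one L2CAP_SAR_CONTINUE frame, and
 * a final L2CAP_SAR_END frame carrying the remaining 656 bytes.
 */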
2423 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2425 size_t len, u16 sdulen)
2427 struct l2cap_conn *conn = chan->conn;
2428 struct sk_buff *skb;
2429 int err, count, hlen;
2430 struct l2cap_hdr *lh;
2432 BT_DBG("chan %p len %zu", chan, len);
2435 return ERR_PTR(-ENOTCONN);
2437 hlen = L2CAP_HDR_SIZE;
2440 hlen += L2CAP_SDULEN_SIZE;
2442 count = min_t(unsigned int, (conn->mtu - hlen), len);
2444 skb = chan->ops->alloc_skb(chan, hlen, count,
2445 msg->msg_flags & MSG_DONTWAIT);
2449 /* Create L2CAP header */
2450 lh = skb_put(skb, L2CAP_HDR_SIZE);
2451 lh->cid = cpu_to_le16(chan->dcid);
2452 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2455 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2457 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2458 if (unlikely(err < 0)) {
2460 return ERR_PTR(err);
2466 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2467 struct sk_buff_head *seg_queue,
2468 struct msghdr *msg, size_t len)
2470 struct sk_buff *skb;
2474 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2477 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2483 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2485 __skb_queue_purge(seg_queue);
2486 return PTR_ERR(skb);
2489 __skb_queue_tail(seg_queue, skb);
2495 pdu_len += L2CAP_SDULEN_SIZE;
2502 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2506 BT_DBG("chan %p", chan);
2508 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2509 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2514 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2515 skb_queue_len(&chan->tx_q));
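/* l2cap_chan_send() dispatches on channel type and mode:
 * connectionless channels emit a single PDU; LE/enhanced credit-based
 * modes segment the SDU and queue it behind tx_credits; basic mode
 * sends one unsegmented PDU; ERTM and streaming segment first, then
 * hand the queue to the TX state machine or send it immediately.
 *
 * Minimal caller sketch (editor's illustration modelled on in-kernel
 * users; buf and len are hypothetical):
 *
 *	struct kvec iv = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = { 0 };
 *
 *	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, len);
 *	err = l2cap_chan_send(chan, &msg, len);
 */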
2518 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2520 struct sk_buff *skb;
2522 struct sk_buff_head seg_queue;
2527 /* Connectionless channel */
2528 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2529 skb = l2cap_create_connless_pdu(chan, msg, len);
2531 return PTR_ERR(skb);
2533 l2cap_do_send(chan, skb);
2537 switch (chan->mode) {
2538 case L2CAP_MODE_LE_FLOWCTL:
2539 case L2CAP_MODE_EXT_FLOWCTL:
2540 /* Check outgoing MTU */
2541 if (len > chan->omtu)
2544 __skb_queue_head_init(&seg_queue);
2546 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2548 if (chan->state != BT_CONNECTED) {
2549 __skb_queue_purge(&seg_queue);
2556 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2558 l2cap_le_flowctl_send(chan);
2560 if (!chan->tx_credits)
2561 chan->ops->suspend(chan);
2567 case L2CAP_MODE_BASIC:
2568 /* Check outgoing MTU */
2569 if (len > chan->omtu)
2572 /* Create a basic PDU */
2573 skb = l2cap_create_basic_pdu(chan, msg, len);
2575 return PTR_ERR(skb);
2577 l2cap_do_send(chan, skb);
2581 case L2CAP_MODE_ERTM:
2582 case L2CAP_MODE_STREAMING:
2583 /* Check outgoing MTU */
2584 if (len > chan->omtu) {
2589 __skb_queue_head_init(&seg_queue);
2591 /* Do segmentation before calling in to the state machine,
2592 * since it's possible to block while waiting for memory
2595 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2600 if (chan->mode == L2CAP_MODE_ERTM)
2601 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2603 l2cap_streaming_send(chan, &seg_queue);
2607 /* If the skbs were not queued for sending, they'll still be in
2608 * seg_queue and need to be purged.
2610 __skb_queue_purge(&seg_queue);
2614 BT_DBG("bad mode %1.1x", chan->mode);
2620 EXPORT_SYMBOL_GPL(l2cap_chan_send);
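/* Selective reject (SREJ) recovery: for each sequence number between
 * expected_tx_seq and the received txseq that is not already buffered
 * in srej_q, send an SREJ S-frame asking the peer to retransmit just
 * that I-frame, and record it on srej_list so it is not requested
 * twice.
 */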
2622 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2624 struct l2cap_ctrl control;
2627 BT_DBG("chan %p, txseq %u", chan, txseq);
2629 memset(&control, 0, sizeof(control));
2631 control.super = L2CAP_SUPER_SREJ;
2633 for (seq = chan->expected_tx_seq; seq != txseq;
2634 seq = __next_seq(chan, seq)) {
2635 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2636 control.reqseq = seq;
2637 l2cap_send_sframe(chan, &control);
2638 l2cap_seq_list_append(&chan->srej_list, seq);
2642 chan->expected_tx_seq = __next_seq(chan, txseq);
2645 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2647 struct l2cap_ctrl control;
2649 BT_DBG("chan %p", chan);
2651 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2654 memset(&control, 0, sizeof(control));
2656 control.super = L2CAP_SUPER_SREJ;
2657 control.reqseq = chan->srej_list.tail;
2658 l2cap_send_sframe(chan, &control);
2661 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2663 struct l2cap_ctrl control;
2667 BT_DBG("chan %p, txseq %u", chan, txseq);
2669 memset(&control, 0, sizeof(control));
2671 control.super = L2CAP_SUPER_SREJ;
2673 /* Capture initial list head to allow only one pass through the list. */
2674 initial_head = chan->srej_list.head;
2677 seq = l2cap_seq_list_pop(&chan->srej_list);
2678 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2681 control.reqseq = seq;
2682 l2cap_send_sframe(chan, &control);
2683 l2cap_seq_list_append(&chan->srej_list, seq);
2684 } while (chan->srej_list.head != initial_head);
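/* ReqSeq processing: a received ReqSeq of N acknowledges every
 * outstanding I-frame with TxSeq before N, so each acked skb is
 * unlinked from tx_q and freed; the retransmission timer stops once
 * no unacked frames remain.
 */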
2687 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2689 struct sk_buff *acked_skb;
2692 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2694 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2697 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2698 chan->expected_ack_seq, chan->unacked_frames);
2700 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2701 ackseq = __next_seq(chan, ackseq)) {
2703 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2705 skb_unlink(acked_skb, &chan->tx_q);
2706 kfree_skb(acked_skb);
2707 chan->unacked_frames--;
2711 chan->expected_ack_seq = reqseq;
2713 if (chan->unacked_frames == 0)
2714 __clear_retrans_timer(chan);
2716 BT_DBG("unacked_frames %u", chan->unacked_frames);
2719 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2721 BT_DBG("chan %p", chan);
2723 chan->expected_tx_seq = chan->buffer_seq;
2724 l2cap_seq_list_clear(&chan->srej_list);
2725 skb_queue_purge(&chan->srej_q);
2726 chan->rx_state = L2CAP_RX_STATE_RECV;
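/* ERTM TX state machine: XMIT is the normal transmit state, while
 * WAIT_F is entered after sending a poll (P=1) S-frame and merely
 * queues new data until a final (F=1) response arrives or the monitor
 * timer exhausts its retries.
 */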
2729 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2730 struct l2cap_ctrl *control,
2731 struct sk_buff_head *skbs, u8 event)
2733 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2737 case L2CAP_EV_DATA_REQUEST:
2738 if (chan->tx_send_head == NULL)
2739 chan->tx_send_head = skb_peek(skbs);
2741 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2742 l2cap_ertm_send(chan);
2744 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2745 BT_DBG("Enter LOCAL_BUSY");
2746 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2748 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2749 /* The SREJ_SENT state must be aborted if we are to
2750 * enter the LOCAL_BUSY state.
2752 l2cap_abort_rx_srej_sent(chan);
2755 l2cap_send_ack(chan);
2758 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2759 BT_DBG("Exit LOCAL_BUSY");
2760 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2762 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2763 struct l2cap_ctrl local_control;
2765 memset(&local_control, 0, sizeof(local_control));
2766 local_control.sframe = 1;
2767 local_control.super = L2CAP_SUPER_RR;
2768 local_control.poll = 1;
2769 local_control.reqseq = chan->buffer_seq;
2770 l2cap_send_sframe(chan, &local_control);
2772 chan->retry_count = 1;
2773 __set_monitor_timer(chan);
2774 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2777 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2778 l2cap_process_reqseq(chan, control->reqseq);
2780 case L2CAP_EV_EXPLICIT_POLL:
2781 l2cap_send_rr_or_rnr(chan, 1);
2782 chan->retry_count = 1;
2783 __set_monitor_timer(chan);
2784 __clear_ack_timer(chan);
2785 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2787 case L2CAP_EV_RETRANS_TO:
2788 l2cap_send_rr_or_rnr(chan, 1);
2789 chan->retry_count = 1;
2790 __set_monitor_timer(chan);
2791 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2793 case L2CAP_EV_RECV_FBIT:
2794 /* Nothing to process */
2801 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2802 struct l2cap_ctrl *control,
2803 struct sk_buff_head *skbs, u8 event)
2805 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2809 case L2CAP_EV_DATA_REQUEST:
2810 if (chan->tx_send_head == NULL)
2811 chan->tx_send_head = skb_peek(skbs);
2812 /* Queue data, but don't send. */
2813 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2815 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2816 BT_DBG("Enter LOCAL_BUSY");
2817 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2819 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2820 /* The SREJ_SENT state must be aborted if we are to
2821 * enter the LOCAL_BUSY state.
2823 l2cap_abort_rx_srej_sent(chan);
2826 l2cap_send_ack(chan);
2829 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2830 BT_DBG("Exit LOCAL_BUSY");
2831 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2833 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2834 struct l2cap_ctrl local_control;
2835 memset(&local_control, 0, sizeof(local_control));
2836 local_control.sframe = 1;
2837 local_control.super = L2CAP_SUPER_RR;
2838 local_control.poll = 1;
2839 local_control.reqseq = chan->buffer_seq;
2840 l2cap_send_sframe(chan, &local_control);
2842 chan->retry_count = 1;
2843 __set_monitor_timer(chan);
2844 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2847 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2848 l2cap_process_reqseq(chan, control->reqseq);
2851 case L2CAP_EV_RECV_FBIT:
2852 if (control && control->final) {
2853 __clear_monitor_timer(chan);
2854 if (chan->unacked_frames > 0)
2855 __set_retrans_timer(chan);
2856 chan->retry_count = 0;
2857 chan->tx_state = L2CAP_TX_STATE_XMIT;
2858 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2861 case L2CAP_EV_EXPLICIT_POLL:
2864 case L2CAP_EV_MONITOR_TO:
2865 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2866 l2cap_send_rr_or_rnr(chan, 1);
2867 __set_monitor_timer(chan);
2868 chan->retry_count++;
2870 l2cap_send_disconn_req(chan, ECONNABORTED);
2878 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2879 struct sk_buff_head *skbs, u8 event)
2881 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2882 chan, control, skbs, event, chan->tx_state);
2884 switch (chan->tx_state) {
2885 case L2CAP_TX_STATE_XMIT:
2886 l2cap_tx_state_xmit(chan, control, skbs, event);
2888 case L2CAP_TX_STATE_WAIT_F:
2889 l2cap_tx_state_wait_f(chan, control, skbs, event);
2897 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2898 struct l2cap_ctrl *control)
2900 BT_DBG("chan %p, control %p", chan, control);
2901 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2904 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2905 struct l2cap_ctrl *control)
2907 BT_DBG("chan %p, control %p", chan, control);
2908 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2911 /* Copy frame to all raw sockets on that connection */
2912 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2914 struct sk_buff *nskb;
2915 struct l2cap_chan *chan;
2917 BT_DBG("conn %p", conn);
2919 mutex_lock(&conn->chan_lock);
2921 list_for_each_entry(chan, &conn->chan_l, list) {
2922 if (chan->chan_type != L2CAP_CHAN_RAW)
2925 /* Don't send frame to the channel it came from */
2926 if (bt_cb(skb)->l2cap.chan == chan)
2929 nskb = skb_clone(skb, GFP_KERNEL);
2932 if (chan->ops->recv(chan, nskb))
2936 mutex_unlock(&conn->chan_lock);
2939 /* ---- L2CAP signalling commands ---- */
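/* On the wire a signalling PDU is an L2CAP header (length plus the
 * fixed signalling CID) followed by a command header (code, ident,
 * length) and the command payload; if the total exceeds conn->mtu,
 * the remainder is chained as headerless continuation fragments on
 * the skb frag_list.
 */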
2940 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2941 u8 ident, u16 dlen, void *data)
2943 struct sk_buff *skb, **frag;
2944 struct l2cap_cmd_hdr *cmd;
2945 struct l2cap_hdr *lh;
2948 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2949 conn, code, ident, dlen);
2951 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2954 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2955 count = min_t(unsigned int, conn->mtu, len);
2957 skb = bt_skb_alloc(count, GFP_KERNEL);
2961 lh = skb_put(skb, L2CAP_HDR_SIZE);
2962 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2964 if (conn->hcon->type == LE_LINK)
2965 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2967 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2969 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2972 cmd->len = cpu_to_le16(dlen);
2975 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2976 skb_put_data(skb, data, count);
2982 /* Continuation fragments (no L2CAP header) */
2983 frag = &skb_shinfo(skb)->frag_list;
2985 count = min_t(unsigned int, conn->mtu, len);
2987 *frag = bt_skb_alloc(count, GFP_KERNEL);
2991 skb_put_data(*frag, data, count);
2996 frag = &(*frag)->next;
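/* Configuration options are type-length-value triples. The top bit of
 * the type octet (L2CAP_CONF_HINT) marks an option that may be skipped
 * when unrecognised instead of forcing a rejection; 1-, 2- and 4-octet
 * values are decoded inline, longer ones are returned by pointer.
 *
 * Wire example (editor's illustration): MTU = 672 encodes as the four
 * octets 0x01 0x02 0xa0 0x02 -- type L2CAP_CONF_MTU, length 2,
 * little-endian value 0x02a0.
 */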
3006 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3009 struct l2cap_conf_opt *opt = *ptr;
3012 len = L2CAP_CONF_OPT_SIZE + opt->len;
3020 *val = *((u8 *) opt->val);
3024 *val = get_unaligned_le16(opt->val);
3028 *val = get_unaligned_le32(opt->val);
3032 *val = (unsigned long) opt->val;
3036 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3040 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3042 struct l2cap_conf_opt *opt = *ptr;
3044 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3046 if (size < L2CAP_CONF_OPT_SIZE + len)
3054 *((u8 *) opt->val) = val;
3058 put_unaligned_le16(val, opt->val);
3062 put_unaligned_le32(val, opt->val);
3066 memcpy(opt->val, (void *) val, len);
3070 *ptr += L2CAP_CONF_OPT_SIZE + len;
3073 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3075 struct l2cap_conf_efs efs;
3077 switch (chan->mode) {
3078 case L2CAP_MODE_ERTM:
3079 efs.id = chan->local_id;
3080 efs.stype = chan->local_stype;
3081 efs.msdu = cpu_to_le16(chan->local_msdu);
3082 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3083 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3084 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3087 case L2CAP_MODE_STREAMING:
3089 efs.stype = L2CAP_SERV_BESTEFFORT;
3090 efs.msdu = cpu_to_le16(chan->local_msdu);
3091 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3100 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3101 (unsigned long) &efs, size);
3104 static void l2cap_ack_timeout(struct work_struct *work)
3106 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3110 BT_DBG("chan %p", chan);
3112 l2cap_chan_lock(chan);
3114 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3115 chan->last_acked_seq);
3118 l2cap_send_rr_or_rnr(chan, 0);
3120 l2cap_chan_unlock(chan);
3121 l2cap_chan_put(chan);
3124 int l2cap_ertm_init(struct l2cap_chan *chan)
3128 chan->next_tx_seq = 0;
3129 chan->expected_tx_seq = 0;
3130 chan->expected_ack_seq = 0;
3131 chan->unacked_frames = 0;
3132 chan->buffer_seq = 0;
3133 chan->frames_sent = 0;
3134 chan->last_acked_seq = 0;
3136 chan->sdu_last_frag = NULL;
3139 skb_queue_head_init(&chan->tx_q);
3141 if (chan->mode != L2CAP_MODE_ERTM)
3144 chan->rx_state = L2CAP_RX_STATE_RECV;
3145 chan->tx_state = L2CAP_TX_STATE_XMIT;
3147 skb_queue_head_init(&chan->srej_q);
3149 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3153 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3155 l2cap_seq_list_free(&chan->srej_list);
3160 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3163 case L2CAP_MODE_STREAMING:
3164 case L2CAP_MODE_ERTM:
3165 if (l2cap_mode_supported(mode, remote_feat_mask))
3169 return L2CAP_MODE_BASIC;
3173 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3175 return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3178 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3180 return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3183 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3184 struct l2cap_conf_rfc *rfc)
3186 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3187 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3190 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3192 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3193 __l2cap_ews_supported(chan->conn)) {
3194 /* use extended control field */
3195 set_bit(FLAG_EXT_CTRL, &chan->flags);
3196 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3198 chan->tx_win = min_t(u16, chan->tx_win,
3199 L2CAP_DEFAULT_TX_WINDOW);
3200 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3202 chan->ack_win = chan->tx_win;
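/* Auto-select the incoming MTU from the largest EDR packet type the
 * ACL link may use. The EDR bits in pkt_type are exclusion flags, so
 * a clear bit means the packet type is allowed; each allowed step
 * raises imtu to that packet's information bytes minus the 2-byte
 * payload header (e.g. 2-DH1: 56 - 2 = 54).
 */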
3205 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3207 struct hci_conn *conn = chan->conn->hcon;
3209 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3211 /* The 2-DH1 packet has between 2 and 56 information bytes
3212 * (including the 2-byte payload header)
3214 if (!(conn->pkt_type & HCI_2DH1))
3217 /* The 3-DH1 packet has between 2 and 85 information bytes
3218 * (including the 2-byte payload header)
3220 if (!(conn->pkt_type & HCI_3DH1))
3223 /* The 2-DH3 packet has between 2 and 369 information bytes
3224 * (including the 2-byte payload header)
3226 if (!(conn->pkt_type & HCI_2DH3))
3229 /* The 3-DH3 packet has between 2 and 554 information bytes
3230 * (including the 2-byte payload header)
3232 if (!(conn->pkt_type & HCI_3DH3))
3235 /* The 2-DH5 packet has between 2 and 681 information bytes
3236 * (including the 2-byte payload header)
3238 if (!(conn->pkt_type & HCI_2DH5))
3241 /* The 3-DH5 packet has between 2 and 1023 information bytes
3242 * (including the 2-byte payload header)
3244 if (!(conn->pkt_type & HCI_3DH5))
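/* Build an outgoing Configuration Request: advertise a non-default MTU
 * first, then an RFC option describing the requested mode (basic, ERTM
 * or streaming) and, where supported, the EFS, extended window size
 * and FCS options.
 */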
3248 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3250 struct l2cap_conf_req *req = data;
3251 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3252 void *ptr = req->data;
3253 void *endptr = data + data_size;
3256 BT_DBG("chan %p", chan);
3258 if (chan->num_conf_req || chan->num_conf_rsp)
3261 switch (chan->mode) {
3262 case L2CAP_MODE_STREAMING:
3263 case L2CAP_MODE_ERTM:
3264 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3267 if (__l2cap_efs_supported(chan->conn))
3268 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3272 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3277 if (chan->imtu != L2CAP_DEFAULT_MTU) {
3279 l2cap_mtu_auto(chan);
3280 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3284 switch (chan->mode) {
3285 case L2CAP_MODE_BASIC:
3289 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3290 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3293 rfc.mode = L2CAP_MODE_BASIC;
3295 rfc.max_transmit = 0;
3296 rfc.retrans_timeout = 0;
3297 rfc.monitor_timeout = 0;
3298 rfc.max_pdu_size = 0;
3300 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3301 (unsigned long) &rfc, endptr - ptr);
3304 case L2CAP_MODE_ERTM:
3305 rfc.mode = L2CAP_MODE_ERTM;
3306 rfc.max_transmit = chan->max_tx;
3308 __l2cap_set_ertm_timeouts(chan, &rfc);
3310 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3311 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3313 rfc.max_pdu_size = cpu_to_le16(size);
3315 l2cap_txwin_setup(chan);
3317 rfc.txwin_size = min_t(u16, chan->tx_win,
3318 L2CAP_DEFAULT_TX_WINDOW);
3320 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3321 (unsigned long) &rfc, endptr - ptr);
3323 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3324 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3326 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3327 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3328 chan->tx_win, endptr - ptr);
3330 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3331 if (chan->fcs == L2CAP_FCS_NONE ||
3332 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3333 chan->fcs = L2CAP_FCS_NONE;
3334 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3335 chan->fcs, endptr - ptr);
3339 case L2CAP_MODE_STREAMING:
3340 l2cap_txwin_setup(chan);
3341 rfc.mode = L2CAP_MODE_STREAMING;
3343 rfc.max_transmit = 0;
3344 rfc.retrans_timeout = 0;
3345 rfc.monitor_timeout = 0;
3347 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3348 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3350 rfc.max_pdu_size = cpu_to_le16(size);
3352 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3353 (unsigned long) &rfc, endptr - ptr);
3355 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3356 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3358 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3359 if (chan->fcs == L2CAP_FCS_NONE ||
3360 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3361 chan->fcs = L2CAP_FCS_NONE;
3362 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3363 chan->fcs, endptr - ptr);
3368 req->dcid = cpu_to_le16(chan->dcid);
3369 req->flags = cpu_to_le16(0);
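/* Parse the peer's accumulated Configuration Request and build our
 * response in place: acceptable options are echoed back, options we
 * cannot honour flip the result to L2CAP_CONF_UNACCEPT together with
 * our counter-proposal, and unknown non-hint options produce
 * L2CAP_CONF_UNKNOWN listing the offending types.
 */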
3374 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3376 struct l2cap_conf_rsp *rsp = data;
3377 void *ptr = rsp->data;
3378 void *endptr = data + data_size;
3379 void *req = chan->conf_req;
3380 int len = chan->conf_len;
3381 int type, hint, olen;
3383 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3384 struct l2cap_conf_efs efs;
3386 u16 mtu = L2CAP_DEFAULT_MTU;
3387 u16 result = L2CAP_CONF_SUCCESS;
3390 BT_DBG("chan %p", chan);
3392 while (len >= L2CAP_CONF_OPT_SIZE) {
3393 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3397 hint = type & L2CAP_CONF_HINT;
3398 type &= L2CAP_CONF_MASK;
3401 case L2CAP_CONF_MTU:
3407 case L2CAP_CONF_FLUSH_TO:
3410 chan->flush_to = val;
3413 case L2CAP_CONF_QOS:
3416 case L2CAP_CONF_RFC:
3417 if (olen != sizeof(rfc))
3419 memcpy(&rfc, (void *) val, olen);
3422 case L2CAP_CONF_FCS:
3425 if (val == L2CAP_FCS_NONE)
3426 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3429 case L2CAP_CONF_EFS:
3430 if (olen != sizeof(efs))
3433 memcpy(&efs, (void *) val, olen);
3436 case L2CAP_CONF_EWS:
3439 return -ECONNREFUSED;
3444 result = L2CAP_CONF_UNKNOWN;
3445 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3450 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3453 switch (chan->mode) {
3454 case L2CAP_MODE_STREAMING:
3455 case L2CAP_MODE_ERTM:
3456 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3457 chan->mode = l2cap_select_mode(rfc.mode,
3458 chan->conn->feat_mask);
3463 if (__l2cap_efs_supported(chan->conn))
3464 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3466 return -ECONNREFUSED;
3469 if (chan->mode != rfc.mode)
3470 return -ECONNREFUSED;
3476 if (chan->mode != rfc.mode) {
3477 result = L2CAP_CONF_UNACCEPT;
3478 rfc.mode = chan->mode;
3480 if (chan->num_conf_rsp == 1)
3481 return -ECONNREFUSED;
3483 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3484 (unsigned long) &rfc, endptr - ptr);
3487 if (result == L2CAP_CONF_SUCCESS) {
3488 /* Configure output options and let the other side know
3489 * which ones we don't like. */
3491 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3492 result = L2CAP_CONF_UNACCEPT;
3495 set_bit(CONF_MTU_DONE, &chan->conf_state);
3497 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3500 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3501 efs.stype != L2CAP_SERV_NOTRAFIC &&
3502 efs.stype != chan->local_stype) {
3504 result = L2CAP_CONF_UNACCEPT;
3506 if (chan->num_conf_req >= 1)
3507 return -ECONNREFUSED;
3509 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3511 (unsigned long) &efs, endptr - ptr);
3513 /* Send PENDING Conf Rsp */
3514 result = L2CAP_CONF_PENDING;
3515 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3520 case L2CAP_MODE_BASIC:
3521 chan->fcs = L2CAP_FCS_NONE;
3522 set_bit(CONF_MODE_DONE, &chan->conf_state);
3525 case L2CAP_MODE_ERTM:
3526 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3527 chan->remote_tx_win = rfc.txwin_size;
3529 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3531 chan->remote_max_tx = rfc.max_transmit;
3533 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3534 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3535 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3536 rfc.max_pdu_size = cpu_to_le16(size);
3537 chan->remote_mps = size;
3539 __l2cap_set_ertm_timeouts(chan, &rfc);
3541 set_bit(CONF_MODE_DONE, &chan->conf_state);
3543 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3544 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3547 test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3548 chan->remote_id = efs.id;
3549 chan->remote_stype = efs.stype;
3550 chan->remote_msdu = le16_to_cpu(efs.msdu);
3551 chan->remote_flush_to =
3552 le32_to_cpu(efs.flush_to);
3553 chan->remote_acc_lat =
3554 le32_to_cpu(efs.acc_lat);
3555 chan->remote_sdu_itime =
3556 le32_to_cpu(efs.sdu_itime);
3557 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3559 (unsigned long) &efs, endptr - ptr);
3563 case L2CAP_MODE_STREAMING:
3564 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3565 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3566 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3567 rfc.max_pdu_size = cpu_to_le16(size);
3568 chan->remote_mps = size;
3570 set_bit(CONF_MODE_DONE, &chan->conf_state);
3572 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3573 (unsigned long) &rfc, endptr - ptr);
3578 result = L2CAP_CONF_UNACCEPT;
3580 memset(&rfc, 0, sizeof(rfc));
3581 rfc.mode = chan->mode;
3584 if (result == L2CAP_CONF_SUCCESS)
3585 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3587 rsp->scid = cpu_to_le16(chan->dcid);
3588 rsp->result = cpu_to_le16(result);
3589 rsp->flags = cpu_to_le16(0);
3594 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3595 void *data, size_t size, u16 *result)
3597 struct l2cap_conf_req *req = data;
3598 void *ptr = req->data;
3599 void *endptr = data + size;
3602 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3603 struct l2cap_conf_efs efs;
3605 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3607 while (len >= L2CAP_CONF_OPT_SIZE) {
3608 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3613 case L2CAP_CONF_MTU:
3616 if (val < L2CAP_DEFAULT_MIN_MTU) {
3617 *result = L2CAP_CONF_UNACCEPT;
3618 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3621 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3625 case L2CAP_CONF_FLUSH_TO:
3628 chan->flush_to = val;
3629 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3630 chan->flush_to, endptr - ptr);
3633 case L2CAP_CONF_RFC:
3634 if (olen != sizeof(rfc))
3636 memcpy(&rfc, (void *)val, olen);
3637 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3638 rfc.mode != chan->mode)
3639 return -ECONNREFUSED;
3641 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3642 (unsigned long) &rfc, endptr - ptr);
3645 case L2CAP_CONF_EWS:
3648 chan->ack_win = min_t(u16, val, chan->ack_win);
3649 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3650 chan->tx_win, endptr - ptr);
3653 case L2CAP_CONF_EFS:
3654 if (olen != sizeof(efs))
3656 memcpy(&efs, (void *)val, olen);
3657 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3658 efs.stype != L2CAP_SERV_NOTRAFIC &&
3659 efs.stype != chan->local_stype)
3660 return -ECONNREFUSED;
3661 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3662 (unsigned long) &efs, endptr - ptr);
3665 case L2CAP_CONF_FCS:
3668 if (*result == L2CAP_CONF_PENDING)
3669 if (val == L2CAP_FCS_NONE)
3670 set_bit(CONF_RECV_NO_FCS,
3676 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3677 return -ECONNREFUSED;
3679 chan->mode = rfc.mode;
3681 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3683 case L2CAP_MODE_ERTM:
3684 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3685 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3686 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3687 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3688 chan->ack_win = min_t(u16, chan->ack_win,
3691 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3692 chan->local_msdu = le16_to_cpu(efs.msdu);
3693 chan->local_sdu_itime =
3694 le32_to_cpu(efs.sdu_itime);
3695 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3696 chan->local_flush_to =
3697 le32_to_cpu(efs.flush_to);
3701 case L2CAP_MODE_STREAMING:
3702 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3706 req->dcid = cpu_to_le16(chan->dcid);
3707 req->flags = cpu_to_le16(0);
3712 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3713 u16 result, u16 flags)
3715 struct l2cap_conf_rsp *rsp = data;
3716 void *ptr = rsp->data;
3718 BT_DBG("chan %p", chan);
3720 rsp->scid = cpu_to_le16(chan->dcid);
3721 rsp->result = cpu_to_le16(result);
3722 rsp->flags = cpu_to_le16(flags);
3727 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3729 struct l2cap_le_conn_rsp rsp;
3730 struct l2cap_conn *conn = chan->conn;
3732 BT_DBG("chan %p", chan);
3734 rsp.dcid = cpu_to_le16(chan->scid);
3735 rsp.mtu = cpu_to_le16(chan->imtu);
3736 rsp.mps = cpu_to_le16(chan->mps);
3737 rsp.credits = cpu_to_le16(chan->rx_credits);
3738 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3740 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3744 static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
3748 if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3751 switch (chan->state) {
3753 /* If the channel is still pending accept, add it to the result */
3759 /* If not connected or pending accept, it has been refused */
3760 *result = -ECONNREFUSED;
3765 struct l2cap_ecred_rsp_data {
3767 struct l2cap_ecred_conn_rsp_hdr rsp;
3768 __le16 scid[L2CAP_ECRED_MAX_CID];
3773 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3775 struct l2cap_ecred_rsp_data *rsp = data;
3776 struct l2cap_ecred_conn_rsp *rsp_flex =
3777 container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);
3779 if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3782 /* Reset ident so only one response is sent */
3785 /* Include all channels pending with the same ident */
3786 if (!rsp->pdu.rsp.result)
3787 rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
3789 l2cap_chan_del(chan, ECONNRESET);
3792 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3794 struct l2cap_conn *conn = chan->conn;
3795 struct l2cap_ecred_rsp_data data;
3796 u16 id = chan->ident;
3802 BT_DBG("chan %p id %d", chan, id);
3804 memset(&data, 0, sizeof(data));
3806 data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
3807 data.pdu.rsp.mps = cpu_to_le16(chan->mps);
3808 data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3809 data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3811 /* Verify that all channels are ready */
3812 __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
3818 data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
3820 /* Build response */
3821 __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
3823 l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
3824 sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
3828 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3830 struct l2cap_conn_rsp rsp;
3831 struct l2cap_conn *conn = chan->conn;
3835 rsp.scid = cpu_to_le16(chan->dcid);
3836 rsp.dcid = cpu_to_le16(chan->scid);
3837 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3838 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3839 rsp_code = L2CAP_CONN_RSP;
3841 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3843 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3845 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3848 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3849 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3850 chan->num_conf_req++;
3853 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3857 /* Use sane default values in case a misbehaving remote device
3858 * did not send an RFC or extended window size option.
3860 u16 txwin_ext = chan->ack_win;
3861 struct l2cap_conf_rfc rfc = {
3863 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3864 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3865 .max_pdu_size = cpu_to_le16(chan->imtu),
3866 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3869 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3871 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3874 while (len >= L2CAP_CONF_OPT_SIZE) {
3875 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3880 case L2CAP_CONF_RFC:
3881 if (olen != sizeof(rfc))
3883 memcpy(&rfc, (void *)val, olen);
3885 case L2CAP_CONF_EWS:
3894 case L2CAP_MODE_ERTM:
3895 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3896 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3897 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3898 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3899 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3901 chan->ack_win = min_t(u16, chan->ack_win,
3904 case L2CAP_MODE_STREAMING:
3905 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3909 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3910 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3913 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3915 if (cmd_len < sizeof(*rej))
3918 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3921 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3922 cmd->ident == conn->info_ident) {
3923 cancel_delayed_work(&conn->info_timer);
3925 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3926 conn->info_ident = 0;
3928 l2cap_conn_start(conn);
3934 static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
3935 u8 *data, u8 rsp_code)
3937 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3938 struct l2cap_conn_rsp rsp;
3939 struct l2cap_chan *chan = NULL, *pchan = NULL;
3940 int result, status = L2CAP_CS_NO_INFO;
3942 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3943 __le16 psm = req->psm;
3945 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3947 /* Check if we have a socket listening on this PSM */
3948 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3949 &conn->hcon->dst, ACL_LINK);
3951 result = L2CAP_CR_BAD_PSM;
3955 mutex_lock(&conn->chan_lock);
3956 l2cap_chan_lock(pchan);
3958 /* Check if the ACL is secure enough (if not SDP) */
3959 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3960 !hci_conn_check_link_mode(conn->hcon)) {
3961 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3962 result = L2CAP_CR_SEC_BLOCK;
3966 result = L2CAP_CR_NO_MEM;
3968 /* Check for valid dynamic CID range (as per Erratum 3253) */
3969 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
3970 result = L2CAP_CR_INVALID_SCID;
3974 /* Check if we already have a channel with that dcid */
3975 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3976 result = L2CAP_CR_SCID_IN_USE;
3980 chan = pchan->ops->new_connection(pchan);
3984 /* For certain devices (ex: HID mouse), support for authentication,
3985 * pairing and bonding is optional. For such devices, in order to avoid
3986 * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
3987 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3989 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3991 bacpy(&chan->src, &conn->hcon->src);
3992 bacpy(&chan->dst, &conn->hcon->dst);
3993 chan->src_type = bdaddr_src_type(conn->hcon);
3994 chan->dst_type = bdaddr_dst_type(conn->hcon);
3998 __l2cap_chan_add(conn, chan);
4002 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4004 chan->ident = cmd->ident;
4006 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4007 if (l2cap_chan_check_security(chan, false)) {
4008 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4009 l2cap_state_change(chan, BT_CONNECT2);
4010 result = L2CAP_CR_PEND;
4011 status = L2CAP_CS_AUTHOR_PEND;
4012 chan->ops->defer(chan);
4014 l2cap_state_change(chan, BT_CONFIG);
4015 result = L2CAP_CR_SUCCESS;
4016 status = L2CAP_CS_NO_INFO;
4019 l2cap_state_change(chan, BT_CONNECT2);
4020 result = L2CAP_CR_PEND;
4021 status = L2CAP_CS_AUTHEN_PEND;
4024 l2cap_state_change(chan, BT_CONNECT2);
4025 result = L2CAP_CR_PEND;
4026 status = L2CAP_CS_NO_INFO;
4030 rsp.scid = cpu_to_le16(scid);
4031 rsp.dcid = cpu_to_le16(dcid);
4032 rsp.result = cpu_to_le16(result);
4033 rsp.status = cpu_to_le16(status);
4034 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4039 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4040 struct l2cap_info_req info;
4041 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4043 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4044 conn->info_ident = l2cap_get_ident(conn);
4046 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4048 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4049 sizeof(info), &info);
4052 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4053 result == L2CAP_CR_SUCCESS) {
4055 set_bit(CONF_REQ_SENT, &chan->conf_state);
4056 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4057 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4058 chan->num_conf_req++;
4061 l2cap_chan_unlock(pchan);
4062 mutex_unlock(&conn->chan_lock);
4063 l2cap_chan_put(pchan);
4066 static int l2cap_connect_req(struct l2cap_conn *conn,
4067 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4069 struct hci_dev *hdev = conn->hcon->hdev;
4070 struct hci_conn *hcon = conn->hcon;
4072 if (cmd_len < sizeof(struct l2cap_conn_req))
4076 if (hci_dev_test_flag(hdev, HCI_MGMT))
4077 mgmt_device_connected(hdev, hcon, NULL, 0);
4078 hci_dev_unlock(hdev);
4080 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
4084 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4085 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4088 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4089 u16 scid, dcid, result, status;
4090 struct l2cap_chan *chan;
4094 if (cmd_len < sizeof(*rsp))
4097 scid = __le16_to_cpu(rsp->scid);
4098 dcid = __le16_to_cpu(rsp->dcid);
4099 result = __le16_to_cpu(rsp->result);
4100 status = __le16_to_cpu(rsp->status);
4102 if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
4103 dcid > L2CAP_CID_DYN_END))
4106 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4107 dcid, scid, result, status);
4109 mutex_lock(&conn->chan_lock);
4112 chan = __l2cap_get_chan_by_scid(conn, scid);
4118 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4125 chan = l2cap_chan_hold_unless_zero(chan);
4133 l2cap_chan_lock(chan);
4136 case L2CAP_CR_SUCCESS:
4137 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4142 l2cap_state_change(chan, BT_CONFIG);
4145 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4147 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4150 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4151 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4152 chan->num_conf_req++;
4156 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4160 l2cap_chan_del(chan, ECONNREFUSED);
4164 l2cap_chan_unlock(chan);
4165 l2cap_chan_put(chan);
4168 mutex_unlock(&conn->chan_lock);
4173 static inline void set_default_fcs(struct l2cap_chan *chan)
4175 /* FCS is enabled only in ERTM or streaming mode, if one or both
4176 * sides request it.
4177 */
4178 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4179 chan->fcs = L2CAP_FCS_NONE;
4180 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4181 chan->fcs = L2CAP_FCS_CRC16;
4184 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4185 u8 ident, u16 flags)
4187 struct l2cap_conn *conn = chan->conn;
4189 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4192 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4193 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4195 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4196 l2cap_build_conf_rsp(chan, data,
4197 L2CAP_CONF_SUCCESS, flags), data);
4200 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4203 struct l2cap_cmd_rej_cid rej;
4205 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4206 rej.scid = __cpu_to_le16(scid);
4207 rej.dcid = __cpu_to_le16(dcid);
4209 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4212 static inline int l2cap_config_req(struct l2cap_conn *conn,
4213 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4216 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4219 struct l2cap_chan *chan;
4222 if (cmd_len < sizeof(*req))
4225 dcid = __le16_to_cpu(req->dcid);
4226 flags = __le16_to_cpu(req->flags);
4228 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4230 chan = l2cap_get_chan_by_scid(conn, dcid);
4232 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4236 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4237 chan->state != BT_CONNECTED) {
4238 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4243 /* Reject if config buffer is too small. */
4244 len = cmd_len - sizeof(*req);
4245 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4246 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4247 l2cap_build_conf_rsp(chan, rsp,
4248 L2CAP_CONF_REJECT, flags), rsp);
4253 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4254 chan->conf_len += len;
4256 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4257 /* Incomplete config. Send empty response. */
4258 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4259 l2cap_build_conf_rsp(chan, rsp,
4260 L2CAP_CONF_SUCCESS, flags), rsp);
4264 /* Complete config. */
4265 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4267 l2cap_send_disconn_req(chan, ECONNRESET);
4271 chan->ident = cmd->ident;
4272 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4273 if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4274 chan->num_conf_rsp++;
4276 /* Reset config buffer. */
4279 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4282 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4283 set_default_fcs(chan);
4285 if (chan->mode == L2CAP_MODE_ERTM ||
4286 chan->mode == L2CAP_MODE_STREAMING)
4287 err = l2cap_ertm_init(chan);
4290 l2cap_send_disconn_req(chan, -err);
4292 l2cap_chan_ready(chan);
4297 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4299 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4300 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4301 chan->num_conf_req++;
4304 /* Got Conf Rsp PENDING from remote side and assume we sent
4305 * Conf Rsp PENDING in the code above. */
4306 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4307 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4309 /* check compatibility */
4311 /* Send rsp for BR/EDR channel */
4312 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4316 l2cap_chan_unlock(chan);
4317 l2cap_chan_put(chan);
4321 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4322 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4325 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4326 u16 scid, flags, result;
4327 struct l2cap_chan *chan;
4328 int len = cmd_len - sizeof(*rsp);
4331 if (cmd_len < sizeof(*rsp))
4334 scid = __le16_to_cpu(rsp->scid);
4335 flags = __le16_to_cpu(rsp->flags);
4336 result = __le16_to_cpu(rsp->result);
4338 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4341 chan = l2cap_get_chan_by_scid(conn, scid);
4346 case L2CAP_CONF_SUCCESS:
4347 l2cap_conf_rfc_get(chan, rsp->data, len);
4348 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4351 case L2CAP_CONF_PENDING:
4352 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4354 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4357 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4358 buf, sizeof(buf), &result);
4360 l2cap_send_disconn_req(chan, ECONNRESET);
4364 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
4368 case L2CAP_CONF_UNKNOWN:
4369 case L2CAP_CONF_UNACCEPT:
4370 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4373 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4374 l2cap_send_disconn_req(chan, ECONNRESET);
4378 /* throw out any old stored conf requests */
4379 result = L2CAP_CONF_SUCCESS;
4380 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4381 req, sizeof(req), &result);
4383 l2cap_send_disconn_req(chan, ECONNRESET);
4387 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4388 L2CAP_CONF_REQ, len, req);
4389 chan->num_conf_req++;
4390 if (result != L2CAP_CONF_SUCCESS)
4397 l2cap_chan_set_err(chan, ECONNRESET);
4399 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4400 l2cap_send_disconn_req(chan, ECONNRESET);
4404 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4407 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4409 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4410 set_default_fcs(chan);
4412 if (chan->mode == L2CAP_MODE_ERTM ||
4413 chan->mode == L2CAP_MODE_STREAMING)
4414 err = l2cap_ertm_init(chan);
4417 l2cap_send_disconn_req(chan, -err);
4419 l2cap_chan_ready(chan);
4423 l2cap_chan_unlock(chan);
4424 l2cap_chan_put(chan);
4428 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4429 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4432 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4433 struct l2cap_disconn_rsp rsp;
4435 struct l2cap_chan *chan;
4437 if (cmd_len != sizeof(*req))
4440 scid = __le16_to_cpu(req->scid);
4441 dcid = __le16_to_cpu(req->dcid);
4443 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4445 chan = l2cap_get_chan_by_scid(conn, dcid);
4447 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4451 rsp.dcid = cpu_to_le16(chan->scid);
4452 rsp.scid = cpu_to_le16(chan->dcid);
4453 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4455 chan->ops->set_shutdown(chan);
4457 l2cap_chan_unlock(chan);
4458 mutex_lock(&conn->chan_lock);
4459 l2cap_chan_lock(chan);
4460 l2cap_chan_del(chan, ECONNRESET);
4461 mutex_unlock(&conn->chan_lock);
4463 chan->ops->close(chan);
4465 l2cap_chan_unlock(chan);
4466 l2cap_chan_put(chan);
4471 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4472 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4475 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4477 struct l2cap_chan *chan;
4479 if (cmd_len != sizeof(*rsp))
4482 scid = __le16_to_cpu(rsp->scid);
4483 dcid = __le16_to_cpu(rsp->dcid);
4485 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4487 chan = l2cap_get_chan_by_scid(conn, scid);
4492 if (chan->state != BT_DISCONN) {
4493 l2cap_chan_unlock(chan);
4494 l2cap_chan_put(chan);
4498 l2cap_chan_unlock(chan);
4499 mutex_lock(&conn->chan_lock);
4500 l2cap_chan_lock(chan);
4501 l2cap_chan_del(chan, 0);
4502 mutex_unlock(&conn->chan_lock);
4504 chan->ops->close(chan);
4506 l2cap_chan_unlock(chan);
4507 l2cap_chan_put(chan);
4512 static inline int l2cap_information_req(struct l2cap_conn *conn,
4513 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4516 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4519 if (cmd_len != sizeof(*req))
4522 type = __le16_to_cpu(req->type);
4524 BT_DBG("type 0x%4.4x", type);
4526 if (type == L2CAP_IT_FEAT_MASK) {
4528 u32 feat_mask = l2cap_feat_mask;
4529 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4530 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4531 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4533 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4536 put_unaligned_le32(feat_mask, rsp->data);
4537 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4539 } else if (type == L2CAP_IT_FIXED_CHAN) {
4541 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4543 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4544 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4545 rsp->data[0] = conn->local_fixed_chan;
4546 memset(rsp->data + 1, 0, 7);
4547 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4550 struct l2cap_info_rsp rsp;
4551 rsp.type = cpu_to_le16(type);
4552 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4553 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4560 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4561 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4564 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4567 if (cmd_len < sizeof(*rsp))
4570 type = __le16_to_cpu(rsp->type);
4571 result = __le16_to_cpu(rsp->result);
4573 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4575 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4576 if (cmd->ident != conn->info_ident ||
4577 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4580 cancel_delayed_work(&conn->info_timer);
4582 if (result != L2CAP_IR_SUCCESS) {
4583 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4584 conn->info_ident = 0;
4586 l2cap_conn_start(conn);
4592 case L2CAP_IT_FEAT_MASK:
4593 conn->feat_mask = get_unaligned_le32(rsp->data);
4595 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4596 struct l2cap_info_req req;
4597 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4599 conn->info_ident = l2cap_get_ident(conn);
4601 l2cap_send_cmd(conn, conn->info_ident,
4602 L2CAP_INFO_REQ, sizeof(req), &req);
4604 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4605 conn->info_ident = 0;
4607 l2cap_conn_start(conn);
4611 case L2CAP_IT_FIXED_CHAN:
4612 conn->remote_fixed_chan = rsp->data[0];
4613 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4614 conn->info_ident = 0;
4616 l2cap_conn_start(conn);
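/* Connection Parameter Update requests travel from the LE peripheral
 * to the central, so only a master-role connection services them: the
 * proposed interval/latency/timeout set is validated with
 * hci_check_conn_params() and either rejected or accepted and applied
 * via hci_le_conn_update().
 */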
4623 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4624 struct l2cap_cmd_hdr *cmd,
4625 u16 cmd_len, u8 *data)
4627 struct hci_conn *hcon = conn->hcon;
4628 struct l2cap_conn_param_update_req *req;
4629 struct l2cap_conn_param_update_rsp rsp;
4630 u16 min, max, latency, to_multiplier;
4633 if (hcon->role != HCI_ROLE_MASTER)
4636 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4639 req = (struct l2cap_conn_param_update_req *) data;
4640 min = __le16_to_cpu(req->min);
4641 max = __le16_to_cpu(req->max);
4642 latency = __le16_to_cpu(req->latency);
4643 to_multiplier = __le16_to_cpu(req->to_multiplier);
4645 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4646 min, max, latency, to_multiplier);
4648 memset(&rsp, 0, sizeof(rsp));
4650 err = hci_check_conn_params(min, max, latency, to_multiplier);
4652 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4654 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4656 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4662 store_hint = hci_le_conn_update(hcon, min, max, latency,
4664 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
4665 store_hint, min, max, latency,
4673 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
4674 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4677 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
4678 struct hci_conn *hcon = conn->hcon;
4679 u16 dcid, mtu, mps, credits, result;
4680 struct l2cap_chan *chan;
4683 if (cmd_len < sizeof(*rsp))
4686 dcid = __le16_to_cpu(rsp->dcid);
4687 mtu = __le16_to_cpu(rsp->mtu);
4688 mps = __le16_to_cpu(rsp->mps);
4689 credits = __le16_to_cpu(rsp->credits);
4690 result = __le16_to_cpu(rsp->result);
4692 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
4693 dcid < L2CAP_CID_DYN_START ||
4694 dcid > L2CAP_CID_LE_DYN_END))
4697 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
4698 dcid, mtu, mps, credits, result);
4700 mutex_lock(&conn->chan_lock);
4702 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4710 l2cap_chan_lock(chan);
4713 case L2CAP_CR_LE_SUCCESS:
4714 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4722 chan->remote_mps = mps;
4723 chan->tx_credits = credits;
4724 l2cap_chan_ready(chan);
4727 case L2CAP_CR_LE_AUTHENTICATION:
4728 case L2CAP_CR_LE_ENCRYPTION:
4729 /* If we already have MITM protection we can't do
4732 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
4733 l2cap_chan_del(chan, ECONNREFUSED);
4737 sec_level = hcon->sec_level + 1;
4738 if (chan->sec_level < sec_level)
4739 chan->sec_level = sec_level;
4741 /* We'll need to send a new Connect Request */
4742 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
4744 smp_conn_security(hcon, chan->sec_level);
4748 l2cap_chan_del(chan, ECONNREFUSED);
4752 l2cap_chan_unlock(chan);
4755 mutex_unlock(&conn->chan_lock);
4760 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4761 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4766 switch (cmd->code) {
4767 case L2CAP_COMMAND_REJ:
4768 l2cap_command_rej(conn, cmd, cmd_len, data);
4771 case L2CAP_CONN_REQ:
4772 err = l2cap_connect_req(conn, cmd, cmd_len, data);
4775 case L2CAP_CONN_RSP:
4776 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
4779 case L2CAP_CONF_REQ:
4780 err = l2cap_config_req(conn, cmd, cmd_len, data);
4783 case L2CAP_CONF_RSP:
4784 l2cap_config_rsp(conn, cmd, cmd_len, data);
4787 case L2CAP_DISCONN_REQ:
4788 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
4791 case L2CAP_DISCONN_RSP:
4792 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
4795 case L2CAP_ECHO_REQ:
4796 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4799 case L2CAP_ECHO_RSP:
4802 case L2CAP_INFO_REQ:
4803 err = l2cap_information_req(conn, cmd, cmd_len, data);
4806 case L2CAP_INFO_RSP:
4807 l2cap_information_rsp(conn, cmd, cmd_len, data);
4811 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4819 static int l2cap_le_connect_req(struct l2cap_conn *conn,
4820 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4823 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
4824 struct l2cap_le_conn_rsp rsp;
4825 struct l2cap_chan *chan, *pchan;
4826 u16 dcid, scid, credits, mtu, mps;
4830 if (cmd_len != sizeof(*req))
4833 scid = __le16_to_cpu(req->scid);
4834 mtu = __le16_to_cpu(req->mtu);
4835 mps = __le16_to_cpu(req->mps);
4840 if (mtu < 23 || mps < 23)
4843 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
4846 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
4849 * Valid range: 0x0001-0x00ff
4851 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
4853 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
4854 result = L2CAP_CR_LE_BAD_PSM;
4859 /* Check if we have a socket listening on this PSM */
4860 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4861 &conn->hcon->dst, LE_LINK);
4863 result = L2CAP_CR_LE_BAD_PSM;
4868 mutex_lock(&conn->chan_lock);
4869 l2cap_chan_lock(pchan);
4871 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
4873 result = L2CAP_CR_LE_AUTHENTICATION;
4875 goto response_unlock;
4878 /* Check for valid dynamic CID range */
4879 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
4880 result = L2CAP_CR_LE_INVALID_SCID;
4882 goto response_unlock;
4885 /* Check if we already have a channel with that dcid */
4886 if (__l2cap_get_chan_by_dcid(conn, scid)) {
4887 result = L2CAP_CR_LE_SCID_IN_USE;
4889 goto response_unlock;
4892 chan = pchan->ops->new_connection(pchan);
4894 result = L2CAP_CR_LE_NO_MEM;
4895 goto response_unlock;
4898 bacpy(&chan->src, &conn->hcon->src);
4899 bacpy(&chan->dst, &conn->hcon->dst);
4900 chan->src_type = bdaddr_src_type(conn->hcon);
4901 chan->dst_type = bdaddr_dst_type(conn->hcon);
4905 chan->remote_mps = mps;
4907 __l2cap_chan_add(conn, chan);
4909 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
4912 credits = chan->rx_credits;
4914 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4916 chan->ident = cmd->ident;
4918 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4919 l2cap_state_change(chan, BT_CONNECT2);
4920 /* The following result value is actually not defined
4921 * for LE CoC but we use it to let the function know
4922 * that it should bail out after doing its cleanup
4923 * instead of sending a response.
4925 result = L2CAP_CR_PEND;
4926 chan->ops->defer(chan);
4928 l2cap_chan_ready(chan);
4929 result = L2CAP_CR_LE_SUCCESS;
4933 l2cap_chan_unlock(pchan);
4934 mutex_unlock(&conn->chan_lock);
4935 l2cap_chan_put(pchan);
4937 if (result == L2CAP_CR_PEND)
4942 rsp.mtu = cpu_to_le16(chan->imtu);
4943 rsp.mps = cpu_to_le16(chan->mps);
4949 rsp.dcid = cpu_to_le16(dcid);
4950 rsp.credits = cpu_to_le16(credits);
4951 rsp.result = cpu_to_le16(result);
4953 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
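/* Credit return path: the peer tops up our send allowance with an
 * L2CAP_LE_CREDITS packet. The per-channel total may never exceed
 * LE_FLOWCTL_MAX_CREDITS (65535), so a grant that would overflow
 * tx_credits is treated as a protocol violation and the channel is
 * disconnected; otherwise sending resumes immediately.
 */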
4958 static inline int l2cap_le_credits(struct l2cap_conn *conn,
4959 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4962 struct l2cap_le_credits *pkt;
4963 struct l2cap_chan *chan;
4964 u16 cid, credits, max_credits;
4966 if (cmd_len != sizeof(*pkt))
4969 pkt = (struct l2cap_le_credits *) data;
4970 cid = __le16_to_cpu(pkt->cid);
4971 credits = __le16_to_cpu(pkt->credits);
4973 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
4975 chan = l2cap_get_chan_by_dcid(conn, cid);
4979 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
4980 if (credits > max_credits) {
4981 BT_ERR("LE credits overflow");
4982 l2cap_send_disconn_req(chan, ECONNRESET);
4984 /* Return 0 so that we don't trigger an unnecessary
4985 * command reject packet.
4990 chan->tx_credits += credits;
4992 /* Resume sending */
4993 l2cap_le_flowctl_send(chan);
4995 if (chan->tx_credits)
4996 chan->ops->resume(chan);
4999 l2cap_chan_unlock(chan);
5000 l2cap_chan_put(chan);
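/* Enhanced credit-based connections: one request can open up to
 * L2CAP_ECRED_MAX_CID channels at once, all sharing a single MTU, MPS
 * and initial credit value; the response carries one allocated DCID
 * (or 0 on refusal) per requested SCID plus an overall result code.
 */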
5005 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5006 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5009 struct l2cap_ecred_conn_req *req = (void *) data;
5010 DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
5011 struct l2cap_chan *chan, *pchan;
5021 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5022 result = L2CAP_CR_LE_INVALID_PARAMS;
5026 cmd_len -= sizeof(*req);
5027 num_scid = cmd_len / sizeof(u16);
5029 if (num_scid > L2CAP_ECRED_MAX_CID) {
5030 result = L2CAP_CR_LE_INVALID_PARAMS;
5034 mtu = __le16_to_cpu(req->mtu);
5035 mps = __le16_to_cpu(req->mps);
5037 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5038 result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5044 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5047 * Valid range: 0x0001-0x00ff
5049 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5051 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5052 result = L2CAP_CR_LE_BAD_PSM;
5056 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5058 memset(pdu, 0, sizeof(*pdu));
5060 /* Check if we have a socket listening on this PSM */
5061 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5062 &conn->hcon->dst, LE_LINK);
5064 result = L2CAP_CR_LE_BAD_PSM;
5068 mutex_lock(&conn->chan_lock);
5069 l2cap_chan_lock(pchan);
5071 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5073 result = L2CAP_CR_LE_AUTHENTICATION;
5077 result = L2CAP_CR_LE_SUCCESS;
5079 for (i = 0; i < num_scid; i++) {
5080 u16 scid = __le16_to_cpu(req->scid[i]);
5082 BT_DBG("scid[%d] 0x%4.4x", i, scid);
5084 pdu->dcid[i] = 0x0000;
5085 len += sizeof(*pdu->dcid);
5087 /* Check for valid dynamic CID range */
5088 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5089 result = L2CAP_CR_LE_INVALID_SCID;
5093 /* Check if we already have a channel with that dcid */
5094 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5095 result = L2CAP_CR_LE_SCID_IN_USE;
5099 chan = pchan->ops->new_connection(pchan);
5101 result = L2CAP_CR_LE_NO_MEM;
5105 bacpy(&chan->src, &conn->hcon->src);
5106 bacpy(&chan->dst, &conn->hcon->dst);
5107 chan->src_type = bdaddr_src_type(conn->hcon);
5108 chan->dst_type = bdaddr_dst_type(conn->hcon);
5112 chan->remote_mps = mps;
5114 __l2cap_chan_add(conn, chan);
5116 l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
5119 if (!pdu->credits) {
5120 pdu->mtu = cpu_to_le16(chan->imtu);
5121 pdu->mps = cpu_to_le16(chan->mps);
5122 pdu->credits = cpu_to_le16(chan->rx_credits);
5125 pdu->dcid[i] = cpu_to_le16(chan->scid);
5127 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5129 chan->ident = cmd->ident;
5130 chan->mode = L2CAP_MODE_EXT_FLOWCTL;
5132 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5133 l2cap_state_change(chan, BT_CONNECT2);
5135 chan->ops->defer(chan);
5137 l2cap_chan_ready(chan);
5142 l2cap_chan_unlock(pchan);
5143 mutex_unlock(&conn->chan_lock);
5144 l2cap_chan_put(pchan);
5147 pdu->result = cpu_to_le16(result);
5152 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
5153 sizeof(*pdu) + len, pdu);
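/* A minimal illustrative sketch (guarded out of the build) of the PDU
 * length passed to l2cap_send_cmd() above: the fixed part of
 * struct l2cap_ecred_conn_rsp (mtu, mps, credits, result) plus one __le16
 * DCID entry per requested SCID, which is what the
 * "len += sizeof(*pdu->dcid)" accounting in the loop accumulates.
 */
#if 0
static size_t ecred_conn_rsp_len(unsigned int num_scid)
{
	return sizeof(struct l2cap_ecred_conn_rsp) +
	       num_scid * sizeof(__le16);
}
#endif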
5158 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
5159 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5162 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
5163 struct hci_conn *hcon = conn->hcon;
5164 u16 mtu, mps, credits, result;
5165 struct l2cap_chan *chan, *tmp;
5166 int err = 0, sec_level;
5169 if (cmd_len < sizeof(*rsp))
5172 mtu = __le16_to_cpu(rsp->mtu);
5173 mps = __le16_to_cpu(rsp->mps);
5174 credits = __le16_to_cpu(rsp->credits);
5175 result = __le16_to_cpu(rsp->result);
5177 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
5180 mutex_lock(&conn->chan_lock);
5182 cmd_len -= sizeof(*rsp);
5184 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5187 if (chan->ident != cmd->ident ||
5188 chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
5189 chan->state == BT_CONNECTED)
5192 l2cap_chan_lock(chan);
5194 /* Check that there is a dcid for each pending channel */
5195 if (cmd_len < sizeof(dcid)) {
5196 l2cap_chan_del(chan, ECONNREFUSED);
5197 l2cap_chan_unlock(chan);
5201 dcid = __le16_to_cpu(rsp->dcid[i++]);
5202 cmd_len -= sizeof(u16);
5204 BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
5206 /* Check if dcid is already in use */
5207 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
5208 /* If a device receives a
5209 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
5210 * already-assigned Destination CID, then both the
5211 * original channel and the new channel shall be
5212 * immediately discarded and not used.
5214 l2cap_chan_del(chan, ECONNREFUSED);
5215 l2cap_chan_unlock(chan);
5216 chan = __l2cap_get_chan_by_dcid(conn, dcid);
5217 l2cap_chan_lock(chan);
5218 l2cap_chan_del(chan, ECONNRESET);
5219 l2cap_chan_unlock(chan);
5224 case L2CAP_CR_LE_AUTHENTICATION:
5225 case L2CAP_CR_LE_ENCRYPTION:
5226 /* If we already have MITM protection we can't do
5227 * anything.
5228 */
5229 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5230 l2cap_chan_del(chan, ECONNREFUSED);
5234 sec_level = hcon->sec_level + 1;
5235 if (chan->sec_level < sec_level)
5236 chan->sec_level = sec_level;
5238 /* We'll need to send a new Connect Request */
5239 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
5241 smp_conn_security(hcon, chan->sec_level);
5244 case L2CAP_CR_LE_BAD_PSM:
5245 l2cap_chan_del(chan, ECONNREFUSED);
5249 /* If dcid was not set it means the channel was refused */
5251 l2cap_chan_del(chan, ECONNREFUSED);
5258 chan->remote_mps = mps;
5259 chan->tx_credits = credits;
5260 l2cap_chan_ready(chan);
5264 l2cap_chan_unlock(chan);
5267 mutex_unlock(&conn->chan_lock);
5272 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
5273 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5276 struct l2cap_ecred_reconf_req *req = (void *) data;
5277 struct l2cap_ecred_reconf_rsp rsp;
5278 u16 mtu, mps, result;
5279 struct l2cap_chan *chan;
5285 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5286 result = L2CAP_CR_LE_INVALID_PARAMS;
5290 mtu = __le16_to_cpu(req->mtu);
5291 mps = __le16_to_cpu(req->mps);
5293 BT_DBG("mtu %u mps %u", mtu, mps);
5295 if (mtu < L2CAP_ECRED_MIN_MTU) {
5296 result = L2CAP_RECONF_INVALID_MTU;
5300 if (mps < L2CAP_ECRED_MIN_MPS) {
5301 result = L2CAP_RECONF_INVALID_MPS;
5305 cmd_len -= sizeof(*req);
5306 num_scid = cmd_len / sizeof(u16);
5307 result = L2CAP_RECONF_SUCCESS;
5309 for (i = 0; i < num_scid; i++) {
5312 scid = __le16_to_cpu(req->scid[i]);
5316 chan = __l2cap_get_chan_by_dcid(conn, scid);
5320 /* If the MTU value is decreased for any of the included
5321 * channels, then the receiver shall disconnect all
5322 * included channels.
5324 if (chan->omtu > mtu) {
5325 BT_ERR("chan %p decreased MTU %u -> %u", chan,
5327 result = L2CAP_RECONF_INVALID_MTU;
5331 chan->remote_mps = mps;
5335 rsp.result = cpu_to_le16(result);
5337 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
5343 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
5344 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5347 struct l2cap_chan *chan, *tmp;
5348 struct l2cap_ecred_reconf_rsp *rsp = (void *) data;
5351 if (cmd_len < sizeof(*rsp))
5354 result = __le16_to_cpu(rsp->result);
5356 BT_DBG("result 0x%4.4x", rsp->result);
5361 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5362 if (chan->ident != cmd->ident)
5365 l2cap_chan_del(chan, ECONNRESET);
5371 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5372 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5375 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5376 struct l2cap_chan *chan;
5378 if (cmd_len < sizeof(*rej))
5381 mutex_lock(&conn->chan_lock);
5383 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5387 chan = l2cap_chan_hold_unless_zero(chan);
5391 l2cap_chan_lock(chan);
5392 l2cap_chan_del(chan, ECONNREFUSED);
5393 l2cap_chan_unlock(chan);
5394 l2cap_chan_put(chan);
5397 mutex_unlock(&conn->chan_lock);
5401 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5402 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5407 switch (cmd->code) {
5408 case L2CAP_COMMAND_REJ:
5409 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5412 case L2CAP_CONN_PARAM_UPDATE_REQ:
5413 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5416 case L2CAP_CONN_PARAM_UPDATE_RSP:
5419 case L2CAP_LE_CONN_RSP:
5420 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5423 case L2CAP_LE_CONN_REQ:
5424 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5427 case L2CAP_LE_CREDITS:
5428 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5431 case L2CAP_ECRED_CONN_REQ:
5432 err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
5435 case L2CAP_ECRED_CONN_RSP:
5436 err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
5439 case L2CAP_ECRED_RECONF_REQ:
5440 err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
5443 case L2CAP_ECRED_RECONF_RSP:
5444 err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
5447 case L2CAP_DISCONN_REQ:
5448 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5451 case L2CAP_DISCONN_RSP:
5452 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5456 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5464 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5465 struct sk_buff *skb)
5467 struct hci_conn *hcon = conn->hcon;
5468 struct l2cap_cmd_hdr *cmd;
5472 if (hcon->type != LE_LINK)
5475 if (skb->len < L2CAP_CMD_HDR_SIZE)
5478 cmd = (void *) skb->data;
5479 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5481 len = le16_to_cpu(cmd->len);
5483 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5485 if (len != skb->len || !cmd->ident) {
5486 BT_DBG("corrupted command");
5490 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5492 struct l2cap_cmd_rej_unk rej;
5494 BT_ERR("Wrong link type (%d)", err);
5496 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5497 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5505 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
5507 struct l2cap_cmd_rej_unk rej;
5509 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5510 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5513 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5514 struct sk_buff *skb)
5516 struct hci_conn *hcon = conn->hcon;
5517 struct l2cap_cmd_hdr *cmd;
5520 l2cap_raw_recv(conn, skb);
5522 if (hcon->type != ACL_LINK)
5525 while (skb->len >= L2CAP_CMD_HDR_SIZE) {
5528 cmd = (void *) skb->data;
5529 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5531 len = le16_to_cpu(cmd->len);
5533 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
5536 if (len > skb->len || !cmd->ident) {
5537 BT_DBG("corrupted command");
5538 l2cap_sig_send_rej(conn, cmd->ident);
5539 skb_pull(skb, len > skb->len ? skb->len : len);
5543 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
5545 BT_ERR("Wrong link type (%d)", err);
5546 l2cap_sig_send_rej(conn, cmd->ident);
5553 BT_DBG("corrupted command");
5554 l2cap_sig_send_rej(conn, 0);
5561 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5563 u16 our_fcs, rcv_fcs;
5566 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5567 hdr_size = L2CAP_EXT_HDR_SIZE;
5569 hdr_size = L2CAP_ENH_HDR_SIZE;
5571 if (chan->fcs == L2CAP_FCS_CRC16) {
5572 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5573 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5574 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5576 if (our_fcs != rcv_fcs)
5577 return -EBADMSG;
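/* A minimal illustrative sketch (guarded out of the build) of the FCS
 * layout checked above: for ERTM/streaming PDUs the CRC-16 covers the
 * basic L2CAP header, the control field and the payload, and the 2-byte
 * FCS trails the payload in little-endian order. Here "pdu" is assumed to
 * point at the start of the basic header.
 */
#if 0
static bool fcs_matches(const u8 *pdu, size_t pdu_len)
{
	u16 rcv_fcs = get_unaligned_le16(pdu + pdu_len - L2CAP_FCS_SIZE);
	u16 our_fcs = crc16(0, pdu, pdu_len - L2CAP_FCS_SIZE);

	return our_fcs == rcv_fcs;
}
#endif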
5582 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5584 struct l2cap_ctrl control;
5586 BT_DBG("chan %p", chan);
5588 memset(&control, 0, sizeof(control));
5591 control.reqseq = chan->buffer_seq;
5592 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5594 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5595 control.super = L2CAP_SUPER_RNR;
5596 l2cap_send_sframe(chan, &control);
5599 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5600 chan->unacked_frames > 0)
5601 __set_retrans_timer(chan);
5603 /* Send pending iframes */
5604 l2cap_ertm_send(chan);
5606 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5607 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5608 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5609 * send it now.
5610 */
5611 control.super = L2CAP_SUPER_RR;
5612 l2cap_send_sframe(chan, &control);
5616 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5617 struct sk_buff **last_frag)
5619 /* skb->len reflects data in skb as well as all fragments
5620 * skb->data_len reflects only data in fragments
5622 if (!skb_has_frag_list(skb))
5623 skb_shinfo(skb)->frag_list = new_frag;
5625 new_frag->next = NULL;
5627 (*last_frag)->next = new_frag;
5628 *last_frag = new_frag;
5630 skb->len += new_frag->len;
5631 skb->data_len += new_frag->len;
5632 skb->truesize += new_frag->truesize;
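/* A minimal illustrative sketch (guarded out of the build) of the
 * bookkeeping append_skb_frag() maintains: skb->len counts linear data
 * plus all fragments while skb->data_len counts fragment bytes only, so
 * their difference is the linear head (this is what skb_headlen()
 * returns).
 */
#if 0
static unsigned int sdu_linear_bytes(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}
#endif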
5635 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5636 struct l2cap_ctrl *control)
5640 switch (control->sar) {
5641 case L2CAP_SAR_UNSEGMENTED:
5645 err = chan->ops->recv(chan, skb);
5648 case L2CAP_SAR_START:
5652 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5655 chan->sdu_len = get_unaligned_le16(skb->data);
5656 skb_pull(skb, L2CAP_SDULEN_SIZE);
5658 if (chan->sdu_len > chan->imtu) {
5663 if (skb->len >= chan->sdu_len)
5667 chan->sdu_last_frag = skb;
5673 case L2CAP_SAR_CONTINUE:
5677 append_skb_frag(chan->sdu, skb,
5678 &chan->sdu_last_frag);
5681 if (chan->sdu->len >= chan->sdu_len)
5691 append_skb_frag(chan->sdu, skb,
5692 &chan->sdu_last_frag);
5695 if (chan->sdu->len != chan->sdu_len)
5698 err = chan->ops->recv(chan, chan->sdu);
5701 /* Reassembly complete */
5703 chan->sdu_last_frag = NULL;
5711 kfree_skb(chan->sdu);
5713 chan->sdu_last_frag = NULL;
5720 static int l2cap_resegment(struct l2cap_chan *chan)
5726 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5730 if (chan->mode != L2CAP_MODE_ERTM)
5733 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5734 l2cap_tx(chan, NULL, NULL, event);
5737 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5740 /* Pass sequential frames to l2cap_reassemble_sdu()
5741 * until a gap is encountered.
5744 BT_DBG("chan %p", chan);
5746 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5747 struct sk_buff *skb;
5748 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5749 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5751 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5756 skb_unlink(skb, &chan->srej_q);
5757 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5758 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
5763 if (skb_queue_empty(&chan->srej_q)) {
5764 chan->rx_state = L2CAP_RX_STATE_RECV;
5765 l2cap_send_ack(chan);
5771 static void l2cap_handle_srej(struct l2cap_chan *chan,
5772 struct l2cap_ctrl *control)
5774 struct sk_buff *skb;
5776 BT_DBG("chan %p, control %p", chan, control);
5778 if (control->reqseq == chan->next_tx_seq) {
5779 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5780 l2cap_send_disconn_req(chan, ECONNRESET);
5784 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5787 BT_DBG("Seq %d not available for retransmission",
5792 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
5793 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5794 l2cap_send_disconn_req(chan, ECONNRESET);
5798 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5800 if (control->poll) {
5801 l2cap_pass_to_tx(chan, control);
5803 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5804 l2cap_retransmit(chan, control);
5805 l2cap_ertm_send(chan);
5807 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5808 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5809 chan->srej_save_reqseq = control->reqseq;
5812 l2cap_pass_to_tx_fbit(chan, control);
5814 if (control->final) {
5815 if (chan->srej_save_reqseq != control->reqseq ||
5816 !test_and_clear_bit(CONN_SREJ_ACT,
5818 l2cap_retransmit(chan, control);
5820 l2cap_retransmit(chan, control);
5821 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5822 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5823 chan->srej_save_reqseq = control->reqseq;
5829 static void l2cap_handle_rej(struct l2cap_chan *chan,
5830 struct l2cap_ctrl *control)
5832 struct sk_buff *skb;
5834 BT_DBG("chan %p, control %p", chan, control);
5836 if (control->reqseq == chan->next_tx_seq) {
5837 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5838 l2cap_send_disconn_req(chan, ECONNRESET);
5842 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5844 if (chan->max_tx && skb &&
5845 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
5846 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5847 l2cap_send_disconn_req(chan, ECONNRESET);
5851 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5853 l2cap_pass_to_tx(chan, control);
5855 if (control->final) {
5856 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5857 l2cap_retransmit_all(chan, control);
5859 l2cap_retransmit_all(chan, control);
5860 l2cap_ertm_send(chan);
5861 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5862 set_bit(CONN_REJ_ACT, &chan->conn_state);
5866 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5868 BT_DBG("chan %p, txseq %d", chan, txseq);
5870 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5871 chan->expected_tx_seq);
5873 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5874 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5876 /* See notes below regarding "double poll" and
5877 * invalid packets.
5878 */
5879 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5880 BT_DBG("Invalid/Ignore - after SREJ");
5881 return L2CAP_TXSEQ_INVALID_IGNORE;
5883 BT_DBG("Invalid - in window after SREJ sent");
5884 return L2CAP_TXSEQ_INVALID;
5888 if (chan->srej_list.head == txseq) {
5889 BT_DBG("Expected SREJ");
5890 return L2CAP_TXSEQ_EXPECTED_SREJ;
5893 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5894 BT_DBG("Duplicate SREJ - txseq already stored");
5895 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5898 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5899 BT_DBG("Unexpected SREJ - not requested");
5900 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5904 if (chan->expected_tx_seq == txseq) {
5905 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5907 BT_DBG("Invalid - txseq outside tx window");
5908 return L2CAP_TXSEQ_INVALID;
5911 return L2CAP_TXSEQ_EXPECTED;
5915 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5916 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5917 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5918 return L2CAP_TXSEQ_DUPLICATE;
5921 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5922 /* A source of invalid packets is a "double poll" condition,
5923 * where delays cause us to send multiple poll packets. If
5924 * the remote stack receives and processes both polls,
5925 * sequence numbers can wrap around in such a way that a
5926 * resent frame has a sequence number that looks like new data
5927 * with a sequence gap. This would trigger an erroneous SREJ
5930 * Fortunately, this is impossible with a tx window that's
5931 * less than half of the maximum sequence number, which allows
5932 * invalid frames to be safely ignored.
5934 * With tx window sizes greater than half of the tx window
5935 * maximum, the frame is invalid and cannot be ignored. This
5936 * causes a disconnect.
5939 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5940 BT_DBG("Invalid/Ignore - txseq outside tx window");
5941 return L2CAP_TXSEQ_INVALID_IGNORE;
5943 BT_DBG("Invalid - txseq outside tx window");
5944 return L2CAP_TXSEQ_INVALID;
5947 BT_DBG("Unexpected - txseq indicates missing frames");
5948 return L2CAP_TXSEQ_UNEXPECTED;
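/* A minimal illustrative sketch (guarded out of the build) of the
 * "double poll" bound used above: with a 6-bit ERTM sequence space
 * (tx_win_max = 63), a tx window of at most (63 + 1) / 2 = 32 guarantees
 * a stale retransmission can never alias into the valid receive window,
 * so invalid frames may be ignored instead of forcing a disconnect.
 */
#if 0
static bool txseq_invalid_can_be_ignored(u16 tx_win, u16 tx_win_max)
{
	return tx_win <= ((tx_win_max + 1) >> 1);
}
#endif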
5952 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5953 struct l2cap_ctrl *control,
5954 struct sk_buff *skb, u8 event)
5956 struct l2cap_ctrl local_control;
5958 bool skb_in_use = false;
5960 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5964 case L2CAP_EV_RECV_IFRAME:
5965 switch (l2cap_classify_txseq(chan, control->txseq)) {
5966 case L2CAP_TXSEQ_EXPECTED:
5967 l2cap_pass_to_tx(chan, control);
5969 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5970 BT_DBG("Busy, discarding expected seq %d",
5975 chan->expected_tx_seq = __next_seq(chan,
5978 chan->buffer_seq = chan->expected_tx_seq;
5981 /* l2cap_reassemble_sdu may free skb, hence invalidate
5982 * control, so make a copy in advance to use it after
5983 * l2cap_reassemble_sdu returns and to avoid the race
5984 * condition, for example:
5986 * The current thread calls:
5987 * l2cap_reassemble_sdu
5988 * chan->ops->recv == l2cap_sock_recv_cb
5989 * __sock_queue_rcv_skb
5990 * Another thread calls:
5994 * Then the current thread tries to access control, but
5995 * it was freed by skb_free_datagram.
5997 local_control = *control;
5998 err = l2cap_reassemble_sdu(chan, skb, control);
6002 if (local_control.final) {
6003 if (!test_and_clear_bit(CONN_REJ_ACT,
6004 &chan->conn_state)) {
6005 local_control.final = 0;
6006 l2cap_retransmit_all(chan, &local_control);
6007 l2cap_ertm_send(chan);
6011 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6012 l2cap_send_ack(chan);
6014 case L2CAP_TXSEQ_UNEXPECTED:
6015 l2cap_pass_to_tx(chan, control);
6017 /* Can't issue SREJ frames in the local busy state.
6018 * Drop this frame, it will be seen as missing
6019 * when local busy is exited.
6021 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6022 BT_DBG("Busy, discarding unexpected seq %d",
6027 /* There was a gap in the sequence, so an SREJ
6028 * must be sent for each missing frame. The
6029 * current frame is stored for later use.
6031 skb_queue_tail(&chan->srej_q, skb);
6033 BT_DBG("Queued %p (queue len %d)", skb,
6034 skb_queue_len(&chan->srej_q));
6036 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6037 l2cap_seq_list_clear(&chan->srej_list);
6038 l2cap_send_srej(chan, control->txseq);
6040 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6042 case L2CAP_TXSEQ_DUPLICATE:
6043 l2cap_pass_to_tx(chan, control);
6045 case L2CAP_TXSEQ_INVALID_IGNORE:
6047 case L2CAP_TXSEQ_INVALID:
6049 l2cap_send_disconn_req(chan, ECONNRESET);
6053 case L2CAP_EV_RECV_RR:
6054 l2cap_pass_to_tx(chan, control);
6055 if (control->final) {
6056 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6058 if (!test_and_clear_bit(CONN_REJ_ACT,
6059 &chan->conn_state)) {
6061 l2cap_retransmit_all(chan, control);
6064 l2cap_ertm_send(chan);
6065 } else if (control->poll) {
6066 l2cap_send_i_or_rr_or_rnr(chan);
6068 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6069 &chan->conn_state) &&
6070 chan->unacked_frames)
6071 __set_retrans_timer(chan);
6073 l2cap_ertm_send(chan);
6076 case L2CAP_EV_RECV_RNR:
6077 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6078 l2cap_pass_to_tx(chan, control);
6079 if (control && control->poll) {
6080 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6081 l2cap_send_rr_or_rnr(chan, 0);
6083 __clear_retrans_timer(chan);
6084 l2cap_seq_list_clear(&chan->retrans_list);
6086 case L2CAP_EV_RECV_REJ:
6087 l2cap_handle_rej(chan, control);
6089 case L2CAP_EV_RECV_SREJ:
6090 l2cap_handle_srej(chan, control);
6096 if (skb && !skb_in_use) {
6097 BT_DBG("Freeing %p", skb);
6104 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6105 struct l2cap_ctrl *control,
6106 struct sk_buff *skb, u8 event)
6109 u16 txseq = control->txseq;
6110 bool skb_in_use = false;
6112 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6116 case L2CAP_EV_RECV_IFRAME:
6117 switch (l2cap_classify_txseq(chan, txseq)) {
6118 case L2CAP_TXSEQ_EXPECTED:
6119 /* Keep frame for reassembly later */
6120 l2cap_pass_to_tx(chan, control);
6121 skb_queue_tail(&chan->srej_q, skb);
6123 BT_DBG("Queued %p (queue len %d)", skb,
6124 skb_queue_len(&chan->srej_q));
6126 chan->expected_tx_seq = __next_seq(chan, txseq);
6128 case L2CAP_TXSEQ_EXPECTED_SREJ:
6129 l2cap_seq_list_pop(&chan->srej_list);
6131 l2cap_pass_to_tx(chan, control);
6132 skb_queue_tail(&chan->srej_q, skb);
6134 BT_DBG("Queued %p (queue len %d)", skb,
6135 skb_queue_len(&chan->srej_q));
6137 err = l2cap_rx_queued_iframes(chan);
6142 case L2CAP_TXSEQ_UNEXPECTED:
6143 /* Got a frame that can't be reassembled yet.
6144 * Save it for later, and send SREJs to cover
6145 * the missing frames.
6147 skb_queue_tail(&chan->srej_q, skb);
6149 BT_DBG("Queued %p (queue len %d)", skb,
6150 skb_queue_len(&chan->srej_q));
6152 l2cap_pass_to_tx(chan, control);
6153 l2cap_send_srej(chan, control->txseq);
6155 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6156 /* This frame was requested with an SREJ, but
6157 * some expected retransmitted frames are
6158 * missing. Request retransmission of missing
6159 * SREJ'd frames.
6160 */
6161 skb_queue_tail(&chan->srej_q, skb);
6163 BT_DBG("Queued %p (queue len %d)", skb,
6164 skb_queue_len(&chan->srej_q));
6166 l2cap_pass_to_tx(chan, control);
6167 l2cap_send_srej_list(chan, control->txseq);
6169 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6170 /* We've already queued this frame. Drop this copy. */
6171 l2cap_pass_to_tx(chan, control);
6173 case L2CAP_TXSEQ_DUPLICATE:
6174 /* Expecting a later sequence number, so this frame
6175 * was already received. Ignore it completely.
6178 case L2CAP_TXSEQ_INVALID_IGNORE:
6180 case L2CAP_TXSEQ_INVALID:
6182 l2cap_send_disconn_req(chan, ECONNRESET);
6186 case L2CAP_EV_RECV_RR:
6187 l2cap_pass_to_tx(chan, control);
6188 if (control->final) {
6189 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6191 if (!test_and_clear_bit(CONN_REJ_ACT,
6192 &chan->conn_state)) {
6194 l2cap_retransmit_all(chan, control);
6197 l2cap_ertm_send(chan);
6198 } else if (control->poll) {
6199 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6200 &chan->conn_state) &&
6201 chan->unacked_frames) {
6202 __set_retrans_timer(chan);
6205 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6206 l2cap_send_srej_tail(chan);
6208 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6209 &chan->conn_state) &&
6210 chan->unacked_frames)
6211 __set_retrans_timer(chan);
6213 l2cap_send_ack(chan);
6216 case L2CAP_EV_RECV_RNR:
6217 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6218 l2cap_pass_to_tx(chan, control);
6219 if (control->poll) {
6220 l2cap_send_srej_tail(chan);
6222 struct l2cap_ctrl rr_control;
6223 memset(&rr_control, 0, sizeof(rr_control));
6224 rr_control.sframe = 1;
6225 rr_control.super = L2CAP_SUPER_RR;
6226 rr_control.reqseq = chan->buffer_seq;
6227 l2cap_send_sframe(chan, &rr_control);
6231 case L2CAP_EV_RECV_REJ:
6232 l2cap_handle_rej(chan, control);
6234 case L2CAP_EV_RECV_SREJ:
6235 l2cap_handle_srej(chan, control);
6239 if (skb && !skb_in_use) {
6240 BT_DBG("Freeing %p", skb);
6247 static int l2cap_finish_move(struct l2cap_chan *chan)
6249 BT_DBG("chan %p", chan);
6251 chan->rx_state = L2CAP_RX_STATE_RECV;
6252 chan->conn->mtu = chan->conn->hcon->mtu;
6254 return l2cap_resegment(chan);
6257 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6258 struct l2cap_ctrl *control,
6259 struct sk_buff *skb, u8 event)
6263 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6269 l2cap_process_reqseq(chan, control->reqseq);
6271 if (!skb_queue_empty(&chan->tx_q))
6272 chan->tx_send_head = skb_peek(&chan->tx_q);
6274 chan->tx_send_head = NULL;
6276 /* Rewind next_tx_seq to the point expected
6277 * by the receiver.
6278 */
6279 chan->next_tx_seq = control->reqseq;
6280 chan->unacked_frames = 0;
6282 err = l2cap_finish_move(chan);
6286 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6287 l2cap_send_i_or_rr_or_rnr(chan);
6289 if (event == L2CAP_EV_RECV_IFRAME)
6292 return l2cap_rx_state_recv(chan, control, NULL, event);
6295 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6296 struct l2cap_ctrl *control,
6297 struct sk_buff *skb, u8 event)
6301 if (!control->final)
6304 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6306 chan->rx_state = L2CAP_RX_STATE_RECV;
6307 l2cap_process_reqseq(chan, control->reqseq);
6309 if (!skb_queue_empty(&chan->tx_q))
6310 chan->tx_send_head = skb_peek(&chan->tx_q);
6312 chan->tx_send_head = NULL;
6314 /* Rewind next_tx_seq to the point expected
6315 * by the receiver.
6316 */
6317 chan->next_tx_seq = control->reqseq;
6318 chan->unacked_frames = 0;
6319 chan->conn->mtu = chan->conn->hcon->mtu;
6321 err = l2cap_resegment(chan);
6324 err = l2cap_rx_state_recv(chan, control, skb, event);
6329 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6331 /* Make sure reqseq is for a packet that has been sent but not acked */
6334 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6335 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
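/* A minimal illustrative sketch (guarded out of the build) of the modular
 * arithmetic behind __valid_reqseq(): with a 6-bit sequence space,
 * next_tx_seq = 2 and expected_ack_seq = 60 give (2 - 60) & 63 = 6
 * unacked frames, so a reqseq is valid only if it trails next_tx_seq by
 * no more than those 6 frames.
 */
#if 0
static u16 seq_offset(u16 seq1, u16 seq2, u16 seq_mask)
{
	return (seq1 - seq2) & seq_mask;
}
#endif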
6338 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6339 struct sk_buff *skb, u8 event)
6343 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6344 control, skb, event, chan->rx_state);
6346 if (__valid_reqseq(chan, control->reqseq)) {
6347 switch (chan->rx_state) {
6348 case L2CAP_RX_STATE_RECV:
6349 err = l2cap_rx_state_recv(chan, control, skb, event);
6351 case L2CAP_RX_STATE_SREJ_SENT:
6352 err = l2cap_rx_state_srej_sent(chan, control, skb,
6355 case L2CAP_RX_STATE_WAIT_P:
6356 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6358 case L2CAP_RX_STATE_WAIT_F:
6359 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6366 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6367 control->reqseq, chan->next_tx_seq,
6368 chan->expected_ack_seq);
6369 l2cap_send_disconn_req(chan, ECONNRESET);
6375 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6376 struct sk_buff *skb)
6378 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
6379 * the txseq field in advance to use it after l2cap_reassemble_sdu
6380 * returns and to avoid the race condition, for example:
6382 * The current thread calls:
6383 * l2cap_reassemble_sdu
6384 * chan->ops->recv == l2cap_sock_recv_cb
6385 * __sock_queue_rcv_skb
6386 * Another thread calls:
6390 * Then the current thread tries to access control, but it was freed by
6391 * skb_free_datagram.
6393 u16 txseq = control->txseq;
6395 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6398 if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
6399 l2cap_pass_to_tx(chan, control);
6401 BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
6402 __next_seq(chan, chan->buffer_seq));
6404 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6406 l2cap_reassemble_sdu(chan, skb, control);
6409 kfree_skb(chan->sdu);
6412 chan->sdu_last_frag = NULL;
6416 BT_DBG("Freeing %p", skb);
6421 chan->last_acked_seq = txseq;
6422 chan->expected_tx_seq = __next_seq(chan, txseq);
6427 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6429 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6433 __unpack_control(chan, skb);
6438 * We can just drop the corrupted I-frame here.
6439 * Receiver will miss it and start proper recovery
6440 * procedures and ask for retransmission.
6442 if (l2cap_check_fcs(chan, skb))
6445 if (!control->sframe && control->sar == L2CAP_SAR_START)
6446 len -= L2CAP_SDULEN_SIZE;
6448 if (chan->fcs == L2CAP_FCS_CRC16)
6449 len -= L2CAP_FCS_SIZE;
6451 if (len > chan->mps) {
6452 l2cap_send_disconn_req(chan, ECONNRESET);
6456 if (chan->ops->filter) {
6457 if (chan->ops->filter(chan, skb))
6461 if (!control->sframe) {
6464 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6465 control->sar, control->reqseq, control->final,
6468 /* Validate F-bit - F=0 always valid, F=1 only
6469 * valid in TX WAIT_F
6471 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6474 if (chan->mode != L2CAP_MODE_STREAMING) {
6475 event = L2CAP_EV_RECV_IFRAME;
6476 err = l2cap_rx(chan, control, skb, event);
6478 err = l2cap_stream_rx(chan, control, skb);
6482 l2cap_send_disconn_req(chan, ECONNRESET);
6484 const u8 rx_func_to_event[4] = {
6485 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6486 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6489 /* Only I-frames are expected in streaming mode */
6490 if (chan->mode == L2CAP_MODE_STREAMING)
6493 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6494 control->reqseq, control->final, control->poll,
6498 BT_ERR("Trailing bytes: %d in sframe", len);
6499 l2cap_send_disconn_req(chan, ECONNRESET);
6503 /* Validate F and P bits */
6504 if (control->final && (control->poll ||
6505 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6508 event = rx_func_to_event[control->super];
6509 if (l2cap_rx(chan, control, skb, event))
6510 l2cap_send_disconn_req(chan, ECONNRESET);
6520 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6522 struct l2cap_conn *conn = chan->conn;
6523 struct l2cap_le_credits pkt;
6524 u16 return_credits = l2cap_le_rx_credits(chan);
6526 if (chan->rx_credits >= return_credits)
6529 return_credits -= chan->rx_credits;
6531 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6533 chan->rx_credits += return_credits;
6535 pkt.cid = cpu_to_le16(chan->scid);
6536 pkt.credits = cpu_to_le16(return_credits);
6538 chan->ident = l2cap_get_ident(conn);
6540 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
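/* A minimal illustrative sketch (guarded out of the build) of the top-up
 * arithmetic above: if l2cap_le_rx_credits() yields a target of 10
 * outstanding credits and the peer is believed to hold 3, a grant of
 * 10 - 3 = 7 credits is sent in an L2CAP_LE_CREDITS PDU; if the peer
 * already holds at least the target, nothing is sent.
 */
#if 0
static u16 credits_to_return(u16 target, u16 rx_credits)
{
	return rx_credits >= target ? 0 : target - rx_credits;
}
#endif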
6543 void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
6545 if (chan->rx_avail == rx_avail)
6548 BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
6550 chan->rx_avail = rx_avail;
6552 if (chan->state == BT_CONNECTED)
6553 l2cap_chan_le_send_credits(chan);
6556 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6560 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6562 /* Wait for recv to confirm reception before updating the credits */
6563 err = chan->ops->recv(chan, skb);
6565 if (err < 0 && chan->rx_avail != -1) {
6566 BT_ERR("Queueing received LE L2CAP data failed");
6567 l2cap_send_disconn_req(chan, ECONNRESET);
6571 /* Update credits whenever an SDU is received */
6572 l2cap_chan_le_send_credits(chan);
6577 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6581 if (!chan->rx_credits) {
6582 BT_ERR("No credits to receive LE L2CAP data");
6583 l2cap_send_disconn_req(chan, ECONNRESET);
6587 if (chan->imtu < skb->len) {
6588 BT_ERR("Too big LE L2CAP PDU");
6593 BT_DBG("chan %p: rx_credits %u -> %u",
6594 chan, chan->rx_credits + 1, chan->rx_credits);
6596 /* Update if remote had run out of credits, this should only happen
6597 * if the remote is not using the entire MPS.
6599 if (!chan->rx_credits)
6600 l2cap_chan_le_send_credits(chan);
6607 sdu_len = get_unaligned_le16(skb->data);
6608 skb_pull(skb, L2CAP_SDULEN_SIZE);
6610 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6611 sdu_len, skb->len, chan->imtu);
6613 if (sdu_len > chan->imtu) {
6614 BT_ERR("Too big LE L2CAP SDU length received");
6619 if (skb->len > sdu_len) {
6620 BT_ERR("Too much LE L2CAP data received");
6625 if (skb->len == sdu_len)
6626 return l2cap_ecred_recv(chan, skb);
6629 chan->sdu_len = sdu_len;
6630 chan->sdu_last_frag = skb;
6632 /* Detect if remote is not able to use the selected MPS */
6633 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6634 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6636 /* Adjust the number of credits */
6637 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6638 chan->mps = mps_len;
6639 l2cap_chan_le_send_credits(chan);
6645 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6646 chan->sdu->len, skb->len, chan->sdu_len);
6648 if (chan->sdu->len + skb->len > chan->sdu_len) {
6649 BT_ERR("Too much LE L2CAP data received");
6654 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6657 if (chan->sdu->len == chan->sdu_len) {
6658 err = l2cap_ecred_recv(chan, chan->sdu);
6661 chan->sdu_last_frag = NULL;
6669 kfree_skb(chan->sdu);
6671 chan->sdu_last_frag = NULL;
6675 /* We can't return an error here since we took care of the skb
6676 * freeing internally. An error return would cause the caller to
6677 * do a double-free of the skb.
6678 */
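/* A minimal illustrative sketch (guarded out of the build) of the MPS
 * shrink heuristic above: the first fragment of a segmented SDU carries a
 * 2-byte SDU length header, so when the peer sends fewer bytes than our
 * MPS permits it is effectively operating with a smaller MPS, and
 * adopting that value keeps the credit accounting aligned with what the
 * peer actually transmits.
 */
#if 0
static u16 effective_mps(u16 first_frag_len, u16 mps)
{
	u16 used = first_frag_len + L2CAP_SDULEN_SIZE;

	return used < mps ? used : mps;
}
#endif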
6682 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6683 struct sk_buff *skb)
6685 struct l2cap_chan *chan;
6687 chan = l2cap_get_chan_by_scid(conn, cid);
6689 BT_DBG("unknown cid 0x%4.4x", cid);
6690 /* Drop packet and return */
6695 BT_DBG("chan %p, len %d", chan, skb->len);
6697 /* If we receive data on a fixed channel before the info req/rsp
6698 * procedure is done, simply assume that the channel is supported
6699 * and mark it as ready.
6701 if (chan->chan_type == L2CAP_CHAN_FIXED)
6702 l2cap_chan_ready(chan);
6704 if (chan->state != BT_CONNECTED)
6707 switch (chan->mode) {
6708 case L2CAP_MODE_LE_FLOWCTL:
6709 case L2CAP_MODE_EXT_FLOWCTL:
6710 if (l2cap_ecred_data_rcv(chan, skb) < 0)
6715 case L2CAP_MODE_BASIC:
6716 /* If the socket recv buffer overflows we drop data here,
6717 * which is *bad* because L2CAP has to be reliable.
6718 * But we don't have any other choice. L2CAP doesn't
6719 * provide a flow control mechanism. */
6721 if (chan->imtu < skb->len) {
6722 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6726 if (!chan->ops->recv(chan, skb))
6730 case L2CAP_MODE_ERTM:
6731 case L2CAP_MODE_STREAMING:
6732 l2cap_data_rcv(chan, skb);
6736 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6744 l2cap_chan_unlock(chan);
6745 l2cap_chan_put(chan);
6748 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6749 struct sk_buff *skb)
6751 struct hci_conn *hcon = conn->hcon;
6752 struct l2cap_chan *chan;
6754 if (hcon->type != ACL_LINK)
6757 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6762 BT_DBG("chan %p, len %d", chan, skb->len);
6764 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6767 if (chan->imtu < skb->len)
6770 /* Store remote BD_ADDR and PSM for msg_name */
6771 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6772 bt_cb(skb)->l2cap.psm = psm;
6774 if (!chan->ops->recv(chan, skb)) {
6775 l2cap_chan_put(chan);
6780 l2cap_chan_put(chan);
6785 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6787 struct l2cap_hdr *lh = (void *) skb->data;
6788 struct hci_conn *hcon = conn->hcon;
6792 if (hcon->state != BT_CONNECTED) {
6793 BT_DBG("queueing pending rx skb");
6794 skb_queue_tail(&conn->pending_rx, skb);
6798 skb_pull(skb, L2CAP_HDR_SIZE);
6799 cid = __le16_to_cpu(lh->cid);
6800 len = __le16_to_cpu(lh->len);
6802 if (len != skb->len) {
6807 /* Since we can't actively block incoming LE connections we must
6808 * at least ensure that we ignore incoming data from them.
6810 if (hcon->type == LE_LINK &&
6811 hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
6812 bdaddr_dst_type(hcon))) {
6817 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6820 case L2CAP_CID_SIGNALING:
6821 l2cap_sig_channel(conn, skb);
6824 case L2CAP_CID_CONN_LESS:
6825 psm = get_unaligned((__le16 *) skb->data);
6826 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6827 l2cap_conless_channel(conn, psm, skb);
6830 case L2CAP_CID_LE_SIGNALING:
6831 l2cap_le_sig_channel(conn, skb);
6835 l2cap_data_channel(conn, cid, skb);
6840 static void process_pending_rx(struct work_struct *work)
6842 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6844 struct sk_buff *skb;
6848 while ((skb = skb_dequeue(&conn->pending_rx)))
6849 l2cap_recv_frame(conn, skb);
6852 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6854 struct l2cap_conn *conn = hcon->l2cap_data;
6855 struct hci_chan *hchan;
6860 hchan = hci_chan_create(hcon);
6864 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
6866 hci_chan_del(hchan);
6870 kref_init(&conn->ref);
6871 hcon->l2cap_data = conn;
6872 conn->hcon = hci_conn_get(hcon);
6873 conn->hchan = hchan;
6875 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6877 conn->mtu = hcon->mtu;
6878 conn->feat_mask = 0;
6880 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
6882 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
6883 (bredr_sc_enabled(hcon->hdev) ||
6884 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
6885 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
6887 mutex_init(&conn->ident_lock);
6888 mutex_init(&conn->chan_lock);
6890 INIT_LIST_HEAD(&conn->chan_l);
6891 INIT_LIST_HEAD(&conn->users);
6893 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
6895 skb_queue_head_init(&conn->pending_rx);
6896 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
6897 INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);
6899 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
6904 static bool is_valid_psm(u16 psm, u8 dst_type)
6909 if (bdaddr_type_is_le(dst_type))
6910 return (psm <= 0x00ff);
6912 /* PSM must be odd and lsb of upper byte must be 0 */
6913 return ((psm & 0x0101) == 0x0001);
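/* A minimal illustrative sketch (guarded out of the build) of
 * is_valid_psm() on sample inputs: LE PSMs must fit in one octet, while
 * BR/EDR PSMs must be odd with the least significant bit of the upper
 * byte clear.
 */
#if 0
static void psm_examples(void)
{
	BUG_ON(!is_valid_psm(0x0080, BDADDR_LE_PUBLIC)); /* LE, <= 0x00ff */
	BUG_ON(is_valid_psm(0x0100, BDADDR_LE_PUBLIC));  /* LE, too large */
	BUG_ON(!is_valid_psm(0x1001, BDADDR_BREDR));     /* odd, bit 8 clear */
	BUG_ON(is_valid_psm(0x1002, BDADDR_BREDR));      /* even, invalid */
}
#endif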
6916 struct l2cap_chan_data {
6917 struct l2cap_chan *chan;
6922 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6924 struct l2cap_chan_data *d = data;
6927 if (chan == d->chan)
6930 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6933 pid = chan->ops->get_peer_pid(chan);
6935 /* Only count deferred channels with the same PID/PSM */
6936 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6937 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
6943 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6944 bdaddr_t *dst, u8 dst_type, u16 timeout)
6946 struct l2cap_conn *conn;
6947 struct hci_conn *hcon;
6948 struct hci_dev *hdev;
6951 BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
6952 dst, dst_type, __le16_to_cpu(psm), chan->mode);
6954 hdev = hci_get_route(dst, &chan->src, chan->src_type);
6956 return -EHOSTUNREACH;
6960 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
6961 chan->chan_type != L2CAP_CHAN_RAW) {
6966 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
6971 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
6976 switch (chan->mode) {
6977 case L2CAP_MODE_BASIC:
6979 case L2CAP_MODE_LE_FLOWCTL:
6981 case L2CAP_MODE_EXT_FLOWCTL:
6982 if (!enable_ecred) {
6987 case L2CAP_MODE_ERTM:
6988 case L2CAP_MODE_STREAMING:
6997 switch (chan->state) {
7001 /* Already connecting */
7006 /* Already connected */
7020 /* Set destination address and psm */
7021 bacpy(&chan->dst, dst);
7022 chan->dst_type = dst_type;
7027 if (bdaddr_type_is_le(dst_type)) {
7028 /* Convert from L2CAP channel address type to HCI address type
7029 */
7030 if (dst_type == BDADDR_LE_PUBLIC)
7031 dst_type = ADDR_LE_DEV_PUBLIC;
7033 dst_type = ADDR_LE_DEV_RANDOM;
7035 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7036 hcon = hci_connect_le(hdev, dst, dst_type, false,
7037 chan->sec_level, timeout,
7038 HCI_ROLE_SLAVE, 0, 0);
7040 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7041 chan->sec_level, timeout,
7042 CONN_REASON_L2CAP_CHAN);
7045 u8 auth_type = l2cap_get_auth_type(chan);
7046 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
7047 CONN_REASON_L2CAP_CHAN, timeout);
7051 err = PTR_ERR(hcon);
7055 conn = l2cap_conn_add(hcon);
7057 hci_conn_drop(hcon);
7062 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
7063 struct l2cap_chan_data data;
7066 data.pid = chan->ops->get_peer_pid(chan);
7069 l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
7071 /* Check that there aren't too many channels being connected */
7072 if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
7073 hci_conn_drop(hcon);
7079 mutex_lock(&conn->chan_lock);
7080 l2cap_chan_lock(chan);
7082 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7083 hci_conn_drop(hcon);
7088 /* Update source addr of the socket */
7089 bacpy(&chan->src, &hcon->src);
7090 chan->src_type = bdaddr_src_type(hcon);
7092 __l2cap_chan_add(conn, chan);
7094 /* l2cap_chan_add takes its own ref so we can drop this one */
7095 hci_conn_drop(hcon);
7097 l2cap_state_change(chan, BT_CONNECT);
7098 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7100 /* Release chan->sport so that it can be reused by other
7101 * sockets (as it's only used for listening sockets).
7103 write_lock(&chan_list_lock);
7105 write_unlock(&chan_list_lock);
7107 if (hcon->state == BT_CONNECTED) {
7108 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7109 __clear_chan_timer(chan);
7110 if (l2cap_chan_check_security(chan, true))
7111 l2cap_state_change(chan, BT_CONNECTED);
7113 l2cap_do_start(chan);
7119 l2cap_chan_unlock(chan);
7120 mutex_unlock(&conn->chan_lock);
7122 hci_dev_unlock(hdev);
7126 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7128 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7130 struct l2cap_conn *conn = chan->conn;
7131 DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7133 pdu->mtu = cpu_to_le16(chan->imtu);
7134 pdu->mps = cpu_to_le16(chan->mps);
7135 pdu->scid[0] = cpu_to_le16(chan->scid);
7137 chan->ident = l2cap_get_ident(conn);
7139 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7143 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
7145 if (chan->imtu > mtu)
7148 BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
7152 l2cap_ecred_reconfigure(chan);
7157 /* ---- L2CAP interface with lower layer (HCI) ---- */
7159 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7161 int exact = 0, lm1 = 0, lm2 = 0;
7162 struct l2cap_chan *c;
7164 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7166 /* Find listening sockets and check their link_mode */
7167 read_lock(&chan_list_lock);
7168 list_for_each_entry(c, &chan_list, global_l) {
7169 if (c->state != BT_LISTEN)
7172 if (!bacmp(&c->src, &hdev->bdaddr)) {
7173 lm1 |= HCI_LM_ACCEPT;
7174 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7175 lm1 |= HCI_LM_MASTER;
7177 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7178 lm2 |= HCI_LM_ACCEPT;
7179 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7180 lm2 |= HCI_LM_MASTER;
7183 read_unlock(&chan_list_lock);
7185 return exact ? lm1 : lm2;
7188 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7189 * from an existing channel in the list or from the beginning of the
7190 * global list (by passing NULL as first parameter).
7192 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7193 struct hci_conn *hcon)
7195 u8 src_type = bdaddr_src_type(hcon);
7197 read_lock(&chan_list_lock);
7200 c = list_next_entry(c, global_l);
7202 c = list_entry(chan_list.next, typeof(*c), global_l);
7204 list_for_each_entry_from(c, &chan_list, global_l) {
7205 if (c->chan_type != L2CAP_CHAN_FIXED)
7207 if (c->state != BT_LISTEN)
7209 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7211 if (src_type != c->src_type)
7214 c = l2cap_chan_hold_unless_zero(c);
7215 read_unlock(&chan_list_lock);
7219 read_unlock(&chan_list_lock);
7224 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7226 struct hci_dev *hdev = hcon->hdev;
7227 struct l2cap_conn *conn;
7228 struct l2cap_chan *pchan;
7231 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7234 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7237 l2cap_conn_del(hcon, bt_to_errno(status));
7241 conn = l2cap_conn_add(hcon);
7245 dst_type = bdaddr_dst_type(hcon);
7247 /* If device is blocked, do not create channels for it */
7248 if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
7251 /* Find fixed channels and notify them of the new connection. We
7252 * use multiple individual lookups, continuing each time where
7253 * we left off, because the list lock would prevent calling the
7254 * potentially sleeping l2cap_chan_lock() function.
7256 pchan = l2cap_global_fixed_chan(NULL, hcon);
7258 struct l2cap_chan *chan, *next;
7260 /* Client fixed channels should override server ones */
7261 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7264 l2cap_chan_lock(pchan);
7265 chan = pchan->ops->new_connection(pchan);
7267 bacpy(&chan->src, &hcon->src);
7268 bacpy(&chan->dst, &hcon->dst);
7269 chan->src_type = bdaddr_src_type(hcon);
7270 chan->dst_type = dst_type;
7272 __l2cap_chan_add(conn, chan);
7275 l2cap_chan_unlock(pchan);
7277 next = l2cap_global_fixed_chan(pchan, hcon);
7278 l2cap_chan_put(pchan);
7282 l2cap_conn_ready(conn);
7285 int l2cap_disconn_ind(struct hci_conn *hcon)
7287 struct l2cap_conn *conn = hcon->l2cap_data;
7289 BT_DBG("hcon %p", hcon);
7292 return HCI_ERROR_REMOTE_USER_TERM;
7293 return conn->disc_reason;
7296 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7298 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7301 BT_DBG("hcon %p reason %d", hcon, reason);
7303 l2cap_conn_del(hcon, bt_to_errno(reason));
7306 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7308 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7311 if (encrypt == 0x00) {
7312 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7313 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7314 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7315 chan->sec_level == BT_SECURITY_FIPS)
7316 l2cap_chan_close(chan, ECONNREFUSED);
7318 if (chan->sec_level == BT_SECURITY_MEDIUM)
7319 __clear_chan_timer(chan);
7323 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7325 struct l2cap_conn *conn = hcon->l2cap_data;
7326 struct l2cap_chan *chan;
7331 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7333 mutex_lock(&conn->chan_lock);
7335 list_for_each_entry(chan, &conn->chan_l, list) {
7336 l2cap_chan_lock(chan);
7338 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7339 state_to_string(chan->state));
7341 if (!status && encrypt)
7342 chan->sec_level = hcon->sec_level;
7344 if (!__l2cap_no_conn_pending(chan)) {
7345 l2cap_chan_unlock(chan);
7349 if (!status && (chan->state == BT_CONNECTED ||
7350 chan->state == BT_CONFIG)) {
7351 chan->ops->resume(chan);
7352 l2cap_check_encryption(chan, encrypt);
7353 l2cap_chan_unlock(chan);
7357 if (chan->state == BT_CONNECT) {
7358 if (!status && l2cap_check_enc_key_size(hcon))
7359 l2cap_start_connection(chan);
7361 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7362 } else if (chan->state == BT_CONNECT2 &&
7363 !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
7364 chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
7365 struct l2cap_conn_rsp rsp;
7368 if (!status && l2cap_check_enc_key_size(hcon)) {
7369 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7370 res = L2CAP_CR_PEND;
7371 stat = L2CAP_CS_AUTHOR_PEND;
7372 chan->ops->defer(chan);
7374 l2cap_state_change(chan, BT_CONFIG);
7375 res = L2CAP_CR_SUCCESS;
7376 stat = L2CAP_CS_NO_INFO;
7379 l2cap_state_change(chan, BT_DISCONN);
7380 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7381 res = L2CAP_CR_SEC_BLOCK;
7382 stat = L2CAP_CS_NO_INFO;
7385 rsp.scid = cpu_to_le16(chan->dcid);
7386 rsp.dcid = cpu_to_le16(chan->scid);
7387 rsp.result = cpu_to_le16(res);
7388 rsp.status = cpu_to_le16(stat);
7389 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7392 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7393 res == L2CAP_CR_SUCCESS) {
7395 set_bit(CONF_REQ_SENT, &chan->conf_state);
7396 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7398 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7400 chan->num_conf_req++;
7404 l2cap_chan_unlock(chan);
7407 mutex_unlock(&conn->chan_lock);
7410 /* Append fragment into frame respecting the maximum len of rx_skb */
7411 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
7414 if (!conn->rx_skb) {
7415 /* Allocate skb for the complete frame (with header) */
7416 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7423 /* Copy as much as the rx_skb can hold */
7424 len = min_t(u16, len, skb->len);
7425 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7427 conn->rx_len -= len;
7432 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
7434 struct sk_buff *rx_skb;
7437 /* Append just enough to complete the header */
7438 len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
7440 /* If the header could not be read, just continue */
7441 if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
7444 rx_skb = conn->rx_skb;
7445 len = get_unaligned_le16(rx_skb->data);
7447 /* Check if rx_skb has enough space to receive all fragments */
7448 if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
7449 /* Update expected len */
7450 conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
7451 return L2CAP_LEN_SIZE;
7454 /* Reset conn->rx_skb since it will need to be reallocated in order to
7455 * fit all fragments.
7457 conn->rx_skb = NULL;
7459 /* Reallocate rx_skb using the exact expected length */
7460 len = l2cap_recv_frag(conn, rx_skb,
7461 len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
7467 static void l2cap_recv_reset(struct l2cap_conn *conn)
7469 kfree_skb(conn->rx_skb);
7470 conn->rx_skb = NULL;
7474 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7476 struct l2cap_conn *conn = hcon->l2cap_data;
7480 conn = l2cap_conn_add(hcon);
7485 BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
7489 case ACL_START_NO_FLUSH:
7492 BT_ERR("Unexpected start frame (len %d)", skb->len);
7493 l2cap_recv_reset(conn);
7494 l2cap_conn_unreliable(conn, ECOMM);
7497 /* Start fragment may not contain the L2CAP length so just
7498 * copy the initial byte when that happens and use conn->mtu as
7499 * the expected length.
7500 */
7501 if (skb->len < L2CAP_LEN_SIZE) {
7502 l2cap_recv_frag(conn, skb, conn->mtu);
7506 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
7508 if (len == skb->len) {
7509 /* Complete frame received */
7510 l2cap_recv_frame(conn, skb);
7514 BT_DBG("Start: total len %d, frag len %u", len, skb->len);
7516 if (skb->len > len) {
7517 BT_ERR("Frame is too long (len %u, expected len %d)",
7519 l2cap_conn_unreliable(conn, ECOMM);
7523 /* Append fragment into frame (with header) */
7524 if (l2cap_recv_frag(conn, skb, len) < 0)
7530 BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
7532 if (!conn->rx_skb) {
7533 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7534 l2cap_conn_unreliable(conn, ECOMM);
7538 /* Complete the L2CAP length if it has not been read */
7539 if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
7540 if (l2cap_recv_len(conn, skb) < 0) {
7541 l2cap_conn_unreliable(conn, ECOMM);
7545 /* If the header still could not be read, just continue */
7546 if (conn->rx_skb->len < L2CAP_LEN_SIZE)
7550 if (skb->len > conn->rx_len) {
7551 BT_ERR("Fragment is too long (len %u, expected %u)",
7552 skb->len, conn->rx_len);
7553 l2cap_recv_reset(conn);
7554 l2cap_conn_unreliable(conn, ECOMM);
7558 /* Append fragment into frame (with header) */
7559 l2cap_recv_frag(conn, skb, skb->len);
7561 if (!conn->rx_len) {
7562 /* Complete frame received. l2cap_recv_frame
7563 * takes ownership of the skb so set the global
7564 * rx_skb pointer to NULL first.
7566 struct sk_buff *rx_skb = conn->rx_skb;
7567 conn->rx_skb = NULL;
7568 l2cap_recv_frame(conn, rx_skb);
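/* A minimal illustrative sketch (guarded out of the build) of the frame
 * sizing used in the reassembly above: a basic header advertising
 * len = 100 means the complete frame is 100 + L2CAP_HDR_SIZE = 104 bytes,
 * so an ACL_START fragment of 60 bytes leaves rx_len = 44 bytes to
 * collect from ACL_CONT fragments before l2cap_recv_frame() runs.
 */
#if 0
static u16 l2cap_frame_total_len(u16 hdr_len)
{
	return hdr_len + L2CAP_HDR_SIZE;
}
#endif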
7577 static struct hci_cb l2cap_cb = {
7579 .connect_cfm = l2cap_connect_cfm,
7580 .disconn_cfm = l2cap_disconn_cfm,
7581 .security_cfm = l2cap_security_cfm,
7584 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7586 struct l2cap_chan *c;
7588 read_lock(&chan_list_lock);
7590 list_for_each_entry(c, &chan_list, global_l) {
7591 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7592 &c->src, c->src_type, &c->dst, c->dst_type,
7593 c->state, __le16_to_cpu(c->psm),
7594 c->scid, c->dcid, c->imtu, c->omtu,
7595 c->sec_level, c->mode);
7598 read_unlock(&chan_list_lock);
7603 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
7605 static struct dentry *l2cap_debugfs;
7607 int __init l2cap_init(void)
7611 err = l2cap_init_sockets();
7615 hci_register_cb(&l2cap_cb);
7617 if (IS_ERR_OR_NULL(bt_debugfs))
7620 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7621 NULL, &l2cap_debugfs_fops);
7626 void l2cap_exit(void)
7628 debugfs_remove(l2cap_debugfs);
7629 hci_unregister_cb(&l2cap_cb);
7630 l2cap_cleanup_sockets();
7633 module_param(disable_ertm, bool, 0644);
7634 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7636 module_param(enable_ecred, bool, 0644);
7637 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");