Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at master 7835 lines 189 kB view raw
1/* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (C) 2000-2001 Qualcomm Incorporated 4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> 5 Copyright (C) 2010 Google Inc. 6 Copyright (C) 2011 ProFUSION Embedded Systems 7 Copyright (c) 2012 Code Aurora Forum. All rights reserved. 8 9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 10 11 This program is free software; you can redistribute it and/or modify 12 it under the terms of the GNU General Public License version 2 as 13 published by the Free Software Foundation; 14 15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 23 24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 26 SOFTWARE IS DISCLAIMED. 27*/ 28 29/* Bluetooth L2CAP core. 
 */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>
#include <linux/filter.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

#define LE_FLOWCTL_MAX_CREDITS 65535

bool disable_ertm;
bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
static void l2cap_retrans_timeout(struct work_struct *work);
static void l2cap_monitor_timeout(struct work_struct *work);
static void l2cap_ack_timeout(struct work_struct *work);

/* Translate an HCI link type plus HCI address type into the BDADDR_*
 * address type exposed outside the HCI layer.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type == LE_LINK) {
		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

/* Address type of the local (source) side of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}

/* Address type of the remote (destination) side of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}

/* ---- L2CAP channels ---- */

/* Lookup by destination CID. No reference is taken on the returned
 * channel; callers are expected to hold conn->lock (not asserted here —
 * NOTE(review): confirm against callers).
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

/* Lookup by source CID. Same locking/refcount caveats as the DCID
 * variant above.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}

/* Find channel with given DCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}

/* Lookup by pending signalling command ident; no reference taken */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

/* Global lookup of a channel bound to @psm on source address @src.
 * BR/EDR and LE entries never match each other since their PSM
 * namespaces are distinct. Caller must hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
			continue;

		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}

/* Bind @chan to @psm, or auto-allocate a dynamic PSM when @psm is 0
 * (BR/EDR walks odd PSMs in steps of 2; LE walks its dynamic range in
 * steps of 1). Returns 0 on success, -EADDRINUSE when the requested
 * PSM is already bound, or -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);

/* Bind a fixed-channel @chan to source CID @scid; always succeeds */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

/* Pick the first unused dynamic CID on @conn, honouring the smaller
 * LE dynamic range; returns 0 when the range is exhausted.
 */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}

/* Move @chan to @state and notify the channel owner (no error) */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

/* Move @chan to @state and report @err to the channel owner */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

/* Report @err to the channel owner without changing state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
/* (Re)arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (the monitor timer takes over once a poll is
 * outstanding) or no retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

/* Switch from the retransmission timer to the monitor timer */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

/* Linear scan of @head for the skb carrying ERTM tx sequence @seq */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */

static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

/* Remove and return the sequence number at the head of the list; the
 * list is reset to empty when the tail marker is popped.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

/* Append @seq to the tail; duplicates already in the list are ignored */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}

/* Channel timer expiry (delayed work): close the channel, with the
 * error code derived from the state it was stuck in. The reference
 * dropped at the end was taken by __set_chan_timer() (see comment
 * below).
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}

/* Allocate and initialise a new channel in BT_OPEN state with one
 * reference held (kref_init), and link it on the global channel list.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc_obj(*chan, GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);

/* kref release callback: unlink from the global list and free */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

/* Take an additional reference on @c */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
EXPORT_SYMBOL_GPL(l2cap_chan_hold);

/* Take a reference only if the refcount has not already dropped to
 * zero; returns NULL when the channel is already being destroyed.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}

/* Drop a reference; frees the channel when it was the last one */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);

/* Reset @chan to the default (connection-oriented) parameters used
 * before any configuration negotiation has happened.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);

/* Number of LE flow-control credits to hand out, based on how much
 * receive buffer space is known to be available (rx_avail == -1 means
 * unknown). Returns 0 when no credits should be granted.
 */
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}

/* Initialise LE credit-based flow control state for @chan */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}

/* Like l2cap_le_flowctl_init() but enforcing the ECRED minimum MPS */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}

/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type, take a channel reference (and usually an hci_conn reference),
 * and append to the connection's channel list. Caller must hold
 * conn->lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}

/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}

/* Detach @chan from its connection, tear down the owner, and purge
 * mode-specific queues/timers. @err is reported to the owner. Skips
 * the mode cleanup while configuration never completed.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);

/* Invoke @func on every channel whose pending command ident matches
 * @id; safe against @func removing the channel from the list.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}

/* Invoke @func on every channel of @conn; caller holds conn->lock */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}

/* Locked wrapper around __l2cap_chan_list(); tolerates a NULL @conn */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);

/* Deferred work: refresh each channel's destination address/type from
 * the underlying hci_conn (e.g. after identity address resolution).
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}

/* Reject a pending LE credit-based connection request, picking the
 * result code from whether DEFER_SETUP (authorization) was in effect.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

/* Reject a pending ECRED connection via the deferred response path */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}

/* Reject a pending BR/EDR connection request */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}

/* Close @chan with @reason, taking the action appropriate for its
 * current state: send a disconnect request, reject a half-open
 * incoming connection, or just tear down/delete.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);

/* Map the channel type / PSM / security level onto the HCI
 * authentication requirement used for pairing. SDP channels at LOW
 * security are bumped to BT_SECURITY_SDP as a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}

/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}

/* Allocate the next free signalling command ident for @conn (cyclic,
 * via an IDA); returns 0 only in the pathological all-in-use case.
 */
static int l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 max;
	int ident;

	/* LE link does not support tools like l2ping so use the full range */
	if (conn->hcon->type == LE_LINK)
		max = 255;
	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */
	else
		max = 128;

	/* Allocate ident using min as last used + 1 (cyclic) */
	ident = ida_alloc_range(&conn->tx_ida, READ_ONCE(conn->tx_ident) + 1,
				max, GFP_ATOMIC);
	/* Force min 1 to start over */
	if (ident <= 0) {
		ident = ida_alloc_range(&conn->tx_ida, 1, max, GFP_ATOMIC);
		if (ident <= 0) {
			/* If all idents are in use, log an error, this is
			 * extremely unlikely to happen and would indicate a bug
			 * in the code that idents are not being freed properly.
			 */
			BT_ERR("Unable to allocate ident: %d", ident);
			return 0;
		}
	}

	WRITE_ONCE(conn->tx_ident, ident);

	return ident;
}

/* Hand @skb to HCI, or drop it when the hci_conn is already gone */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}

/* Build and send an L2CAP signalling command at maximum priority */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}

/* Transmit a data skb on @chan, selecting the ACL flush flags */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}

/* Decode a 16-bit enhanced ERTM control field into @control */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

/* Decode a 32-bit extended ERTM control field into @control */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1070static inline void __unpack_control(struct l2cap_chan *chan, 1071 struct sk_buff *skb) 1072{ 1073 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { 1074 __unpack_extended_control(get_unaligned_le32(skb->data), 1075 &bt_cb(skb)->l2cap); 1076 skb_pull(skb, L2CAP_EXT_CTRL_SIZE); 1077 } else { 1078 __unpack_enhanced_control(get_unaligned_le16(skb->data), 1079 &bt_cb(skb)->l2cap); 1080 skb_pull(skb, L2CAP_ENH_CTRL_SIZE); 1081 } 1082} 1083 1084static u32 __pack_extended_control(struct l2cap_ctrl *control) 1085{ 1086 u32 packed; 1087 1088 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT; 1089 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT; 1090 1091 if (control->sframe) { 1092 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT; 1093 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT; 1094 packed |= L2CAP_EXT_CTRL_FRAME_TYPE; 1095 } else { 1096 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT; 1097 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT; 1098 } 1099 1100 return packed; 1101} 1102 1103static u16 __pack_enhanced_control(struct l2cap_ctrl *control) 1104{ 1105 u16 packed; 1106 1107 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT; 1108 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT; 1109 1110 if (control->sframe) { 1111 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT; 1112 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT; 1113 packed |= L2CAP_CTRL_FRAME_TYPE; 1114 } else { 1115 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT; 1116 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT; 1117 } 1118 1119 return packed; 1120} 1121 1122static inline void __pack_control(struct l2cap_chan *chan, 1123 struct l2cap_ctrl *control, 1124 struct sk_buff *skb) 1125{ 1126 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { 1127 put_unaligned_le32(__pack_extended_control(control), 1128 skb->data + L2CAP_HDR_SIZE); 1129 } else { 1130 put_unaligned_le16(__pack_enhanced_control(control), 1131 skb->data + L2CAP_HDR_SIZE); 1132 } 1133} 1134 1135static inline 
unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
	/* Header size depends on whether extended (32-bit) or enhanced
	 * (16-bit) control fields are in use on this channel.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}

/* Allocate and fully populate an S-frame PDU (header, control field and
 * optional FCS) for the given pre-packed control field.  Returns an
 * ERR_PTR on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header + control field */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}

/* Build and transmit an S-frame, updating the channel's RNR/F-bit and
 * acknowledgment bookkeeping as a side effect.  No-op if the control
 * structure does not describe an S-frame.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on a non-poll frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* This frame acknowledges up to reqseq, so the pending
		 * ack timer can be cancelled.
		 */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}

/* Send an RR (or RNR when locally busy) supervisory frame, optionally
 * with the poll bit set.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}

/* True when no connect request is outstanding for this channel
 * (always true for channels that are not connection oriented).
 */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return true;

	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}

/* Send an L2CAP Connection Request for this channel and mark the
 * connect as pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}

static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Without TX credits the peer cannot receive yet, so start
		 * with the channel suspended.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}

/* Send an LE Credit Based Connection Request for this channel.  Only
 * sent once per channel (guarded by FLAG_LE_CONN_REQ_SENT).
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}

/* Accumulator used to batch up to 5 channels into one enhanced credit
 * based (ECRED) connection request PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* owner PID used for matching */
	int count;			/* number of scids filled in pdu */
};

/* Channel-list callback: fold matching deferred channels into the
 * pending ECRED connection request being built in *data.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}

/* Send an ECRED connection request for this channel, batching in any
 * sibling channels that were deferred with the same PID/PSM.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm = chan->psm;
	data.pdu.req.mtu = cpu_to_le16(chan->imtu);
	data.pdu.req.mps = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0] = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU length covers the header plus one scid per batched channel */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}

/* Drive LE channel establishment: elevate security first, then either
 * mark fixed channels (no PSM) ready or send the appropriate
 * (ECRED or LE credit based) connection request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}

/* Transport dispatch: LE links go through the LE start path, BR/EDR
 * links send a classic connection request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}

static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct
l2cap_info_req req;

	/* Only one feature-mask information request per connection */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}

static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
				     struct l2cap_chan *chan)
{
	/* The minimum encryption key size needs to be enforced by the
	 * host stack before establishing any L2CAP connections. The
	 * specification in theory allows a minimum of 1, but to align
	 * BR/EDR and LE transports, a minimum of 7 is chosen.
	 *
	 * This check might also be called for unencrypted connections
	 * that have no key size requirements. Ensure that the link is
	 * actually encrypted before enforcing a key size.
	 */
	int min_key_size = hcon->hdev->min_enc_key_size;

	/* On FIPS security level, key size must be 16 bytes */
	if (chan->sec_level == BT_SECURITY_FIPS)
		min_key_size = 16;

	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
		hcon->enc_key_size >= min_key_size);
}

/* Kick off channel establishment once the connection-level
 * prerequisites (feature-mask exchange, security, encryption key size)
 * are satisfied.  May defer by triggering the info request first.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}

/* Non-zero when the requested channel mode is supported by both the
 * remote feature mask and our local one (ERTM/streaming may be disabled
 * via the disable_ertm module parameter).
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}

/* Send a Disconnection Request for the channel, stop any ERTM timers,
 * and move the channel to BT_DISCONN recording err as the reason.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}

/* ---- L2CAP connections ---- */
/* Walk all channels on the connection and advance each one's
 * establishment state machine (typically called once the feature-mask
 * exchange has completed).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection awaiting our response */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Success response sent: start configuration */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}

/* LE-link specific post-connection work: trigger pending pairing for
 * outgoing connections and, as peripheral, request a connection
 * parameter update when the current interval is out of range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}

/* Called when the underlying link comes up: advance every channel on
 * the connection and release any queued RX work.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}

/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}
}

/* Info-request timeout worker: treat the feature-mask exchange as done
 * and continue channel establishment anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}

/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */

/* Register an l2cap_user on the connection.  Returns 0 on success,
 * -EINVAL if the user is already linked, -ENODEV if the connection has
 * been torn down, or the error from the user's ->probe callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects under conn->lock, and we use the same lock here
	 * to protect access to conn->users and conn->hchan.
	 */

	mutex_lock(&conn->lock);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	mutex_unlock(&conn->lock);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);

/* Unregister a previously registered l2cap_user; safe to call if the
 * user was already removed (the empty-list check makes it idempotent).
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	mutex_lock(&conn->lock);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	mutex_unlock(&conn->lock);
}
EXPORT_SYMBOL(l2cap_unregister_user);

/* Remove every registered user, invoking each ->remove callback.
 * Caller is expected to hold conn->lock (called from l2cap_conn_del()
 * under that lock).
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}

/* Tear down the L2CAP connection attached to hcon: stop timers and
 * pending RX work, unregister users, close every channel with err as
 * the reason, and drop the connection's reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	disable_delayed_work_sync(&conn->info_timer);
	disable_delayed_work_sync(&conn->id_addr_timer);

	mutex_lock(&conn->lock);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	ida_destroy(&conn->tx_ida);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	hci_chan_del(conn->hchan);
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}

/* kref release callback: drop the hci_conn reference and free the
 * connection object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}

/* Take a reference on the connection and return it for call chaining */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);

/* Drop a reference; frees the connection when the count hits zero */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);

/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}

/* ERTM monitor timer worker: feeds a MONITOR_TO event into the TX state
 * machine, or just drops the timer's channel reference if the channel
 * has lost its connection.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* ERTM retransmission timer worker: feeds a RETRANS_TO event into the
 * TX state machine.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

/* Streaming-mode transmit: number, checksum and send every queued
 * I-frame immediately (no acknowledgements or retransmission).
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}

/* ERTM transmit: send queued I-frames while the remote TX window has
 * room and the TX state machine allows it.  Returns the number of
 * frames sent, 0 when the remote is busy, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}

/* Retransmit every I-frame whose sequence number is queued on
 * retrans_list, refreshing control field and FCS, and disconnecting if
 * a frame exceeds the max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}

/* Queue the frame named by control->reqseq for retransmission and run
 * the resend machinery (used for SREJ-style single-frame recovery).
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}

/* Rebuild the retransmission list with every unacked frame starting at
 * control->reqseq and resend them (used for REJ-style go-back-N
 * recovery).  A poll request schedules an F-bit on the next frame.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip ahead to the first frame to retransmit, then queue
		 * everything up to (not including) tx_send_head.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}

/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * piggyback the ack on pending I-frames if possible, send an explicit
 * RR once enough frames are outstanding, or re-arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}

/* Copy len bytes of user data from msg into skb: the first count bytes
 * go into skb's linear area, the remainder into MTU-sized fragments
 * chained on frag_list.  Returns bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account fragment bytes on the parent skb */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}

/* Build a connectionless (PSM-prefixed) PDU from user data in msg.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

/* Build a basic-mode PDU (plain L2CAP header, no control field) from
 * user data in msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

/* Build an ERTM/streaming I-frame PDU: header, placeholder control
 * field, optional SDU length (on the first segment of a segmented SDU)
 * and room for the FCS.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb =
chan->ops->alloc_skb(chan, hlen, count, 2341 msg->msg_flags & MSG_DONTWAIT); 2342 if (IS_ERR(skb)) 2343 return skb; 2344 2345 /* Create L2CAP header */ 2346 lh = skb_put(skb, L2CAP_HDR_SIZE); 2347 lh->cid = cpu_to_le16(chan->dcid); 2348 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 2349 2350 /* Control header is populated later */ 2351 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 2352 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); 2353 else 2354 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); 2355 2356 if (sdulen) 2357 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); 2358 2359 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); 2360 if (unlikely(err < 0)) { 2361 kfree_skb(skb); 2362 return ERR_PTR(err); 2363 } 2364 2365 bt_cb(skb)->l2cap.fcs = chan->fcs; 2366 bt_cb(skb)->l2cap.retries = 0; 2367 return skb; 2368} 2369 2370static int l2cap_segment_sdu(struct l2cap_chan *chan, 2371 struct sk_buff_head *seg_queue, 2372 struct msghdr *msg, size_t len) 2373{ 2374 struct sk_buff *skb; 2375 u16 sdu_len; 2376 size_t pdu_len; 2377 u8 sar; 2378 2379 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); 2380 2381 /* It is critical that ERTM PDUs fit in a single HCI fragment, 2382 * so fragmented skbs are not used. The HCI layer's handling 2383 * of fragmented skbs is not compatible with ERTM's queueing. 2384 */ 2385 2386 /* PDU size is derived from the HCI MTU */ 2387 pdu_len = chan->conn->mtu; 2388 2389 /* Constrain PDU size for BR/EDR connections */ 2390 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); 2391 2392 /* Adjust for largest possible L2CAP overhead. 
*/ 2393 if (chan->fcs) 2394 pdu_len -= L2CAP_FCS_SIZE; 2395 2396 pdu_len -= __ertm_hdr_size(chan); 2397 2398 /* Remote device may have requested smaller PDUs */ 2399 pdu_len = min_t(size_t, pdu_len, chan->remote_mps); 2400 2401 if (!pdu_len) 2402 return -EINVAL; 2403 2404 if (len <= pdu_len) { 2405 sar = L2CAP_SAR_UNSEGMENTED; 2406 sdu_len = 0; 2407 pdu_len = len; 2408 } else { 2409 sar = L2CAP_SAR_START; 2410 sdu_len = len; 2411 } 2412 2413 while (len > 0) { 2414 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len); 2415 2416 if (IS_ERR(skb)) { 2417 __skb_queue_purge(seg_queue); 2418 return PTR_ERR(skb); 2419 } 2420 2421 bt_cb(skb)->l2cap.sar = sar; 2422 __skb_queue_tail(seg_queue, skb); 2423 2424 len -= pdu_len; 2425 if (sdu_len) 2426 sdu_len = 0; 2427 2428 if (len <= pdu_len) { 2429 sar = L2CAP_SAR_END; 2430 pdu_len = len; 2431 } else { 2432 sar = L2CAP_SAR_CONTINUE; 2433 } 2434 } 2435 2436 return 0; 2437} 2438 2439static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan, 2440 struct msghdr *msg, 2441 size_t len, u16 sdulen) 2442{ 2443 struct l2cap_conn *conn = chan->conn; 2444 struct sk_buff *skb; 2445 int err, count, hlen; 2446 struct l2cap_hdr *lh; 2447 2448 BT_DBG("chan %p len %zu", chan, len); 2449 2450 if (!conn) 2451 return ERR_PTR(-ENOTCONN); 2452 2453 hlen = L2CAP_HDR_SIZE; 2454 2455 if (sdulen) 2456 hlen += L2CAP_SDULEN_SIZE; 2457 2458 count = min_t(unsigned int, (conn->mtu - hlen), len); 2459 2460 skb = chan->ops->alloc_skb(chan, hlen, count, 2461 msg->msg_flags & MSG_DONTWAIT); 2462 if (IS_ERR(skb)) 2463 return skb; 2464 2465 /* Create L2CAP header */ 2466 lh = skb_put(skb, L2CAP_HDR_SIZE); 2467 lh->cid = cpu_to_le16(chan->dcid); 2468 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 2469 2470 if (sdulen) 2471 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); 2472 2473 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); 2474 if (unlikely(err < 0)) { 2475 kfree_skb(skb); 2476 return ERR_PTR(err); 2477 } 
2478 2479 return skb; 2480} 2481 2482static int l2cap_segment_le_sdu(struct l2cap_chan *chan, 2483 struct sk_buff_head *seg_queue, 2484 struct msghdr *msg, size_t len) 2485{ 2486 struct sk_buff *skb; 2487 size_t pdu_len; 2488 u16 sdu_len; 2489 2490 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); 2491 2492 sdu_len = len; 2493 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE; 2494 2495 while (len > 0) { 2496 if (len <= pdu_len) 2497 pdu_len = len; 2498 2499 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len); 2500 if (IS_ERR(skb)) { 2501 __skb_queue_purge(seg_queue); 2502 return PTR_ERR(skb); 2503 } 2504 2505 __skb_queue_tail(seg_queue, skb); 2506 2507 len -= pdu_len; 2508 2509 if (sdu_len) { 2510 sdu_len = 0; 2511 pdu_len += L2CAP_SDULEN_SIZE; 2512 } 2513 } 2514 2515 return 0; 2516} 2517 2518static void l2cap_le_flowctl_send(struct l2cap_chan *chan) 2519{ 2520 int sent = 0; 2521 2522 BT_DBG("chan %p", chan); 2523 2524 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) { 2525 l2cap_do_send(chan, skb_dequeue(&chan->tx_q)); 2526 chan->tx_credits--; 2527 sent++; 2528 } 2529 2530 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits, 2531 skb_queue_len(&chan->tx_q)); 2532} 2533 2534static void l2cap_tx_timestamp(struct sk_buff *skb, 2535 const struct sockcm_cookie *sockc, 2536 size_t len) 2537{ 2538 struct sock *sk = skb ? skb->sk : NULL; 2539 2540 if (sk && sk->sk_type == SOCK_STREAM) 2541 hci_setup_tx_timestamp(skb, len, sockc); 2542 else 2543 hci_setup_tx_timestamp(skb, 1, sockc); 2544} 2545 2546static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue, 2547 const struct sockcm_cookie *sockc, 2548 size_t len) 2549{ 2550 struct sk_buff *skb = skb_peek(queue); 2551 struct sock *sk = skb ? 
skb->sk : NULL; 2552 2553 if (sk && sk->sk_type == SOCK_STREAM) 2554 l2cap_tx_timestamp(skb_peek_tail(queue), sockc, len); 2555 else 2556 l2cap_tx_timestamp(skb, sockc, len); 2557} 2558 2559int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 2560 const struct sockcm_cookie *sockc) 2561{ 2562 struct sk_buff *skb; 2563 int err; 2564 struct sk_buff_head seg_queue; 2565 2566 if (!chan->conn) 2567 return -ENOTCONN; 2568 2569 /* Connectionless channel */ 2570 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 2571 skb = l2cap_create_connless_pdu(chan, msg, len); 2572 if (IS_ERR(skb)) 2573 return PTR_ERR(skb); 2574 2575 l2cap_tx_timestamp(skb, sockc, len); 2576 2577 l2cap_do_send(chan, skb); 2578 return len; 2579 } 2580 2581 switch (chan->mode) { 2582 case L2CAP_MODE_LE_FLOWCTL: 2583 case L2CAP_MODE_EXT_FLOWCTL: 2584 /* Check outgoing MTU */ 2585 if (len > chan->omtu) 2586 return -EMSGSIZE; 2587 2588 __skb_queue_head_init(&seg_queue); 2589 2590 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len); 2591 2592 if (chan->state != BT_CONNECTED) { 2593 __skb_queue_purge(&seg_queue); 2594 err = -ENOTCONN; 2595 } 2596 2597 if (err) 2598 return err; 2599 2600 l2cap_tx_timestamp_seg(&seg_queue, sockc, len); 2601 2602 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); 2603 2604 l2cap_le_flowctl_send(chan); 2605 2606 if (!chan->tx_credits) 2607 chan->ops->suspend(chan); 2608 2609 err = len; 2610 2611 break; 2612 2613 case L2CAP_MODE_BASIC: 2614 /* Check outgoing MTU */ 2615 if (len > chan->omtu) 2616 return -EMSGSIZE; 2617 2618 /* Create a basic PDU */ 2619 skb = l2cap_create_basic_pdu(chan, msg, len); 2620 if (IS_ERR(skb)) 2621 return PTR_ERR(skb); 2622 2623 l2cap_tx_timestamp(skb, sockc, len); 2624 2625 l2cap_do_send(chan, skb); 2626 err = len; 2627 break; 2628 2629 case L2CAP_MODE_ERTM: 2630 case L2CAP_MODE_STREAMING: 2631 /* Check outgoing MTU */ 2632 if (len > chan->omtu) { 2633 err = -EMSGSIZE; 2634 break; 2635 } 2636 2637 
__skb_queue_head_init(&seg_queue); 2638 2639 /* Do segmentation before calling in to the state machine, 2640 * since it's possible to block while waiting for memory 2641 * allocation. 2642 */ 2643 err = l2cap_segment_sdu(chan, &seg_queue, msg, len); 2644 2645 if (err) 2646 break; 2647 2648 if (chan->mode == L2CAP_MODE_ERTM) { 2649 /* TODO: ERTM mode timestamping */ 2650 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); 2651 } else { 2652 l2cap_tx_timestamp_seg(&seg_queue, sockc, len); 2653 l2cap_streaming_send(chan, &seg_queue); 2654 } 2655 2656 err = len; 2657 2658 /* If the skbs were not queued for sending, they'll still be in 2659 * seg_queue and need to be purged. 2660 */ 2661 __skb_queue_purge(&seg_queue); 2662 break; 2663 2664 default: 2665 BT_DBG("bad state %1.1x", chan->mode); 2666 err = -EBADFD; 2667 } 2668 2669 return err; 2670} 2671EXPORT_SYMBOL_GPL(l2cap_chan_send); 2672 2673static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) 2674{ 2675 struct l2cap_ctrl control; 2676 u16 seq; 2677 2678 BT_DBG("chan %p, txseq %u", chan, txseq); 2679 2680 memset(&control, 0, sizeof(control)); 2681 control.sframe = 1; 2682 control.super = L2CAP_SUPER_SREJ; 2683 2684 for (seq = chan->expected_tx_seq; seq != txseq; 2685 seq = __next_seq(chan, seq)) { 2686 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) { 2687 control.reqseq = seq; 2688 l2cap_send_sframe(chan, &control); 2689 l2cap_seq_list_append(&chan->srej_list, seq); 2690 } 2691 } 2692 2693 chan->expected_tx_seq = __next_seq(chan, txseq); 2694} 2695 2696static void l2cap_send_srej_tail(struct l2cap_chan *chan) 2697{ 2698 struct l2cap_ctrl control; 2699 2700 BT_DBG("chan %p", chan); 2701 2702 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR) 2703 return; 2704 2705 memset(&control, 0, sizeof(control)); 2706 control.sframe = 1; 2707 control.super = L2CAP_SUPER_SREJ; 2708 control.reqseq = chan->srej_list.tail; 2709 l2cap_send_sframe(chan, &control); 2710} 2711 2712static void l2cap_send_srej_list(struct 
l2cap_chan *chan, u16 txseq) 2713{ 2714 struct l2cap_ctrl control; 2715 u16 initial_head; 2716 u16 seq; 2717 2718 BT_DBG("chan %p, txseq %u", chan, txseq); 2719 2720 memset(&control, 0, sizeof(control)); 2721 control.sframe = 1; 2722 control.super = L2CAP_SUPER_SREJ; 2723 2724 /* Capture initial list head to allow only one pass through the list. */ 2725 initial_head = chan->srej_list.head; 2726 2727 do { 2728 seq = l2cap_seq_list_pop(&chan->srej_list); 2729 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR) 2730 break; 2731 2732 control.reqseq = seq; 2733 l2cap_send_sframe(chan, &control); 2734 l2cap_seq_list_append(&chan->srej_list, seq); 2735 } while (chan->srej_list.head != initial_head); 2736} 2737 2738static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq) 2739{ 2740 struct sk_buff *acked_skb; 2741 u16 ackseq; 2742 2743 BT_DBG("chan %p, reqseq %u", chan, reqseq); 2744 2745 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq) 2746 return; 2747 2748 BT_DBG("expected_ack_seq %u, unacked_frames %u", 2749 chan->expected_ack_seq, chan->unacked_frames); 2750 2751 for (ackseq = chan->expected_ack_seq; ackseq != reqseq; 2752 ackseq = __next_seq(chan, ackseq)) { 2753 2754 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq); 2755 if (acked_skb) { 2756 skb_unlink(acked_skb, &chan->tx_q); 2757 kfree_skb(acked_skb); 2758 chan->unacked_frames--; 2759 } 2760 } 2761 2762 chan->expected_ack_seq = reqseq; 2763 2764 if (chan->unacked_frames == 0) 2765 __clear_retrans_timer(chan); 2766 2767 BT_DBG("unacked_frames %u", chan->unacked_frames); 2768} 2769 2770static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan) 2771{ 2772 BT_DBG("chan %p", chan); 2773 2774 chan->expected_tx_seq = chan->buffer_seq; 2775 l2cap_seq_list_clear(&chan->srej_list); 2776 skb_queue_purge(&chan->srej_q); 2777 chan->rx_state = L2CAP_RX_STATE_RECV; 2778} 2779 2780static void l2cap_tx_state_xmit(struct l2cap_chan *chan, 2781 struct l2cap_ctrl *control, 2782 struct 
sk_buff_head *skbs, u8 event) 2783{ 2784 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, 2785 event); 2786 2787 switch (event) { 2788 case L2CAP_EV_DATA_REQUEST: 2789 if (chan->tx_send_head == NULL) 2790 chan->tx_send_head = skb_peek(skbs); 2791 2792 skb_queue_splice_tail_init(skbs, &chan->tx_q); 2793 l2cap_ertm_send(chan); 2794 break; 2795 case L2CAP_EV_LOCAL_BUSY_DETECTED: 2796 BT_DBG("Enter LOCAL_BUSY"); 2797 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 2798 2799 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { 2800 /* The SREJ_SENT state must be aborted if we are to 2801 * enter the LOCAL_BUSY state. 2802 */ 2803 l2cap_abort_rx_srej_sent(chan); 2804 } 2805 2806 l2cap_send_ack(chan); 2807 2808 break; 2809 case L2CAP_EV_LOCAL_BUSY_CLEAR: 2810 BT_DBG("Exit LOCAL_BUSY"); 2811 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 2812 2813 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { 2814 struct l2cap_ctrl local_control; 2815 2816 memset(&local_control, 0, sizeof(local_control)); 2817 local_control.sframe = 1; 2818 local_control.super = L2CAP_SUPER_RR; 2819 local_control.poll = 1; 2820 local_control.reqseq = chan->buffer_seq; 2821 l2cap_send_sframe(chan, &local_control); 2822 2823 chan->retry_count = 1; 2824 __set_monitor_timer(chan); 2825 chan->tx_state = L2CAP_TX_STATE_WAIT_F; 2826 } 2827 break; 2828 case L2CAP_EV_RECV_REQSEQ_AND_FBIT: 2829 l2cap_process_reqseq(chan, control->reqseq); 2830 break; 2831 case L2CAP_EV_EXPLICIT_POLL: 2832 l2cap_send_rr_or_rnr(chan, 1); 2833 chan->retry_count = 1; 2834 __set_monitor_timer(chan); 2835 __clear_ack_timer(chan); 2836 chan->tx_state = L2CAP_TX_STATE_WAIT_F; 2837 break; 2838 case L2CAP_EV_RETRANS_TO: 2839 l2cap_send_rr_or_rnr(chan, 1); 2840 chan->retry_count = 1; 2841 __set_monitor_timer(chan); 2842 chan->tx_state = L2CAP_TX_STATE_WAIT_F; 2843 break; 2844 case L2CAP_EV_RECV_FBIT: 2845 /* Nothing to process */ 2846 break; 2847 default: 2848 break; 2849 } 2850} 2851 2852static void 
l2cap_tx_state_wait_f(struct l2cap_chan *chan, 2853 struct l2cap_ctrl *control, 2854 struct sk_buff_head *skbs, u8 event) 2855{ 2856 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, 2857 event); 2858 2859 switch (event) { 2860 case L2CAP_EV_DATA_REQUEST: 2861 if (chan->tx_send_head == NULL) 2862 chan->tx_send_head = skb_peek(skbs); 2863 /* Queue data, but don't send. */ 2864 skb_queue_splice_tail_init(skbs, &chan->tx_q); 2865 break; 2866 case L2CAP_EV_LOCAL_BUSY_DETECTED: 2867 BT_DBG("Enter LOCAL_BUSY"); 2868 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 2869 2870 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { 2871 /* The SREJ_SENT state must be aborted if we are to 2872 * enter the LOCAL_BUSY state. 2873 */ 2874 l2cap_abort_rx_srej_sent(chan); 2875 } 2876 2877 l2cap_send_ack(chan); 2878 2879 break; 2880 case L2CAP_EV_LOCAL_BUSY_CLEAR: 2881 BT_DBG("Exit LOCAL_BUSY"); 2882 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 2883 2884 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { 2885 struct l2cap_ctrl local_control; 2886 memset(&local_control, 0, sizeof(local_control)); 2887 local_control.sframe = 1; 2888 local_control.super = L2CAP_SUPER_RR; 2889 local_control.poll = 1; 2890 local_control.reqseq = chan->buffer_seq; 2891 l2cap_send_sframe(chan, &local_control); 2892 2893 chan->retry_count = 1; 2894 __set_monitor_timer(chan); 2895 chan->tx_state = L2CAP_TX_STATE_WAIT_F; 2896 } 2897 break; 2898 case L2CAP_EV_RECV_REQSEQ_AND_FBIT: 2899 l2cap_process_reqseq(chan, control->reqseq); 2900 fallthrough; 2901 2902 case L2CAP_EV_RECV_FBIT: 2903 if (control && control->final) { 2904 __clear_monitor_timer(chan); 2905 if (chan->unacked_frames > 0) 2906 __set_retrans_timer(chan); 2907 chan->retry_count = 0; 2908 chan->tx_state = L2CAP_TX_STATE_XMIT; 2909 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state); 2910 } 2911 break; 2912 case L2CAP_EV_EXPLICIT_POLL: 2913 /* Ignore */ 2914 break; 2915 case L2CAP_EV_MONITOR_TO: 2916 if (chan->max_tx == 0 || 
chan->retry_count < chan->max_tx) { 2917 l2cap_send_rr_or_rnr(chan, 1); 2918 __set_monitor_timer(chan); 2919 chan->retry_count++; 2920 } else { 2921 l2cap_send_disconn_req(chan, ECONNABORTED); 2922 } 2923 break; 2924 default: 2925 break; 2926 } 2927} 2928 2929static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, 2930 struct sk_buff_head *skbs, u8 event) 2931{ 2932 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d", 2933 chan, control, skbs, event, chan->tx_state); 2934 2935 switch (chan->tx_state) { 2936 case L2CAP_TX_STATE_XMIT: 2937 l2cap_tx_state_xmit(chan, control, skbs, event); 2938 break; 2939 case L2CAP_TX_STATE_WAIT_F: 2940 l2cap_tx_state_wait_f(chan, control, skbs, event); 2941 break; 2942 default: 2943 /* Ignore event */ 2944 break; 2945 } 2946} 2947 2948static void l2cap_pass_to_tx(struct l2cap_chan *chan, 2949 struct l2cap_ctrl *control) 2950{ 2951 BT_DBG("chan %p, control %p", chan, control); 2952 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT); 2953} 2954 2955static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan, 2956 struct l2cap_ctrl *control) 2957{ 2958 BT_DBG("chan %p, control %p", chan, control); 2959 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT); 2960} 2961 2962/* Copy frame to all raw sockets on that connection */ 2963static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) 2964{ 2965 struct sk_buff *nskb; 2966 struct l2cap_chan *chan; 2967 2968 BT_DBG("conn %p", conn); 2969 2970 list_for_each_entry(chan, &conn->chan_l, list) { 2971 if (chan->chan_type != L2CAP_CHAN_RAW) 2972 continue; 2973 2974 /* Don't send frame to the channel it came from */ 2975 if (bt_cb(skb)->l2cap.chan == chan) 2976 continue; 2977 2978 nskb = skb_clone(skb, GFP_KERNEL); 2979 if (!nskb) 2980 continue; 2981 if (chan->ops->recv(chan, nskb)) 2982 kfree_skb(nskb); 2983 } 2984} 2985 2986/* ---- L2CAP signalling commands ---- */ 2987static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, 2988 u8 
ident, u16 dlen, void *data) 2989{ 2990 struct sk_buff *skb, **frag; 2991 struct l2cap_cmd_hdr *cmd; 2992 struct l2cap_hdr *lh; 2993 int len, count; 2994 2995 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u", 2996 conn, code, ident, dlen); 2997 2998 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE) 2999 return NULL; 3000 3001 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; 3002 count = min_t(unsigned int, conn->mtu, len); 3003 3004 skb = bt_skb_alloc(count, GFP_KERNEL); 3005 if (!skb) 3006 return NULL; 3007 3008 lh = skb_put(skb, L2CAP_HDR_SIZE); 3009 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); 3010 3011 if (conn->hcon->type == LE_LINK) 3012 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); 3013 else 3014 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); 3015 3016 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE); 3017 cmd->code = code; 3018 cmd->ident = ident; 3019 cmd->len = cpu_to_le16(dlen); 3020 3021 if (dlen) { 3022 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE; 3023 skb_put_data(skb, data, count); 3024 data += count; 3025 } 3026 3027 len -= skb->len; 3028 3029 /* Continuation fragments (no L2CAP header) */ 3030 frag = &skb_shinfo(skb)->frag_list; 3031 while (len) { 3032 count = min_t(unsigned int, conn->mtu, len); 3033 3034 *frag = bt_skb_alloc(count, GFP_KERNEL); 3035 if (!*frag) 3036 goto fail; 3037 3038 skb_put_data(*frag, data, count); 3039 3040 len -= count; 3041 data += count; 3042 3043 frag = &(*frag)->next; 3044 } 3045 3046 return skb; 3047 3048fail: 3049 kfree_skb(skb); 3050 return NULL; 3051} 3052 3053static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, 3054 unsigned long *val) 3055{ 3056 struct l2cap_conf_opt *opt = *ptr; 3057 int len; 3058 3059 len = L2CAP_CONF_OPT_SIZE + opt->len; 3060 *ptr += len; 3061 3062 *type = opt->type; 3063 *olen = opt->len; 3064 3065 switch (opt->len) { 3066 case 1: 3067 *val = *((u8 *) opt->val); 3068 break; 3069 3070 case 2: 3071 *val = get_unaligned_le16(opt->val); 3072 break; 3073 3074 case 4: 3075 *val 
= get_unaligned_le32(opt->val); 3076 break; 3077 3078 default: 3079 *val = (unsigned long) opt->val; 3080 break; 3081 } 3082 3083 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val); 3084 return len; 3085} 3086 3087static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size) 3088{ 3089 struct l2cap_conf_opt *opt = *ptr; 3090 3091 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val); 3092 3093 if (size < L2CAP_CONF_OPT_SIZE + len) 3094 return; 3095 3096 opt->type = type; 3097 opt->len = len; 3098 3099 switch (len) { 3100 case 1: 3101 *((u8 *) opt->val) = val; 3102 break; 3103 3104 case 2: 3105 put_unaligned_le16(val, opt->val); 3106 break; 3107 3108 case 4: 3109 put_unaligned_le32(val, opt->val); 3110 break; 3111 3112 default: 3113 memcpy(opt->val, (void *) val, len); 3114 break; 3115 } 3116 3117 *ptr += L2CAP_CONF_OPT_SIZE + len; 3118} 3119 3120static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size) 3121{ 3122 struct l2cap_conf_efs efs; 3123 3124 switch (chan->mode) { 3125 case L2CAP_MODE_ERTM: 3126 efs.id = chan->local_id; 3127 efs.stype = chan->local_stype; 3128 efs.msdu = cpu_to_le16(chan->local_msdu); 3129 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); 3130 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); 3131 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO); 3132 break; 3133 3134 case L2CAP_MODE_STREAMING: 3135 efs.id = 1; 3136 efs.stype = L2CAP_SERV_BESTEFFORT; 3137 efs.msdu = cpu_to_le16(chan->local_msdu); 3138 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); 3139 efs.acc_lat = 0; 3140 efs.flush_to = 0; 3141 break; 3142 3143 default: 3144 return; 3145 } 3146 3147 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), 3148 (unsigned long) &efs, size); 3149} 3150 3151static void l2cap_ack_timeout(struct work_struct *work) 3152{ 3153 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 3154 ack_timer.work); 3155 u16 frames_to_ack; 3156 3157 BT_DBG("chan %p", chan); 3158 
3159 l2cap_chan_lock(chan); 3160 3161 frames_to_ack = __seq_offset(chan, chan->buffer_seq, 3162 chan->last_acked_seq); 3163 3164 if (frames_to_ack) 3165 l2cap_send_rr_or_rnr(chan, 0); 3166 3167 l2cap_chan_unlock(chan); 3168 l2cap_chan_put(chan); 3169} 3170 3171int l2cap_ertm_init(struct l2cap_chan *chan) 3172{ 3173 int err; 3174 3175 chan->next_tx_seq = 0; 3176 chan->expected_tx_seq = 0; 3177 chan->expected_ack_seq = 0; 3178 chan->unacked_frames = 0; 3179 chan->buffer_seq = 0; 3180 chan->frames_sent = 0; 3181 chan->last_acked_seq = 0; 3182 chan->sdu = NULL; 3183 chan->sdu_last_frag = NULL; 3184 chan->sdu_len = 0; 3185 3186 skb_queue_head_init(&chan->tx_q); 3187 3188 if (chan->mode != L2CAP_MODE_ERTM) 3189 return 0; 3190 3191 chan->rx_state = L2CAP_RX_STATE_RECV; 3192 chan->tx_state = L2CAP_TX_STATE_XMIT; 3193 3194 skb_queue_head_init(&chan->srej_q); 3195 3196 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); 3197 if (err < 0) 3198 return err; 3199 3200 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); 3201 if (err < 0) 3202 l2cap_seq_list_free(&chan->srej_list); 3203 3204 return err; 3205} 3206 3207static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 3208{ 3209 switch (mode) { 3210 case L2CAP_MODE_STREAMING: 3211 case L2CAP_MODE_ERTM: 3212 if (l2cap_mode_supported(mode, remote_feat_mask)) 3213 return mode; 3214 fallthrough; 3215 default: 3216 return L2CAP_MODE_BASIC; 3217 } 3218} 3219 3220static inline bool __l2cap_ews_supported(struct l2cap_conn *conn) 3221{ 3222 return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW); 3223} 3224 3225static inline bool __l2cap_efs_supported(struct l2cap_conn *conn) 3226{ 3227 return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW); 3228} 3229 3230static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan, 3231 struct l2cap_conf_rfc *rfc) 3232{ 3233 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); 3234 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); 3235} 3236 3237static 
inline void l2cap_txwin_setup(struct l2cap_chan *chan) 3238{ 3239 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && 3240 __l2cap_ews_supported(chan->conn)) { 3241 /* use extended control field */ 3242 set_bit(FLAG_EXT_CTRL, &chan->flags); 3243 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; 3244 } else { 3245 chan->tx_win = min_t(u16, chan->tx_win, 3246 L2CAP_DEFAULT_TX_WINDOW); 3247 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 3248 } 3249 chan->ack_win = chan->tx_win; 3250} 3251 3252static void l2cap_mtu_auto(struct l2cap_chan *chan) 3253{ 3254 struct hci_conn *conn = chan->conn->hcon; 3255 3256 chan->imtu = L2CAP_DEFAULT_MIN_MTU; 3257 3258 /* The 2-DH1 packet has between 2 and 56 information bytes 3259 * (including the 2-byte payload header) 3260 */ 3261 if (!(conn->pkt_type & HCI_2DH1)) 3262 chan->imtu = 54; 3263 3264 /* The 3-DH1 packet has between 2 and 85 information bytes 3265 * (including the 2-byte payload header) 3266 */ 3267 if (!(conn->pkt_type & HCI_3DH1)) 3268 chan->imtu = 83; 3269 3270 /* The 2-DH3 packet has between 2 and 369 information bytes 3271 * (including the 2-byte payload header) 3272 */ 3273 if (!(conn->pkt_type & HCI_2DH3)) 3274 chan->imtu = 367; 3275 3276 /* The 3-DH3 packet has between 2 and 554 information bytes 3277 * (including the 2-byte payload header) 3278 */ 3279 if (!(conn->pkt_type & HCI_3DH3)) 3280 chan->imtu = 552; 3281 3282 /* The 2-DH5 packet has between 2 and 681 information bytes 3283 * (including the 2-byte payload header) 3284 */ 3285 if (!(conn->pkt_type & HCI_2DH5)) 3286 chan->imtu = 679; 3287 3288 /* The 3-DH5 packet has between 2 and 1023 information bytes 3289 * (including the 2-byte payload header) 3290 */ 3291 if (!(conn->pkt_type & HCI_3DH5)) 3292 chan->imtu = 1021; 3293} 3294 3295static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) 3296{ 3297 struct l2cap_conf_req *req = data; 3298 struct l2cap_conf_rfc rfc = { .mode = chan->mode }; 3299 void *ptr = req->data; 3300 void *endptr = data + 
data_size; 3301 u16 size; 3302 3303 BT_DBG("chan %p", chan); 3304 3305 if (chan->num_conf_req || chan->num_conf_rsp) 3306 goto done; 3307 3308 switch (chan->mode) { 3309 case L2CAP_MODE_STREAMING: 3310 case L2CAP_MODE_ERTM: 3311 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) 3312 break; 3313 3314 if (__l2cap_efs_supported(chan->conn)) 3315 set_bit(FLAG_EFS_ENABLE, &chan->flags); 3316 3317 fallthrough; 3318 default: 3319 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); 3320 break; 3321 } 3322 3323done: 3324 if (chan->imtu != L2CAP_DEFAULT_MTU) { 3325 if (!chan->imtu) 3326 l2cap_mtu_auto(chan); 3327 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, 3328 endptr - ptr); 3329 } 3330 3331 switch (chan->mode) { 3332 case L2CAP_MODE_BASIC: 3333 if (disable_ertm) 3334 break; 3335 3336 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && 3337 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) 3338 break; 3339 3340 rfc.mode = L2CAP_MODE_BASIC; 3341 rfc.txwin_size = 0; 3342 rfc.max_transmit = 0; 3343 rfc.retrans_timeout = 0; 3344 rfc.monitor_timeout = 0; 3345 rfc.max_pdu_size = 0; 3346 3347 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3348 (unsigned long) &rfc, endptr - ptr); 3349 break; 3350 3351 case L2CAP_MODE_ERTM: 3352 rfc.mode = L2CAP_MODE_ERTM; 3353 rfc.max_transmit = chan->max_tx; 3354 3355 __l2cap_set_ertm_timeouts(chan, &rfc); 3356 3357 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - 3358 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - 3359 L2CAP_FCS_SIZE); 3360 rfc.max_pdu_size = cpu_to_le16(size); 3361 3362 l2cap_txwin_setup(chan); 3363 3364 rfc.txwin_size = min_t(u16, chan->tx_win, 3365 L2CAP_DEFAULT_TX_WINDOW); 3366 3367 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3368 (unsigned long) &rfc, endptr - ptr); 3369 3370 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) 3371 l2cap_add_opt_efs(&ptr, chan, endptr - ptr); 3372 3373 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 3374 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, 3375 
chan->tx_win, endptr - ptr); 3376 3377 if (chan->conn->feat_mask & L2CAP_FEAT_FCS) 3378 if (chan->fcs == L2CAP_FCS_NONE || 3379 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { 3380 chan->fcs = L2CAP_FCS_NONE; 3381 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, 3382 chan->fcs, endptr - ptr); 3383 } 3384 break; 3385 3386 case L2CAP_MODE_STREAMING: 3387 l2cap_txwin_setup(chan); 3388 rfc.mode = L2CAP_MODE_STREAMING; 3389 rfc.txwin_size = 0; 3390 rfc.max_transmit = 0; 3391 rfc.retrans_timeout = 0; 3392 rfc.monitor_timeout = 0; 3393 3394 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - 3395 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - 3396 L2CAP_FCS_SIZE); 3397 rfc.max_pdu_size = cpu_to_le16(size); 3398 3399 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3400 (unsigned long) &rfc, endptr - ptr); 3401 3402 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) 3403 l2cap_add_opt_efs(&ptr, chan, endptr - ptr); 3404 3405 if (chan->conn->feat_mask & L2CAP_FEAT_FCS) 3406 if (chan->fcs == L2CAP_FCS_NONE || 3407 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { 3408 chan->fcs = L2CAP_FCS_NONE; 3409 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, 3410 chan->fcs, endptr - ptr); 3411 } 3412 break; 3413 } 3414 3415 req->dcid = cpu_to_le16(chan->dcid); 3416 req->flags = cpu_to_le16(0); 3417 3418 return ptr - data; 3419} 3420 3421static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) 3422{ 3423 struct l2cap_conf_rsp *rsp = data; 3424 void *ptr = rsp->data; 3425 void *endptr = data + data_size; 3426 void *req = chan->conf_req; 3427 int len = chan->conf_len; 3428 int type, hint, olen; 3429 unsigned long val; 3430 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; 3431 struct l2cap_conf_efs efs; 3432 u8 remote_efs = 0; 3433 u16 mtu = 0; 3434 u16 result = L2CAP_CONF_SUCCESS; 3435 u16 size; 3436 3437 BT_DBG("chan %p", chan); 3438 3439 while (len >= L2CAP_CONF_OPT_SIZE) { 3440 len -= l2cap_get_conf_opt(&req, &type, &olen, &val); 3441 if (len < 0) 
3442 break; 3443 3444 hint = type & L2CAP_CONF_HINT; 3445 type &= L2CAP_CONF_MASK; 3446 3447 switch (type) { 3448 case L2CAP_CONF_MTU: 3449 if (olen != 2) 3450 break; 3451 mtu = val; 3452 break; 3453 3454 case L2CAP_CONF_FLUSH_TO: 3455 if (olen != 2) 3456 break; 3457 chan->flush_to = val; 3458 break; 3459 3460 case L2CAP_CONF_QOS: 3461 break; 3462 3463 case L2CAP_CONF_RFC: 3464 if (olen != sizeof(rfc)) 3465 break; 3466 memcpy(&rfc, (void *) val, olen); 3467 break; 3468 3469 case L2CAP_CONF_FCS: 3470 if (olen != 1) 3471 break; 3472 if (val == L2CAP_FCS_NONE) 3473 set_bit(CONF_RECV_NO_FCS, &chan->conf_state); 3474 break; 3475 3476 case L2CAP_CONF_EFS: 3477 if (olen != sizeof(efs)) 3478 break; 3479 remote_efs = 1; 3480 memcpy(&efs, (void *) val, olen); 3481 break; 3482 3483 case L2CAP_CONF_EWS: 3484 if (olen != 2) 3485 break; 3486 return -ECONNREFUSED; 3487 3488 default: 3489 if (hint) 3490 break; 3491 result = L2CAP_CONF_UNKNOWN; 3492 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr); 3493 break; 3494 } 3495 } 3496 3497 if (chan->num_conf_rsp || chan->num_conf_req > 1) 3498 goto done; 3499 3500 switch (chan->mode) { 3501 case L2CAP_MODE_STREAMING: 3502 case L2CAP_MODE_ERTM: 3503 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { 3504 chan->mode = l2cap_select_mode(rfc.mode, 3505 chan->conn->feat_mask); 3506 break; 3507 } 3508 3509 if (remote_efs) { 3510 if (__l2cap_efs_supported(chan->conn)) 3511 set_bit(FLAG_EFS_ENABLE, &chan->flags); 3512 else 3513 return -ECONNREFUSED; 3514 } 3515 3516 if (chan->mode != rfc.mode) 3517 return -ECONNREFUSED; 3518 3519 break; 3520 } 3521 3522done: 3523 if (chan->mode != rfc.mode) { 3524 result = L2CAP_CONF_UNACCEPT; 3525 rfc.mode = chan->mode; 3526 3527 if (chan->num_conf_rsp == 1) 3528 return -ECONNREFUSED; 3529 3530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3531 (unsigned long) &rfc, endptr - ptr); 3532 } 3533 3534 if (result == L2CAP_CONF_SUCCESS) { 3535 /* Configure output options and let the 
other side know 3536 * which ones we don't like. */ 3537 3538 /* If MTU is not provided in configure request, try adjusting it 3539 * to the current output MTU if it has been set 3540 * 3541 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5 3542 * 3543 * Each configuration parameter value (if any is present) in an 3544 * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a 3545 * configuration parameter value that has been sent (or, in case 3546 * of default values, implied) in the corresponding 3547 * L2CAP_CONFIGURATION_REQ packet. 3548 */ 3549 if (!mtu) { 3550 /* Only adjust for ERTM channels as for older modes the 3551 * remote stack may not be able to detect that the 3552 * adjustment causing it to silently drop packets. 3553 */ 3554 if (chan->mode == L2CAP_MODE_ERTM && 3555 chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU) 3556 mtu = chan->omtu; 3557 else 3558 mtu = L2CAP_DEFAULT_MTU; 3559 } 3560 3561 if (mtu < L2CAP_DEFAULT_MIN_MTU) 3562 result = L2CAP_CONF_UNACCEPT; 3563 else { 3564 chan->omtu = mtu; 3565 set_bit(CONF_MTU_DONE, &chan->conf_state); 3566 } 3567 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr); 3568 3569 if (remote_efs) { 3570 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 3571 efs.stype != L2CAP_SERV_NOTRAFIC && 3572 efs.stype != chan->local_stype) { 3573 3574 result = L2CAP_CONF_UNACCEPT; 3575 3576 if (chan->num_conf_req >= 1) 3577 return -ECONNREFUSED; 3578 3579 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 3580 sizeof(efs), 3581 (unsigned long) &efs, endptr - ptr); 3582 } else { 3583 /* Send PENDING Conf Rsp */ 3584 result = L2CAP_CONF_PENDING; 3585 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); 3586 } 3587 } 3588 3589 switch (rfc.mode) { 3590 case L2CAP_MODE_BASIC: 3591 chan->fcs = L2CAP_FCS_NONE; 3592 set_bit(CONF_MODE_DONE, &chan->conf_state); 3593 break; 3594 3595 case L2CAP_MODE_ERTM: 3596 if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) 3597 chan->remote_tx_win = rfc.txwin_size; 3598 else 3599 rfc.txwin_size = 
				L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's max PDU size so an ERTM frame
			 * (extended header + SDU length + FCS) still fits in
			 * our ACL MTU, and echo the clamped value back.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			/* Same MPS clamp as ERTM above, but streaming mode
			 * carries no EFS/timeout state.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			/* Unknown/unsupported mode: reject and tell the
			 * remote which mode we would accept.
			 */
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	/* Response "scid" is the remote's source CID, i.e. our dcid */
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}

/* Parse the options of an incoming Configure Response and build the
 * follow-up Configure Request into @data (at most @size bytes).
 *
 * @rsp/@len: option payload of the received response.
 * @result:   in/out; may be downgraded to L2CAP_CONF_UNACCEPT when the
 *            proposed MTU is below L2CAP_DEFAULT_MIN_MTU.
 *
 * Returns the length of the request written to @data, or -ECONNREFUSED
 * when the response conflicts with our channel settings (mode mismatch,
 * incompatible EFS service type).
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Options with an unexpected length are ignored */
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A locked-mode device must not be switched to a
			 * different mode by the remote.
			 */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control fields the window is
			 * bounded by the RFC option's txwin_size.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}

/* Build a bare Configure Response (header only, no options) for @chan
 * into @data with the given @result and @flags.  Returns the number of
 * bytes written.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return ptr - data;
}

/* Send the deferred LE Credit Based Connection Response for @chan,
 * advertising our CID, MTU, MPS and initial RX credits.
 */
void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p", chan);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	l2cap_send_cmd(conn, chan->ident,
		       L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

/* Iterator callback: count channels (sharing one ident) that are still
 * pending accept.  @data points to an int: left untouched for connected
 * channels, incremented for BT_CONNECT2, set to -ECONNREFUSED for any
 * other state.  Bails out early once a refusal has been recorded.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	switch (chan->state) {
	case BT_CONNECT2:
		/* If channel still pending accept add to result */
		(*result)++;
		return;
	case BT_CONNECTED:
		return;
	default:
		/* If not connected or pending accept it has been refused */
		*result = -ECONNREFUSED;
		return;
	}
}

/* Accumulator for building a single Enhanced Credit Based Connection
 * Response covering up to L2CAP_ECRED_MAX_CID channels.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;	/* number of CIDs filled into pdu so far */
};

/* Iterator callback: append @chan's CID to the pending eCRED response
 * in @data, or delete the channel when the response carries an error.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}

/* Send the deferred Enhanced Credit Based Connection Response once all
 * channels sharing @chan's ident have left the pending-accept state.
 * Does nothing while any sibling channel is still in BT_CONNECT2; a
 * refused sibling turns the whole response into L2CAP_CR_LE_AUTHORIZATION.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}

/* Send the deferred BR/EDR Connection Response for @chan and, unless
 * one was already sent, kick off configuration with a Configure Request.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only send the initial Configure Request once */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}

/* Extract the negotiated RFC (and optional extended window size)
 * parameters from a successful Configure Response and apply them to
 * @chan (timeouts, MPS, ack window).  No-op for basic-mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Ignore options with an unexpected length */
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS option carries the window,
		 * otherwise the RFC option's txwin_size does.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}

static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	/* Only "command not understood" rejections are acted upon */
	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* A rejected Information Request ends feature-mask discovery:
	 * mark it done and start the queued channels anyway.
	 */
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}

/* Handle an incoming BR/EDR Connection Request: look up a listening
 * channel for the PSM, run security checks, create the child channel
 * and send the Connection Response (@rsp_code).  May additionally kick
 * off feature-mask discovery or the first Configure Request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace will accept/reject later */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not yet known: respond PENDING and query it */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type =
			    cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}

/* Signaling handler for a Connection Request PDU: validate the length
 * and delegate to l2cap_connect() with the BR/EDR response code.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
	return 0;
}

/* Signaling handler for a Connection Response PDU: on success move the
 * channel to BT_CONFIG and send the first Configure Request; on PEND
 * just mark the channel; anything else deletes the channel.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a dynamic destination CID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Refuse a dcid that is already in use on this connection */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}

static inline void set_default_fcs(struct l2cap_chan *chan)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}

/* Send a successful Configure Response with EFS pending state cleared
 * and output configuration marked done.  @data is scratch space for
 * building the response.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}

/* Send a Command Reject (invalid CID) carrying the offending source and
 * destination CIDs.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	rej.scid = __cpu_to_le16(scid);
	rej.dcid = __cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}

/* Signaling handler for a Configure Request PDU.  Accumulates option
 * data across continuation packets in chan->conf_req, then parses the
 * complete request, sends the Configure Response and, when both
 * directions are configured, brings the channel up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small.
	 */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions done: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->state != BT_CONNECTED) {
			if (chan->mode == L2CAP_MODE_ERTM ||
			    chan->mode == L2CAP_MODE_STREAMING)
				err = l2cap_ertm_init(chan);

			if (err < 0)
				l2cap_send_disconn_req(chan, -err);
			else
				l2cap_chan_ready(chan);
		}

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}

/* Signaling handler for a Configure Response PDU.  On success/pending
 * applies the negotiated parameters; on unaccept/unknown retries with a
 * new Configure Request (bounded by L2CAP_CONF_MAX_CONF_RSP); otherwise
 * tears the channel down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}

/* Handle L2CAP_DISCONN_REQ from the peer: send a Disconnection Response
 * echoing the CID pair and tear down the matching channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid field names our scid, hence the lookup by dcid */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}

/* Handle L2CAP_DISCONN_RSP: finish tearing down a channel that was put
 * into BT_DISCONN when the Disconnection Request was sent.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		return 0;
	}

	/* Only act on a response we are actually waiting for */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}

/* Handle L2CAP_INFO_REQ: report our extended feature mask or fixed
 * channel map; any other type gets L2CAP_IR_NOTSUPP.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* ERTM/streaming/FCS are only advertised when ERTM is not
		 * disabled via the module parameter.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		/* Remaining octets of the fixed channel map are zeroed */
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}

/* Handle L2CAP_INFO_RSP: record the remote feature mask / fixed channel
 * map and resume connection setup.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		if (cmd_len >= sizeof(*rsp) + sizeof(u32))
			conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If the peer supports fixed channels, follow up with a
		 * fixed-channel info request before starting channels.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		if (cmd_len >= sizeof(*rsp) + sizeof(rsp->data[0]))
			conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}

/* Handle LE Connection Parameter Update Request: validate the proposed
 * parameters, respond, and apply them on success.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the central role may be asked to update parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Only push the update to the controller when accepted */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}

/* Handle LE Credit Based Connection Response for the channel pending on
 * this command ident.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* On success the MTU/MPS must be at least 23 and the dcid must lie
	 * in the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					      dcid < L2CAP_CID_DYN_START ||
					      dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid that is already in use */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Escalate our security level one step above the link's */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}

/* Release a signaling command identifier back to tx_ida once the
 * matching response arrives.  Only response codes consume an ident.
 */
static void l2cap_put_ident(struct l2cap_conn *conn, u8 code, u8 id)
{
	switch (code) {
	case L2CAP_COMMAND_REJ:
	case L2CAP_CONN_RSP:
	case L2CAP_CONF_RSP:
	case L2CAP_DISCONN_RSP:
	case L2CAP_ECHO_RSP:
	case L2CAP_INFO_RSP:
	case L2CAP_CONN_PARAM_UPDATE_RSP:
	case L2CAP_ECRED_CONN_RSP:
	case L2CAP_ECRED_RECONF_RSP:
		/* First do a lookup since the remote may send bogus ids
		 * that would make ida_free() generate warnings.
		 */
		if (ida_find_first_range(&conn->tx_ida, id, id) >= 0)
			ida_free(&conn->tx_ida, id);
	}
}

/* Dispatch one BR/EDR signaling command to its handler.  A non-zero
 * return makes the caller send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}

/* Handle an incoming LE Credit Based Connection Request */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid =
__le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* Minimum legal MTU/MPS for LE CoC is 23 */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			 L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if Key Size is sufficient for the security level */
	if (!l2cap_check_enc_key_size(conn->hcon, pchan)) {
		result = L2CAP_CR_LE_BAD_KEY_SIZE;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}

/* Handle L2CAP_LE_CREDITS: the peer grants us additional TX credits */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Overflowing LE_FLOWCTL_MAX_CREDITS total is a protocol error */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}

/* Handle an Enhanced Credit Based (ECRED) Connection Request, which may
 * request up to L2CAP_ECRED_MAX_CID channels in a single command.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, rsp_len = 0;
	int i, num_scid = 0;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	memset(pdu, 0, sizeof(*pdu));

	/* Payload must be the fixed header plus a whole number of scids */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	/* Check if there are no pending channels with the same ident */
	__l2cap_chan_list_id(conn, cmd->ident, l2cap_ecred_list_defer,
			     &num_scid);
	if (num_scid) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	/* Always respond with the same number of scids as in the request */
	rsp_len = cmd_len;

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	psm = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			 L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	/* Check if the listening channel has set an output MTU then the
	 * requested MTU shall be less than or equal to that value.
	 */
	if (pchan->omtu && mtu < pchan->omtu) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	/* Per-scid setup: a failure only refuses that one channel (its
	 * dcid stays 0 in the response) and records the last error.
	 */
	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		pdu->dcid[i] = 0x0000;

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred setup responds later, from the accept path */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + rsp_len, pdu);

	return 0;
}

/* Handle an ECRED Connection Response covering every channel that is
 * pending on this command ident.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len <
sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}

/* Handle an ECRED Reconfigure Request: validate the new MTU/MPS against
 * every addressed channel before committing to any of them.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan[L2CAP_ECRED_MAX_CID] = {};
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload must be the fixed header plus a whole number of scids */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_RECONF_INVALID_CID;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	result = L2CAP_RECONF_SUCCESS;

	/* Check if each SCID, MTU and MPS are valid */
	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid) {
			result = L2CAP_RECONF_INVALID_CID;
			goto respond;
		}

		chan[i] = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan[i]) {
			result = L2CAP_RECONF_INVALID_CID;
			goto respond;
		}

		/* The MTU field shall be greater than or equal to the greatest
		 * current MTU size of these channels.
		 */
		if (chan[i]->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan[i],
			       chan[i]->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
			goto respond;
		}

		/* If more than one channel is being configured, the MPS field
		 * shall be greater than or equal to the current MPS size of
		 * each of these channels.  If only one channel is being
		 * configured, the MPS field may be less than the current MPS
		 * of that channel.
		 */
		if (chan[i]->remote_mps > mps && num_scid > 1) {
			BT_ERR("chan %p decreased MPS %u -> %u", chan[i],
			       chan[i]->remote_mps, mps);
			result = L2CAP_RECONF_INVALID_MPS;
			goto respond;
		}
	}

	/* Commit the new MTU and MPS values after checking they are valid */
	for (i = 0; i < num_scid; i++) {
		chan[i]->omtu = mtu;
		chan[i]->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}

/* Handle an ECRED Reconfigure Response: a non-zero result tears down
 * every channel still pending on this ident.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	struct l2cap_ecred_reconf_rsp *rsp = (void *)data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	BT_DBG("result 0x%4.4x", result);

	/* Success requires no action */
	if (!result)
		return 0;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, ECONNRESET);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	return 0;
}

/* Handle an LE Command Reject: drop any channel waiting on this ident */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan,
ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

done:
	return 0;
}

/* Dispatch one LE signaling command to its handler.  A non-zero return
 * makes the caller send a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}

/* Process one LE signaling channel frame.  Unlike BR/EDR, an LE frame
 * carries exactly one command, so len must match the remaining data.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is historical; err here is a
		 * command handling error, not a link type mismatch.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}

/* Send a "command not understood" reject for the given ident */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}

/* Process a BR/EDR signaling channel frame, which may contain several
 * concatenated commands.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ?
skb->len : len); 5648 continue; 5649 } 5650 5651 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data); 5652 if (err) { 5653 BT_ERR("Wrong link type (%d)", err); 5654 l2cap_sig_send_rej(conn, cmd->ident); 5655 } 5656 5657 skb_pull(skb, len); 5658 } 5659 5660 if (skb->len > 0) { 5661 BT_DBG("corrupted command"); 5662 l2cap_sig_send_rej(conn, 0); 5663 } 5664 5665drop: 5666 kfree_skb(skb); 5667} 5668 5669static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) 5670{ 5671 u16 our_fcs, rcv_fcs; 5672 int hdr_size; 5673 5674 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 5675 hdr_size = L2CAP_EXT_HDR_SIZE; 5676 else 5677 hdr_size = L2CAP_ENH_HDR_SIZE; 5678 5679 if (chan->fcs == L2CAP_FCS_CRC16) { 5680 skb_trim(skb, skb->len - L2CAP_FCS_SIZE); 5681 rcv_fcs = get_unaligned_le16(skb->data + skb->len); 5682 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); 5683 5684 if (our_fcs != rcv_fcs) 5685 return -EBADMSG; 5686 } 5687 return 0; 5688} 5689 5690static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) 5691{ 5692 struct l2cap_ctrl control; 5693 5694 BT_DBG("chan %p", chan); 5695 5696 memset(&control, 0, sizeof(control)); 5697 control.sframe = 1; 5698 control.final = 1; 5699 control.reqseq = chan->buffer_seq; 5700 set_bit(CONN_SEND_FBIT, &chan->conn_state); 5701 5702 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 5703 control.super = L2CAP_SUPER_RNR; 5704 l2cap_send_sframe(chan, &control); 5705 } 5706 5707 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 5708 chan->unacked_frames > 0) 5709 __set_retrans_timer(chan); 5710 5711 /* Send pending iframes */ 5712 l2cap_ertm_send(chan); 5713 5714 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && 5715 test_bit(CONN_SEND_FBIT, &chan->conn_state)) { 5716 /* F-bit wasn't sent in an s-frame or i-frame yet, so 5717 * send it now. 
5718 */ 5719 control.super = L2CAP_SUPER_RR; 5720 l2cap_send_sframe(chan, &control); 5721 } 5722} 5723 5724static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag, 5725 struct sk_buff **last_frag) 5726{ 5727 /* skb->len reflects data in skb as well as all fragments 5728 * skb->data_len reflects only data in fragments 5729 */ 5730 if (!skb_has_frag_list(skb)) 5731 skb_shinfo(skb)->frag_list = new_frag; 5732 5733 new_frag->next = NULL; 5734 5735 (*last_frag)->next = new_frag; 5736 *last_frag = new_frag; 5737 5738 skb->len += new_frag->len; 5739 skb->data_len += new_frag->len; 5740 skb->truesize += new_frag->truesize; 5741} 5742 5743static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, 5744 struct l2cap_ctrl *control) 5745{ 5746 int err = -EINVAL; 5747 5748 switch (control->sar) { 5749 case L2CAP_SAR_UNSEGMENTED: 5750 if (chan->sdu) 5751 break; 5752 5753 err = chan->ops->recv(chan, skb); 5754 break; 5755 5756 case L2CAP_SAR_START: 5757 if (chan->sdu) 5758 break; 5759 5760 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE)) 5761 break; 5762 5763 chan->sdu_len = get_unaligned_le16(skb->data); 5764 skb_pull(skb, L2CAP_SDULEN_SIZE); 5765 5766 if (chan->sdu_len > chan->imtu) { 5767 err = -EMSGSIZE; 5768 break; 5769 } 5770 5771 if (skb->len >= chan->sdu_len) 5772 break; 5773 5774 chan->sdu = skb; 5775 chan->sdu_last_frag = skb; 5776 5777 skb = NULL; 5778 err = 0; 5779 break; 5780 5781 case L2CAP_SAR_CONTINUE: 5782 if (!chan->sdu) 5783 break; 5784 5785 append_skb_frag(chan->sdu, skb, 5786 &chan->sdu_last_frag); 5787 skb = NULL; 5788 5789 if (chan->sdu->len >= chan->sdu_len) 5790 break; 5791 5792 err = 0; 5793 break; 5794 5795 case L2CAP_SAR_END: 5796 if (!chan->sdu) 5797 break; 5798 5799 append_skb_frag(chan->sdu, skb, 5800 &chan->sdu_last_frag); 5801 skb = NULL; 5802 5803 if (chan->sdu->len != chan->sdu_len) 5804 break; 5805 5806 err = chan->ops->recv(chan, chan->sdu); 5807 5808 if (!err) { 5809 /* Reassembly complete */ 5810 chan->sdu = 
/* Drain in-sequence I-frames that were buffered while waiting for
 * SREJ'd retransmissions.
 *
 * Frames are popped from srej_q in buffer_seq order and handed to
 * l2cap_reassemble_sdu() until a sequence gap is found, local busy is
 * asserted, or reassembly fails.  Once the queue is fully drained the
 * channel returns to the normal RECV state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps filled: resume normal reception and ack */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
/* Handle a received REJ S-frame (go-back-N retransmission request).
 *
 * Validates reqseq against the unacked window, enforces the max_tx
 * retry limit, then retransmits all frames from reqseq onward.  F-bit
 * handling follows the ERTM spec: a REJ with F=1 answers one of our
 * polls and only triggers retransmission when no REJ exception
 * (CONN_REJ_ACT) is already outstanding.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A reqseq equal to next_tx_seq rejects a frame that was never
	 * sent - protocol violation, tear the channel down.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1: retransmit only if this REJ wasn't acted on yet */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* ERTM RX state machine handler for the normal RECV state.
 *
 * Dispatches on the receive event: I-frames are classified by txseq
 * (expected / unexpected gap / duplicate / invalid) and either
 * reassembled, queued pending SREJ recovery, or dropped; RR/RNR/REJ/
 * SREJ S-frames drive the transmit-side acknowledgement logic.  Any
 * skb not consumed by a handler (skb_in_use) is freed before return.
 * Returns 0 on success or a negative error from reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 * l2cap_reassemble_sdu
			 * chan->ops->recv == l2cap_sock_recv_cb
			 * __sock_queue_rcv_skb
			 * Another thread calls:
			 * bt_sock_recvmsg
			 * skb_recv_datagram
			 * skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers our poll; retransmit unless a REJ
			 * exception already handled it.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
*skb, u8 event) 6215{ 6216 int err = 0; 6217 u16 txseq = control->txseq; 6218 bool skb_in_use = false; 6219 6220 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, 6221 event); 6222 6223 switch (event) { 6224 case L2CAP_EV_RECV_IFRAME: 6225 switch (l2cap_classify_txseq(chan, txseq)) { 6226 case L2CAP_TXSEQ_EXPECTED: 6227 /* Keep frame for reassembly later */ 6228 l2cap_pass_to_tx(chan, control); 6229 skb_queue_tail(&chan->srej_q, skb); 6230 skb_in_use = true; 6231 BT_DBG("Queued %p (queue len %d)", skb, 6232 skb_queue_len(&chan->srej_q)); 6233 6234 chan->expected_tx_seq = __next_seq(chan, txseq); 6235 break; 6236 case L2CAP_TXSEQ_EXPECTED_SREJ: 6237 l2cap_seq_list_pop(&chan->srej_list); 6238 6239 l2cap_pass_to_tx(chan, control); 6240 skb_queue_tail(&chan->srej_q, skb); 6241 skb_in_use = true; 6242 BT_DBG("Queued %p (queue len %d)", skb, 6243 skb_queue_len(&chan->srej_q)); 6244 6245 err = l2cap_rx_queued_iframes(chan); 6246 if (err) 6247 break; 6248 6249 break; 6250 case L2CAP_TXSEQ_UNEXPECTED: 6251 /* Got a frame that can't be reassembled yet. 6252 * Save it for later, and send SREJs to cover 6253 * the missing frames. 6254 */ 6255 skb_queue_tail(&chan->srej_q, skb); 6256 skb_in_use = true; 6257 BT_DBG("Queued %p (queue len %d)", skb, 6258 skb_queue_len(&chan->srej_q)); 6259 6260 l2cap_pass_to_tx(chan, control); 6261 l2cap_send_srej(chan, control->txseq); 6262 break; 6263 case L2CAP_TXSEQ_UNEXPECTED_SREJ: 6264 /* This frame was requested with an SREJ, but 6265 * some expected retransmitted frames are 6266 * missing. Request retransmission of missing 6267 * SREJ'd frames. 6268 */ 6269 skb_queue_tail(&chan->srej_q, skb); 6270 skb_in_use = true; 6271 BT_DBG("Queued %p (queue len %d)", skb, 6272 skb_queue_len(&chan->srej_q)); 6273 6274 l2cap_pass_to_tx(chan, control); 6275 l2cap_send_srej_list(chan, control->txseq); 6276 break; 6277 case L2CAP_TXSEQ_DUPLICATE_SREJ: 6278 /* We've already queued this frame. Drop this copy. 
*/ 6279 l2cap_pass_to_tx(chan, control); 6280 break; 6281 case L2CAP_TXSEQ_DUPLICATE: 6282 /* Expecting a later sequence number, so this frame 6283 * was already received. Ignore it completely. 6284 */ 6285 break; 6286 case L2CAP_TXSEQ_INVALID_IGNORE: 6287 break; 6288 case L2CAP_TXSEQ_INVALID: 6289 default: 6290 l2cap_send_disconn_req(chan, ECONNRESET); 6291 break; 6292 } 6293 break; 6294 case L2CAP_EV_RECV_RR: 6295 l2cap_pass_to_tx(chan, control); 6296 if (control->final) { 6297 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 6298 6299 if (!test_and_clear_bit(CONN_REJ_ACT, 6300 &chan->conn_state)) { 6301 control->final = 0; 6302 l2cap_retransmit_all(chan, control); 6303 } 6304 6305 l2cap_ertm_send(chan); 6306 } else if (control->poll) { 6307 if (test_and_clear_bit(CONN_REMOTE_BUSY, 6308 &chan->conn_state) && 6309 chan->unacked_frames) { 6310 __set_retrans_timer(chan); 6311 } 6312 6313 set_bit(CONN_SEND_FBIT, &chan->conn_state); 6314 l2cap_send_srej_tail(chan); 6315 } else { 6316 if (test_and_clear_bit(CONN_REMOTE_BUSY, 6317 &chan->conn_state) && 6318 chan->unacked_frames) 6319 __set_retrans_timer(chan); 6320 6321 l2cap_send_ack(chan); 6322 } 6323 break; 6324 case L2CAP_EV_RECV_RNR: 6325 set_bit(CONN_REMOTE_BUSY, &chan->conn_state); 6326 l2cap_pass_to_tx(chan, control); 6327 if (control->poll) { 6328 l2cap_send_srej_tail(chan); 6329 } else { 6330 struct l2cap_ctrl rr_control; 6331 memset(&rr_control, 0, sizeof(rr_control)); 6332 rr_control.sframe = 1; 6333 rr_control.super = L2CAP_SUPER_RR; 6334 rr_control.reqseq = chan->buffer_seq; 6335 l2cap_send_sframe(chan, &rr_control); 6336 } 6337 6338 break; 6339 case L2CAP_EV_RECV_REJ: 6340 l2cap_handle_rej(chan, control); 6341 break; 6342 case L2CAP_EV_RECV_SREJ: 6343 l2cap_handle_srej(chan, control); 6344 break; 6345 } 6346 6347 if (skb && !skb_in_use) { 6348 BT_DBG("Freeing %p", skb); 6349 kfree_skb(skb); 6350 } 6351 6352 return err; 6353} 6354 6355static int l2cap_finish_move(struct l2cap_chan *chan) 6356{ 6357 
/* RX handler while waiting for a poll (P=1) from the peer.
 *
 * Only a frame with the P-bit set is acceptable in this state; it
 * resynchronizes the transmit side (rewinding next_tx_seq to the
 * peer's reqseq), completes the pending move via l2cap_finish_move(),
 * and answers with an F-bit frame.  Remaining S-frame events then fall
 * through to the normal RECV-state handler; I-frames are rejected with
 * -EPROTO until resynchronization completes.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not processed in this state */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6424 */ 6425 chan->next_tx_seq = control->reqseq; 6426 chan->unacked_frames = 0; 6427 chan->conn->mtu = chan->conn->hcon->mtu; 6428 6429 err = l2cap_resegment(chan); 6430 6431 if (!err) 6432 err = l2cap_rx_state_recv(chan, control, skb, event); 6433 6434 return err; 6435} 6436 6437static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) 6438{ 6439 /* Make sure reqseq is for a packet that has been sent but not acked */ 6440 u16 unacked; 6441 6442 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq); 6443 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked; 6444} 6445 6446static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, 6447 struct sk_buff *skb, u8 event) 6448{ 6449 int err = 0; 6450 6451 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan, 6452 control, skb, event, chan->rx_state); 6453 6454 if (__valid_reqseq(chan, control->reqseq)) { 6455 switch (chan->rx_state) { 6456 case L2CAP_RX_STATE_RECV: 6457 err = l2cap_rx_state_recv(chan, control, skb, event); 6458 break; 6459 case L2CAP_RX_STATE_SREJ_SENT: 6460 err = l2cap_rx_state_srej_sent(chan, control, skb, 6461 event); 6462 break; 6463 case L2CAP_RX_STATE_WAIT_P: 6464 err = l2cap_rx_state_wait_p(chan, control, skb, event); 6465 break; 6466 case L2CAP_RX_STATE_WAIT_F: 6467 err = l2cap_rx_state_wait_f(chan, control, skb, event); 6468 break; 6469 default: 6470 /* shut it down */ 6471 break; 6472 } 6473 } else { 6474 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d", 6475 control->reqseq, chan->next_tx_seq, 6476 chan->expected_ack_seq); 6477 l2cap_send_disconn_req(chan, ECONNRESET); 6478 } 6479 6480 return err; 6481} 6482 6483static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, 6484 struct sk_buff *skb) 6485{ 6486 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store 6487 * the txseq field in advance to use it after l2cap_reassemble_sdu 6488 * returns and to avoid the race condition, for 
/* Entry point for ERTM and streaming-mode PDUs on a connected channel.
 *
 * Unpacks the (enhanced or extended) control field, validates the FCS
 * and the payload length against the negotiated MPS, then dispatches
 * I-frames to the RX state machine (or the simpler streaming receiver)
 * and S-frames to the matching state-machine event.  Protocol
 * violations tear the channel down with a disconnect request; merely
 * malformed frames are silently dropped so ERTM recovery can request
 * retransmission.  Always returns 0; the skb is consumed.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Information payload excludes the SDU length field and the FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps the 2-bit S-frame function field to RX events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no information payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Top up the peer's TX credits for an LE / enhanced credit-based
 * flow-control channel.
 *
 * l2cap_le_rx_credits() computes how many credits should be
 * outstanding given current receive buffer space.  If the peer already
 * holds at least that many, nothing is sent; otherwise the difference
 * is granted with an L2CAP LE Flow Control Credit packet.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits = l2cap_le_rx_credits(chan);

	/* Credits only apply to the credit-based flow-control modes */
	if (chan->mode != L2CAP_MODE_LE_FLOWCTL &&
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL)
		return;

	if (chan->rx_credits >= return_credits)
		return;

	/* Grant only the shortfall */
	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
(skb->len > chan->imtu) { 6700 BT_ERR("Too big LE L2CAP PDU: len %u > %u", skb->len, 6701 chan->imtu); 6702 l2cap_send_disconn_req(chan, ECONNRESET); 6703 return -ENOBUFS; 6704 } 6705 6706 if (skb->len > chan->mps) { 6707 BT_ERR("Too big LE L2CAP MPS: len %u > %u", skb->len, 6708 chan->mps); 6709 l2cap_send_disconn_req(chan, ECONNRESET); 6710 return -ENOBUFS; 6711 } 6712 6713 chan->rx_credits--; 6714 BT_DBG("chan %p: rx_credits %u -> %u", 6715 chan, chan->rx_credits + 1, chan->rx_credits); 6716 6717 /* Update if remote had run out of credits, this should only happens 6718 * if the remote is not using the entire MPS. 6719 */ 6720 if (!chan->rx_credits) 6721 l2cap_chan_le_send_credits(chan); 6722 6723 err = 0; 6724 6725 if (!chan->sdu) { 6726 u16 sdu_len; 6727 6728 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE)) { 6729 err = -EINVAL; 6730 goto failed; 6731 } 6732 6733 sdu_len = get_unaligned_le16(skb->data); 6734 skb_pull(skb, L2CAP_SDULEN_SIZE); 6735 6736 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u", 6737 sdu_len, skb->len, chan->imtu); 6738 6739 if (sdu_len > chan->imtu) { 6740 BT_ERR("Too big LE L2CAP SDU length: len %u > %u", 6741 sdu_len, chan->imtu); 6742 l2cap_send_disconn_req(chan, ECONNRESET); 6743 err = -EMSGSIZE; 6744 goto failed; 6745 } 6746 6747 if (skb->len > sdu_len) { 6748 BT_ERR("Too much LE L2CAP data received"); 6749 err = -EINVAL; 6750 goto failed; 6751 } 6752 6753 if (skb->len == sdu_len) 6754 return l2cap_ecred_recv(chan, skb); 6755 6756 chan->sdu = skb; 6757 chan->sdu_len = sdu_len; 6758 chan->sdu_last_frag = skb; 6759 6760 /* Detect if remote is not able to use the selected MPS */ 6761 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) { 6762 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE; 6763 6764 /* Adjust the number of credits */ 6765 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len); 6766 chan->mps = mps_len; 6767 l2cap_chan_le_send_credits(chan); 6768 } 6769 6770 return 0; 6771 } 6772 6773 BT_DBG("SDU fragment. 
/* Route an incoming PDU on a dynamic (or fixed data) CID to its
 * channel and deliver it according to the channel mode.
 *
 * l2cap_get_chan_by_scid() returns the channel locked and referenced;
 * both are released on every exit path (the done label).  Frames for
 * unknown CIDs, unconnected channels, unsupported modes, or oversized
 * basic-mode payloads are dropped.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it took ownership of skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}

/* Replay frames that were queued while the HCI link was still coming up */
static void process_pending_rx(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       pending_rx_work);
	struct sk_buff *skb;

	BT_DBG("");

	mutex_lock(&conn->lock);

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);

	mutex_unlock(&conn->lock);
}

/* Return the l2cap_conn attached to an hci_conn, creating it (together
 * with its HCI channel) on first use. Returns NULL on allocation
 * failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc_obj(*conn);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);	/* dropped in conn teardown */
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the BR/EDR SMP fixed channel only when LE is enabled
	 * and secure connections (or the debug force flag) are available.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
	ida_init(&conn->tx_ida);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}

/* Validate a PSM for the destination address type: LE uses the
 * 0x0001-0x00ff range; BR/EDR PSMs must be odd with the least
 * significant bit of the upper byte clear.
 */
static bool is_valid_psm(u16 psm, u8 dst_type)
{
	if (!psm)
		return false;

	if (bdaddr_type_is_le(dst_type))
		return (psm <= 0x00ff);

	/* PSM must be odd and lsb of upper byte must be 0 */
	return ((psm & 0x0101) == 0x0001);
}

struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being connected (excluded) */
	struct pid *pid;		/* owning pid to match against */
	int count;			/* matching deferred channels found */
};

/* l2cap_chan_list() callback: count deferred EXT_FLOWCTL channels in
 * BT_CONNECT that share PID and PSM with d->chan.
 */
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
	struct l2cap_chan_data *d = data;
	struct pid *pid;

	if (chan == d->chan)
		return;

	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only count deferred channels with the same PID/PSM */
	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	d->count++;
}

int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x",
	       &chan->src, dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels are exempt from PSM validation */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels require a PSM ... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ... while fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we cannot scan, so connect directly;
		 * otherwise go through the connect-by-scan path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;	/* include this channel itself */

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* Refuse a duplicate destination CID on the same connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
7239 */ 7240 write_lock(&chan_list_lock); 7241 chan->sport = 0; 7242 write_unlock(&chan_list_lock); 7243 7244 if (hcon->state == BT_CONNECTED) { 7245 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 7246 __clear_chan_timer(chan); 7247 if (l2cap_chan_check_security(chan, true)) 7248 l2cap_state_change(chan, BT_CONNECTED); 7249 } else 7250 l2cap_do_start(chan); 7251 } 7252 7253 err = 0; 7254 7255chan_unlock: 7256 l2cap_chan_unlock(chan); 7257 mutex_unlock(&conn->lock); 7258done: 7259 hci_dev_unlock(hdev); 7260 hci_dev_put(hdev); 7261 return err; 7262} 7263EXPORT_SYMBOL_GPL(l2cap_chan_connect); 7264 7265static void l2cap_ecred_reconfigure(struct l2cap_chan *chan) 7266{ 7267 struct l2cap_conn *conn = chan->conn; 7268 DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1); 7269 7270 pdu->mtu = cpu_to_le16(chan->imtu); 7271 pdu->mps = cpu_to_le16(chan->mps); 7272 pdu->scid[0] = cpu_to_le16(chan->scid); 7273 7274 chan->ident = l2cap_get_ident(conn); 7275 7276 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ, 7277 sizeof(pdu), &pdu); 7278} 7279 7280int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu) 7281{ 7282 if (chan->imtu > mtu) 7283 return -EINVAL; 7284 7285 BT_DBG("chan %p mtu 0x%4.4x", chan, mtu); 7286 7287 chan->imtu = mtu; 7288 7289 l2cap_ecred_reconfigure(chan); 7290 7291 return 0; 7292} 7293 7294/* ---- L2CAP interface with lower layer (HCI) ---- */ 7295 7296int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) 7297{ 7298 int exact = 0, lm1 = 0, lm2 = 0; 7299 struct l2cap_chan *c; 7300 7301 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); 7302 7303 /* Find listening sockets and check their link_mode */ 7304 read_lock(&chan_list_lock); 7305 list_for_each_entry(c, &chan_list, global_l) { 7306 if (c->state != BT_LISTEN) 7307 continue; 7308 7309 if (!bacmp(&c->src, &hdev->bdaddr)) { 7310 lm1 |= HCI_LM_ACCEPT; 7311 if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) 7312 lm1 |= HCI_LM_MASTER; 7313 exact++; 7314 } else if (!bacmp(&c->src, 
			   BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}

/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Accept exact source address match or wildcard listener */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Return with a reference held; may yield NULL if the
		 * refcount already dropped to zero.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}

/* HCI connect-complete callback: attach an l2cap_conn to the new link
 * and notify listening fixed channels.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection.
	 * We use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}

/* HCI disconnect-indication callback: return the disconnect reason the
 * local side recorded for this connection.
 */
int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}

static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}

/* React to an encryption change on a connection-oriented channel:
 * medium security gets a grace timer on loss of encryption, while
 * high/FIPS channels are closed immediately.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH ||
			   chan->sec_level == BT_SECURITY_FIPS)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}

/* HCI security (authentication/encryption) change callback */
static void
l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	/* Walk every channel on the link and advance its state machine
	 * according to the new security status.
	 */
	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security succeeded: proceed with the connect;
			 * otherwise arm the disconnect timer.
			 */
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}

/* Append fragment into frame respecting the maximum len of rx_skb */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;

		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
				      skb->tstamp_type);
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}

/* Complete the 2-byte L2CAP length field of a fragmented frame and,
 * once it is known, make sure rx_skb can hold the whole PDU.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in
	 * order to fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}

/* Drop any partially reassembled frame */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}

/* Take a reference on conn unless its refcount already hit zero
 * (i.e. it is being destroyed). Returns conn or NULL.
 */
struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
{
	if (!c)
		return NULL;

	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));

	if (!kref_get_unless_zero(&c->ref))
		return NULL;

	return c;
}

/* Entry point for ACL data from the HCI core: reassemble fragments and
 * hand complete L2CAP frames to l2cap_recv_frame(). Consumes the skb.
 */
int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
		       struct sk_buff *skb, u16 flags)
{
	struct hci_conn *hcon;
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev for hci_conn, and race on l2cap_data vs. l2cap_conn_del */
	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		kfree_skb(skb);
		return -ENOENT;
	}

	hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	conn = l2cap_conn_hold_unless_zero(conn);
	hcon = NULL;	/* not used past this point without the lock */

	hci_dev_unlock(hdev);

	if (!conn) {
		kfree_skb(skb);
		return -EINVAL;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected
		 * length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
	return 0;
}

/* Callbacks registered with the HCI core */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};

/* debugfs: dump every global L2CAP channel, one per line */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

static struct dentry *l2cap_debugfs;

/* Register the socket layer, HCI callbacks and the debugfs entry */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	return 0;
}

/* Tear down in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");