author	Mat Martineau <mathewm@codeaurora.org>	2012-05-02 12:42:02 -0400
committer	Gustavo Padovan <gustavo@padovan.org>	2012-05-09 00:40:53 -0400
commit	94122bbe9c8c4ad7ba9f02f9a30bfc95672c404e (patch)
tree	7d7a3089e56fca303eb6383bbf727efcf63dbc2d /net
parent	daf6a78c161fccd058ca2f1b21e757ebaa2e9909 (diff)
Bluetooth: Refactor L2CAP ERTM and streaming transmit segmentation
Use more common code for ERTM and streaming mode segmentation and
transmission, and begin using skb control block data for delaying
extended or enhanced header generation until just before the packet is
transmitted. This code is also better suited for resegmentation, which
is needed when L2CAP links are reconfigured after an AMP channel move.

Signed-off-by: Mat Martineau <mathewm@codeaurora.org>
Reviewed-by: Ulisses Furquim <ulisses@profusion.mobi>
Signed-off-by: Gustavo Padovan <gustavo@padovan.org>
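For reference, the per-PDU sizing and SAR sequencing introduced by the new
l2cap_segment_sdu() in the diff below can be followed in isolation. The sketch
that follows is a simplified, standalone userspace rendering of that loop, not
kernel code: the MTU, MPS and header-size constants are stand-ins for the
negotiated HCI MTU, the remote MPS and the L2CAP_* macros, and queueing of skbs
is replaced by printing each segment's SAR type and payload length.

#include <stdio.h>
#include <stddef.h>

/* Stand-in values; the real sizes come from the negotiated HCI MTU,
 * the remote MPS and the kernel's L2CAP_* macros, not these constants. */
#define HCI_MTU            672
#define BREDR_MAX_PAYLOAD  1019
#define EXT_HDR_SIZE       8
#define FCS_SIZE           2
#define SDULEN_SIZE        2
#define REMOTE_MPS         256

enum sar { SAR_UNSEGMENTED, SAR_START, SAR_END, SAR_CONTINUE };

static const char *sar_name[] = { "UNSEGMENTED", "START", "END", "CONTINUE" };

/* Mirrors the sizing/SAR walk of l2cap_segment_sdu(): derive a PDU payload
 * size from the link limits, mark the first segment START (it also carries
 * the SDU length), middle segments CONTINUE, and the last segment END. */
static void segment_sdu(size_t len)
{
	size_t pdu_len;
	size_t sdu_len;
	enum sar sar;

	pdu_len = HCI_MTU;
	if (pdu_len > BREDR_MAX_PAYLOAD)
		pdu_len = BREDR_MAX_PAYLOAD;
	pdu_len -= EXT_HDR_SIZE + FCS_SIZE;
	if (pdu_len > REMOTE_MPS)
		pdu_len = REMOTE_MPS;

	if (len <= pdu_len) {
		sar = SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = SAR_START;
		sdu_len = len;
		pdu_len -= SDULEN_SIZE;	/* START PDU also carries the SDU length */
	}

	while (len > 0) {
		printf("%-11s payload %zu\n", sar_name[sar], pdu_len);

		len -= pdu_len;
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += SDULEN_SIZE;	/* later PDUs have no SDU length field */
		}

		if (len <= pdu_len) {
			sar = SAR_END;
			pdu_len = len;
		} else {
			sar = SAR_CONTINUE;
		}
	}
}

int main(void)
{
	segment_sdu(1000);	/* e.g. a 1000-byte SDU over a 256-byte MPS link */
	return 0;
}

With the stand-in values above this prints a START segment of 254 bytes, two
CONTINUE segments of 256 bytes, and an END segment of 234 bytes, which is the
same shape of output the kernel loop produces as a queue of I-frame skbs.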
Diffstat (limited to 'net')
-rw-r--r--	net/bluetooth/l2cap_core.c	144
1 file changed, 86 insertions, 58 deletions
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index fcd09fb4b94c..2b30bd767779 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1634,6 +1634,7 @@ static void l2cap_streaming_send(struct l2cap_chan *chan)
 	while ((skb = skb_dequeue(&chan->tx_q))) {
 		control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
 		control |= __set_txseq(chan, chan->next_tx_seq);
+		control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
 		__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
 
 		if (chan->fcs == L2CAP_FCS_CRC16) {
@@ -1706,6 +1707,9 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
 	if (chan->state != BT_CONNECTED)
 		return -ENOTCONN;
 
+	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+		return 0;
+
 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
 
 		if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
@@ -1726,6 +1730,7 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
 
 		control |= __set_reqseq(chan, chan->buffer_seq);
 		control |= __set_txseq(chan, chan->next_tx_seq);
+		control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
 
 		__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
 
@@ -1921,7 +1926,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
 
 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
 						struct msghdr *msg, size_t len,
-						u32 control, u16 sdulen)
+						u16 sdulen)
 {
 	struct l2cap_conn *conn = chan->conn;
 	struct sk_buff *skb;
@@ -1956,7 +1961,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
 	lh->cid = cpu_to_le16(chan->dcid);
 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
 
-	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+	__put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
 
 	if (sdulen)
 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1974,57 +1979,78 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
 	return skb;
 }
 
-static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static int l2cap_segment_sdu(struct l2cap_chan *chan,
+			     struct sk_buff_head *seg_queue,
+			     struct msghdr *msg, size_t len)
 {
 	struct sk_buff *skb;
-	struct sk_buff_head sar_queue;
-	u32 control;
-	size_t size = 0;
+	u16 sdu_len;
+	size_t pdu_len;
+	int err = 0;
+	u8 sar;
 
-	skb_queue_head_init(&sar_queue);
-	control = __set_ctrl_sar(chan, L2CAP_SAR_START);
-	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
+	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
 
-	__skb_queue_tail(&sar_queue, skb);
-	len -= chan->remote_mps;
-	size += chan->remote_mps;
+	/* It is critical that ERTM PDUs fit in a single HCI fragment,
+	 * so fragmented skbs are not used. The HCI layer's handling
+	 * of fragmented skbs is not compatible with ERTM's queueing.
+	 */
 
-	while (len > 0) {
-		size_t buflen;
+	/* PDU size is derived from the HCI MTU */
+	pdu_len = chan->conn->mtu;
 
-		if (len > chan->remote_mps) {
-			control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
-			buflen = chan->remote_mps;
-		} else {
-			control = __set_ctrl_sar(chan, L2CAP_SAR_END);
-			buflen = len;
-		}
+	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+
+	/* Adjust for largest possible L2CAP overhead. */
+	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
+
+	/* Remote device may have requested smaller PDUs */
+	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
+
+	if (len <= pdu_len) {
+		sar = L2CAP_SAR_UNSEGMENTED;
+		sdu_len = 0;
+		pdu_len = len;
+	} else {
+		sar = L2CAP_SAR_START;
+		sdu_len = len;
+		pdu_len -= L2CAP_SDULEN_SIZE;
+	}
+
+	while (len > 0) {
+		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
 
-		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
 		if (IS_ERR(skb)) {
-			skb_queue_purge(&sar_queue);
+			__skb_queue_purge(seg_queue);
 			return PTR_ERR(skb);
 		}
 
-		__skb_queue_tail(&sar_queue, skb);
-		len -= buflen;
-		size += buflen;
+		bt_cb(skb)->control.sar = sar;
+		__skb_queue_tail(seg_queue, skb);
+
+		len -= pdu_len;
+		if (sdu_len) {
+			sdu_len = 0;
+			pdu_len += L2CAP_SDULEN_SIZE;
+		}
+
+		if (len <= pdu_len) {
+			sar = L2CAP_SAR_END;
+			pdu_len = len;
+		} else {
+			sar = L2CAP_SAR_CONTINUE;
+		}
 	}
-	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
-	if (chan->tx_send_head == NULL)
-		chan->tx_send_head = sar_queue.next;
 
-	return size;
+	return err;
 }
 
 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
 								u32 priority)
 {
 	struct sk_buff *skb;
-	u32 control;
 	int err;
+	struct sk_buff_head seg_queue;
 
 	/* Connectionless channel */
 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
@@ -2053,42 +2079,44 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
 
 	case L2CAP_MODE_ERTM:
 	case L2CAP_MODE_STREAMING:
-		/* Entire SDU fits into one PDU */
-		if (len <= chan->remote_mps) {
-			control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
-			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
-									0);
-			if (IS_ERR(skb))
-				return PTR_ERR(skb);
+		/* Check outgoing MTU */
+		if (len > chan->omtu) {
+			err = -EMSGSIZE;
+			break;
+		}
 
-			__skb_queue_tail(&chan->tx_q, skb);
+		__skb_queue_head_init(&seg_queue);
 
-			if (chan->tx_send_head == NULL)
-				chan->tx_send_head = skb;
+		/* Do segmentation before calling in to the state machine,
+		 * since it's possible to block while waiting for memory
+		 * allocation.
+		 */
+		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
 
-		} else {
-			/* Segment SDU into multiples PDUs */
-			err = l2cap_sar_segment_sdu(chan, msg, len);
-			if (err < 0)
-				return err;
+		/* The channel could have been closed while segmenting,
+		 * check that it is still connected.
+		 */
+		if (chan->state != BT_CONNECTED) {
+			__skb_queue_purge(&seg_queue);
+			err = -ENOTCONN;
 		}
 
-		if (chan->mode == L2CAP_MODE_STREAMING) {
-			l2cap_streaming_send(chan);
-			err = len;
+		if (err)
 			break;
-		}
 
-		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
-		    test_bit(CONN_WAIT_F, &chan->conn_state)) {
-			err = len;
-			break;
-		}
+		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
+		if (chan->mode == L2CAP_MODE_ERTM)
+			err = l2cap_ertm_send(chan);
+		else
+			l2cap_streaming_send(chan);
 
-		err = l2cap_ertm_send(chan);
 		if (err >= 0)
 			err = len;
 
+		/* If the skbs were not queued for sending, they'll still be in
+		 * seg_queue and need to be purged.
+		 */
+		__skb_queue_purge(&seg_queue);
 		break;
 
 	default: