aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMarcelo Ricardo Leitner <marcelo.leitner@gmail.com>2016-06-02 14:05:43 -0400
committerDavid S. Miller <davem@davemloft.net>2016-06-03 19:37:21 -0400
commit90017accff61ae89283ad9a51f9ac46ca01633fb (patch)
treec62d8801baf03dfd048848e6b6ea325d94c481a6
parent3acb50c18d8d6650f10919464ade4dcdaf41d62f (diff)
sctp: Add GSO support
SCTP has this peculiarity that its packets cannot be just segmented to (P)MTU. Its chunks must be contained in IP segments, padding respected. So we can't just generate a big skb, set gso_size to the fragmentation point and deliver it to IP layer. This patch takes a different approach. SCTP will now build a skb as it would be if it was received using GRO. That is, there will be a cover skb with protocol headers and children ones containing the actual segments, already segmented in a way that respects SCTP RFCs. With that, we can tell skb_segment() to just split based on frag_list, trusting its sizes are already in accordance. This way SCTP can benefit from GSO and instead of passing several packets through the stack, it can pass a single large packet. v2: - Added support for receiving GSO frames, as requested by Dave Miller. - Clear skb->cb if packet is GSO (otherwise it's not used by SCTP) - Added heuristics similar to what we have in TCP for not generating single GSO packets that fill cwnd. v3: - consider sctphdr size in skb_gso_transport_seglen() - rebased due to 5c7cdf339af5 ("gso: Remove arbitrary checks for unsupported GSO") Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> Tested-by: Xin Long <lucien.xin@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/netdev_features.h7
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sctp/structs.h5
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/sctp/Makefile3
-rw-r--r--net/sctp/input.c12
-rw-r--r--net/sctp/inqueue.c51
-rw-r--r--net/sctp/offload.c98
-rw-r--r--net/sctp/output.c363
-rw-r--r--net/sctp/protocol.c3
-rw-r--r--net/sctp/socket.c2
14 files changed, 429 insertions, 126 deletions
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index aa7b2400f98c..9c6c8ef2e9e7 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -53,8 +53,9 @@ enum {
53 * headers in software. 53 * headers in software.
54 */ 54 */
55 NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ 55 NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
56 NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
56 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ 57 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
57 NETIF_F_GSO_TUNNEL_REMCSUM_BIT, 58 NETIF_F_GSO_SCTP_BIT,
58 59
59 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ 60 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
60 NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ 61 NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -128,6 +129,7 @@ enum {
128#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID) 129#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID)
129#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL) 130#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL)
130#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) 131#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
132#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP)
131#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) 133#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
132#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) 134#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
133#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 135#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
@@ -166,7 +168,8 @@ enum {
166 NETIF_F_FSO) 168 NETIF_F_FSO)
167 169
168/* List of features with software fallbacks. */ 170/* List of features with software fallbacks. */
169#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO) 171#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \
172 NETIF_F_GSO_SCTP)
170 173
171/* 174/*
172 * If one device supports one of these features, then enable them 175 * If one device supports one of these features, then enable them
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f45929ce8157..fa6df2699532 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4012,6 +4012,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4012 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 4012 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4013 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); 4013 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4014 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); 4014 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4015 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4015 4016
4016 return (features & feature) == feature; 4017 return (features & feature) == feature;
4017} 4018}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index aa3f9d7e8d5c..dc0fca747c5e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -487,6 +487,8 @@ enum {
487 SKB_GSO_PARTIAL = 1 << 13, 487 SKB_GSO_PARTIAL = 1 << 13,
488 488
489 SKB_GSO_TUNNEL_REMCSUM = 1 << 14, 489 SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
490
491 SKB_GSO_SCTP = 1 << 15,
490}; 492};
491 493
492#if BITS_PER_LONG > 32 494#if BITS_PER_LONG > 32
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index b392ac8382f2..632e205ca54b 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -186,6 +186,10 @@ void sctp_assocs_proc_exit(struct net *net);
186int sctp_remaddr_proc_init(struct net *net); 186int sctp_remaddr_proc_init(struct net *net);
187void sctp_remaddr_proc_exit(struct net *net); 187void sctp_remaddr_proc_exit(struct net *net);
188 188
189/*
190 * sctp/offload.c
191 */
192int sctp_offload_init(void);
189 193
190/* 194/*
191 * Module global variables 195 * Module global variables
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 16b013a6191c..83c5ec58b93a 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -566,6 +566,9 @@ struct sctp_chunk {
566 /* This points to the sk_buff containing the actual data. */ 566 /* This points to the sk_buff containing the actual data. */
567 struct sk_buff *skb; 567 struct sk_buff *skb;
568 568
569 /* In case of GSO packets, this will store the head one */
570 struct sk_buff *head_skb;
571
569 /* These are the SCTP headers by reverse order in a packet. 572 /* These are the SCTP headers by reverse order in a packet.
570 * Note that some of these may happen more than once. In that 573 * Note that some of these may happen more than once. In that
571 * case, we point at the "current" one, whatever that means 574 * case, we point at the "current" one, whatever that means
@@ -696,6 +699,8 @@ struct sctp_packet {
696 size_t overhead; 699 size_t overhead;
697 /* This is the total size of all chunks INCLUDING padding. */ 700 /* This is the total size of all chunks INCLUDING padding. */
698 size_t size; 701 size_t size;
702 /* This is the maximum size this packet may have */
703 size_t max_size;
699 704
700 /* The packet is destined for this transport address. 705 /* The packet is destined for this transport address.
701 * The function we finally use to pass down to the next lower 706 * The function we finally use to pass down to the next lower
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f4034817d255..977489820eb9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -89,6 +89,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
89 [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", 89 [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
90 [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation", 90 [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
91 [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial", 91 [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial",
92 [NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation",
92 93
93 [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", 94 [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
94 [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp", 95 [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5ca562b56ec3..b6e0f95bef36 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -49,6 +49,7 @@
49#include <linux/slab.h> 49#include <linux/slab.h>
50#include <linux/tcp.h> 50#include <linux/tcp.h>
51#include <linux/udp.h> 51#include <linux/udp.h>
52#include <linux/sctp.h>
52#include <linux/netdevice.h> 53#include <linux/netdevice.h>
53#ifdef CONFIG_NET_CLS_ACT 54#ifdef CONFIG_NET_CLS_ACT
54#include <net/pkt_sched.h> 55#include <net/pkt_sched.h>
@@ -4383,6 +4384,8 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4383 thlen += inner_tcp_hdrlen(skb); 4384 thlen += inner_tcp_hdrlen(skb);
4384 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 4385 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4385 thlen = tcp_hdrlen(skb); 4386 thlen = tcp_hdrlen(skb);
4387 } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) {
4388 thlen = sizeof(struct sctphdr);
4386 } 4389 }
4387 /* UFO sets gso_size to the size of the fragmentation 4390 /* UFO sets gso_size to the size of the fragmentation
4388 * payload, i.e. the size of the L4 (UDP) header is already 4391 * payload, i.e. the size of the L4 (UDP) header is already
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 0fca5824ad0e..6c4f7496cec6 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -11,7 +11,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
11 transport.o chunk.o sm_make_chunk.o ulpevent.o \ 11 transport.o chunk.o sm_make_chunk.o ulpevent.o \
12 inqueue.o outqueue.o ulpqueue.o \ 12 inqueue.o outqueue.o ulpqueue.o \
13 tsnmap.o bind_addr.o socket.o primitive.o \ 13 tsnmap.o bind_addr.o socket.o primitive.o \
14 output.o input.o debug.o ssnmap.o auth.o 14 output.o input.o debug.o ssnmap.o auth.o \
15 offload.o
15 16
16sctp_probe-y := probe.o 17sctp_probe-y := probe.o
17 18
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5cff2546c3dd..6f8e676d285e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -139,7 +139,9 @@ int sctp_rcv(struct sk_buff *skb)
139 skb->csum_valid = 0; /* Previous value not applicable */ 139 skb->csum_valid = 0; /* Previous value not applicable */
140 if (skb_csum_unnecessary(skb)) 140 if (skb_csum_unnecessary(skb))
141 __skb_decr_checksum_unnecessary(skb); 141 __skb_decr_checksum_unnecessary(skb);
142 else if (!sctp_checksum_disable && sctp_rcv_checksum(net, skb) < 0) 142 else if (!sctp_checksum_disable &&
143 !(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
144 sctp_rcv_checksum(net, skb) < 0)
143 goto discard_it; 145 goto discard_it;
144 skb->csum_valid = 1; 146 skb->csum_valid = 1;
145 147
@@ -1175,6 +1177,14 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
1175{ 1177{
1176 sctp_chunkhdr_t *ch; 1178 sctp_chunkhdr_t *ch;
1177 1179
1180 /* We do not allow GSO frames here as we need to linearize and
1181 * then cannot guarantee frame boundaries. This shouldn't be an
1182 * issue as packets hitting this are mostly INIT or INIT-ACK and
1183 * those cannot be on GSO-style anyway.
1184 */
1185 if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP)
1186 return NULL;
1187
1178 if (skb_linearize(skb)) 1188 if (skb_linearize(skb))
1179 return NULL; 1189 return NULL;
1180 1190
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 5ba08ceda3ab..edabbbdfca54 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -138,6 +138,17 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
138 if (chunk->singleton || 138 if (chunk->singleton ||
139 chunk->end_of_packet || 139 chunk->end_of_packet ||
140 chunk->pdiscard) { 140 chunk->pdiscard) {
141 if (chunk->head_skb == chunk->skb) {
142 chunk->skb = skb_shinfo(chunk->skb)->frag_list;
143 goto new_skb;
144 }
145 if (chunk->skb->next) {
146 chunk->skb = chunk->skb->next;
147 goto new_skb;
148 }
149
150 if (chunk->head_skb)
151 chunk->skb = chunk->head_skb;
141 sctp_chunk_free(chunk); 152 sctp_chunk_free(chunk);
142 chunk = queue->in_progress = NULL; 153 chunk = queue->in_progress = NULL;
143 } else { 154 } else {
@@ -155,15 +166,15 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
155 166
156next_chunk: 167next_chunk:
157 /* Is the queue empty? */ 168 /* Is the queue empty? */
158 if (list_empty(&queue->in_chunk_list)) 169 entry = sctp_list_dequeue(&queue->in_chunk_list);
170 if (!entry)
159 return NULL; 171 return NULL;
160 172
161 entry = queue->in_chunk_list.next;
162 chunk = list_entry(entry, struct sctp_chunk, list); 173 chunk = list_entry(entry, struct sctp_chunk, list);
163 list_del_init(entry);
164 174
165 /* Linearize if it's not GSO */ 175 /* Linearize if it's not GSO */
166 if (skb_is_nonlinear(chunk->skb)) { 176 if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
177 skb_is_nonlinear(chunk->skb)) {
167 if (skb_linearize(chunk->skb)) { 178 if (skb_linearize(chunk->skb)) {
168 __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS); 179 __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
169 sctp_chunk_free(chunk); 180 sctp_chunk_free(chunk);
@@ -174,15 +185,39 @@ next_chunk:
174 chunk->sctp_hdr = sctp_hdr(chunk->skb); 185 chunk->sctp_hdr = sctp_hdr(chunk->skb);
175 } 186 }
176 187
188 if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
189 /* GSO-marked skbs but without frags, handle
190 * them normally
191 */
192 if (skb_shinfo(chunk->skb)->frag_list)
193 chunk->head_skb = chunk->skb;
194
195 /* skbs with "cover letter" */
196 if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
197 chunk->skb = skb_shinfo(chunk->skb)->frag_list;
198
199 if (WARN_ON(!chunk->skb)) {
200 __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
201 sctp_chunk_free(chunk);
202 goto next_chunk;
203 }
204 }
205
206 if (chunk->asoc)
207 sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);
208
177 queue->in_progress = chunk; 209 queue->in_progress = chunk;
178 210
211new_skb:
179 /* This is the first chunk in the packet. */ 212 /* This is the first chunk in the packet. */
180 chunk->singleton = 1;
181 ch = (sctp_chunkhdr_t *) chunk->skb->data; 213 ch = (sctp_chunkhdr_t *) chunk->skb->data;
214 chunk->singleton = 1;
182 chunk->data_accepted = 0; 215 chunk->data_accepted = 0;
183 216 chunk->pdiscard = 0;
184 if (chunk->asoc) 217 chunk->auth = 0;
185 sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb); 218 chunk->has_asconf = 0;
219 chunk->end_of_packet = 0;
220 chunk->ecn_ce_done = 0;
186 } 221 }
187 222
188 chunk->chunk_hdr = ch; 223 chunk->chunk_hdr = ch;
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
new file mode 100644
index 000000000000..a37887b373a7
--- /dev/null
+++ b/net/sctp/offload.c
@@ -0,0 +1,98 @@
1/*
2 * sctp_offload - GRO/GSO Offloading for SCTP
3 *
4 * Copyright (C) 2015, Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/kernel.h>
20#include <linux/kprobes.h>
21#include <linux/socket.h>
22#include <linux/sctp.h>
23#include <linux/proc_fs.h>
24#include <linux/vmalloc.h>
25#include <linux/module.h>
26#include <linux/kfifo.h>
27#include <linux/time.h>
28#include <net/net_namespace.h>
29
30#include <linux/skbuff.h>
31#include <net/sctp/sctp.h>
32#include <net/sctp/checksum.h>
33#include <net/protocol.h>
34
35static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
36{
37 skb->ip_summed = CHECKSUM_NONE;
38 return sctp_compute_cksum(skb, skb_transport_offset(skb));
39}
40
41static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
42 netdev_features_t features)
43{
44 struct sk_buff *segs = ERR_PTR(-EINVAL);
45 struct sctphdr *sh;
46
47 sh = sctp_hdr(skb);
48 if (!pskb_may_pull(skb, sizeof(*sh)))
49 goto out;
50
51 __skb_pull(skb, sizeof(*sh));
52
53 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
54 /* Packet is from an untrusted source, reset gso_segs. */
55 struct skb_shared_info *pinfo = skb_shinfo(skb);
56 struct sk_buff *frag_iter;
57
58 pinfo->gso_segs = 0;
59 if (skb->len != skb->data_len) {
60 /* Means we have chunks in here too */
61 pinfo->gso_segs++;
62 }
63
64 skb_walk_frags(skb, frag_iter)
65 pinfo->gso_segs++;
66
67 segs = NULL;
68 goto out;
69 }
70
71 segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
72 if (IS_ERR(segs))
73 goto out;
74
75 /* All that is left is update SCTP CRC if necessary */
76 if (!(features & NETIF_F_SCTP_CRC)) {
77 for (skb = segs; skb; skb = skb->next) {
78 if (skb->ip_summed == CHECKSUM_PARTIAL) {
79 sh = sctp_hdr(skb);
80 sh->checksum = sctp_gso_make_checksum(skb);
81 }
82 }
83 }
84
85out:
86 return segs;
87}
88
89static const struct net_offload sctp_offload = {
90 .callbacks = {
91 .gso_segment = sctp_gso_segment,
92 },
93};
94
95int __init sctp_offload_init(void)
96{
97 return inet_add_offload(&sctp_offload, IPPROTO_SCTP);
98}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 9844fe573029..60499a69179d 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -84,18 +84,42 @@ static void sctp_packet_reset(struct sctp_packet *packet)
84struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, 84struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
85 __u32 vtag, int ecn_capable) 85 __u32 vtag, int ecn_capable)
86{ 86{
87 struct sctp_chunk *chunk = NULL; 87 struct sctp_transport *tp = packet->transport;
88 struct sctp_association *asoc = tp->asoc;
88 89
89 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); 90 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
90 91
91 packet->vtag = vtag; 92 packet->vtag = vtag;
92 93
94 if (asoc && tp->dst) {
95 struct sock *sk = asoc->base.sk;
96
97 rcu_read_lock();
98 if (__sk_dst_get(sk) != tp->dst) {
99 dst_hold(tp->dst);
100 sk_setup_caps(sk, tp->dst);
101 }
102
103 if (sk_can_gso(sk)) {
104 struct net_device *dev = tp->dst->dev;
105
106 packet->max_size = dev->gso_max_size;
107 } else {
108 packet->max_size = asoc->pathmtu;
109 }
110 rcu_read_unlock();
111
112 } else {
113 packet->max_size = tp->pathmtu;
114 }
115
93 if (ecn_capable && sctp_packet_empty(packet)) { 116 if (ecn_capable && sctp_packet_empty(packet)) {
94 chunk = sctp_get_ecne_prepend(packet->transport->asoc); 117 struct sctp_chunk *chunk;
95 118
96 /* If there a is a prepend chunk stick it on the list before 119 /* If there a is a prepend chunk stick it on the list before
97 * any other chunks get appended. 120 * any other chunks get appended.
98 */ 121 */
122 chunk = sctp_get_ecne_prepend(asoc);
99 if (chunk) 123 if (chunk)
100 sctp_packet_append_chunk(packet, chunk); 124 sctp_packet_append_chunk(packet, chunk);
101 } 125 }
@@ -381,12 +405,15 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
381 struct sctp_transport *tp = packet->transport; 405 struct sctp_transport *tp = packet->transport;
382 struct sctp_association *asoc = tp->asoc; 406 struct sctp_association *asoc = tp->asoc;
383 struct sctphdr *sh; 407 struct sctphdr *sh;
384 struct sk_buff *nskb; 408 struct sk_buff *nskb = NULL, *head = NULL;
385 struct sctp_chunk *chunk, *tmp; 409 struct sctp_chunk *chunk, *tmp;
386 struct sock *sk; 410 struct sock *sk;
387 int err = 0; 411 int err = 0;
388 int padding; /* How much padding do we need? */ 412 int padding; /* How much padding do we need? */
413 int pkt_size;
389 __u8 has_data = 0; 414 __u8 has_data = 0;
415 int gso = 0;
416 int pktcount = 0;
390 struct dst_entry *dst; 417 struct dst_entry *dst;
391 unsigned char *auth = NULL; /* pointer to auth in skb data */ 418 unsigned char *auth = NULL; /* pointer to auth in skb data */
392 419
@@ -400,18 +427,37 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
400 chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); 427 chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
401 sk = chunk->skb->sk; 428 sk = chunk->skb->sk;
402 429
403 /* Allocate the new skb. */ 430 /* Allocate the head skb, or main one if not in GSO */
404 nskb = alloc_skb(packet->size + MAX_HEADER, gfp); 431 if (packet->size > tp->pathmtu && !packet->ipfragok) {
405 if (!nskb) 432 if (sk_can_gso(sk)) {
433 gso = 1;
434 pkt_size = packet->overhead;
435 } else {
436 /* If this happens, we trash this packet and try
437 * to build a new one, hopefully correct this
438 * time. Application may notice this error.
439 */
440 pr_err_once("Trying to GSO but underlying device doesn't support it.");
441 goto nomem;
442 }
443 } else {
444 pkt_size = packet->size;
445 }
446 head = alloc_skb(pkt_size + MAX_HEADER, gfp);
447 if (!head)
406 goto nomem; 448 goto nomem;
449 if (gso) {
450 NAPI_GRO_CB(head)->last = head;
451 skb_shinfo(head)->gso_type = sk->sk_gso_type;
452 }
407 453
408 /* Make sure the outbound skb has enough header room reserved. */ 454 /* Make sure the outbound skb has enough header room reserved. */
409 skb_reserve(nskb, packet->overhead + MAX_HEADER); 455 skb_reserve(head, packet->overhead + MAX_HEADER);
410 456
411 /* Set the owning socket so that we know where to get the 457 /* Set the owning socket so that we know where to get the
412 * destination IP address. 458 * destination IP address.
413 */ 459 */
414 sctp_packet_set_owner_w(nskb, sk); 460 sctp_packet_set_owner_w(head, sk);
415 461
416 if (!sctp_transport_dst_check(tp)) { 462 if (!sctp_transport_dst_check(tp)) {
417 sctp_transport_route(tp, NULL, sctp_sk(sk)); 463 sctp_transport_route(tp, NULL, sctp_sk(sk));
@@ -422,11 +468,11 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
422 dst = dst_clone(tp->dst); 468 dst = dst_clone(tp->dst);
423 if (!dst) 469 if (!dst)
424 goto no_route; 470 goto no_route;
425 skb_dst_set(nskb, dst); 471 skb_dst_set(head, dst);
426 472
427 /* Build the SCTP header. */ 473 /* Build the SCTP header. */
428 sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); 474 sh = (struct sctphdr *)skb_push(head, sizeof(struct sctphdr));
429 skb_reset_transport_header(nskb); 475 skb_reset_transport_header(head);
430 sh->source = htons(packet->source_port); 476 sh->source = htons(packet->source_port);
431 sh->dest = htons(packet->destination_port); 477 sh->dest = htons(packet->destination_port);
432 478
@@ -441,90 +487,133 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
441 sh->vtag = htonl(packet->vtag); 487 sh->vtag = htonl(packet->vtag);
442 sh->checksum = 0; 488 sh->checksum = 0;
443 489
444 /**
445 * 6.10 Bundling
446 *
447 * An endpoint bundles chunks by simply including multiple
448 * chunks in one outbound SCTP packet. ...
449 */
450
451 /**
452 * 3.2 Chunk Field Descriptions
453 *
454 * The total length of a chunk (including Type, Length and
455 * Value fields) MUST be a multiple of 4 bytes. If the length
456 * of the chunk is not a multiple of 4 bytes, the sender MUST
457 * pad the chunk with all zero bytes and this padding is not
458 * included in the chunk length field. The sender should
459 * never pad with more than 3 bytes.
460 *
461 * [This whole comment explains WORD_ROUND() below.]
462 */
463
464 pr_debug("***sctp_transmit_packet***\n"); 490 pr_debug("***sctp_transmit_packet***\n");
465 491
466 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { 492 do {
467 list_del_init(&chunk->list); 493 /* Set up convenience variables... */
468 if (sctp_chunk_is_data(chunk)) { 494 chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
469 /* 6.3.1 C4) When data is in flight and when allowed 495 pktcount++;
470 * by rule C5, a new RTT measurement MUST be made each
471 * round trip. Furthermore, new RTT measurements
472 * SHOULD be made no more than once per round-trip
473 * for a given destination transport address.
474 */
475 496
476 if (!chunk->resent && !tp->rto_pending) { 497 /* Calculate packet size, so it fits in PMTU. Leave
477 chunk->rtt_in_progress = 1; 498 * other chunks for the next packets.
478 tp->rto_pending = 1; 499 */
500 if (gso) {
501 pkt_size = packet->overhead;
502 list_for_each_entry(chunk, &packet->chunk_list, list) {
503 int padded = WORD_ROUND(chunk->skb->len);
504
505 if (pkt_size + padded > tp->pathmtu)
506 break;
507 pkt_size += padded;
479 } 508 }
480 509
481 has_data = 1; 510 /* Allocate a new skb. */
482 } 511 nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
512 if (!nskb)
513 goto nomem;
483 514
484 padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len; 515 /* Make sure the outbound skb has enough header
485 if (padding) 516 * room reserved.
486 memset(skb_put(chunk->skb, padding), 0, padding); 517 */
518 skb_reserve(nskb, packet->overhead + MAX_HEADER);
519 } else {
520 nskb = head;
521 }
487 522
488 /* if this is the auth chunk that we are adding, 523 /**
489 * store pointer where it will be added and put 524 * 3.2 Chunk Field Descriptions
490 * the auth into the packet. 525 *
526 * The total length of a chunk (including Type, Length and
527 * Value fields) MUST be a multiple of 4 bytes. If the length
528 * of the chunk is not a multiple of 4 bytes, the sender MUST
529 * pad the chunk with all zero bytes and this padding is not
530 * included in the chunk length field. The sender should
531 * never pad with more than 3 bytes.
532 *
533 * [This whole comment explains WORD_ROUND() below.]
491 */ 534 */
492 if (chunk == packet->auth)
493 auth = skb_tail_pointer(nskb);
494 535
495 memcpy(skb_put(nskb, chunk->skb->len), 536 pkt_size -= packet->overhead;
537 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
538 list_del_init(&chunk->list);
539 if (sctp_chunk_is_data(chunk)) {
540 /* 6.3.1 C4) When data is in flight and when allowed
541 * by rule C5, a new RTT measurement MUST be made each
542 * round trip. Furthermore, new RTT measurements
543 * SHOULD be made no more than once per round-trip
544 * for a given destination transport address.
545 */
546
547 if (!chunk->resent && !tp->rto_pending) {
548 chunk->rtt_in_progress = 1;
549 tp->rto_pending = 1;
550 }
551
552 has_data = 1;
553 }
554
555 padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
556 if (padding)
557 memset(skb_put(chunk->skb, padding), 0, padding);
558
559 /* if this is the auth chunk that we are adding,
560 * store pointer where it will be added and put
561 * the auth into the packet.
562 */
563 if (chunk == packet->auth)
564 auth = skb_tail_pointer(nskb);
565
566 memcpy(skb_put(nskb, chunk->skb->len),
496 chunk->skb->data, chunk->skb->len); 567 chunk->skb->data, chunk->skb->len);
497 568
498 pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, " 569 pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
499 "rtt_in_progress:%d\n", chunk, 570 chunk,
500 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), 571 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
501 chunk->has_tsn ? "TSN" : "No TSN", 572 chunk->has_tsn ? "TSN" : "No TSN",
502 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0, 573 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
503 ntohs(chunk->chunk_hdr->length), chunk->skb->len, 574 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
504 chunk->rtt_in_progress); 575 chunk->rtt_in_progress);
505 576
506 /* 577 /* If this is a control chunk, this is our last
507 * If this is a control chunk, this is our last 578 * reference. Free data chunks after they've been
508 * reference. Free data chunks after they've been 579 * acknowledged or have failed.
509 * acknowledged or have failed. 580 * Re-queue auth chunks if needed.
510 */ 581 */
511 if (!sctp_chunk_is_data(chunk)) 582 pkt_size -= WORD_ROUND(chunk->skb->len);
512 sctp_chunk_free(chunk);
513 }
514 583
515 /* SCTP-AUTH, Section 6.2 584 if (chunk == packet->auth && !list_empty(&packet->chunk_list))
516 * The sender MUST calculate the MAC as described in RFC2104 [2] 585 list_add(&chunk->list, &packet->chunk_list);
517 * using the hash function H as described by the MAC Identifier and 586 else if (!sctp_chunk_is_data(chunk))
518 * the shared association key K based on the endpoint pair shared key 587 sctp_chunk_free(chunk);
519 * described by the shared key identifier. The 'data' used for the 588
520 * computation of the AUTH-chunk is given by the AUTH chunk with its 589 if (!pkt_size)
521 * HMAC field set to zero (as shown in Figure 6) followed by all 590 break;
522 * chunks that are placed after the AUTH chunk in the SCTP packet. 591 }
523 */ 592
524 if (auth) 593 /* SCTP-AUTH, Section 6.2
525 sctp_auth_calculate_hmac(asoc, nskb, 594 * The sender MUST calculate the MAC as described in RFC2104 [2]
526 (struct sctp_auth_chunk *)auth, 595 * using the hash function H as described by the MAC Identifier and
527 gfp); 596 * the shared association key K based on the endpoint pair shared key
597 * described by the shared key identifier. The 'data' used for the
598 * computation of the AUTH-chunk is given by the AUTH chunk with its
599 * HMAC field set to zero (as shown in Figure 6) followed by all
600 * chunks that are placed after the AUTH chunk in the SCTP packet.
601 */
602 if (auth)
603 sctp_auth_calculate_hmac(asoc, nskb,
604 (struct sctp_auth_chunk *)auth,
605 gfp);
606
607 if (!gso)
608 break;
609
610 if (skb_gro_receive(&head, nskb))
611 goto nomem;
612 nskb = NULL;
613 if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
614 sk->sk_gso_max_segs))
615 goto nomem;
616 } while (!list_empty(&packet->chunk_list));
528 617
529 /* 2) Calculate the Adler-32 checksum of the whole packet, 618 /* 2) Calculate the Adler-32 checksum of the whole packet,
530 * including the SCTP common header and all the 619 * including the SCTP common header and all the
@@ -532,16 +621,18 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
532 * 621 *
533 * Note: Adler-32 is no longer applicable, as has been replaced 622 * Note: Adler-32 is no longer applicable, as has been replaced
534 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. 623 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
624 *
625 * If it's a GSO packet, it's postponed to sctp_skb_segment.
535 */ 626 */
536 if (!sctp_checksum_disable) { 627 if (!sctp_checksum_disable || gso) {
537 if (!(dst->dev->features & NETIF_F_SCTP_CRC) || 628 if (!gso && (!(dst->dev->features & NETIF_F_SCTP_CRC) ||
538 (dst_xfrm(dst) != NULL) || packet->ipfragok) { 629 dst_xfrm(dst) || packet->ipfragok)) {
539 sh->checksum = sctp_compute_cksum(nskb, 0); 630 sh->checksum = sctp_compute_cksum(head, 0);
540 } else { 631 } else {
541 /* no need to seed pseudo checksum for SCTP */ 632 /* no need to seed pseudo checksum for SCTP */
542 nskb->ip_summed = CHECKSUM_PARTIAL; 633 head->ip_summed = CHECKSUM_PARTIAL;
543 nskb->csum_start = skb_transport_header(nskb) - nskb->head; 634 head->csum_start = skb_transport_header(head) - head->head;
544 nskb->csum_offset = offsetof(struct sctphdr, checksum); 635 head->csum_offset = offsetof(struct sctphdr, checksum);
545 } 636 }
546 } 637 }
547 638
@@ -557,7 +648,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
557 * Note: The works for IPv6 layer checks this bit too later 648 * Note: The works for IPv6 layer checks this bit too later
558 * in transmission. See IP6_ECN_flow_xmit(). 649 * in transmission. See IP6_ECN_flow_xmit().
559 */ 650 */
560 tp->af_specific->ecn_capable(nskb->sk); 651 tp->af_specific->ecn_capable(sk);
561 652
562 /* Set up the IP options. */ 653 /* Set up the IP options. */
563 /* BUG: not implemented 654 /* BUG: not implemented
@@ -566,7 +657,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
566 657
567 /* Dump that on IP! */ 658 /* Dump that on IP! */
568 if (asoc) { 659 if (asoc) {
569 asoc->stats.opackets++; 660 asoc->stats.opackets += pktcount;
570 if (asoc->peer.last_sent_to != tp) 661 if (asoc->peer.last_sent_to != tp)
571 /* Considering the multiple CPU scenario, this is a 662 /* Considering the multiple CPU scenario, this is a
572 * "correcter" place for last_sent_to. --xguo 663 * "correcter" place for last_sent_to. --xguo
@@ -589,16 +680,36 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
589 } 680 }
590 } 681 }
591 682
592 pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len); 683 pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);
684
685 if (gso) {
686 /* Cleanup our debris for IP stacks */
687 memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
688 sizeof(struct inet6_skb_parm)));
593 689
594 nskb->ignore_df = packet->ipfragok; 690 skb_shinfo(head)->gso_segs = pktcount;
595 tp->af_specific->sctp_xmit(nskb, tp); 691 skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
692
693 /* We have to refresh this in case we are xmiting to
694 * more than one transport at a time
695 */
696 rcu_read_lock();
697 if (__sk_dst_get(sk) != tp->dst) {
698 dst_hold(tp->dst);
699 sk_setup_caps(sk, tp->dst);
700 }
701 rcu_read_unlock();
702 }
703 head->ignore_df = packet->ipfragok;
704 tp->af_specific->sctp_xmit(head, tp);
596 705
597out: 706out:
598 sctp_packet_reset(packet); 707 sctp_packet_reset(packet);
599 return err; 708 return err;
600no_route: 709no_route:
601 kfree_skb(nskb); 710 kfree_skb(head);
711 if (nskb != head)
712 kfree_skb(nskb);
602 713
603 if (asoc) 714 if (asoc)
604 IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); 715 IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
@@ -751,39 +862,63 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
751 struct sctp_chunk *chunk, 862 struct sctp_chunk *chunk,
752 u16 chunk_len) 863 u16 chunk_len)
753{ 864{
754 size_t psize; 865 size_t psize, pmtu;
755 size_t pmtu;
756 int too_big;
757 sctp_xmit_t retval = SCTP_XMIT_OK; 866 sctp_xmit_t retval = SCTP_XMIT_OK;
758 867
759 psize = packet->size; 868 psize = packet->size;
760 pmtu = ((packet->transport->asoc) ? 869 if (packet->transport->asoc)
761 (packet->transport->asoc->pathmtu) : 870 pmtu = packet->transport->asoc->pathmtu;
762 (packet->transport->pathmtu)); 871 else
763 872 pmtu = packet->transport->pathmtu;
764 too_big = (psize + chunk_len > pmtu);
765 873
766 /* Decide if we need to fragment or resubmit later. */ 874 /* Decide if we need to fragment or resubmit later. */
767 if (too_big) { 875 if (psize + chunk_len > pmtu) {
768 /* It's OK to fragmet at IP level if any one of the following 876 /* It's OK to fragment at IP level if any one of the following
769 * is true: 877 * is true:
770 * 1. The packet is empty (meaning this chunk is greater 878 * 1. The packet is empty (meaning this chunk is greater
771 * the MTU) 879 * the MTU)
772 * 2. The chunk we are adding is a control chunk 880 * 2. The packet doesn't have any data in it yet and data
773 * 3. The packet doesn't have any data in it yet and data 881 * requires authentication.
774 * requires authentication.
775 */ 882 */
776 if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) || 883 if (sctp_packet_empty(packet) ||
777 (!packet->has_data && chunk->auth)) { 884 (!packet->has_data && chunk->auth)) {
778 /* We no longer do re-fragmentation. 885 /* We no longer do re-fragmentation.
779 * Just fragment at the IP layer, if we 886 * Just fragment at the IP layer, if we
780 * actually hit this condition 887 * actually hit this condition
781 */ 888 */
782 packet->ipfragok = 1; 889 packet->ipfragok = 1;
783 } else { 890 goto out;
784 retval = SCTP_XMIT_PMTU_FULL;
785 } 891 }
892
893 /* It is also okay to fragment if the chunk we are
894 * adding is a control chunk, but only if current packet
895 * is not a GSO one otherwise it causes fragmentation of
896 * a large frame. So in this case we allow the
897 * fragmentation by forcing it to be in a new packet.
898 */
899 if (!sctp_chunk_is_data(chunk) && packet->has_data)
900 retval = SCTP_XMIT_PMTU_FULL;
901
902 if (psize + chunk_len > packet->max_size)
903 /* Hit GSO/PMTU limit, gotta flush */
904 retval = SCTP_XMIT_PMTU_FULL;
905
906 if (!packet->transport->burst_limited &&
907 psize + chunk_len > (packet->transport->cwnd >> 1))
908 /* Do not allow a single GSO packet to use more
909 * than half of cwnd.
910 */
911 retval = SCTP_XMIT_PMTU_FULL;
912
913 if (packet->transport->burst_limited &&
914 psize + chunk_len > (packet->transport->burst_limited >> 1))
915 /* Do not allow a single GSO packet to use more
916 * than half of original cwnd.
917 */
918 retval = SCTP_XMIT_PMTU_FULL;
919 /* Otherwise it will fit in the GSO packet */
786 } 920 }
787 921
922out:
788 return retval; 923 return retval;
789} 924}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d3d50daa248b..40022ee885d7 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1516,6 +1516,9 @@ static __init int sctp_init(void)
1516 if (status) 1516 if (status)
1517 goto err_v6_add_protocol; 1517 goto err_v6_add_protocol;
1518 1518
1519 if (sctp_offload_init() < 0)
1520 pr_crit("%s: Cannot add SCTP protocol offload\n", __func__);
1521
1519out: 1522out:
1520 return status; 1523 return status;
1521err_v6_add_protocol: 1524err_v6_add_protocol:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 67154b848aa9..712fb2339baa 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4003,6 +4003,8 @@ static int sctp_init_sock(struct sock *sk)
4003 return -ESOCKTNOSUPPORT; 4003 return -ESOCKTNOSUPPORT;
4004 } 4004 }
4005 4005
4006 sk->sk_gso_type = SKB_GSO_SCTP;
4007
4006 /* Initialize default send parameters. These parameters can be 4008 /* Initialize default send parameters. These parameters can be
4007 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 4009 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
4008 */ 4010 */