author		Neil Horman <nhorman@tuxdriver.com>	2006-05-05 20:02:09 -0400
committer	David S. Miller <davem@davemloft.net>	2006-05-05 20:02:09 -0400
commit		7c3ceb4fb9667f34f1599a062efecf4cdc4a4ce5 (patch)
tree		e54921ec9e8cffac5fa3d4155d18f144d61ad878 /net/sctp
parent		91ef5d2d6e934762db9c3e0d9e5b2862e40ea429 (diff)
[SCTP]: Allow spillover of receive buffer to avoid deadlock.

This patch fixes a deadlock situation in the receive path by allowing
temporary spillover of the receive buffer.

- If the chunk we receive has a TSN that immediately follows the CTSN,
  accept it even if we run out of receive buffer space, and renege data
  with higher TSNs.
- Once we accept one chunk in a packet, accept all the remaining chunks
  even if we run out of receive buffer space.

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Mark Butler <butlerm@middle.net>
Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
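To make the two rules above concrete, here is a minimal user-space sketch of
the acceptance logic. It is an illustrative model only: accept_chunk(),
chunk_model, and rcvbuf_limit are hypothetical names, not kernel APIs; the
authoritative logic is sctp_eat_data() in the diff below.

/*
 * Illustrative model of the spillover rule, not kernel code.
 * All names here are hypothetical; compare with sctp_eat_data().
 */
#include <stdio.h>

struct chunk_model {
	unsigned int tsn;	/* transmission sequence number */
	int data_accepted;	/* set once any chunk of the packet is kept */
};

/* Returns 1 to accept the chunk, 0 to ignore its TSN. Sets *renege
 * when the chunk is accepted even though the buffer is over its limit. */
static int accept_chunk(struct chunk_model *c, unsigned int ctsn,
			long rmem_alloc, long rcvbuf_limit, int *renege)
{
	/* Once one chunk from a packet is kept, the whole skbuff is
	 * pinned in memory anyway, so keep the remaining chunks too. */
	if (c->data_accepted)
		return 1;

	if (rmem_alloc > rcvbuf_limit) {
		/* Under memory pressure, only the next in-order TSN
		 * (ctsn + 1) is accepted; this is what guarantees
		 * forward progress and avoids the deadlock. */
		if (c->tsn != ctsn + 1)
			return 0;
		*renege = 1;	/* make room by reneging higher TSNs */
	}

	c->data_accepted = 1;
	return 1;
}

int main(void)
{
	struct chunk_model c = { .tsn = 101, .data_accepted = 0 };
	int renege = 0;
	int ok;

	/* Buffer is past its limit, but tsn 101 == ctsn 100 + 1,
	 * so the chunk is still accepted and a renege is flagged. */
	ok = accept_chunk(&c, 100, 70000, 65536, &renege);
	printf("accepted=%d renege=%d\n", ok, renege);
	return 0;
}

Compiled as plain C, this prints accepted=1 renege=1, mirroring exactly the
case the patch is designed to handle.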
Diffstat (limited to 'net/sctp')
-rw-r--r--	net/sctp/inqueue.c	 1
-rw-r--r--	net/sctp/sm_statefuns.c	46
2 files changed, 37 insertions(+), 10 deletions(-)
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 297b8951463e..cf0c767d43ae 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -149,6 +149,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
 		/* This is the first chunk in the packet.  */
 		chunk->singleton = 1;
 		ch = (sctp_chunkhdr_t *) chunk->skb->data;
+		chunk->data_accepted = 0;
 	}
 
 	chunk->chunk_hdr = ch;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 2b9a832b29a7..f5d131f52a70 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5151,7 +5151,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	int tmp;
 	__u32 tsn;
 	int account_value;
+	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
 	struct sock *sk = asoc->base.sk;
+	int rcvbuf_over = 0;
 
 	data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
 	skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5162,10 +5164,16 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	/* ASSERT:  Now skb->data is really the user data.  */
 
 	/*
-	 * if we are established, and we have used up our receive
-	 * buffer memory, drop the frame
-	 */
-	if (asoc->state == SCTP_STATE_ESTABLISHED) {
+	 * If we are established, and we have used up our receive buffer
+	 * memory, think about dropping the frame.
+	 * Note that we have an opportunity to improve performance here.
+	 * If we accept one chunk from an skbuff, we have to keep all the
+	 * memory of that skbuff around until the chunk is read into user
+	 * space. Therefore, once we accept 1 chunk we may as well accept all
+	 * remaining chunks in the skbuff. The data_accepted flag helps us do
+	 * that.
+	 */
+	if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) {
 		/*
 		 * If the receive buffer policy is 1, then each
 		 * association can allocate up to sk_rcvbuf bytes
@@ -5176,9 +5184,25 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 			account_value = atomic_read(&asoc->rmem_alloc);
 		else
 			account_value = atomic_read(&sk->sk_rmem_alloc);
-
-		if (account_value > sk->sk_rcvbuf)
-			return SCTP_IERROR_IGNORE_TSN;
+		if (account_value > sk->sk_rcvbuf) {
+			/*
+			 * We need to make forward progress, even when we are
+			 * under memory pressure, so we always allow the
+			 * next tsn after the ctsn ack point to be accepted.
+			 * This lets us avoid deadlocks in which we have to
+			 * drop frames that would otherwise let us drain the
+			 * receive queue.
+			 */
+			if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn)
+				return SCTP_IERROR_IGNORE_TSN;
+
+			/*
+			 * We're going to accept the frame but we should renege
+			 * to make space for it. This will send us down that
+			 * path later in this function.
+			 */
+			rcvbuf_over = 1;
+		}
 	}
 
 	/* Process ECN based congestion.
@@ -5226,6 +5250,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	datalen -= sizeof(sctp_data_chunk_t);
 
 	deliver = SCTP_CMD_CHUNK_ULP;
+	chunk->data_accepted = 1;
 
 	/* Think about partial delivery. */
 	if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
@@ -5242,7 +5267,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * large spill over.
 	 */
 	if (!asoc->rwnd || asoc->rwnd_over ||
-	    (datalen > asoc->rwnd + asoc->frag_point)) {
+	    (datalen > asoc->rwnd + asoc->frag_point) ||
+	    rcvbuf_over) {
 
 		/* If this is the next TSN, consider reneging to make
 		 * room.   Note: Playing nice with a confused sender.  A
@@ -5250,8 +5276,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 		 * space and in the future we may want to detect and
 		 * do more drastic reneging.
 		 */
-		if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
-		    (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+		if (sctp_tsnmap_has_gap(map) &&
+		    (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
 			SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
 			deliver = SCTP_CMD_RENEGE;
 		} else {
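Taken together, the hunks above amount to the following decision summary,
again a hedged sketch with hypothetical names rather than kernel code; the
DROP branch paraphrases the else path that continues past the last hunk
shown here.

/* Hypothetical summary of the delivery decision, not kernel code. */
enum action { DELIVER_ULP, RENEGE, DROP };

static enum action choose_action(unsigned int tsn, unsigned int ctsn,
				 int has_gap, unsigned long rwnd,
				 int rwnd_over, unsigned long datalen,
				 unsigned long frag_point, int rcvbuf_over)
{
	/* Normally the chunk goes straight to the upper-layer queue. */
	if (rwnd && !rwnd_over && datalen <= rwnd + frag_point &&
	    !rcvbuf_over)
		return DELIVER_ULP;

	/* Space is tight. Renege only when this is the next in-order
	 * TSN and out-of-order data exists that can be given back. */
	if (has_gap && tsn == ctsn + 1)
		return RENEGE;

	/* Otherwise the frame is discarded (assumed: this mirrors the
	 * else branch that falls outside the lines shown above). */
	return DROP;
}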