 include/net/sctp/sctp.h |   3
 include/net/sock.h      |  98
 include/net/tcp.h       |   4
 net/core/datagram.c     |   2
 net/core/sock.c         | 104
 net/core/stream.c       |  84
 net/ipv4/tcp.c          |  23
 net/ipv4/tcp_input.c    |  26
 net/ipv4/tcp_output.c   |  26
 net/ipv4/tcp_timer.c    |   8
 net/sctp/protocol.c     |   2
 net/sctp/sm_statefuns.c |   2
 net/sctp/socket.c       |  11
 net/sctp/ulpevent.c     |   2
 net/sctp/ulpqueue.c     |   2
 15 files changed, 222 insertions(+), 175 deletions(-)
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 1b81ede7c2bc..4977b0a81535 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -463,8 +463,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	skb->destructor = sctp_sock_rfree;
 	atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
 	/*
-	 * This mimics the behavior of
-	 * sk_stream_set_owner_r
+	 * This mimics the behavior of skb_set_owner_r
 	 */
 	sk->sk_forward_alloc -= event->rmem_len;
 }
diff --git a/include/net/sock.h b/include/net/sock.h
index d27ba6fdd039..3d938f6c6725 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -460,25 +460,6 @@ static inline int sk_stream_memory_free(struct sock *sk)
 	return sk->sk_wmem_queued < sk->sk_sndbuf;
 }
 
-extern void sk_stream_rfree(struct sk_buff *skb);
-
-static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
-	skb->sk = sk;
-	skb->destructor = sk_stream_rfree;
-	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
-	sk->sk_forward_alloc -= skb->truesize;
-}
-
-static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
-{
-	skb_truesize_check(skb);
-	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
-	sk->sk_wmem_queued -= skb->truesize;
-	sk->sk_forward_alloc += skb->truesize;
-	__kfree_skb(skb);
-}
-
 /* The per-socket spinlock must be held here. */
 static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
@@ -576,7 +557,7 @@ struct proto {
 	/*
 	 * Pressure flag: try to collapse.
 	 * Technical note: it is used by multiple contexts non atomically.
-	 * All the sk_stream_mem_schedule() is of this nature: accounting
+	 * All the __sk_mem_schedule() is of this nature: accounting
 	 * is strict, actions are advisory and have some latency.
 	 */
 	int			*memory_pressure;
@@ -712,33 +693,73 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
 	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
 }
 
-extern void __sk_stream_mem_reclaim(struct sock *sk);
-extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);
+/*
+ * Functions for memory accounting
+ */
+extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
+extern void __sk_mem_reclaim(struct sock *sk);
 
-#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)
-#define SK_STREAM_MEM_QUANTUM_SHIFT ilog2(SK_STREAM_MEM_QUANTUM)
+#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
+#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
+#define SK_MEM_SEND	0
+#define SK_MEM_RECV	1
 
-static inline int sk_stream_pages(int amt)
+static inline int sk_mem_pages(int amt)
 {
-	return (amt + SK_STREAM_MEM_QUANTUM - 1) >> SK_STREAM_MEM_QUANTUM_SHIFT;
+	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
 }
 
-static inline void sk_stream_mem_reclaim(struct sock *sk)
+static inline int sk_has_account(struct sock *sk)
 {
-	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
-		__sk_stream_mem_reclaim(sk);
+	/* return true if protocol supports memory accounting */
+	return !!sk->sk_prot->memory_allocated;
 }
 
-static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
+static inline int sk_wmem_schedule(struct sock *sk, int size)
 {
-	return (int)skb->truesize <= sk->sk_forward_alloc ||
-		sk_stream_mem_schedule(sk, skb->truesize, 1);
+	if (!sk_has_account(sk))
+		return 1;
+	return size <= sk->sk_forward_alloc ||
+		__sk_mem_schedule(sk, size, SK_MEM_SEND);
 }
 
-static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+static inline int sk_rmem_schedule(struct sock *sk, int size)
 {
+	if (!sk_has_account(sk))
+		return 1;
 	return size <= sk->sk_forward_alloc ||
-		sk_stream_mem_schedule(sk, size, 0);
+		__sk_mem_schedule(sk, size, SK_MEM_RECV);
+}
+
+static inline void sk_mem_reclaim(struct sock *sk)
+{
+	if (!sk_has_account(sk))
+		return;
+	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk);
+}
+
+static inline void sk_mem_charge(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return;
+	sk->sk_forward_alloc -= size;
+}
+
+static inline void sk_mem_uncharge(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return;
+	sk->sk_forward_alloc += size;
+}
+
+static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+	skb_truesize_check(skb);
+	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+	sk->sk_wmem_queued -= skb->truesize;
+	sk_mem_uncharge(sk, skb->truesize);
+	__kfree_skb(skb);
 }
 
 /* Used by processes to "lock" a socket state, so that
@@ -1076,12 +1097,6 @@ static inline int sk_can_gso(const struct sock *sk)
 
 extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
-static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
-{
-	sk->sk_wmem_queued += skb->truesize;
-	sk->sk_forward_alloc -= skb->truesize;
-}
-
 static inline int skb_copy_to_page(struct sock *sk, char __user *from,
 				   struct sk_buff *skb, struct page *page,
 				   int off, int copy)
@@ -1101,7 +1116,7 @@ static inline int skb_copy_to_page(struct sock *sk, char __user *from,
 	skb->data_len += copy;
 	skb->truesize += copy;
 	sk->sk_wmem_queued += copy;
-	sk->sk_forward_alloc -= copy;
+	sk_mem_charge(sk, copy);
 	return 0;
 }
 
@@ -1127,6 +1142,7 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	skb->sk = sk;
 	skb->destructor = sock_rfree;
 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+	sk_mem_charge(sk, skb->truesize);
 }
 
 extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
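
The block above is the heart of the patch: the sk_stream_* helpers become protocol-neutral sk_mem_* helpers, gated by sk_has_account() so unaccounted protocols pay nothing. The arithmetic is quantum-based: reservations round up to whole pages of sk_forward_alloc, charges and uncharges move bytes within that reserve, and reclaim returns whole unused quanta. A minimal userspace model of that cycle, assuming a fixed 4096-byte quantum (all names below are local stand-ins, not kernel API):

#include <stdio.h>

#define SK_MEM_QUANTUM 4096

static int forward_alloc;	/* models sk->sk_forward_alloc */
static int memory_allocated;	/* models *prot->memory_allocated, in quanta */

static int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) / SK_MEM_QUANTUM;
}

/* models __sk_mem_schedule() with every limit check stripped out */
static void mem_schedule(int size)
{
	int amt = sk_mem_pages(size);

	forward_alloc += amt * SK_MEM_QUANTUM;
	memory_allocated += amt;
}

static void mem_charge(int size)   { forward_alloc -= size; }
static void mem_uncharge(int size) { forward_alloc += size; }

/* models __sk_mem_reclaim(): return whole unused quanta to the pool */
static void mem_reclaim(void)
{
	memory_allocated -= forward_alloc / SK_MEM_QUANTUM;
	forward_alloc &= SK_MEM_QUANTUM - 1;
}

int main(void)
{
	mem_schedule(1500);	/* reserve: 1500 B rounds up to one quantum */
	mem_charge(1500);	/* an skb of truesize 1500 enters a queue */
	printf("after charge:  fwd=%d pages=%d\n", forward_alloc, memory_allocated);

	mem_uncharge(1500);	/* the skb is freed and uncharged */
	mem_reclaim();		/* the whole quantum goes back to the pool */
	printf("after reclaim: fwd=%d pages=%d\n", forward_alloc, memory_allocated);
	return 0;
}

Running it prints "after charge:  fwd=2596 pages=1" and then "after reclaim: fwd=0 pages=0": the sub-quantum remainder lives in sk_forward_alloc until the charge is returned, and reclaim only ever hands back whole quanta.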
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 13ebe11a0af7..76286e80205a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1196,8 +1196,8 @@ static inline void tcp_write_queue_purge(struct sock *sk)
 	struct sk_buff *skb;
 
 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
-		sk_stream_free_skb(sk, skb);
-	sk_stream_mem_reclaim(sk);
+		sk_wmem_free_skb(sk, skb);
+	sk_mem_reclaim(sk);
 }
 
 static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 2d733131d7ce..8a28fc93b724 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -209,6 +209,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
 void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
 {
 	kfree_skb(skb);
+	sk_mem_reclaim(sk);
 }
 
 /**
@@ -248,6 +249,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 	}
 
 	kfree_skb(skb);
+	sk_mem_reclaim(sk);
 	return err;
 }
 
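
Note the ordering in both hunks: the reclaim runs after kfree_skb(), because freeing the skb is what runs its destructor (sock_rfree(), which now calls sk_mem_uncharge()) and refills sk_forward_alloc; only then can sk_mem_reclaim() return whole quanta to the protocol pool. A small userspace model of that ordering, with stand-in names:

#include <stdio.h>

#define QUANTUM 4096

static int forward_alloc;	/* models sk->sk_forward_alloc           */
static int pool_pages;		/* models *sk->sk_prot->memory_allocated */

static void sock_rfree_model(int truesize)
{
	forward_alloc += truesize;	/* the destructor uncharges the skb */
}

static void mem_reclaim(void)
{
	pool_pages -= forward_alloc / QUANTUM;	/* whole quanta only */
	forward_alloc &= QUANTUM - 1;
}

int main(void)
{
	forward_alloc = 1000;	/* two quanta reserved, 7192 B still charged */
	pool_pages = 2;

	mem_reclaim();		/* before the free: less than a quantum spare */
	printf("reclaim before free: fwd=%d pool=%d\n", forward_alloc, pool_pages);

	sock_rfree_model(7192);	/* kfree_skb() runs the destructor */
	mem_reclaim();		/* now both quanta can be returned */
	printf("reclaim after free:  fwd=%d pool=%d\n", forward_alloc, pool_pages);
	return 0;
}

The first reclaim is a no-op (fwd=1000 pool=2); after the free, both quanta go back (fwd=0 pool=0).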
diff --git a/net/core/sock.c b/net/core/sock.c
index 118214047ed2..8c184c4a3811 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -282,6 +282,11 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	if (err)
 		goto out;
 
+	if (!sk_rmem_schedule(sk, skb->truesize)) {
+		err = -ENOBUFS;
+		goto out;
+	}
+
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
 
@@ -1107,7 +1112,9 @@ void sock_rfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 
+	skb_truesize_check(skb);
 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+	sk_mem_uncharge(skb->sk, skb->truesize);
 }
 
 
@@ -1384,6 +1391,103 @@ int sk_wait_data(struct sock *sk, long *timeo)
 
 EXPORT_SYMBOL(sk_wait_data);
 
+/**
+ *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
+ *	@sk: socket
+ *	@size: memory size to allocate
+ *	@kind: allocation type
+ *
+ *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
+ *	rmem allocation. This function assumes that protocols which have
+ *	memory_pressure use sk_wmem_queued as write buffer accounting.
+ */
+int __sk_mem_schedule(struct sock *sk, int size, int kind)
+{
+	struct proto *prot = sk->sk_prot;
+	int amt = sk_mem_pages(size);
+	int allocated;
+
+	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+	allocated = atomic_add_return(amt, prot->memory_allocated);
+
+	/* Under limit. */
+	if (allocated <= prot->sysctl_mem[0]) {
+		if (prot->memory_pressure && *prot->memory_pressure)
+			*prot->memory_pressure = 0;
+		return 1;
+	}
+
+	/* Under pressure. */
+	if (allocated > prot->sysctl_mem[1])
+		if (prot->enter_memory_pressure)
+			prot->enter_memory_pressure();
+
+	/* Over hard limit. */
+	if (allocated > prot->sysctl_mem[2])
+		goto suppress_allocation;
+
+	/* guarantee minimum buffer size under pressure */
+	if (kind == SK_MEM_RECV) {
+		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
+			return 1;
+	} else { /* SK_MEM_SEND */
+		if (sk->sk_type == SOCK_STREAM) {
+			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
+				return 1;
+		} else if (atomic_read(&sk->sk_wmem_alloc) <
+			   prot->sysctl_wmem[0])
+				return 1;
+	}
+
+	if (prot->memory_pressure) {
+		if (!*prot->memory_pressure ||
+		    prot->sysctl_mem[2] > atomic_read(prot->sockets_allocated) *
+		    sk_mem_pages(sk->sk_wmem_queued +
+				 atomic_read(&sk->sk_rmem_alloc) +
+				 sk->sk_forward_alloc))
+			return 1;
+	}
+
+suppress_allocation:
+
+	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
+		sk_stream_moderate_sndbuf(sk);
+
+		/* Fail only if socket is _under_ its sndbuf.
+		 * In this case we cannot block, so that we have to fail.
+		 */
+		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
+			return 1;
+	}
+
+	/* Alas. Undo changes. */
+	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
+	atomic_sub(amt, prot->memory_allocated);
+	return 0;
+}
+
+EXPORT_SYMBOL(__sk_mem_schedule);
+
+/**
+ *	__sk_mem_reclaim - reclaim memory_allocated
+ *	@sk: socket
+ */
+void __sk_mem_reclaim(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+
+	atomic_sub(sk->sk_forward_alloc / SK_MEM_QUANTUM,
+		   prot->memory_allocated);
+	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
+
+	if (prot->memory_pressure && *prot->memory_pressure &&
+	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+		*prot->memory_pressure = 0;
+}
+
+EXPORT_SYMBOL(__sk_mem_reclaim);
+
+
 /*
  * Set of default routines for initialising struct proto_ops when
  * the protocol does not support a particular function. In certain
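
__sk_mem_schedule() reserves optimistically, then walks a ladder of checks against prot->sysctl_mem[]: below mem[0] it always succeeds (and clears the pressure flag), above mem[1] it enters memory pressure, above mem[2] it normally gives the reservation back, and in the zone between the limits it grants minimum-buffer and fair-share exemptions. A compressed userspace sketch of that ladder, leaving out the pressure-flag side effects and the SOCK_STREAM escape under suppress_allocation (all names are hypothetical):

#include <stdbool.h>
#include <stdio.h>

enum { MEM_MIN, MEM_PRESSURE, MEM_MAX };	/* models sysctl_mem[0..2] */

/* Condensed decision ladder of __sk_mem_schedule(); the SOCK_STREAM
 * escape hatch under suppress_allocation is deliberately omitted. */
static bool schedule_ok(long allocated, const long mem[3],
			bool under_min_buffer, bool fair_share)
{
	if (allocated <= mem[MEM_MIN])
		return true;			/* under limit: always OK */
	if (allocated > mem[MEM_MAX])
		return false;			/* over hard limit: undo  */
	/* between the limits: minimum-buffer and fair-share exemptions */
	return under_min_buffer || fair_share;
}

int main(void)
{
	const long mem[3] = { 49152, 65536, 98304 };	/* example quanta */

	printf("%d\n", schedule_ok(40000, mem, false, false));	/* 1 */
	printf("%d\n", schedule_ok(70000, mem, true, false));	/* 1 */
	printf("%d\n", schedule_ok(99000, mem, true, true));	/* 0 */
	return 0;
}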
diff --git a/net/core/stream.c b/net/core/stream.c
index bf188ffdbdbe..4a0ad152c9c4 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -172,17 +172,6 @@ do_interrupted:
 
 EXPORT_SYMBOL(sk_stream_wait_memory);
 
-void sk_stream_rfree(struct sk_buff *skb)
-{
-	struct sock *sk = skb->sk;
-
-	skb_truesize_check(skb);
-	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
-	sk->sk_forward_alloc += skb->truesize;
-}
-
-EXPORT_SYMBOL(sk_stream_rfree);
-
 int sk_stream_error(struct sock *sk, int flags, int err)
 {
 	if (err == -EPIPE)
@@ -194,77 +183,6 @@ int sk_stream_error(struct sock *sk, int flags, int err)
 
 EXPORT_SYMBOL(sk_stream_error);
 
-void __sk_stream_mem_reclaim(struct sock *sk)
-{
-	atomic_sub(sk->sk_forward_alloc >> SK_STREAM_MEM_QUANTUM_SHIFT,
-		   sk->sk_prot->memory_allocated);
-	sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
-	if (*sk->sk_prot->memory_pressure &&
-	    (atomic_read(sk->sk_prot->memory_allocated) <
-	     sk->sk_prot->sysctl_mem[0]))
-		*sk->sk_prot->memory_pressure = 0;
-}
-
-EXPORT_SYMBOL(__sk_stream_mem_reclaim);
-
-int sk_stream_mem_schedule(struct sock *sk, int size, int kind)
-{
-	int amt = sk_stream_pages(size);
-	struct proto *prot = sk->sk_prot;
-
-	sk->sk_forward_alloc += amt * SK_STREAM_MEM_QUANTUM;
-	atomic_add(amt, prot->memory_allocated);
-
-	/* Under limit. */
-	if (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]) {
-		if (*prot->memory_pressure)
-			*prot->memory_pressure = 0;
-		return 1;
-	}
-
-	/* Over hard limit. */
-	if (atomic_read(prot->memory_allocated) > prot->sysctl_mem[2]) {
-		prot->enter_memory_pressure();
-		goto suppress_allocation;
-	}
-
-	/* Under pressure. */
-	if (atomic_read(prot->memory_allocated) > prot->sysctl_mem[1])
-		prot->enter_memory_pressure();
-
-	if (kind) {
-		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
-			return 1;
-	} else if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
-		return 1;
-
-	if (!*prot->memory_pressure ||
-	    prot->sysctl_mem[2] > atomic_read(prot->sockets_allocated) *
-				  sk_stream_pages(sk->sk_wmem_queued +
-						  atomic_read(&sk->sk_rmem_alloc) +
-						  sk->sk_forward_alloc))
-		return 1;
-
-suppress_allocation:
-
-	if (!kind) {
-		sk_stream_moderate_sndbuf(sk);
-
-		/* Fail only if socket is _under_ its sndbuf.
-		 * In this case we cannot block, so that we have to fail.
-		 */
-		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
-			return 1;
-	}
-
-	/* Alas. Undo changes. */
-	sk->sk_forward_alloc -= amt * SK_STREAM_MEM_QUANTUM;
-	atomic_sub(amt, prot->memory_allocated);
-	return 0;
-}
-
-EXPORT_SYMBOL(sk_stream_mem_schedule);
-
 void sk_stream_kill_queues(struct sock *sk)
 {
 	/* First the read buffer. */
@@ -277,7 +195,7 @@ void sk_stream_kill_queues(struct sock *sk)
 	BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
 
 	/* Account for returned memory. */
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	BUG_TRAP(!sk->sk_wmem_queued);
 	BUG_TRAP(!sk->sk_forward_alloc);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fdaf965a6794..2cbfa6df7976 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -308,7 +308,7 @@ struct tcp_splice_state {
 /*
  * Pressure flag: try to collapse.
  * Technical note: it is used by multiple contexts non atomically.
- * All the sk_stream_mem_schedule() is of this nature: accounting
+ * All the __sk_mem_schedule() is of this nature: accounting
  * is strict, actions are advisory and have some latency.
  */
 int tcp_memory_pressure __read_mostly;
@@ -485,7 +485,8 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 	tcb->sacked = 0;
 	skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
-	sk_charge_skb(sk, skb);
+	sk->sk_wmem_queued += skb->truesize;
+	sk_mem_charge(sk, skb->truesize);
 	if (tp->nonagle & TCP_NAGLE_PUSH)
 		tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
@@ -638,7 +639,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 
 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 	if (skb) {
-		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
+		if (sk_wmem_schedule(sk, skb->truesize)) {
 			/*
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
@@ -707,7 +708,7 @@ new_segment:
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
-		if (!sk_stream_wmem_schedule(sk, copy))
+		if (!sk_wmem_schedule(sk, copy))
 			goto wait_for_memory;
 
 		if (can_coalesce) {
@@ -721,7 +722,7 @@ new_segment:
 		skb->data_len += copy;
 		skb->truesize += copy;
 		sk->sk_wmem_queued += copy;
-		sk->sk_forward_alloc -= copy;
+		sk_mem_charge(sk, copy);
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		tp->write_seq += copy;
 		TCP_SKB_CB(skb)->end_seq += copy;
@@ -928,7 +929,7 @@ new_segment:
 			if (copy > PAGE_SIZE - off)
 				copy = PAGE_SIZE - off;
 
-			if (!sk_stream_wmem_schedule(sk, copy))
+			if (!sk_wmem_schedule(sk, copy))
 				goto wait_for_memory;
 
 			if (!page) {
@@ -1019,7 +1020,7 @@ do_fault:
 		 * reset, where we can be unlinking the send_head.
 		 */
 		tcp_check_send_head(sk, skb);
-		sk_stream_free_skb(sk, skb);
+		sk_wmem_free_skb(sk, skb);
 	}
 
 do_error:
@@ -1738,7 +1739,7 @@ void tcp_close(struct sock *sk, long timeout)
 		__kfree_skb(skb);
 	}
 
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	/* As outlined in RFC 2525, section 2.17, we send a RST here because
 	 * data was lost. To witness the awful effects of the old behavior of
@@ -1841,7 +1842,7 @@ adjudge_to_death:
 		}
 	}
 	if (sk->sk_state != TCP_CLOSE) {
-		sk_stream_mem_reclaim(sk);
+		sk_mem_reclaim(sk);
 		if (tcp_too_many_orphans(sk,
 				atomic_read(sk->sk_prot->orphan_count))) {
 			if (net_ratelimit())
@@ -2658,11 +2659,11 @@ void __init tcp_init(void)
 	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
 	max_share = min(4UL*1024*1024, limit);
 
-	sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
+	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_wmem[1] = 16*1024;
 	sysctl_tcp_wmem[2] = max(64*1024, max_share);
 
-	sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
+	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_rmem[1] = 87380;
 	sysctl_tcp_rmem[2] = max(87380, max_share);
 
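
Throughout the TCP paths above, the removed sk_charge_skb() is open-coded as the same two-line pair: bump sk_wmem_queued by skb->truesize, then route the charge through sk_mem_charge(). A runnable stand-in showing the idiom with mock structures (nothing here is kernel API):

#include <stdio.h>

/* Minimal stand-ins for the kernel structures, just to exercise the idiom. */
struct sk_buff { int truesize; };
struct sock    { int sk_wmem_queued; int sk_forward_alloc; };

static void sk_mem_charge(struct sock *sk, int size)
{
	sk->sk_forward_alloc -= size;
}

/* What the removed sk_charge_skb() becomes under the new interface:
 * queue accounting stays explicit, the charge goes through sk_mem_charge(). */
static void charge_queued_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

int main(void)
{
	struct sock sk = { .sk_wmem_queued = 0, .sk_forward_alloc = 4096 };
	struct sk_buff skb = { .truesize = 1024 };

	charge_queued_skb(&sk, &skb);
	printf("queued=%d fwd=%d\n", sk.sk_wmem_queued, sk.sk_forward_alloc);
	return 0;
}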
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index efea9873208e..722c9cbb91e3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -591,7 +591,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 			 * restart window, so that we send ACKs quickly.
 			 */
 			tcp_incr_quickack(sk);
-			sk_stream_mem_reclaim(sk);
+			sk_mem_reclaim(sk);
 		}
 	}
 	icsk->icsk_ack.lrcvtime = now;
@@ -2851,7 +2851,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
 			break;
 
 		tcp_unlink_write_queue(skb, sk);
-		sk_stream_free_skb(sk, skb);
+		sk_wmem_free_skb(sk, skb);
 		tcp_clear_all_retrans_hints(tp);
 	}
 
@@ -3567,7 +3567,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
 	__skb_queue_purge(&tp->out_of_order_queue);
 	if (tcp_is_sack(tp))
 		tcp_sack_reset(&tp->rx_opt);
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	if (!sock_flag(sk, SOCK_DEAD)) {
 		sk->sk_state_change(sk);
@@ -3850,12 +3850,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 queue_and_out:
 			if (eaten < 0 &&
 			    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-			     !sk_stream_rmem_schedule(sk, skb))) {
+			     !sk_rmem_schedule(sk, skb->truesize))) {
 				if (tcp_prune_queue(sk) < 0 ||
-				    !sk_stream_rmem_schedule(sk, skb))
+				    !sk_rmem_schedule(sk, skb->truesize))
 					goto drop;
 			}
-			sk_stream_set_owner_r(skb, sk);
+			skb_set_owner_r(skb, sk);
 			__skb_queue_tail(&sk->sk_receive_queue, skb);
 		}
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
@@ -3924,9 +3924,9 @@ drop:
 	TCP_ECN_check_ce(tp, skb);
 
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_stream_rmem_schedule(sk, skb)) {
+	    !sk_rmem_schedule(sk, skb->truesize)) {
 		if (tcp_prune_queue(sk) < 0 ||
-		    !sk_stream_rmem_schedule(sk, skb))
+		    !sk_rmem_schedule(sk, skb->truesize))
 			goto drop;
 	}
 
@@ -3937,7 +3937,7 @@ drop:
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
-	sk_stream_set_owner_r(skb, sk);
+	skb_set_owner_r(skb, sk);
 
 	if (!skb_peek(&tp->out_of_order_queue)) {
 		/* Initial out of order segment, build 1 SACK. */
@@ -4079,7 +4079,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
 		__skb_insert(nskb, skb->prev, skb, list);
-		sk_stream_set_owner_r(nskb, sk);
+		skb_set_owner_r(nskb, sk);
 
 		/* Copy data, releasing collapsed skbs. */
 		while (copy > 0) {
@@ -4177,7 +4177,7 @@ static int tcp_prune_queue(struct sock *sk)
 		     sk->sk_receive_queue.next,
 		     (struct sk_buff*)&sk->sk_receive_queue,
 		     tp->copied_seq, tp->rcv_nxt);
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
 		return 0;
@@ -4197,7 +4197,7 @@ static int tcp_prune_queue(struct sock *sk)
 		 */
 		if (tcp_is_sack(tp))
 			tcp_sack_reset(&tp->rx_opt);
-		sk_stream_mem_reclaim(sk);
+		sk_mem_reclaim(sk);
 	}
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
@@ -4699,7 +4699,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			/* Bulk data transfer: receiver */
 			__skb_pull(skb,tcp_header_len);
 			__skb_queue_tail(&sk->sk_receive_queue, skb);
-			sk_stream_set_owner_r(skb, sk);
+			skb_set_owner_r(skb, sk);
 			tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		}
 
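
The receive-side hunks share one admission idiom: try sk_rmem_schedule() for the skb's truesize, prune the queues once on failure, retry, and only then drop the segment; skb_set_owner_r() performs the actual charge on success. A userspace sketch of that control flow under stand-in names:

#include <stdbool.h>
#include <stdio.h>

static int forward_alloc = 2048;	/* models sk->sk_forward_alloc */

static bool rmem_schedule(int size)
{
	return size <= forward_alloc;	/* the __sk_mem_schedule() retry is elided */
}

static int prune_queue(void)
{
	forward_alloc += 4096;		/* pretend collapsing freed a quantum */
	return 0;			/* 0 means pruning made progress */
}

static bool admit(int truesize)
{
	if (!rmem_schedule(truesize)) {
		if (prune_queue() < 0 || !rmem_schedule(truesize))
			return false;	/* drop the segment */
	}
	forward_alloc -= truesize;	/* skb_set_owner_r() charges it */
	return true;
}

int main(void)
{
	printf("%d\n", admit(1500));	/* fits the existing reservation: 1 */
	printf("%d\n", admit(4000));	/* needs a prune first: 1 */
	return 0;
}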
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9058e0a25107..7a4834a2ae84 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -637,7 +637,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
 	skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
-	sk_charge_skb(sk, skb);
+	sk->sk_wmem_queued += skb->truesize;
+	sk_mem_charge(sk, skb->truesize);
 }
 
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
@@ -701,7 +702,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
 	if (buff == NULL)
 		return -ENOMEM; /* We'll just try again later. */
 
-	sk_charge_skb(sk, buff);
+	sk->sk_wmem_queued += buff->truesize;
+	sk_mem_charge(sk, buff->truesize);
 	nlen = skb->len - len - nsize;
 	buff->truesize += nlen;
 	skb->truesize -= nlen;
@@ -825,7 +827,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 
 	skb->truesize -= len;
 	sk->sk_wmem_queued -= len;
-	sk->sk_forward_alloc += len;
+	sk_mem_uncharge(sk, len);
 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 
 	/* Any change of skb->len requires recalculation of tso
@@ -1197,7 +1199,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	if (unlikely(buff == NULL))
 		return -ENOMEM;
 
-	sk_charge_skb(sk, buff);
+	sk->sk_wmem_queued += buff->truesize;
+	sk_mem_charge(sk, buff->truesize);
 	buff->truesize += nlen;
 	skb->truesize -= nlen;
 
@@ -1350,7 +1353,8 @@ static int tcp_mtu_probe(struct sock *sk)
 	/* We're allowed to probe. Build it now. */
 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
 		return -1;
-	sk_charge_skb(sk, nskb);
+	sk->sk_wmem_queued += nskb->truesize;
+	sk_mem_charge(sk, nskb->truesize);
 
 	skb = tcp_send_head(sk);
 
@@ -1377,7 +1381,7 @@ static int tcp_mtu_probe(struct sock *sk)
 			 * Throw it away. */
 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
 			tcp_unlink_write_queue(skb, sk);
-			sk_stream_free_skb(sk, skb);
+			sk_wmem_free_skb(sk, skb);
 		} else {
 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
 						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
@@ -1744,7 +1748,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 		/* changed transmit queue under us so clear hints */
 		tcp_clear_retrans_hints_partial(tp);
 
-		sk_stream_free_skb(sk, next_skb);
+		sk_wmem_free_skb(sk, next_skb);
 	}
 }
 
@@ -2139,8 +2143,9 @@ int tcp_send_synack(struct sock *sk)
 			tcp_unlink_write_queue(skb, sk);
 			skb_header_release(nskb);
 			__tcp_add_write_queue_head(sk, nskb);
-			sk_stream_free_skb(sk, skb);
-			sk_charge_skb(sk, nskb);
+			sk_wmem_free_skb(sk, skb);
+			sk->sk_wmem_queued += nskb->truesize;
+			sk_mem_charge(sk, nskb->truesize);
 			skb = nskb;
 		}
 
@@ -2343,7 +2348,8 @@ int tcp_connect(struct sock *sk)
 	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
 	skb_header_release(buff);
 	__tcp_add_write_queue_tail(sk, buff);
-	sk_charge_skb(sk, buff);
+	sk->sk_wmem_queued += buff->truesize;
+	sk_mem_charge(sk, buff->truesize);
 	tp->packets_out += tcp_skb_pcount(buff);
 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index ea85bc00c61f..17931be6d584 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -186,7 +186,7 @@ static void tcp_delack_timer(unsigned long data)
 		goto out_unlock;
 	}
 
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
 		goto out;
@@ -226,7 +226,7 @@ static void tcp_delack_timer(unsigned long data)
 
 out:
 	if (tcp_memory_pressure)
-		sk_stream_mem_reclaim(sk);
+		sk_mem_reclaim(sk);
 out_unlock:
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -420,7 +420,7 @@ static void tcp_write_timer(unsigned long data)
 	TCP_CHECK_TIMER(sk);
 
 out:
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
out_unlock:
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -514,7 +514,7 @@ static void tcp_keepalive_timer (unsigned long data)
 	}
 
 	TCP_CHECK_TIMER(sk);
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 resched:
 	inet_csk_reset_keepalive_timer (sk, elapsed);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e466e00b9a9f..b92196495027 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1109,7 +1109,7 @@ SCTP_STATIC __init int sctp_init(void)
 	sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1));
 	sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
 
-	sysctl_sctp_wmem[0] = SK_STREAM_MEM_QUANTUM;
+	sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
 	sysctl_sctp_wmem[1] = 16*1024;
 	sysctl_sctp_wmem[2] = max(64*1024, max_share);
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 511d8c9a171a..b1267519183b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5844,7 +5844,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	/*
 	 * Also try to renege to limit our memory usage in the event that
 	 * we are under memory pressure
-	 * If we can't renege, don't worry about it, the sk_stream_rmem_schedule
+	 * If we can't renege, don't worry about it, the sk_rmem_schedule
 	 * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
 	 * memory usage too much
 	 */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 7a8650f01d08..710df67a6785 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -174,7 +174,8 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
 				sizeof(struct sctp_chunk);
 
 	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
-	sk_charge_skb(sk, chunk->skb);
+	sk->sk_wmem_queued += chunk->skb->truesize;
+	sk_mem_charge(sk, chunk->skb->truesize);
 }
 
 /* Verify that this is a valid address. */
@@ -6035,10 +6036,10 @@ static void sctp_wfree(struct sk_buff *skb)
 	atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
 
 	/*
-	 * This undoes what is done via sk_charge_skb
+	 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
 	 */
 	sk->sk_wmem_queued -= skb->truesize;
-	sk->sk_forward_alloc += skb->truesize;
+	sk_mem_uncharge(sk, skb->truesize);
 
 	sock_wfree(skb);
 	__sctp_write_space(asoc);
@@ -6059,9 +6060,9 @@ void sctp_sock_rfree(struct sk_buff *skb)
 	atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
 
 	/*
-	 * Mimic the behavior of sk_stream_rfree
+	 * Mimic the behavior of sock_rfree
 	 */
-	sk->sk_forward_alloc += event->rmem_len;
+	sk_mem_uncharge(sk, event->rmem_len);
 }
 
 
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 307314356e16..047c27df98f4 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -700,7 +700,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 	if (rx_count >= asoc->base.sk->sk_rcvbuf) {
 
 		if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-		    (!sk_stream_rmem_schedule(asoc->base.sk, chunk->skb)))
+		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
 			goto fail;
 	}
 
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 1733fa29a501..c25caefa3bcb 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1046,7 +1046,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
 	}
 
-	sk_stream_mem_reclaim(asoc->base.sk);
+	sk_mem_reclaim(asoc->base.sk);
 	return;
 }
 