diff options
Diffstat (limited to 'net/core/sock.c')
-rw-r--r-- | net/core/sock.c | 104 |
1 files changed, 104 insertions, 0 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index 118214047ed..8c184c4a381 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -282,6 +282,11 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
282 | if (err) | 282 | if (err) |
283 | goto out; | 283 | goto out; |
284 | 284 | ||
285 | if (!sk_rmem_schedule(sk, skb->truesize)) { | ||
286 | err = -ENOBUFS; | ||
287 | goto out; | ||
288 | } | ||
289 | |||
285 | skb->dev = NULL; | 290 | skb->dev = NULL; |
286 | skb_set_owner_r(skb, sk); | 291 | skb_set_owner_r(skb, sk); |
287 | 292 | ||
@@ -1107,7 +1112,9 @@ void sock_rfree(struct sk_buff *skb)
1107 | { | 1112 | { |
1108 | struct sock *sk = skb->sk; | 1113 | struct sock *sk = skb->sk; |
1109 | 1114 | ||
1115 | skb_truesize_check(skb); | ||
1110 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); | 1116 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
1117 | sk_mem_uncharge(skb->sk, skb->truesize); | ||
1111 | } | 1118 | } |
1112 | 1119 | ||
1113 | 1120 | ||
@@ -1384,6 +1391,103 @@ int sk_wait_data(struct sock *sk, long *timeo)
1384 | 1391 | ||
1385 | EXPORT_SYMBOL(sk_wait_data); | 1392 | EXPORT_SYMBOL(sk_wait_data); |
1386 | 1393 | ||
1394 | /** | ||
1395 | * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated | ||
1396 | * @sk: socket | ||
1397 | * @size: memory size to allocate | ||
1398 | * @kind: allocation type | ||
1399 | * | ||
1400 | * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means | ||
1401 | * rmem allocation. This function assumes that protocols which have | ||
1402 | * memory_pressure use sk_wmem_queued as write buffer accounting. | ||
1403 | */ | ||
1404 | int __sk_mem_schedule(struct sock *sk, int size, int kind) | ||
1405 | { | ||
1406 | struct proto *prot = sk->sk_prot; | ||
1407 | int amt = sk_mem_pages(size); | ||
1408 | int allocated; | ||
1409 | |||
1410 | sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; | ||
1411 | allocated = atomic_add_return(amt, prot->memory_allocated); | ||
1412 | |||
1413 | /* Under limit. */ | ||
1414 | if (allocated <= prot->sysctl_mem[0]) { | ||
1415 | if (prot->memory_pressure && *prot->memory_pressure) | ||
1416 | *prot->memory_pressure = 0; | ||
1417 | return 1; | ||
1418 | } | ||
1419 | |||
1420 | /* Under pressure. */ | ||
1421 | if (allocated > prot->sysctl_mem[1]) | ||
1422 | if (prot->enter_memory_pressure) | ||
1423 | prot->enter_memory_pressure(); | ||
1424 | |||
1425 | /* Over hard limit. */ | ||
1426 | if (allocated > prot->sysctl_mem[2]) | ||
1427 | goto suppress_allocation; | ||
1428 | |||
1429 | /* guarantee minimum buffer size under pressure */ | ||
1430 | if (kind == SK_MEM_RECV) { | ||
1431 | if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0]) | ||
1432 | return 1; | ||
1433 | } else { /* SK_MEM_SEND */ | ||
1434 | if (sk->sk_type == SOCK_STREAM) { | ||
1435 | if (sk->sk_wmem_queued < prot->sysctl_wmem[0]) | ||
1436 | return 1; | ||
1437 | } else if (atomic_read(&sk->sk_wmem_alloc) < | ||
1438 | prot->sysctl_wmem[0]) | ||
1439 | return 1; | ||
1440 | } | ||
1441 | |||
1442 | if (prot->memory_pressure) { | ||
1443 | if (!*prot->memory_pressure || | ||
1444 | prot->sysctl_mem[2] > atomic_read(prot->sockets_allocated) * | ||
1445 | sk_mem_pages(sk->sk_wmem_queued + | ||
1446 | atomic_read(&sk->sk_rmem_alloc) + | ||
1447 | sk->sk_forward_alloc)) | ||
1448 | return 1; | ||
1449 | } | ||
1450 | |||
1451 | suppress_allocation: | ||
1452 | |||
1453 | if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { | ||
1454 | sk_stream_moderate_sndbuf(sk); | ||
1455 | |||
1456 | /* Fail only if socket is _under_ its sndbuf. | ||
1457 | * In this case we cannot block, so that we have to fail. | ||
1458 | */ | ||
1459 | if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) | ||
1460 | return 1; | ||
1461 | } | ||
1462 | |||
1463 | /* Alas. Undo changes. */ | ||
1464 | sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; | ||
1465 | atomic_sub(amt, prot->memory_allocated); | ||
1466 | return 0; | ||
1467 | } | ||
1468 | |||
1469 | EXPORT_SYMBOL(__sk_mem_schedule); | ||
1470 | |||
1471 | /** | ||
1472 | * __sk_mem_reclaim - reclaim memory_allocated | ||
1473 | * @sk: socket | ||
1474 | */ | ||
1475 | void __sk_mem_reclaim(struct sock *sk) | ||
1476 | { | ||
1477 | struct proto *prot = sk->sk_prot; | ||
1478 | |||
1479 | atomic_sub(sk->sk_forward_alloc / SK_MEM_QUANTUM, | ||
1480 | prot->memory_allocated); | ||
1481 | sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; | ||
1482 | |||
1483 | if (prot->memory_pressure && *prot->memory_pressure && | ||
1484 | (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) | ||
1485 | *prot->memory_pressure = 0; | ||
1486 | } | ||
1487 | |||
1488 | EXPORT_SYMBOL(__sk_mem_reclaim); | ||
1489 | |||
1490 | |||
1387 | /* | 1491 | /* |
1388 | * Set of default routines for initialising struct proto_ops when | 1492 | * Set of default routines for initialising struct proto_ops when |
1389 | * the protocol does not support a particular function. In certain | 1493 | * the protocol does not support a particular function. In certain |