aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/stream.c
diff options
context:
space:
mode:
authorPavel Emelyanov <xemul@openvz.org>2007-11-20 02:20:59 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-28 17:54:36 -0500
commit9859a79023d71dd4e56c195a345abc4112abfd02 (patch)
tree5492ce13e237b6fb9cf284d3fdfd062793dc1126 /net/core/stream.c
parent3ef1355dcb8551730cc71e9ef4363f5c66ccad17 (diff)
[NET]: Compact sk_stream_mem_schedule() code
This function references sk->sk_prot->xxx many times. It turned out that there is so much code in it that gcc cannot always optimize access to sk->sk_prot's fields. After saving sk->sk_prot on the stack and comparing the disassembled code, it turned out that the function became ~10 bytes shorter and made fewer dereferences (on i386 and x86_64). Stack consumption didn't grow. Besides, this patch brings most of this function within the 80-column limit. Signed-off-by: Pavel Emelyanov <xemul@openvz.org> Acked-by: Arnaldo Carvalho de Melo <acme@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/stream.c')
-rw-r--r--net/core/stream.c27
1 file changed, 14 insertions, 13 deletions
diff --git a/net/core/stream.c b/net/core/stream.c
index 755bacbcb321..b2fb846f42a4 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -210,35 +210,36 @@ EXPORT_SYMBOL(__sk_stream_mem_reclaim);
210int sk_stream_mem_schedule(struct sock *sk, int size, int kind) 210int sk_stream_mem_schedule(struct sock *sk, int size, int kind)
211{ 211{
212 int amt = sk_stream_pages(size); 212 int amt = sk_stream_pages(size);
213 struct proto *prot = sk->sk_prot;
213 214
214 sk->sk_forward_alloc += amt * SK_STREAM_MEM_QUANTUM; 215 sk->sk_forward_alloc += amt * SK_STREAM_MEM_QUANTUM;
215 atomic_add(amt, sk->sk_prot->memory_allocated); 216 atomic_add(amt, prot->memory_allocated);
216 217
217 /* Under limit. */ 218 /* Under limit. */
218 if (atomic_read(sk->sk_prot->memory_allocated) < sk->sk_prot->sysctl_mem[0]) { 219 if (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]) {
219 if (*sk->sk_prot->memory_pressure) 220 if (*prot->memory_pressure)
220 *sk->sk_prot->memory_pressure = 0; 221 *prot->memory_pressure = 0;
221 return 1; 222 return 1;
222 } 223 }
223 224
224 /* Over hard limit. */ 225 /* Over hard limit. */
225 if (atomic_read(sk->sk_prot->memory_allocated) > sk->sk_prot->sysctl_mem[2]) { 226 if (atomic_read(prot->memory_allocated) > prot->sysctl_mem[2]) {
226 sk->sk_prot->enter_memory_pressure(); 227 prot->enter_memory_pressure();
227 goto suppress_allocation; 228 goto suppress_allocation;
228 } 229 }
229 230
230 /* Under pressure. */ 231 /* Under pressure. */
231 if (atomic_read(sk->sk_prot->memory_allocated) > sk->sk_prot->sysctl_mem[1]) 232 if (atomic_read(prot->memory_allocated) > prot->sysctl_mem[1])
232 sk->sk_prot->enter_memory_pressure(); 233 prot->enter_memory_pressure();
233 234
234 if (kind) { 235 if (kind) {
235 if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_prot->sysctl_rmem[0]) 236 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
236 return 1; 237 return 1;
237 } else if (sk->sk_wmem_queued < sk->sk_prot->sysctl_wmem[0]) 238 } else if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
238 return 1; 239 return 1;
239 240
240 if (!*sk->sk_prot->memory_pressure || 241 if (!*prot->memory_pressure ||
241 sk->sk_prot->sysctl_mem[2] > atomic_read(sk->sk_prot->sockets_allocated) * 242 prot->sysctl_mem[2] > atomic_read(prot->sockets_allocated) *
242 sk_stream_pages(sk->sk_wmem_queued + 243 sk_stream_pages(sk->sk_wmem_queued +
243 atomic_read(&sk->sk_rmem_alloc) + 244 atomic_read(&sk->sk_rmem_alloc) +
244 sk->sk_forward_alloc)) 245 sk->sk_forward_alloc))
@@ -258,7 +259,7 @@ suppress_allocation:
258 259
259 /* Alas. Undo changes. */ 260 /* Alas. Undo changes. */
260 sk->sk_forward_alloc -= amt * SK_STREAM_MEM_QUANTUM; 261 sk->sk_forward_alloc -= amt * SK_STREAM_MEM_QUANTUM;
261 atomic_sub(amt, sk->sk_prot->memory_allocated); 262 atomic_sub(amt, prot->memory_allocated);
262 return 0; 263 return 0;
263} 264}
264 265