aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2010-03-18 16:36:06 -0400
committerDavid S. Miller <davem@davemloft.net>2010-03-21 21:34:16 -0400
commitec733b15a3ef0b5759141a177f8044a2f40c41e7 (patch)
tree41af80ea4dcc89cc049bd36cbd882916a6469263 /net/ipv4
parent62c97ac04a67c120ec37a9bfd445a8d5dbbc1ed2 (diff)
net: snmp mib cleanup
There is no point in aligning or padding mibs to cache lines; they are per-cpu allocated with an 8-byte alignment anyway. This wastes space for no gain. This patch removes __SNMP_MIB_ALIGN__. Since SNMP mibs contain "unsigned long" fields only, we can relax the allocation alignment from "unsigned long long" to "unsigned long". Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/af_inet.c4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 33b7dffa7732..55e11906a73a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1401,10 +1401,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field);
1401  int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
1402  {
1403  	BUG_ON(ptr == NULL);
1404 -	ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
1404 +	ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long));
1405  	if (!ptr[0])
1406  		goto err0;
1407 -	ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
1407 +	ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long));
1408  	if (!ptr[1])
1409  		goto err1;
1410  	return 0;