| author | Andy Grover <andy.grover@oracle.com> | 2010-07-07 19:46:26 -0400 |
|---|---|---|
| committer | Andy Grover <andy.grover@oracle.com> | 2010-09-08 21:16:38 -0400 |
| commit | c20f5b9633bb0953bd2422f0f1430a2028cdbd0a | |
| tree | dc628e1168c963940195ac5d2b5dbe3f54682240 /net | |
| parent | d455ab64096b9a86849c7315c53e595330842db6 | |
RDS/IB: Use SLAB_HWCACHE_ALIGN flag for kmem_cache_create()
We are *definitely* counting cycles as closely as DaveM, so
ensure hwcache alignment for our recv ring control structs.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
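
Background, not part of the commit: kmem_cache_create() takes the cache name, object size, an extra alignment (0 for the default), a flags mask, and an optional constructor. Passing SLAB_HWCACHE_ALIGN in the flags asks the slab allocator to align each object to a hardware cache line, which is exactly what this patch does for the two RDS receive-path caches. The sketch below only illustrates the call shape; the foo_ctrl names are hypothetical.

```c
/*
 * Illustrative sketch only (not from this commit): create a slab cache
 * whose objects are aligned to the CPU's hardware cache line.
 * "foo_ctrl" / struct foo_ctrl are hypothetical names.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_ctrl {
	u64   seq;	/* example fields standing in for real state */
	void *buf;
};

static struct kmem_cache *foo_ctrl_slab;

static int __init foo_ctrl_init(void)
{
	/* 3rd arg: extra alignment (0 = default); 4th arg: flags.
	 * SLAB_HWCACHE_ALIGN rounds object placement up to a cache
	 * line so hot structs do not share lines (false sharing). */
	foo_ctrl_slab = kmem_cache_create("foo_ctrl",
					  sizeof(struct foo_ctrl),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_ctrl_slab)
		return -ENOMEM;
	return 0;
}
```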
Diffstat (limited to 'net')
 net/rds/ib_recv.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 9c4208f6b451..37dab2898ad0 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -1051,13 +1051,13 @@ int __init rds_ib_recv_init(void)
 
 	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
 					sizeof(struct rds_ib_incoming),
-					0, 0, NULL);
+					0, SLAB_HWCACHE_ALIGN, NULL);
 	if (!rds_ib_incoming_slab)
 		goto out;
 
 	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
 					sizeof(struct rds_page_frag),
-					0, 0, NULL);
+					0, SLAB_HWCACHE_ALIGN, NULL);
 	if (!rds_ib_frag_slab)
 		kmem_cache_destroy(rds_ib_incoming_slab);
 	else