author		Jes Sorensen <jes@sgi.com>	2006-02-17 05:18:43 -0500
committer	Tony Luck <tony.luck@intel.com>	2006-02-27 18:26:58 -0500
commit		7aa6ba41362a7f888ad11fdcfe51ca8d92226cd3 (patch)
tree		3d7b177ee966f21d1ac6e630061bdb0b256e76d8 /include/asm-ia64/sn
parent		e95a9ec1bb66e07b138861c743192f06e7b3e4de (diff)
[IA64-SGI] SN2-XP reduce kmalloc wrapper inlining
Take advantage of kzalloc() as well as reduce the size of code generated
for the error returns in xpc_setup_infrastructure().

Signed-off-by: Jes Sorensen <jes@sgi.com>
Acked-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'include/asm-ia64/sn')
-rw-r--r--	include/asm-ia64/sn/xpc.h	22
1 file changed, 0 insertions, 22 deletions
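The commit message describes two changes that the header diff below does not show: switching allocations over to kzalloc() and shrinking the code generated for the error returns in xpc_setup_infrastructure(). As a rough illustration only (this is not the actual patch; the function setup_two_buffers(), its parameters, and NBYTES are made-up names), the general shape of that conversion is to drop the per-site memset() and funnel failures through shared cleanup labels:

#include <linux/errno.h>
#include <linux/slab.h>

#define NBYTES 1024	/* hypothetical buffer size for the example */

/*
 * Illustration only (not XPC code): kzalloc() returns zeroed memory, so
 * the kmalloc()+memset() pairs disappear, and the failure branches can
 * collapse into a couple of shared cleanup labels instead of duplicated
 * unwind code at every allocation site.
 */
static int setup_two_buffers(void **bufa, void **bufb)
{
	*bufa = kzalloc(NBYTES, GFP_KERNEL);
	if (*bufa == NULL)
		goto out;

	*bufb = kzalloc(NBYTES, GFP_KERNEL);
	if (*bufb == NULL)
		goto out_free_a;

	return 0;

out_free_a:
	kfree(*bufa);
	*bufa = NULL;
out:
	return -ENOMEM;
}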
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index df7f5f4f3cde..aa3b8ace9030 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -1227,28 +1227,6 @@ xpc_map_bte_errors(bte_result_t error)
 
 
 
-static inline void *
-xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
-{
-	/* see if kmalloc will give us cachline aligned memory by default */
-	*base = kmalloc(size, flags);
-	if (*base == NULL) {
-		return NULL;
-	}
-	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
-		return *base;
-	}
-	kfree(*base);
-
-	/* nope, we'll have to do it ourselves */
-	*base = kmalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL) {
-		return NULL;
-	}
-	return (void *) L1_CACHE_ALIGN((u64) *base);
-}
-
-
 /*
  * Check to see if there is any channel activity to/from the specified
  * partition.
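Since the diffstat is limited to include/asm-ia64/sn, only the removal of the inline wrapper from xpc.h is visible here; the replacement that the patch adds elsewhere is not shown. For context, a minimal sketch of what an out-of-line, kzalloc()-based variant might look like once it lives in a .c file follows. The name xpc_kzalloc_cacheline_aligned and the zeroing behaviour are assumptions based on the commit message, not a copy of the actual helper.

#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/types.h>

/*
 * Sketch (assumption, not the actual patch): an out-of-line, zeroing
 * version of the wrapper removed above.  Being a plain static function in
 * one .c file, it is emitted once rather than once per inlined call site.
 */
static void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc() gives us cacheline-aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;
	if ((u64) *base == L1_CACHE_ALIGN((u64) *base))
		return *base;
	kfree(*base);

	/* nope, over-allocate and align the returned pointer ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;
	return (void *) L1_CACHE_ALIGN((u64) *base);
}

Callers keep the returned aligned pointer for their data and pass *base to kfree() when tearing down, since the aligned pointer may not be the address kzalloc() actually returned.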