Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                    |  4
-rw-r--r--  arch/ia64/kernel/topology.c          |  6
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c    |  4
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c       | 28
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c  | 24
5 files changed, 30 insertions, 36 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 47de9ee6bcd6..674de8943478 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -258,7 +258,7 @@ config NR_CPUS
 	int "Maximum number of CPUs (2-1024)"
 	range 2 1024
 	depends on SMP
-	default "64"
+	default "1024"
 	help
 	  You should set this to the number of CPUs in your system, but
 	  keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but
@@ -354,7 +354,7 @@ config NUMA
 config NODES_SHIFT
 	int "Max num nodes shift(3-10)"
 	range 3 10
-	default "8"
+	default "10"
 	depends on NEED_MULTIPLE_NODES
 	help
 	  This option specifies the maximum number of nodes in your SSI system.
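
The two defaults move together: NODES_SHIFT is an exponent, so raising it from 8 to 10 lifts the derived node limit from 256 to 1024, matching the new NR_CPUS default. A minimal userspace sketch of that arithmetic (the kernel derives its MAX_NUMNODES from CONFIG_NODES_SHIFT in much the same way):

#include <stdio.h>

#define CONFIG_NR_CPUS		1024	/* new default above */
#define CONFIG_NODES_SHIFT	10	/* new default above */
#define MAX_NUMNODES		(1 << CONFIG_NODES_SHIFT)

int main(void)
{
	printf("NR_CPUS      = %d\n", CONFIG_NR_CPUS);	/* 1024 */
	printf("MAX_NUMNODES = %d\n", MAX_NUMNODES);	/* 1 << 10 = 1024 */
	return 0;
}
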
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index d24fa393b182..f648c610b10c 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -67,10 +67,8 @@ static int __init topology_init(void)
 #endif
 
 	sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
-	if (!sysfs_cpus) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!sysfs_cpus)
+		panic("kzalloc in topology_init failed - NR_CPUS too big?");
 
 	for_each_present_cpu(i) {
 		if((err = arch_register_cpu(i)))
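
With NR_CPUS defaulting to 1024, the sysfs_cpus array kzalloc'd in topology_init() grows linearly with the CPU limit, and a failure there is now treated as fatal rather than returned as -ENOMEM, with the panic message pointing at NR_CPUS as the likely culprit. A rough userspace sketch of the size arithmetic (the real size of struct ia64_cpu is not shown in this hunk; 64 bytes is a placeholder):

#include <stdio.h>

#define NR_CPUS	1024			/* new Kconfig default above */

struct ia64_cpu_sketch {		/* stand-in for struct ia64_cpu */
	char pad[64];			/* assumed size, illustration only */
};

int main(void)
{
	size_t sz = sizeof(struct ia64_cpu_sketch) * NR_CPUS;

	printf("sysfs_cpus: %zu bytes at NR_CPUS=%d\n", sz, NR_CPUS);
	return 0;
}
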
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index c2f69f7942af..1f3540826e68 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -279,8 +279,8 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
 		return part->reason;
 	}
 
-	bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
-			(u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
+	bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt,
+			(BTE_NORMAL | BTE_WACQUIRE), NULL);
 	if (bte_ret == BTE_SUCCESS) {
 		return xpcSuccess;
 	}
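
The ia64_tpa() translation of the destination disappears from this call site (and from every xp_bte_copy() caller below), which suggests the wrapper now takes a virtual destination and performs the physical translation itself. A hypothetical, self-contained sketch of that idea; the stub names and the wrapper signature are assumptions, not taken from this diff:

#include <stdint.h>

typedef uint64_t u64;
typedef int bte_result_t;
#define BTE_SUCCESS 0

/* Stubs standing in for the SN2 BTE primitives; only the shape matters. */
static u64 ia64_tpa_stub(u64 vaddr) { return vaddr; /* identity here */ }
static bte_result_t bte_copy_stub(u64 src, u64 pdst, u64 len, u64 mode,
				  void *notification)
{
	(void)src; (void)pdst; (void)len; (void)mode; (void)notification;
	return BTE_SUCCESS;
}

/* Hypothetical wrapper: translate the virtual destination internally,
 * so callers such as xpc_pull_remote_cachelines() no longer do it. */
static bte_result_t xp_bte_copy_sketch(u64 src, u64 vdst, u64 len, u64 mode,
				       void *notification)
{
	u64 pdst = ia64_tpa_stub(vdst);

	return bte_copy_stub(src, pdst, len, mode, notification);
}

int main(void)
{
	char dst[64];

	return xp_bte_copy_sketch(0, (u64)(uintptr_t)dst, sizeof(dst), 0, NULL)
		== BTE_SUCCESS ? 0 : 1;
}
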
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 5e8e59efb347..4d026f9dd98b 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -1052,6 +1052,8 @@ xpc_do_exit(enum xpc_retval reason)
 	if (xpc_sysctl) {
 		unregister_sysctl_table(xpc_sysctl);
 	}
+
+	kfree(xpc_remote_copy_buffer_base);
 }
 
 
@@ -1212,24 +1214,20 @@ xpc_init(void)
 	partid_t partid;
 	struct xpc_partition *part;
 	pid_t pid;
+	size_t buf_size;
 
 
 	if (!ia64_platform_is("sn2")) {
 		return -ENODEV;
 	}
 
-	/*
-	 * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
-	 * various portions of a partition's reserved page. Its size is based
-	 * on the size of the reserved page header and part_nasids mask. So we
-	 * need to ensure that the other items will fit as well.
-	 */
-	if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) {
-		dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
-		return -EPERM;
-	}
-	DBUG_ON((u64) xpc_remote_copy_buffer !=
-				L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
+
+	buf_size = max(XPC_RP_VARS_SIZE,
+				XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
+	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
+				     GFP_KERNEL, &xpc_remote_copy_buffer_base);
+	if (xpc_remote_copy_buffer == NULL)
+		return -ENOMEM;
 
 	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
 	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
@@ -1293,6 +1291,8 @@ xpc_init(void)
 		if (xpc_sysctl) {
 			unregister_sysctl_table(xpc_sysctl);
 		}
+
+		kfree(xpc_remote_copy_buffer_base);
 		return -EBUSY;
 	}
 
@@ -1311,6 +1311,8 @@ xpc_init(void)
 		if (xpc_sysctl) {
 			unregister_sysctl_table(xpc_sysctl);
 		}
+
+		kfree(xpc_remote_copy_buffer_base);
 		return -EBUSY;
 	}
 
@@ -1362,6 +1364,8 @@ xpc_init(void)
 		if (xpc_sysctl) {
 			unregister_sysctl_table(xpc_sysctl);
 		}
+
+		kfree(xpc_remote_copy_buffer_base);
 		return -EBUSY;
 	}
 
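
Two things change in concert here: the buffer size is now computed at init time as the max of the two layouts it has to hold (replacing the old sanity check that merely verified the fixed-size buffer was large enough), and every failure path, like xpc_do_exit() above, frees xpc_remote_copy_buffer_base rather than xpc_remote_copy_buffer, since the aligned pointer may sit inside a larger allocation and only the base pointer is valid to pass to kfree(). A minimal userspace sketch of that caller-side contract, with placeholder sizes and a deliberately naive helper that always over-allocates (the kernel's check-first variant is sketched after the xpc_partition.c diff below):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define L1_CACHE_BYTES	128UL		/* assumed cache line size */
#define L1_CACHE_ALIGN(x) (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

/* Placeholder sizes; the real XPC_RP_* / XP_* values live elsewhere. */
#define RP_HEADER_SIZE		64UL
#define NASID_MASK_BYTES	256UL
#define RP_VARS_SIZE		400UL

#define max(a, b)	((a) > (b) ? (a) : (b))

static void *alloc_cacheline_aligned(size_t size, void **base)
{
	*base = malloc(size + L1_CACHE_BYTES);		/* over-allocate... */
	if (*base == NULL)
		return NULL;
	return (void *)L1_CACHE_ALIGN((uintptr_t)*base);	/* ...round up */
}

int main(void)
{
	void *remote_copy_buffer_base;
	char *remote_copy_buffer;
	size_t buf_size;

	/* runtime sizing replaces the old fixed-size check */
	buf_size = max(RP_VARS_SIZE, RP_HEADER_SIZE + NASID_MASK_BYTES);

	remote_copy_buffer = alloc_cacheline_aligned(buf_size,
						     &remote_copy_buffer_base);
	if (remote_copy_buffer == NULL)
		return 1;			/* xpc_init() returns -ENOMEM */

	memset(remote_copy_buffer, 0, buf_size);  /* use the aligned pointer */
	free(remote_copy_buffer_base);		  /* but free the base pointer */
	return 0;
}
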
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 2a89cfce4954..57c723f5cba4 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -71,19 +71,15 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
  * Generic buffer used to store a local copy of portions of a remote
  * partition's reserved page (either its header and part_nasids mask,
  * or its vars).
- *
- * xpc_discovery runs only once and is a seperate thread that is
- * very likely going to be processing in parallel with receiving
- * interrupts.
  */
-char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
-						XP_NASID_MASK_BYTES];
+char *xpc_remote_copy_buffer;
+void *xpc_remote_copy_buffer_base;
 
 
 /*
  * Guarantee that the kmalloc'd memory is cacheline aligned.
  */
-static void *
+void *
 xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 {
 	/* see if kmalloc will give us cachline aligned memory by default */
@@ -148,7 +144,7 @@ xpc_get_rsvd_page_pa(int nasid)
 		}
 	}
 
-	bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len,
+	bte_res = xp_bte_copy(rp_pa, buf, buf_len,
 			(BTE_NOTIFY | BTE_WACQUIRE), NULL);
 	if (bte_res != BTE_SUCCESS) {
 		dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
@@ -447,7 +443,7 @@ xpc_check_remote_hb(void)
 
 		/* pull the remote_hb cache line */
 		bres = xp_bte_copy(part->remote_vars_pa,
-					ia64_tpa((u64) remote_vars),
+					(u64) remote_vars,
 					XPC_RP_VARS_SIZE,
 					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
 		if (bres != BTE_SUCCESS) {
@@ -498,8 +494,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
 
 
 	/* pull over the reserved page header and part_nasids mask */
-
-	bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
+	bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp,
 			XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
 			(BTE_NOTIFY | BTE_WACQUIRE), NULL);
 	if (bres != BTE_SUCCESS) {
@@ -554,11 +549,8 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
 		return xpcVarsNotSet;
 	}
 
-
 	/* pull over the cross partition variables */
-
-	bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
-			XPC_RP_VARS_SIZE,
+	bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE,
 			(BTE_NOTIFY | BTE_WACQUIRE), NULL);
 	if (bres != BTE_SUCCESS) {
 		return xpc_map_bte_errors(bres);
@@ -1239,7 +1231,7 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
 
 	part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
 
-	bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
+	bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask,
 		xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 
 	return xpc_map_bte_errors(bte_res);
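
xpc_kmalloc_cacheline_aligned() loses its static qualifier so that xpc_init() above can allocate the remote copy buffer through it. Only the first line of its body appears in this hunk; going by that comment, the technique is to try a plain allocation first and keep it if it happens to be cacheline aligned, otherwise over-allocate by one cache line and round the pointer up, handing the raw allocation back through *base so the caller can kfree() it. A userspace sketch of that technique (line size and internals are assumptions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define L1_CACHE_BYTES	128UL		/* assumed cache line size */
#define L1_CACHE_ALIGN(x) (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

static void *kmalloc_cacheline_aligned_sketch(size_t size, void **base)
{
	/* see if the allocator gives us aligned memory by default */
	*base = malloc(size);
	if (*base == NULL)
		return NULL;
	if ((uintptr_t)*base == L1_CACHE_ALIGN((uintptr_t)*base))
		return *base;			/* already aligned, keep it */
	free(*base);

	/* nope -- over-allocate and round the pointer up ourselves */
	*base = malloc(size + L1_CACHE_BYTES);
	if (*base == NULL)
		return NULL;
	return (void *)L1_CACHE_ALIGN((uintptr_t)*base);
}

int main(void)
{
	void *base;
	void *buf = kmalloc_cacheline_aligned_sketch(1000, &base);

	if (buf == NULL)
		return 1;
	printf("base=%p aligned=%p\n", base, buf);
	free(base);				/* free the base, not buf */
	return 0;
}
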