author		Dean Nelson <dcn@sgi.com>	2008-07-30 01:34:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-30 12:41:49 -0400
commit		bc63d387e4f5dbbe4ea0c5ade862c38073fd7fa3 (patch)
tree		ebeb9a381224a935fc50697902fcc38b9df47d90 /drivers/misc
parent		78ce1bbe446e9b46dcd6c1e60a4768448a8ce355 (diff)
sgi-xp: support runtime selection of xp_max_npartitions
Support runtime selection of the max number of partitions based on the
hardware being run on.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
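
The mechanism, condensed from the xp_main.c, xp_sn2.c and xp_uv.c hunks below, is a small init-time dispatch: instead of baking in a compile-time XP_MAX_PARTITIONS, xp_init() detects the hardware and lets the architecture-specific init routine set the exported variable xp_max_npartitions. A simplified sketch of that pattern (error handling and the channel-mutex setup trimmed; not a drop-in copy of the patch):

	/* runtime selection of the partition limit, per the patch below */
	short xp_max_npartitions;	/* 64 on sn2, 256 on uv, set at module load */

	int __init
	xp_init(void)
	{
		enum xp_retval ret;

		if (is_shub())			/* sn2: also registers the nofault code */
			ret = xp_init_sn2();	/* sets XP_MAX_NPARTITIONS_SN2 */
		else if (is_uv())
			ret = xp_init_uv();	/* sets XP_MAX_NPARTITIONS_UV */
		else
			ret = xpUnsupported;

		return (ret == xpSuccess) ? 0 : -ENODEV;
	}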
Diffstat (limited to 'drivers/misc')
-rw-r--r--	drivers/misc/sgi-xp/Makefile		3
-rw-r--r--	drivers/misc/sgi-xp/xp.h		53
-rw-r--r--	drivers/misc/sgi-xp/xp_main.c		84
-rw-r--r--	drivers/misc/sgi-xp/xp_sn2.c		92
-rw-r--r--	drivers/misc/sgi-xp/xp_uv.c		30
-rw-r--r--	drivers/misc/sgi-xp/xpc.h		12
-rw-r--r--	drivers/misc/sgi-xp/xpc_channel.c	20
-rw-r--r--	drivers/misc/sgi-xp/xpc_main.c		103
-rw-r--r--	drivers/misc/sgi-xp/xpc_partition.c	16
-rw-r--r--	drivers/misc/sgi-xp/xpnet.c		4
10 files changed, 266 insertions(+), 151 deletions(-)
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
index b6e40a7958ce..b50f29217813 100644
--- a/drivers/misc/sgi-xp/Makefile
+++ b/drivers/misc/sgi-xp/Makefile
@@ -3,7 +3,8 @@
 #
 
 obj-$(CONFIG_SGI_XP)		+= xp.o
-xp-y				:= xp_main.o xp_nofault.o
+xp-y				:= xp_main.o xp_uv.o
+xp-$(CONFIG_IA64)		+= xp_sn2.o xp_nofault.o
 
 obj-$(CONFIG_SGI_XP)		+= xpc.o
 xpc-y				:= xpc_main.o xpc_channel.o xpc_partition.o
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 867fb4863d5a..51087e111887 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -18,6 +18,9 @@
 #include <linux/mutex.h>
 #include <asm/sn/types.h>
 #include <asm/sn/bte.h>
+#ifdef CONFIG_IA64
+#include <asm/sn/arch.h>
+#endif
 
 /* >>> Add this #define to some linux header file some day. */
 #define BYTES_PER_WORD	sizeof(void *)
@@ -45,17 +48,18 @@
 #endif
 
 /*
- * Define the maximum number of logically defined partitions the system
- * can support. It is constrained by the maximum number of hardware
- * partitionable regions. The term 'region' in this context refers to the
- * minimum number of nodes that can comprise an access protection grouping.
- * The access protection is in regards to memory, IPI and IOI.
+ * Define the maximum number of partitions the system can possibly support.
+ * It is based on the maximum number of hardware partitionable regions. The
+ * term 'region' in this context refers to the minimum number of nodes that
+ * can comprise an access protection grouping. The access protection is in
+ * regards to memory, IPI and IOI.
  *
  * The maximum number of hardware partitionable regions is equal to the
  * maximum number of nodes in the entire system divided by the minimum number
  * of nodes that comprise an access protection grouping.
  */
-#define XP_MAX_PARTITIONS	64
+#define XP_MAX_NPARTITIONS_SN2	64
+#define XP_MAX_NPARTITIONS_UV	256
 
 /*
  * Define the number of u64s required to represent all the C-brick nasids
@@ -112,24 +116,28 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
  * other partition that is currently up. Over these channels, kernel-level
  * `users' can communicate with their counterparts on the other partitions.
  *
- * The maxinum number of channels is limited to eight. For performance reasons,
- * the internal cross partition structures require sixteen bytes per channel,
- * and eight allows all of this interface-shared info to fit in one cache line.
+>>> The following described limitation of a max of eight channels possible
+>>> pertains only to ia64-sn2. THIS ISN'T TRUE SINCE I'M PLANNING TO JUST
+>>> TIE INTO THE EXISTING MECHANISM ONCE THE CHANNEL MESSAGES ARE RECEIVED.
+>>> THE 128-BYTE CACHELINE PERFORMANCE ISSUE IS TIED TO IA64-SN2.
  *
- * XPC_NCHANNELS reflects the total number of channels currently defined.
  * If the need for additional channels arises, one can simply increase
- * XPC_NCHANNELS accordingly. If the day should come where that number
- * exceeds the MAXIMUM number of channels allowed (eight), then one will need
- * to make changes to the XPC code to allow for this.
+ * XPC_MAX_NCHANNELS accordingly. If the day should come where that number
+ * exceeds the absolute MAXIMUM number of channels possible (eight), then one
+ * will need to make changes to the XPC code to accommodate for this.
+ *
+ * The absolute maximum number of channels possible is currently limited to
+ * eight for performance reasons. The internal cross partition structures
+ * require sixteen bytes per channel, and eight allows all of this
+ * interface-shared info to fit in one 128-byte cacheline.
  */
 #define XPC_MEM_CHANNEL		0	/* memory channel number */
 #define XPC_NET_CHANNEL		1	/* network channel number */
 
-#define XPC_NCHANNELS		2	/* #of defined channels */
-#define XPC_MAX_NCHANNELS	8	/* max #of channels allowed */
+#define XPC_MAX_NCHANNELS	2	/* max #of channels allowed */
 
-#if XPC_NCHANNELS > XPC_MAX_NCHANNELS
-#error XPC_NCHANNELS exceeds MAXIMUM allowed.
+#if XPC_MAX_NCHANNELS > 8
+#error XPC_MAX_NCHANNELS exceeds absolute MAXIMUM possible.
 #endif
 
 /*
@@ -254,7 +262,8 @@ enum xp_retval {
 	xpBteCopyError,		/* 52: bte_copy() returned error */
 	xpSalError,		/* 53: sn SAL error */
 
-	xpUnknownReason		/* 54: unknown reason - must be last in enum */
+	xpUnsupported,		/* 54: unsupported functionality or resource */
+	xpUnknownReason		/* 55: unknown reason - must be last in enum */
 };
 
 /*
@@ -397,8 +406,16 @@ xpc_partid_to_nasids(short partid, void *nasids)
 	return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
+extern short xp_max_npartitions;
+
 extern u64 xp_nofault_PIOR_target;
 extern int xp_nofault_PIOR(void *);
 extern int xp_error_PIOR(void);
 
+extern struct device *xp;
+extern enum xp_retval xp_init_sn2(void);
+extern enum xp_retval xp_init_uv(void);
+extern void xp_exit_sn2(void);
+extern void xp_exit_uv(void);
+
 #endif /* _DRIVERS_MISC_SGIXP_XP_H */
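
The practical effect of splitting XP_MAX_PARTITIONS into XP_MAX_NPARTITIONS_SN2/XP_MAX_NPARTITIONS_UV and exporting xp_max_npartitions is that per-partition storage is now sized at run time rather than at compile time. A before/after sketch using the names from this patch (surrounding error reporting elided):

	/* before: one compile-time bound, sn2 only */
	struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];

	/* after: bound chosen at module load for the detected hardware */
	struct xpc_partition *xpc_partitions;

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) * xp_max_npartitions,
				 GFP_KERNEL);
	if (xpc_partitions == NULL)
		return -ENOMEM;	/* the patch reports the error and unwinds instead */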
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 196480b691a1..c5cec606377d 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -15,28 +15,32 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/interrupt.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn_sal.h>
+#include <linux/device.h>
 #include "xp.h"
 
-/*
- * The export of xp_nofault_PIOR needs to happen here since it is defined
- * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
- * defined here.
- */
-EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
+/* define the XP debug device structures to be used with dev_dbg() et al */
 
-u64 xp_nofault_PIOR_target;
-EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
+struct device_driver xp_dbg_name = {
+	.name = "xp"
+};
+
+struct device xp_dbg_subname = {
+	.bus_id = {0},		/* set to "" */
+	.driver = &xp_dbg_name
+};
+
+struct device *xp = &xp_dbg_subname;
+
+/* max #of partitions possible */
+short xp_max_npartitions;
+EXPORT_SYMBOL_GPL(xp_max_npartitions);
 
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
  */
-struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
 EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
@@ -135,7 +139,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 {
 	struct xpc_registration *registration;
 
-	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
 	DBUG_ON(payload_size == 0 || nentries == 0);
 	DBUG_ON(func == NULL);
 	DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
@@ -185,7 +189,7 @@ xpc_disconnect(int ch_number)
 {
 	struct xpc_registration *registration;
 
-	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
 
 	registration = &xpc_registrations[ch_number];
 
@@ -221,39 +225,21 @@ EXPORT_SYMBOL_GPL(xpc_disconnect);
 int __init
 xp_init(void)
 {
-	int ret, ch_number;
-	u64 func_addr = *(u64 *)xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *)xp_error_PIOR;
-
-	if (!ia64_platform_is("sn2"))
-		return -ENODEV;
+	enum xp_retval ret;
+	int ch_number;
 
-	/*
-	 * Register a nofault code region which performs a cross-partition
-	 * PIO read. If the PIO read times out, the MCA handler will consume
-	 * the error and return to a kernel-provided instruction to indicate
-	 * an error. This PIO read exists because it is guaranteed to timeout
-	 * if the destination is down (AMO operations do not timeout on at
-	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
-	 * work around).
-	 */
-	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
-				       1, 1);
-	if (ret != 0) {
-		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
-		       ret);
-	}
-	/*
-	 * Setup the nofault PIO read target. (There is no special reason why
-	 * SH_IPI_ACCESS was selected.)
-	 */
-	if (is_shub2())
-		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
+	if (is_shub())
+		ret = xp_init_sn2();
+	else if (is_uv())
+		ret = xp_init_uv();
 	else
-		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
+		ret = xpUnsupported;
+
+	if (ret != xpSuccess)
+		return -ENODEV;
 
 	/* initialize the connection registration mutex */
-	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
+	for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
 		mutex_init(&xpc_registrations[ch_number].mutex);
 
 	return 0;
@@ -264,12 +250,10 @@ module_init(xp_init);
 void __exit
 xp_exit(void)
 {
-	u64 func_addr = *(u64 *)xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *)xp_error_PIOR;
-
-	/* unregister the PIO read nofault code region */
-	(void)sn_register_nofault_code(func_addr, err_func_addr,
-				       err_func_addr, 1, 0);
+	if (is_shub())
+		xp_exit_sn2();
+	else if (is_uv())
+		xp_exit_uv();
 }
 
 module_exit(xp_exit);
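
The new xp_dbg_name/xp_dbg_subname pair exists only so that the common XP code can report through the dev_*() helpers instead of bare printk(); the sn2 file added next uses the exported xp device exactly this way. For illustration (the dev_err() call is verbatim from xp_sn2.c below, the dev_dbg() line is a hypothetical extra):

	/* error reporting through the xp debug device */
	dev_err(xp, "can't register nofault code, error=%d\n", ret);

	/* hypothetical debug output using the same device */
	dev_dbg(xp, "xp_max_npartitions=%d\n", xp_max_npartitions);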
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
new file mode 100644
index 000000000000..b92579356a64
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -0,0 +1,92 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition (XP) sn2-based functions.
+ *
+ * Architecture specific implementation of common functions.
+ */
+
+#include <linux/device.h>
+#include <asm/sn/sn_sal.h>
+#include "xp.h"
+
+/*
+ * The export of xp_nofault_PIOR needs to happen here since it is defined
+ * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
+ * defined here.
+ */
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
+
+u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
+
+/*
+ * Register a nofault code region which performs a cross-partition PIO read.
+ * If the PIO read times out, the MCA handler will consume the error and
+ * return to a kernel-provided instruction to indicate an error. This PIO read
+ * exists because it is guaranteed to timeout if the destination is down
+ * (AMO operations do not timeout on at least some CPUs on Shubs <= v1.2,
+ * which unfortunately we have to work around).
+ */
+static enum xp_retval
+xp_register_nofault_code_sn2(void)
+{
+	int ret;
+	u64 func_addr;
+	u64 err_func_addr;
+
+	func_addr = *(u64 *)xp_nofault_PIOR;
+	err_func_addr = *(u64 *)xp_error_PIOR;
+	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
+				       1, 1);
+	if (ret != 0) {
+		dev_err(xp, "can't register nofault code, error=%d\n", ret);
+		return xpSalError;
+	}
+	/*
+	 * Setup the nofault PIO read target. (There is no special reason why
+	 * SH_IPI_ACCESS was selected.)
+	 */
+	if (is_shub1())
+		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
+	else if (is_shub2())
+		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
+
+	return xpSuccess;
+}
+
+void
+xp_unregister_nofault_code_sn2(void)
+{
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
+
+	/* unregister the PIO read nofault code region */
+	(void)sn_register_nofault_code(func_addr, err_func_addr,
+				       err_func_addr, 1, 0);
+}
+
+enum xp_retval
+xp_init_sn2(void)
+{
+	BUG_ON(!is_shub());
+
+	xp_max_npartitions = XP_MAX_NPARTITIONS_SN2;
+
+	return xp_register_nofault_code_sn2();
+}
+
+void
+xp_exit_sn2(void)
+{
+	BUG_ON(!is_shub());
+
+	xp_unregister_nofault_code_sn2();
+}
+
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
new file mode 100644
index 000000000000..30888be2cdb0
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -0,0 +1,30 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition (XP) uv-based functions.
+ *
+ * Architecture specific implementation of common functions.
+ *
+ */
+
+#include "xp.h"
+
+enum xp_retval
+xp_init_uv(void)
+{
+	BUG_ON(!is_uv());
+
+	xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
+}
+
+void
+xp_exit_uv(void)
+{
+	BUG_ON(!is_uv());
+}
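
One thing to note: xp_init_uv() is declared to return enum xp_retval, but the body added above ends without a return statement, even though xp_init() compares the result against xpSuccess. A completed version would presumably look like the sketch below (the explicit return is an assumption, not part of this commit):

	enum xp_retval
	xp_init_uv(void)
	{
		BUG_ON(!is_uv());

		xp_max_npartitions = XP_MAX_NPARTITIONS_UV;

		return xpSuccess;	/* assumed; missing from the committed version */
	}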
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 11ac267ed68f..0f2affd01df1 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -210,7 +210,7 @@ xpc_disallow_hb(short partid, struct xpc_vars *vars)
  * the XPC running on the remote partition).
  */
 #define XPC_NOTIFY_IRQ_AMOS	   0
-#define XPC_ACTIVATE_IRQ_AMOS	   (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
+#define XPC_ACTIVATE_IRQ_AMOS	   (XPC_NOTIFY_IRQ_AMOS + XP_MAX_NPARTITIONS_SN2)
 #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
 #define XPC_DISENGAGE_REQUEST_AMO  (XPC_ENGAGED_PARTITIONS_AMO + 1)
 
@@ -285,7 +285,7 @@ struct xpc_gp {
 };
 
 #define XPC_GP_SIZE \
 		L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
-		L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
+		L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_MAX_NCHANNELS)
 
 /*
  * Define a structure that contains arguments associated with opening and
@@ -300,7 +300,8 @@ struct xpc_openclose_args {
 };
 
 #define XPC_OPENCLOSE_ARGS_SIZE \
-	      L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
+	      L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * \
+			     XPC_MAX_NCHANNELS)
 
 /* struct xpc_msg flags */
 
@@ -637,7 +638,7 @@ extern int xpc_exiting;
 extern struct xpc_vars *xpc_vars;
 extern struct xpc_rsvd_page *xpc_rsvd_page;
 extern struct xpc_vars_part *xpc_vars_part;
-extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
+extern struct xpc_partition *xpc_partitions;
 extern char *xpc_remote_copy_buffer;
 extern void *xpc_remote_copy_buffer_base;
 extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
@@ -1104,13 +1105,14 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
 }
 
 /*
+>>> this block comment needs to be moved and re-written.
  * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
  * pages are located in the lowest granule. The lowest granule uses 4k pages
  * for cached references and an alternate TLB handler to never provide a
  * cacheable mapping for the entire region. This will prevent speculative
  * reading of cached copies of our lines from being issued which will cause
  * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
- * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an
+ * AMO variables (based on xp_max_npartitions) for message notification and an
  * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
  * activation and 2 AMO variables for partition deactivation.
  */
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 9c90c2d55c08..12d8eb6957a7 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -110,14 +110,14 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	 * Allocate all of the channel structures as a contiguous chunk of
 	 * memory.
 	 */
-	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
+	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
 				 GFP_KERNEL);
 	if (part->channels == NULL) {
 		dev_err(xpc_chan, "can't get memory for channels\n");
 		return xpNoMemory;
 	}
 
-	part->nchannels = XPC_NCHANNELS;
+	part->nchannels = XPC_MAX_NCHANNELS;
 
 	/* allocate all the required GET/PUT values */
 
@@ -1432,9 +1432,9 @@ xpc_initiate_connect(int ch_number)
 	struct xpc_partition *part;
 	struct xpc_channel *ch;
 
-	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
 
-	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+	for (partid = 0; partid < xp_max_npartitions; partid++) {
 		part = &xpc_partitions[partid];
 
 		if (xpc_part_ref(part)) {
@@ -1488,10 +1488,10 @@ xpc_initiate_disconnect(int ch_number)
 	struct xpc_partition *part;
 	struct xpc_channel *ch;
 
-	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
 
 	/* initiate the channel disconnect for every active partition */
-	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+	for (partid = 0; partid < xp_max_npartitions; partid++) {
 		part = &xpc_partitions[partid];
 
 		if (xpc_part_ref(part)) {
@@ -1734,7 +1734,7 @@ xpc_initiate_allocate(short partid, int ch_number, u32 flags, void **payload)
 	enum xp_retval ret = xpUnknownReason;
 	struct xpc_msg *msg = NULL;
 
-	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
 	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
 
 	*payload = NULL;
@@ -1918,7 +1918,7 @@ xpc_initiate_send(short partid, int ch_number, void *payload)
 	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
 		partid, ch_number);
 
-	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
 	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
 	DBUG_ON(msg == NULL);
 
@@ -1968,7 +1968,7 @@ xpc_initiate_send_notify(short partid, int ch_number, void *payload,
 	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
 		partid, ch_number);
 
-	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
 	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
 	DBUG_ON(msg == NULL);
 	DBUG_ON(func == NULL);
@@ -2210,7 +2210,7 @@ xpc_initiate_received(short partid, int ch_number, void *payload)
 	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
 	s64 get, msg_number = msg->number;
 
-	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
 	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
 
 	ch = &part->channels[ch_number];
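
Beyond the constant rename, the valid partition-ID range changes in this file as well: loops and DBUG_ON() bounds checks that previously covered 1 .. XP_MAX_PARTITIONS-1 now cover 0 .. xp_max_npartitions-1. The iteration idiom used throughout the rest of the patch, shown in isolation:

	short partid;
	struct xpc_partition *part;

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			/* ... operate on this referenced partition ... */
		}
	}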
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index c3b4227f48a5..a05c7c7da228 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -433,7 +433,7 @@ xpc_activating(void *__partid)
 	struct xpc_partition *part = &xpc_partitions[partid];
 	unsigned long irq_flags;
 
-	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
 
 	spin_lock_irqsave(&part->act_lock, irq_flags);
 
@@ -544,7 +544,7 @@ xpc_notify_IRQ_handler(int irq, void *dev_id)
 	short partid = (short)(u64)dev_id;
 	struct xpc_partition *part = &xpc_partitions[partid];
 
-	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
 
 	if (xpc_part_ref(part)) {
 		xpc_check_for_channel_activity(part);
@@ -815,7 +815,7 @@ xpc_disconnect_wait(int ch_number)
 	int wakeup_channel_mgr;
 
 	/* now wait for all callouts to the caller's function to cease */
-	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+	for (partid = 0; partid < xp_max_npartitions; partid++) {
 		part = &xpc_partitions[partid];
 
 		if (!xpc_part_ref(part))
@@ -895,7 +895,7 @@ xpc_do_exit(enum xp_retval reason)
 	do {
 		active_part_count = 0;
 
-		for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+		for (partid = 0; partid < xp_max_npartitions; partid++) {
 			part = &xpc_partitions[partid];
 
 			if (xpc_partition_disengaged(part) &&
@@ -956,11 +956,8 @@ xpc_do_exit(enum xp_retval reason)
 	DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
 
 	if (reason == xpUnloading) {
-		/* take ourselves off of the reboot_notifier_list */
-		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
-
-		/* take ourselves off of the die_notifier list */
 		(void)unregister_die_notifier(&xpc_die_notifier);
+		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
 	}
 
 	/* close down protections for IPI operations */
@@ -972,6 +969,7 @@ xpc_do_exit(enum xp_retval reason)
 	if (xpc_sysctl)
 		unregister_sysctl_table(xpc_sysctl);
 
+	kfree(xpc_partitions);
 	kfree(xpc_remote_copy_buffer_base);
 }
 
@@ -1017,7 +1015,7 @@ xpc_die_disengage(void)
 
 	xpc_vars->heartbeating_to_mask = 0;	/* indicate we're deactivated */
 
-	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+	for (partid = 0; partid < xp_max_npartitions; partid++) {
 		part = &xpc_partitions[partid];
 
 		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
@@ -1053,7 +1051,8 @@ xpc_die_disengage(void)
 
 		time = rtc_time();
 		if (time >= disengage_request_timeout) {
-			for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+			for (partid = 0; partid < xp_max_npartitions;
+			     partid++) {
 				if (engaged & (1UL << partid)) {
 					dev_info(xpc_part, "disengage from "
 						 "remote partition %d timed "
@@ -1132,18 +1131,26 @@ xpc_init(void)
 	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
 
+	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
+	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
+
 	buf_size = max(XPC_RP_VARS_SIZE,
 		       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
 	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
 								GFP_KERNEL,
 						&xpc_remote_copy_buffer_base);
-	if (xpc_remote_copy_buffer == NULL)
+	if (xpc_remote_copy_buffer == NULL) {
+		dev_err(xpc_part, "can't get memory for remote copy buffer\n");
 		return -ENOMEM;
+	}
 
-	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
-	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
-
-	xpc_sysctl = register_sysctl_table(xpc_sys_dir);
+	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
+				 xp_max_npartitions, GFP_KERNEL);
+	if (xpc_partitions == NULL) {
+		dev_err(xpc_part, "can't get memory for partition structure\n");
+		ret = -ENOMEM;
+		goto out_1;
+	}
 
 	/*
 	 * The first few fields of each entry of xpc_partitions[] need to
@@ -1153,7 +1160,7 @@ xpc_init(void)
 	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
 	 * PARTITION HAS BEEN ACTIVATED.
 	 */
-	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+	for (partid = 0; partid < xp_max_npartitions; partid++) {
 		part = &xpc_partitions[partid];
 
 		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
@@ -1173,6 +1180,8 @@ xpc_init(void)
 		atomic_set(&part->references, 0);
 	}
 
+	xpc_sysctl = register_sysctl_table(xpc_sys_dir);
+
 	/*
 	 * Open up protections for IPI operations (and AMO operations on
 	 * Shub 1.1 systems).
@@ -1196,14 +1205,8 @@ xpc_init(void)
 	if (ret != 0) {
 		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
 			"errno=%d\n", -ret);
-
-		xpc_restrict_IPI_ops();
-
-		if (xpc_sysctl)
-			unregister_sysctl_table(xpc_sysctl);
-
-		kfree(xpc_remote_copy_buffer_base);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto out_2;
 	}
 
 	/*
@@ -1213,16 +1216,9 @@ xpc_init(void)
 	 */
 	xpc_rsvd_page = xpc_rsvd_page_init();
 	if (xpc_rsvd_page == NULL) {
-		dev_err(xpc_part, "could not setup our reserved page\n");
-
-		free_irq(SGI_XPC_ACTIVATE, NULL);
-		xpc_restrict_IPI_ops();
-
-		if (xpc_sysctl)
-			unregister_sysctl_table(xpc_sysctl);
-
-		kfree(xpc_remote_copy_buffer_base);
-		return -EBUSY;
+		dev_err(xpc_part, "can't setup our reserved page\n");
+		ret = -EBUSY;
+		goto out_3;
 	}
 
 	/* add ourselves to the reboot_notifier_list */
@@ -1245,25 +1241,8 @@ xpc_init(void)
 	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
 	if (IS_ERR(kthread)) {
 		dev_err(xpc_part, "failed while forking hb check thread\n");
-
-		/* indicate to others that our reserved page is uninitialized */
-		xpc_rsvd_page->vars_pa = 0;
-
-		/* take ourselves off of the reboot_notifier_list */
-		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
-
-		/* take ourselves off of the die_notifier list */
-		(void)unregister_die_notifier(&xpc_die_notifier);
-
-		del_timer_sync(&xpc_hb_timer);
-		free_irq(SGI_XPC_ACTIVATE, NULL);
-		xpc_restrict_IPI_ops();
-
-		if (xpc_sysctl)
-			unregister_sysctl_table(xpc_sysctl);
-
-		kfree(xpc_remote_copy_buffer_base);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto out_4;
 	}
 
 	/*
@@ -1290,6 +1269,24 @@ xpc_init(void)
 				  xpc_initiate_partid_to_nasids);
 
 	return 0;
+
+	/* initialization was not successful */
+out_4:
+	/* indicate to others that our reserved page is uninitialized */
+	xpc_rsvd_page->vars_pa = 0;
+	del_timer_sync(&xpc_hb_timer);
+	(void)unregister_die_notifier(&xpc_die_notifier);
+	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
+out_3:
+	free_irq(SGI_XPC_ACTIVATE, NULL);
+out_2:
+	xpc_restrict_IPI_ops();
+	if (xpc_sysctl)
+		unregister_sysctl_table(xpc_sysctl);
+	kfree(xpc_partitions);
+out_1:
+	kfree(xpc_remote_copy_buffer_base);
+	return ret;
 }
 
 module_init(xpc_init);
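
The scattered hand-rolled cleanup blocks in xpc_init()'s failure paths are replaced above by a single chain of labels, out_1 through out_4, that undo initialization in reverse order; each failure point just sets ret and jumps to the label matching how far setup had progressed. Reduced to a skeleton (labels and cleanup calls as in the patch, the setup steps themselves elided into comments):

	int __init
	xpc_init(void)
	{
		int ret;

		/* allocate the remote copy buffer; on failure return -ENOMEM directly */
		/* allocate xpc_partitions;       on failure: ret = -ENOMEM; goto out_1; */
		/* request SGI_XPC_ACTIVATE IRQ;  on failure: ret = -EBUSY;  goto out_2; */
		/* set up the reserved page;      on failure: ret = -EBUSY;  goto out_3; */
		/* start the heartbeat checker;   on failure: ret = -EBUSY;  goto out_4; */
		return 0;

	out_4:
		xpc_rsvd_page->vars_pa = 0;
		del_timer_sync(&xpc_hb_timer);
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	out_3:
		free_irq(SGI_XPC_ACTIVATE, NULL);
	out_2:
		xpc_restrict_IPI_ops();
		if (xpc_sysctl)
			unregister_sysctl_table(xpc_sysctl);
		kfree(xpc_partitions);
	out_1:
		kfree(xpc_remote_copy_buffer_base);
		return ret;
	}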
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 7dd4b5812c42..02a858eddd8d 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -51,13 +51,7 @@ struct xpc_vars_part *xpc_vars_part;
 static int xp_nasid_mask_bytes;	/* actual size in bytes of nasid mask */
 static int xp_nasid_mask_words;	/* actual size in words of nasid mask */
 
-/*
- * For performance reasons, each entry of xpc_partitions[] is cacheline
- * aligned. And xpc_partitions[] is padded with an additional entry at the
- * end so that the last legitimate entry doesn't share its cacheline with
- * another variable.
- */
-struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
+struct xpc_partition *xpc_partitions;
 
 /*
  * Generic buffer used to store a local copy of portions of a remote
@@ -261,7 +255,7 @@ xpc_rsvd_page_init(void)
 
 	/* clear xpc_vars_part */
 	memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
-	       XP_MAX_PARTITIONS);
+	       xp_max_npartitions);
 
 	/* initialize the activate IRQ related AMO variables */
 	for (i = 0; i < xp_nasid_mask_words; i++)
@@ -408,7 +402,7 @@ xpc_check_remote_hb(void)
 
 	remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
 
-	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+	for (partid = 0; partid < xp_max_npartitions; partid++) {
 
 		if (xpc_exiting)
 			break;
@@ -487,10 +481,8 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
 
 	/* check that the partid is for another partition */
 
-	if (remote_rp->partid < 1 ||
-	    remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
+	if (remote_rp->partid < 0 || remote_rp->partid >= xp_max_npartitions)
 		return xpInvalidPartid;
-	}
 
 	if (remote_rp->partid == sn_partition_id)
 		return xpLocalPartid;
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 822dc8e8d7f0..cc252f201b25 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -287,7 +287,7 @@ xpnet_connection_activity(enum xp_retval reason, short partid, int channel,
 {
 	long bp;
 
-	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
 	DBUG_ON(channel != XPC_NET_CHANNEL);
 
 	switch (reason) {
@@ -513,7 +513,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/*
 	 * Main send loop.
 	 */
-	for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
+	for (dest_partid = 0; dp && dest_partid < xp_max_npartitions;
 	     dest_partid++) {
 
 		if (!(dp & (1UL << (dest_partid - 1)))) {