author      Dean Nelson <dcn@sgi.com>            2005-10-25 15:09:51 -0400
committer   Tony Luck <tony.luck@intel.com>      2005-10-25 19:32:39 -0400
commit      4b38fcd4858204cb3667eb7b3aca48ffb1002f05 (patch)
tree        2125f5d9689e5bdc64ce2e4f35d8a8220c2e0054 /arch/ia64/sn
parent      e54af724c1ae3530c95135157776c9be65cdb747 (diff)
[IA64-SGI] XPC changes to support more than 2k nasids
XPC needs to be changed to support up to 16k nasids on an SGI Altix system.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/sn')
-rw-r--r--   arch/ia64/sn/kernel/xpc.h            |  77
-rw-r--r--   arch/ia64/sn/kernel/xpc_main.c       |   8
-rw-r--r--   arch/ia64/sn/kernel/xpc_partition.c  | 140
3 files changed, 141 insertions(+), 84 deletions(-)
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
index ae51d7b4c42e..33df1b3758b6 100644
--- a/arch/ia64/sn/kernel/xpc.h
+++ b/arch/ia64/sn/kernel/xpc.h
@@ -68,29 +68,58 @@


 /*
- * Reserved Page provided by SAL.
+ * the reserved page
  *
- * SAL provides one page per partition of reserved memory.  When SAL
- * initialization is complete, SAL_signature, SAL_version, partid,
- * part_nasids, and mach_nasids are set.
+ * SAL reserves one page of memory per partition for XPC. Though a full page
+ * in length (16384 bytes), its starting address is not page aligned, but it
+ * is cacheline aligned. The reserved page consists of the following:
+ *
+ *   reserved page header
+ *
+ *     The first cacheline of the reserved page contains the header
+ *     (struct xpc_rsvd_page). Before SAL initialization has completed,
+ *     SAL has set up the following fields of the reserved page header:
+ *     SAL_signature, SAL_version, partid, and nasids_size. The other
+ *     fields are set up by XPC. (xpc_rsvd_page points to the local
+ *     partition's reserved page.)
+ *
+ *   part_nasids mask
+ *   mach_nasids mask
+ *
+ *     SAL also sets up two bitmaps (or masks), one that reflects the actual
+ *     nasids in this partition (part_nasids), and the other that reflects
+ *     the actual nasids in the entire machine (mach_nasids). We're only
+ *     interested in the even numbered nasids (which contain the processors
+ *     and/or memory), so we only need half as many bits to represent the
+ *     nasids. The part_nasids mask is located starting at the first cacheline
+ *     following the reserved page header. The mach_nasids mask follows right
+ *     after the part_nasids mask. The size in bytes of each mask is reflected
+ *     by the reserved page header field 'nasids_size'. (Local partition's
+ *     mask pointers are xpc_part_nasids and xpc_mach_nasids.)
+ *
+ *   vars
+ *   vars part
+ *
+ *     Immediately following the mach_nasids mask are the XPC variables
+ *     required by other partitions. First are those that are generic to all
+ *     partitions (vars), followed on the next available cacheline by those
+ *     which are partition specific (vars part). These are setup by XPC.
+ *     (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
  *
  * Note: Until vars_pa is set, the partition XPC code has not been initialized.
  */
 struct xpc_rsvd_page {
-        u64 SAL_signature;      /* SAL unique signature */
-        u64 SAL_version;        /* SAL specified version */
-        u8 partid;              /* partition ID from SAL */
+        u64 SAL_signature;      /* SAL: unique signature */
+        u64 SAL_version;        /* SAL: version */
+        u8 partid;              /* SAL: partition ID */
         u8 version;
-        u8 pad[6];              /* pad to u64 align */
+        u8 pad1[6];             /* align to next u64 in cacheline */
         volatile u64 vars_pa;
-        struct timespec stamp;  /* time when reserved page was initialized */
-        u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
-        u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
+        struct timespec stamp;  /* time when reserved page was setup by XPC */
+        u64 pad2[9];            /* align to last u64 in cacheline */
+        u64 nasids_size;        /* SAL: size of each nasid mask in bytes */
 };

-#define XPC_RSVD_PAGE_ALIGNED_SIZE \
-                        (L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)))
-
 #define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */

 #define XPC_SUPPORTS_RP_STAMP(_version) \
@@ -142,8 +171,6 @@ struct xpc_vars {
         AMO_t *amos_page;       /* vaddr of page of AMOs from MSPEC driver */
 };

-#define XPC_VARS_ALIGNED_SIZE   (L1_CACHE_ALIGN(sizeof(struct xpc_vars)))
-
 #define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */

 #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
@@ -184,7 +211,7 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
 /*
  * The AMOs page consists of a number of AMO variables which are divided into
  * four groups, The first two groups are used to identify an IRQ's sender.
- * These two groups consist of 64 and 16 AMO variables respectively. The last
+ * These two groups consist of 64 and 128 AMO variables respectively. The last
  * two groups, consisting of just one AMO variable each, are used to identify
  * the remote partitions that are currently engaged (from the viewpoint of
  * the XPC running on the remote partition).
@@ -233,6 +260,16 @@ struct xpc_vars_part {
 #define XPC_VP_MAGIC2   0x0073726176435058L  /* 'XPCvars\0'L (little endian) */


+/* the reserved page sizes and offsets */
+
+#define XPC_RP_HEADER_SIZE      L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
+#define XPC_RP_VARS_SIZE        L1_CACHE_ALIGN(sizeof(struct xpc_vars))
+
+#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
+#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
+#define XPC_RP_VARS(_rp)        ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
+#define XPC_RP_VARS_PART(_rp)   (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
+

 /*
  * Functions registered by add_timer() or called by kernel_thread() only
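
For reference, the new XPC_RP_* macros encode the layout spelled out in the reserved-page comment above: header in the first cacheline, then the part_nasids and mach_nasids masks, then the vars structures. A minimal userspace sketch of that offset arithmetic (standalone C with made-up sizes, loosely mirroring the macros rather than reproducing them):

```c
#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES  128     /* assumed SN2 cacheline size */
#define CACHE_ALIGN(x)  (((x) + L1_CACHE_BYTES - 1) & ~(uint64_t)(L1_CACHE_BYTES - 1))

int main(void)
{
        uint64_t rp          = 0x10000;          /* pretend base address of the reserved page */
        uint64_t header_size = CACHE_ALIGN(64);  /* header fits in the first cacheline */
        uint64_t nasids_size = 1024;             /* bytes per nasid mask, from rp->nasids_size */
        uint64_t vars_size   = CACHE_ALIGN(400); /* stand-in for sizeof(struct xpc_vars), aligned */

        uint64_t part_nasids = rp + header_size;           /* cf. XPC_RP_PART_NASIDS() */
        uint64_t mach_nasids = part_nasids + nasids_size;  /* cf. XPC_RP_MACH_NASIDS() */
        uint64_t vars        = mach_nasids + nasids_size;  /* cf. XPC_RP_VARS() */
        uint64_t vars_part   = vars + vars_size;           /* cf. XPC_RP_VARS_PART() */

        printf("part_nasids=0x%llx mach_nasids=0x%llx vars=0x%llx vars_part=0x%llx\n",
               (unsigned long long)part_nasids, (unsigned long long)mach_nasids,
               (unsigned long long)vars, (unsigned long long)vars_part);
        return 0;
}
```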
@@ -1147,9 +1184,9 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
  * cacheable mapping for the entire region. This will prevent speculative
  * reading of cached copies of our lines from being issued which will cause
  * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
- * (XP_MAX_PARTITIONS) AMO variables for message notification and an
- * additional 16 (XP_NASID_MASK_WORDS) AMO variables for partition activation
- * and 2 AMO variables for partition deactivation.
+ * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an
+ * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
+ * activation and 2 AMO variables for partition deactivation.
  */
 static inline AMO_t *
 xpc_IPI_init(int index)
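
Where the 16 -> 128 change in the comments above comes from, worked out explicitly (my arithmetic, not part of the patch): each activation AMO covers one u64 word of the nasid mask, and only even numbered nasids need a bit.

```c
#include <stdio.h>

int main(void)
{
        int old_max_nasids = 2048;      /* the limit the patch title refers to */
        int new_max_nasids = 16384;     /* 16k nasids on a large Altix */

        /* one mask bit per even nasid, 64 bits per u64 word */
        printf("old: %d bits -> %d words -> %d bytes per mask\n",
               old_max_nasids / 2, old_max_nasids / 2 / 64, old_max_nasids / 2 / 8);
        printf("new: %d bits -> %d words -> %d bytes per mask\n",
               new_max_nasids / 2, new_max_nasids / 2 / 64, new_max_nasids / 2 / 8);
        return 0;       /* prints 16 words / 128 bytes, then 128 words / 1024 bytes */
}
```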
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index db349c6d4c58..38f2c699192c 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -1049,11 +1049,11 @@ xpc_init(void)

         /*
          * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
-         * both a partition's reserved page and its XPC variables. Its size was
-         * based on the size of a reserved page. So we need to ensure that the
-         * XPC variables will fit as well.
+         * various portions of a partition's reserved page. Its size is based
+         * on the size of the reserved page header and part_nasids mask. So we
+         * need to ensure that the other items will fit as well.
          */
-        if (XPC_VARS_ALIGNED_SIZE > XPC_RSVD_PAGE_ALIGNED_SIZE) {
+        if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) {
                 dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
                 return -EPERM;
         }
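
The reworked check above reflects that xpc_remote_copy_buffer is now statically sized for a reserved page header plus one nasid mask, and the XPC variables are the only other thing ever pulled into it, so they must fit too. The invariant in isolation (illustrative numbers only):

```c
#include <assert.h>

int main(void)
{
        /* all sizes are stand-ins, not the kernel's real values */
        unsigned long rp_header_size   = 128;   /* think XPC_RP_HEADER_SIZE  */
        unsigned long nasid_mask_bytes = 1024;  /* think XP_NASID_MASK_BYTES */
        unsigned long rp_vars_size     = 512;   /* think XPC_RP_VARS_SIZE    */

        /* the shared copy buffer is header + mask; the vars copy must also fit in it */
        assert(rp_vars_size <= rp_header_size + nasid_mask_bytes);
        return 0;
}
```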
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 958488f55699..6bb1091f2a4d 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -47,13 +47,16 @@ static u64 xpc_sh2_IPI_access3;
 u64 xpc_prot_vec[MAX_COMPACT_NODES];


-/* this partition's reserved page */
+/* this partition's reserved page pointers */
 struct xpc_rsvd_page *xpc_rsvd_page;
-
-/* this partition's XPC variables (within the reserved page) */
+static u64 *xpc_part_nasids;
+static u64 *xpc_mach_nasids;
 struct xpc_vars *xpc_vars;
 struct xpc_vars_part *xpc_vars_part;

+static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */
+static int xp_nasid_mask_words; /* actual size in words of nasid mask */
+

 /*
  * For performance reasons, each entry of xpc_partitions[] is cacheline
@@ -65,15 +68,16 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];


 /*
- * Generic buffer used to store a local copy of the remote partitions
- * reserved page or XPC variables.
+ * Generic buffer used to store a local copy of portions of a remote
+ * partition's reserved page (either its header and part_nasids mask,
+ * or its vars).
  *
  * xpc_discovery runs only once and is a seperate thread that is
  * very likely going to be processing in parallel with receiving
  * interrupts.
  */
-char ____cacheline_aligned
-                xpc_remote_copy_buffer[XPC_RSVD_PAGE_ALIGNED_SIZE];
+char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
+                                                        XP_NASID_MASK_BYTES];


 /*
@@ -136,7 +140,7 @@ xpc_rsvd_page_init(void)
 {
         struct xpc_rsvd_page *rp;
         AMO_t *amos_page;
-        u64 rp_pa, next_cl, nasid_array = 0;
+        u64 rp_pa, nasid_array = 0;
         int i, ret;


@@ -144,7 +148,8 @@ xpc_rsvd_page_init(void)

         rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0),
                                         (u64) xpc_remote_copy_buffer,
-                                        XPC_RSVD_PAGE_ALIGNED_SIZE);
+                                                XPC_RP_HEADER_SIZE +
+                                                L1_CACHE_BYTES);
         if (rp_pa == 0) {
                 dev_err(xpc_part, "SAL failed to locate the reserved page\n");
                 return NULL;
@@ -159,12 +164,19 @@ xpc_rsvd_page_init(void)

         rp->version = XPC_RP_VERSION;

-        /*
-         * Place the XPC variables on the cache line following the
-         * reserved page structure.
-         */
-        next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE;
-        xpc_vars = (struct xpc_vars *) next_cl;
+        /* establish the actual sizes of the nasid masks */
+        if (rp->SAL_version == 1) {
+                /* SAL_version 1 didn't set the nasids_size field */
+                rp->nasids_size = 128;
+        }
+        xp_nasid_mask_bytes = rp->nasids_size;
+        xp_nasid_mask_words = xp_nasid_mask_bytes / 8;
+
+        /* setup the pointers to the various items in the reserved page */
+        xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
+        xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
+        xpc_vars = XPC_RP_VARS(rp);
+        xpc_vars_part = XPC_RP_VARS_PART(rp);

         /*
          * Before clearing xpc_vars, see if a page of AMOs had been previously
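
The SAL_version == 1 branch above is the backward-compatibility path: older PROMs never filled in nasids_size, so XPC falls back to the old fixed mask size of 128 bytes, from which the word count and everything else is derived. A standalone sketch of that derivation (illustrative names and values):

```c
#include <stdio.h>

int main(void)
{
        unsigned long sal_version = 1;  /* pretend an older PROM */
        unsigned long nasids_size;      /* bytes per nasid mask */

        /* newer SAL reports the size itself; 1024 here is just an example value */
        nasids_size = (sal_version == 1) ? 128 : 1024;

        printf("mask = %lu bytes = %lu u64 words, covering %lu (even) nasids\n",
               nasids_size, nasids_size / 8, nasids_size * 8 * 2);
        return 0;       /* SAL_version 1: 128 bytes, 16 words, 2048 nasids */
}
```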
@@ -216,26 +228,23 @@ xpc_rsvd_page_init(void)
                 amos_page = (AMO_t *) TO_AMO((u64) amos_page);
         }

+        /* clear xpc_vars */
         memset(xpc_vars, 0, sizeof(struct xpc_vars));

-        /*
-         * Place the XPC per partition specific variables on the cache line
-         * following the XPC variables structure.
-         */
-        next_cl += XPC_VARS_ALIGNED_SIZE;
-        memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) *
-                                                        XP_MAX_PARTITIONS);
-        xpc_vars_part = (struct xpc_vars_part *) next_cl;
-        xpc_vars->vars_part_pa = __pa(next_cl);
-
         xpc_vars->version = XPC_V_VERSION;
         xpc_vars->act_nasid = cpuid_to_nasid(0);
         xpc_vars->act_phys_cpuid = cpu_physical_id(0);
+        xpc_vars->vars_part_pa = __pa(xpc_vars_part);
+        xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
         xpc_vars->amos_page = amos_page;        /* save for next load of XPC */


+        /* clear xpc_vars_part */
+        memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
+                                                        XP_MAX_PARTITIONS);
+
         /* initialize the activate IRQ related AMO variables */
-        for (i = 0; i < XP_NASID_MASK_WORDS; i++) {
+        for (i = 0; i < xp_nasid_mask_words; i++) {
                 (void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
         }

@@ -243,10 +252,7 @@ xpc_rsvd_page_init(void)
         (void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
         (void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);

-        /* export AMO page's physical address to other partitions */
-        xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page);
-
-        /* timestamp of when reserved page was initialized */
+        /* timestamp of when reserved page was setup by XPC */
         rp->stamp = CURRENT_TIME;

         /*
@@ -406,7 +412,7 @@ xpc_check_remote_hb(void)
                 /* pull the remote_hb cache line */
                 bres = xp_bte_copy(part->remote_vars_pa,
                                         ia64_tpa((u64) remote_vars),
-                                        XPC_VARS_ALIGNED_SIZE,
+                                        XPC_RP_VARS_SIZE,
                                         (BTE_NOTIFY | BTE_WACQUIRE), NULL);
                 if (bres != BTE_SUCCESS) {
                         XPC_DEACTIVATE_PARTITION(part,
@@ -434,10 +440,11 @@ xpc_check_remote_hb(void)


 /*
- * Get a copy of the remote partition's rsvd page.
+ * Get a copy of a portion of the remote partition's rsvd page.
  *
  * remote_rp points to a buffer that is cacheline aligned for BTE copies and
- * assumed to be of size XPC_RSVD_PAGE_ALIGNED_SIZE.
+ * is large enough to contain a copy of their reserved page header and
+ * part_nasids mask.
  */
 static enum xpc_retval
 xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
@@ -449,16 +456,17 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
         /* get the reserved page's physical address */

         *remote_rp_pa = xpc_get_rsvd_page_pa(nasid, (u64) remote_rp,
-                                XPC_RSVD_PAGE_ALIGNED_SIZE);
+                                                XPC_RP_HEADER_SIZE +
+                                                xp_nasid_mask_bytes);
         if (*remote_rp_pa == 0) {
                 return xpcNoRsvdPageAddr;
         }


-        /* pull over the reserved page structure */
+        /* pull over the reserved page header and part_nasids mask */

         bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
-                        XPC_RSVD_PAGE_ALIGNED_SIZE,
+                        XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
                         (BTE_NOTIFY | BTE_WACQUIRE), NULL);
         if (bres != BTE_SUCCESS) {
                 return xpc_map_bte_errors(bres);
@@ -466,8 +474,11 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,


         if (discovered_nasids != NULL) {
-                for (i = 0; i < XP_NASID_MASK_WORDS; i++) {
-                        discovered_nasids[i] |= remote_rp->part_nasids[i];
+                u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
+
+
+                for (i = 0; i < xp_nasid_mask_words; i++) {
+                        discovered_nasids[i] |= remote_part_nasids[i];
                 }
         }

@@ -494,10 +505,10 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,


 /*
- * Get a copy of the remote partition's XPC variables.
+ * Get a copy of the remote partition's XPC variables from the reserved page.
  *
  * remote_vars points to a buffer that is cacheline aligned for BTE copies and
- * assumed to be of size XPC_VARS_ALIGNED_SIZE.
+ * assumed to be of size XPC_RP_VARS_SIZE.
  */
 static enum xpc_retval
 xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
@@ -513,7 +524,7 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
         /* pull over the cross partition variables */

         bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
-                                XPC_VARS_ALIGNED_SIZE,
+                                XPC_RP_VARS_SIZE,
                                 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
         if (bres != BTE_SUCCESS) {
                 return xpc_map_bte_errors(bres);
@@ -778,14 +789,13 @@ xpc_identify_act_IRQ_sender(void)
         u64 nasid;                      /* remote nasid */
         int n_IRQs_detected = 0;
         AMO_t *act_amos;
-        struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page;


         act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;


         /* scan through act AMO variable looking for non-zero entries */
-        for (word = 0; word < XP_NASID_MASK_WORDS; word++) {
+        for (word = 0; word < xp_nasid_mask_words; word++) {

                 if (xpc_exiting) {
                         break;
@@ -807,7 +817,7 @@ xpc_identify_act_IRQ_sender(void)
                  * remote nasid in our reserved pages machine mask.
                  * This is used in the event of module reload.
                  */
-                rp->mach_nasids[word] |= nasid_mask;
+                xpc_mach_nasids[word] |= nasid_mask;


                 /* locate the nasid(s) which sent interrupts */
@@ -992,6 +1002,7 @@ xpc_discovery(void)
         u64 remote_rp_pa;
         u64 remote_vars_pa;
         int region;
+        int region_size;
         int max_regions;
         int nasid;
         struct xpc_rsvd_page *rp;
@@ -1001,7 +1012,8 @@ xpc_discovery(void)
         enum xpc_retval ret;


-        remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE,
+        remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
+                                                xp_nasid_mask_bytes,
                                                 GFP_KERNEL, &remote_rp_base);
         if (remote_rp == NULL) {
                 return;
@@ -1009,13 +1021,13 @@ xpc_discovery(void)
         remote_vars = (struct xpc_vars *) remote_rp;


-        discovered_nasids = kmalloc(sizeof(u64) * XP_NASID_MASK_WORDS,
+        discovered_nasids = kmalloc(sizeof(u64) * xp_nasid_mask_words,
                                                         GFP_KERNEL);
         if (discovered_nasids == NULL) {
                 kfree(remote_rp_base);
                 return;
         }
-        memset(discovered_nasids, 0, sizeof(u64) * XP_NASID_MASK_WORDS);
+        memset(discovered_nasids, 0, sizeof(u64) * xp_nasid_mask_words);

         rp = (struct xpc_rsvd_page *) xpc_rsvd_page;

@@ -1024,11 +1036,19 @@ xpc_discovery(void)
          * nodes that can comprise an access protection grouping. The access
          * protection is in regards to memory, IOI and IPI.
          */
-//>>> move the next two #defines into either include/asm-ia64/sn/arch.h or
-//>>> include/asm-ia64/sn/addrs.h
-#define SH1_MAX_REGIONS 64
-#define SH2_MAX_REGIONS 256
-        max_regions = is_shub2() ? SH2_MAX_REGIONS : SH1_MAX_REGIONS;
+        max_regions = 64;
+        region_size = sn_region_size;
+
+        switch (region_size) {
+        case 128:
+                max_regions *= 2;
+        case 64:
+                max_regions *= 2;
+        case 32:
+                max_regions *= 2;
+                region_size = 16;
+                DBUG_ON(!is_shub2());
+        }

         for (region = 0; region < max_regions; region++) {

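
The fall-through switch above doubles max_regions once for each factor of two by which sn_region_size exceeds 16, then walks the nasid space in 16-nasid regions; a region size of 16 (which the DBUG_ON suggests is the shub1 case) keeps the old 64 regions. Reading the loop bounds in the next hunk as covering max_regions * region_size * 2 nasids, a standalone sketch of that sizing:

```c
#include <stdio.h>

/* Mirror of the fall-through sizing logic above (illustration, not kernel code). */
static void size_regions(int sn_region_size)
{
        int max_regions = 64;
        int region_size = sn_region_size;

        switch (region_size) {
        case 128:
                max_regions *= 2;       /* fall through */
        case 64:
                max_regions *= 2;       /* fall through */
        case 32:
                max_regions *= 2;
                region_size = 16;       /* walk in 16-nasid regions */
        }
        printf("sn_region_size=%3d -> max_regions=%3d, nasids covered=%5d\n",
               sn_region_size, max_regions, max_regions * region_size * 2);
}

int main(void)
{
        int sizes[] = { 16, 32, 64, 128 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                size_regions(sizes[i]);
        return 0;       /* prints 2048, 4096, 8192 and 16384 respectively */
}
```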
@@ -1038,8 +1058,8 @@ xpc_discovery(void)

                 dev_dbg(xpc_part, "searching region %d\n", region);

-                for (nasid = (region * sn_region_size * 2);
-                     nasid < ((region + 1) * sn_region_size * 2);
+                for (nasid = (region * region_size * 2);
+                     nasid < ((region + 1) * region_size * 2);
                      nasid += 2) {

                         if ((volatile int) xpc_exiting) {
@@ -1049,14 +1069,14 @@ xpc_discovery(void)
                         dev_dbg(xpc_part, "checking nasid %d\n", nasid);


-                        if (XPC_NASID_IN_ARRAY(nasid, rp->part_nasids)) {
+                        if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
                                 dev_dbg(xpc_part, "PROM indicates Nasid %d is "
                                         "part of the local partition; skipping "
                                         "region\n", nasid);
                                 break;
                         }

-                        if (!(XPC_NASID_IN_ARRAY(nasid, rp->mach_nasids))) {
+                        if (!(XPC_NASID_IN_ARRAY(nasid, xpc_mach_nasids))) {
                                 dev_dbg(xpc_part, "PROM indicates Nasid %d was "
                                         "not on Numa-Link network at reset\n",
                                         nasid);
@@ -1178,12 +1198,12 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
                 return xpcPartitionDown;
         }

-        part_nasid_pa = part->remote_rp_pa +
-                (u64) &((struct xpc_rsvd_page *) 0)->part_nasids;
+        memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
+
+        part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);

         bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
-                        L1_CACHE_ALIGN(XP_NASID_MASK_BYTES),
-                        (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+                        xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);

         return xpc_map_bte_errors(bte_res);
 }
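
One detail worth noting in the last hunk: the caller of xpc_initiate_partid_to_nasids() still supplies a fixed XP_NASID_MASK_BYTES buffer, but only xp_nasid_mask_bytes (the machine's actual mask size) are copied into it now, hence the new memset that clears the tail first. The same pattern in isolation (hypothetical names and sizes):

```c
#include <string.h>

enum { MASK_BYTES_MAX = 1024 };         /* fixed size of the caller's buffer (stand-in) */

/* Fill a fixed-size output buffer from a mask whose actual size may be smaller. */
static void copy_nasid_mask(void *dst, const void *src, size_t actual_bytes)
{
        memset(dst, 0, MASK_BYTES_MAX); /* zero the bytes the copy below won't reach */
        memcpy(dst, src, actual_bytes); /* requires actual_bytes <= MASK_BYTES_MAX */
}

int main(void)
{
        unsigned char remote_mask[128] = { 0xff };      /* pretend remote part_nasids data */
        unsigned char out[MASK_BYTES_MAX];

        copy_nasid_mask(out, remote_mask, sizeof(remote_mask));
        return 0;
}
```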