-rw-r--r--  arch/x86/include/asm/uv/bios.h    |  34
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h  |  87
-rw-r--r--  arch/x86/kernel/bios_uv.c         |  60
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c  | 108
-rw-r--r--  arch/x86/kernel/tlb_uv.c          |   4
-rw-r--r--  drivers/misc/sgi-gru/gruprocfs.c  |   1
-rw-r--r--  drivers/misc/sgi-xp/xp.h          |   7
-rw-r--r--  drivers/misc/sgi-xp/xp_main.c     |   7
-rw-r--r--  drivers/misc/sgi-xp/xp_sn2.c      |  34
-rw-r--r--  drivers/misc/sgi-xp/xp_uv.c       |  70
-rw-r--r--  drivers/misc/sgi-xp/xpc.h         |  12
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c     |  15
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c      | 290
13 files changed, 631 insertions(+), 98 deletions(-)
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index d931d3b7e6f7..da1c4e8e78fc 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -32,13 +32,18 @@
 enum uv_bios_cmd {
 	UV_BIOS_COMMON,
 	UV_BIOS_GET_SN_INFO,
-	UV_BIOS_FREQ_BASE
+	UV_BIOS_FREQ_BASE,
+	UV_BIOS_WATCHLIST_ALLOC,
+	UV_BIOS_WATCHLIST_FREE,
+	UV_BIOS_MEMPROTECT,
+	UV_BIOS_GET_PARTITION_ADDR
 };
 
 /*
  * Status values returned from a BIOS call.
  */
 enum {
+	BIOS_STATUS_MORE_PASSES		= 1,
 	BIOS_STATUS_SUCCESS		= 0,
 	BIOS_STATUS_UNIMPLEMENTED	= -ENOSYS,
 	BIOS_STATUS_EINVAL		= -EINVAL,
@@ -71,6 +76,21 @@ union partition_info_u {
 	};
 };
 
+union uv_watchlist_u {
+	u64	val;
+	struct {
+		u64	blade	: 16,
+			size	: 32,
+			filler	: 16;
+	};
+};
+
+enum uv_memprotect {
+	UV_MEMPROT_RESTRICT_ACCESS,
+	UV_MEMPROT_ALLOW_AMO,
+	UV_MEMPROT_ALLOW_RW
+};
+
 /*
  * bios calls have 6 parameters
  */
@@ -80,14 +100,20 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
 
 extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
 extern s64 uv_bios_freq_base(u64, u64 *);
+extern int uv_bios_mq_watchlist_alloc(int, void *, unsigned int,
+				      unsigned long *);
+extern int uv_bios_mq_watchlist_free(int, int);
+extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
+extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);
 
 extern void uv_bios_init(void);
 
+extern unsigned long sn_rtc_cycles_per_second;
 extern int uv_type;
 extern long sn_partition_id;
-extern long uv_coherency_id;
-extern long uv_region_size;
-#define partition_coherence_id()	(uv_coherency_id)
+extern long sn_coherency_id;
+extern long sn_region_size;
+#define partition_coherence_id()	(sn_coherency_id)
 
 extern struct kobject *sgi_uv_kobj;	/* /sys/firmware/sgi_uv */
 
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 7a5782610b2b..52aa943c634f 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -113,25 +113,37 @@
  */
 #define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_NODES * 2)
 
+struct uv_scir_s {
+	struct timer_list timer;
+	unsigned long	offset;
+	unsigned long	last;
+	unsigned long	idle_on;
+	unsigned long	idle_off;
+	unsigned char	state;
+	unsigned char	enabled;
+};
+
 /*
  * The following defines attributes of the HUB chip. These attributes are
  * frequently referenced and are kept in the per-cpu data areas of each cpu.
  * They are kept together in a struct to minimize cache misses.
  */
 struct uv_hub_info_s {
 	unsigned long	global_mmr_base;
 	unsigned long	gpa_mask;
 	unsigned long	gnode_upper;
 	unsigned long	lowmem_remap_top;
 	unsigned long	lowmem_remap_base;
 	unsigned short	pnode;
 	unsigned short	pnode_mask;
 	unsigned short	coherency_domain_number;
 	unsigned short	numa_blade_id;
 	unsigned char	blade_processor_id;
 	unsigned char	m_val;
 	unsigned char	n_val;
+	struct uv_scir_s scir;
 };
+
 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define uv_hub_info		(&__get_cpu_var(__uv_hub_info))
 #define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
@@ -163,6 +175,30 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 
 #define UV_APIC_PNODE_SHIFT	6
 
+/* Local Bus from cpu's perspective */
+#define LOCAL_BUS_BASE		0x1c00000
+#define LOCAL_BUS_SIZE		(4 * 1024 * 1024)
+
+/*
+ * System Controller Interface Reg
+ *
+ * Note there are NO leds on a UV system. This register is only
+ * used by the system controller to monitor system-wide operation.
+ * There are 64 regs per node. With Nahelem cpus (2 cores per node,
+ * 8 cpus per core, 2 threads per cpu) there are 32 cpu threads on
+ * a node.
+ *
+ * The window is located at top of ACPI MMR space
+ */
+#define SCIR_WINDOW_COUNT	64
+#define SCIR_LOCAL_MMR_BASE	(LOCAL_BUS_BASE + \
+				 LOCAL_BUS_SIZE - \
+				 SCIR_WINDOW_COUNT)
+
+#define SCIR_CPU_HEARTBEAT	0x01	/* timer interrupt */
+#define SCIR_CPU_ACTIVITY	0x02	/* not idle */
+#define SCIR_CPU_HB_INTERVAL	(HZ)	/* once per second */
+
 /*
  * Macros for converting between kernel virtual addresses, socket local physical
  * addresses, and UV global physical addresses.
@@ -277,6 +313,16 @@ static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
 	*uv_local_mmr_address(offset) = val;
 }
 
+static inline unsigned char uv_read_local_mmr8(unsigned long offset)
+{
+	return *((unsigned char *)uv_local_mmr_address(offset));
+}
+
+static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
+{
+	*((unsigned char *)uv_local_mmr_address(offset)) = val;
+}
+
 /*
  * Structures and definitions for converting between cpu, node, pnode, and blade
  * numbers.
@@ -351,5 +397,20 @@ static inline int uv_num_possible_blades(void)
 	return uv_possible_blades;
 }
 
-#endif /* _ASM_X86_UV_UV_HUB_H */
+/* Update SCIR state */
+static inline void uv_set_scir_bits(unsigned char value)
+{
+	if (uv_hub_info->scir.state != value) {
+		uv_hub_info->scir.state = value;
+		uv_write_local_mmr8(uv_hub_info->scir.offset, value);
+	}
+}
+static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
+{
+	if (uv_cpu_hub_info(cpu)->scir.state != value) {
+		uv_cpu_hub_info(cpu)->scir.state = value;
+		uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
+	}
+}
 
+#endif /* _ASM_X86_UV_UV_HUB_H */
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f0dfe6f17e7e..d22d0f1bbea0 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -69,10 +69,10 @@ s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
 
 long sn_partition_id;
 EXPORT_SYMBOL_GPL(sn_partition_id);
-long uv_coherency_id;
-EXPORT_SYMBOL_GPL(uv_coherency_id);
-long uv_region_size;
-EXPORT_SYMBOL_GPL(uv_region_size);
+long sn_coherency_id;
+EXPORT_SYMBOL_GPL(sn_coherency_id);
+long sn_region_size;
+EXPORT_SYMBOL_GPL(sn_region_size);
 int uv_type;
 
 
@@ -100,6 +100,58 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
 	return ret;
 }
 
+int
+uv_bios_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
+			   unsigned long *intr_mmr_offset)
+{
+	union uv_watchlist_u size_blade;
+	unsigned long addr;
+	u64 watchlist;
+	s64 ret;
+
+	addr = (unsigned long)mq;
+	size_blade.size = mq_size;
+	size_blade.blade = blade;
+
+	/*
+	 * bios returns watchlist number or negative error number.
+	 */
+	ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
+			size_blade.val, (u64)intr_mmr_offset,
+			(u64)&watchlist, 0);
+	if (ret < BIOS_STATUS_SUCCESS)
+		return ret;
+
+	return watchlist;
+}
+EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc);
+
+int
+uv_bios_mq_watchlist_free(int blade, int watchlist_num)
+{
+	return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE,
+				blade, watchlist_num, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free);
+
+s64
+uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms)
+{
+	return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len,
+					perms, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_change_memprotect);
+
+s64
+uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
+{
+	s64 ret;
+
+	ret = uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie,
+				    (u64)addr, buf, (u64)len, 0);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa);
 
 s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
 {
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 2c7dbdb98278..f02bbe5d0178 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/threads.h>
+#include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
@@ -17,6 +18,9 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <asm/current.h>
 #include <asm/smp.h>
 #include <asm/ipi.h>
 #include <asm/genapic.h>
@@ -356,6 +360,103 @@ static __init void uv_rtc_init(void)
 }
 
 /*
+ * percpu heartbeat timer
+ */
+static void uv_heartbeat(unsigned long ignored)
+{
+	struct timer_list *timer = &uv_hub_info->scir.timer;
+	unsigned char bits = uv_hub_info->scir.state;
+
+	/* flip heartbeat bit */
+	bits ^= SCIR_CPU_HEARTBEAT;
+
+	/* is this cpu idle? */
+	if (idle_cpu(raw_smp_processor_id()))
+		bits &= ~SCIR_CPU_ACTIVITY;
+	else
+		bits |= SCIR_CPU_ACTIVITY;
+
+	/* update system controller interface reg */
+	uv_set_scir_bits(bits);
+
+	/* enable next timer period */
+	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
+}
+
+static void __cpuinit uv_heartbeat_enable(int cpu)
+{
+	if (!uv_cpu_hub_info(cpu)->scir.enabled) {
+		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
+
+		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
+		setup_timer(timer, uv_heartbeat, cpu);
+		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
+		add_timer_on(timer, cpu);
+		uv_cpu_hub_info(cpu)->scir.enabled = 1;
+	}
+
+	/* check boot cpu */
+	if (!uv_cpu_hub_info(0)->scir.enabled)
+		uv_heartbeat_enable(0);
+}
+
+static void __cpuinit uv_heartbeat_disable(int cpu)
+{
+	if (uv_cpu_hub_info(cpu)->scir.enabled) {
+		uv_cpu_hub_info(cpu)->scir.enabled = 0;
+		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
+	}
+	uv_set_cpu_scir_bits(cpu, 0xff);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * cpu hotplug notifier
+ */
+static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
+				       unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+		uv_heartbeat_enable(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+		uv_heartbeat_disable(cpu);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static __init void uv_scir_register_cpu_notifier(void)
+{
+	hotcpu_notifier(uv_scir_cpu_notify, 0);
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+
+static __init void uv_scir_register_cpu_notifier(void)
+{
+}
+
+static __init int uv_init_heartbeat(void)
+{
+	int cpu;
+
+	if (is_uv_system())
+		for_each_online_cpu(cpu)
+			uv_heartbeat_enable(cpu);
+	return 0;
+}
+
+late_initcall(uv_init_heartbeat);
+
+#endif /* !CONFIG_HOTPLUG_CPU */
+
+/*
  * Called on each cpu to initialize the per_cpu UV data area.
  * ZZZ hotplug not supported yet
  */
@@ -428,7 +529,7 @@ void __init uv_system_init(void)
 
 	uv_bios_init();
 	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
-			    &uv_coherency_id, &uv_region_size);
+			    &sn_coherency_id, &sn_region_size);
 	uv_rtc_init();
 
 	for_each_present_cpu(cpu) {
@@ -450,7 +551,8 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
-		uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id;
+		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
+		uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
 		max_pnode = max(pnode, max_pnode);
@@ -467,4 +569,6 @@ void __init uv_system_init(void)
 	map_mmioh_high(max_pnode);
 
 	uv_cpu_init();
+	uv_scir_register_cpu_notifier();
+	proc_mkdir("sgi_uv", NULL);
 }
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 04431f34fd16..6a00e5faaa74 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -566,14 +566,10 @@ static int __init uv_ptc_init(void)
 	if (!is_uv_system())
 		return 0;
 
-	if (!proc_mkdir("sgi_uv", NULL))
-		return -EINVAL;
-
 	proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
 	if (!proc_uv_ptc) {
 		printk(KERN_ERR "unable to create %s proc entry\n",
 		       UV_PTC_BASENAME);
-		remove_proc_entry("sgi_uv", NULL);
 		return -EINVAL;
 	}
 	proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 533923f83f1a..73b0ca061bb5 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -317,7 +317,6 @@ int gru_proc_init(void)
 {
 	struct proc_entry *p;
 
-	proc_mkdir("sgi_uv", NULL);
 	proc_gru = proc_mkdir("sgi_uv/gru", NULL);
 
 	for (p = proc_files; p->name; p++)
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index ed1722e50049..7b4cbd5e03e9 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -194,9 +194,10 @@ enum xp_retval {
 	xpGruSendMqError,	/* 59: gru send message queue related error */
 
 	xpBadChannelNumber,	/* 60: invalid channel number */
-	xpBadMsgType,		/* 60: invalid message type */
+	xpBadMsgType,		/* 61: invalid message type */
+	xpBiosError,		/* 62: BIOS error */
 
-	xpUnknownReason		/* 61: unknown reason - must be last in enum */
+	xpUnknownReason		/* 63: unknown reason - must be last in enum */
 };
 
 /*
@@ -345,6 +346,8 @@ extern unsigned long (*xp_pa) (void *);
 extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
 		       size_t);
 extern int (*xp_cpu_to_nasid) (int);
+extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long);
+extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long);
 
 extern u64 xp_nofault_PIOR_target;
 extern int xp_nofault_PIOR(void *);
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 66a1d19e08ad..9a2e77172d94 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -51,6 +51,13 @@ EXPORT_SYMBOL_GPL(xp_remote_memcpy);
 int (*xp_cpu_to_nasid) (int cpuid);
 EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
 
+enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr,
+					unsigned long size);
+EXPORT_SYMBOL_GPL(xp_expand_memprotect);
+enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr,
+					  unsigned long size);
+EXPORT_SYMBOL_GPL(xp_restrict_memprotect);
+
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index 1440134caf31..fb3ec9d735a9 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -120,6 +120,38 @@ xp_cpu_to_nasid_sn2(int cpuid)
 	return cpuid_to_nasid(cpuid);
 }
 
+static enum xp_retval
+xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size)
+{
+	u64 nasid_array = 0;
+	int ret;
+
+	ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
+				   &nasid_array);
+	if (ret != 0) {
+		dev_err(xp, "sn_change_memprotect(,, "
+			"SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
+		return xpSalError;
+	}
+	return xpSuccess;
+}
+
+static enum xp_retval
+xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size)
+{
+	u64 nasid_array = 0;
+	int ret;
+
+	ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
+				   &nasid_array);
+	if (ret != 0) {
+		dev_err(xp, "sn_change_memprotect(,, "
+			"SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
+		return xpSalError;
+	}
+	return xpSuccess;
+}
+
 enum xp_retval
 xp_init_sn2(void)
 {
@@ -132,6 +164,8 @@ xp_init_sn2(void)
 	xp_pa = xp_pa_sn2;
 	xp_remote_memcpy = xp_remote_memcpy_sn2;
 	xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
+	xp_expand_memprotect = xp_expand_memprotect_sn2;
+	xp_restrict_memprotect = xp_restrict_memprotect_sn2;
 
 	return xp_register_nofault_code_sn2();
 }
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index d9f7ce2510bc..d238576b26fa 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -15,6 +15,11 @@
 
 #include <linux/device.h>
 #include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/sn_sal.h>
+#endif
 #include "../sgi-gru/grukservices.h"
 #include "xp.h"
 
@@ -49,18 +54,79 @@ xp_cpu_to_nasid_uv(int cpuid)
 	return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
 }
 
+static enum xp_retval
+xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
+{
+	int ret;
+
+#if defined CONFIG_X86_64
+	ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW);
+	if (ret != BIOS_STATUS_SUCCESS) {
+		dev_err(xp, "uv_bios_change_memprotect(,, "
+			"UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret);
+		return xpBiosError;
+	}
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	u64 nasid_array;
+
+	ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
+				   &nasid_array);
+	if (ret != 0) {
+		dev_err(xp, "sn_change_memprotect(,, "
+			"SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
+		return xpSalError;
+	}
+#else
+	#error not a supported configuration
+#endif
+	return xpSuccess;
+}
+
+static enum xp_retval
+xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
+{
+	int ret;
+
+#if defined CONFIG_X86_64
+	ret = uv_bios_change_memprotect(phys_addr, size,
+					UV_MEMPROT_RESTRICT_ACCESS);
+	if (ret != BIOS_STATUS_SUCCESS) {
+		dev_err(xp, "uv_bios_change_memprotect(,, "
+			"UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret);
+		return xpBiosError;
+	}
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	u64 nasid_array;
+
+	ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
+				   &nasid_array);
+	if (ret != 0) {
+		dev_err(xp, "sn_change_memprotect(,, "
+			"SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
+		return xpSalError;
+	}
+#else
+	#error not a supported configuration
+#endif
+	return xpSuccess;
+}
+
 enum xp_retval
 xp_init_uv(void)
 {
 	BUG_ON(!is_uv());
 
 	xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
-	xp_partition_id = 0;	/* !!! not correct value */
-	xp_region_size = 0;	/* !!! not correct value */
+	xp_partition_id = sn_partition_id;
+	xp_region_size = sn_region_size;
 
 	xp_pa = xp_pa_uv;
 	xp_remote_memcpy = xp_remote_memcpy_uv;
 	xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
+	xp_expand_memprotect = xp_expand_memprotect_uv;
+	xp_restrict_memprotect = xp_restrict_memprotect_uv;
 
 	return xpSuccess;
 }
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 619208d61862..a5bd658c2e83 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -181,6 +181,18 @@ struct xpc_vars_part_sn2 {
 				  xpc_nasid_mask_nlongs))
 
 /*
+ * Info pertinent to a GRU message queue using a watch list for irq generation.
+ */
+struct xpc_gru_mq_uv {
+	void *address;		/* address of GRU message queue */
+	unsigned int order;	/* size of GRU message queue as a power of 2 */
+	int irq;		/* irq raised when message is received in mq */
+	int mmr_blade;		/* blade where watchlist was allocated from */
+	unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
+	int watchlist_num;	/* number of watchlist allocatd by BIOS */
+};
+
+/*
  * The activate_mq is used to send/receive GRU messages that affect XPC's
  * heartbeat, partition active state, and channel state. This is UV only.
  */
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index b4882ccf6344..73b7fb8de47a 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -553,22 +553,17 @@ static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
 static enum xp_retval
 xpc_allow_amo_ops_sn2(struct amo *amos_page)
 {
-	u64 nasid_array = 0;
-	int ret;
+	enum xp_retval ret = xpSuccess;
 
 	/*
 	 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
 	 * collides with memory operations. On those systems we call
 	 * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
 	 */
-	if (!enable_shub_wars_1_1()) {
-		ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
-					   SN_MEMPROT_ACCESS_CLASS_1,
-					   &nasid_array);
-		if (ret != 0)
-			return xpSalError;
-	}
-	return xpSuccess;
+	if (!enable_shub_wars_1_1())
+		ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE);
+
+	return ret;
 }
 
 /*
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 1ac694c01623..684b2dd17583 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,7 +18,15 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/err.h>
 #include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#include <asm/uv/uv_irq.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#endif
 #include "../sgi-gru/gru.h"
 #include "../sgi-gru/grukservices.h"
 #include "xpc.h"
@@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv;
 static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
 
 #define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
-#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
+#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
+					 XPC_ACTIVATE_MSG_SIZE_UV)
+#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"
 
-#define XPC_ACTIVATE_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
-				XPC_ACTIVATE_MSG_SIZE_UV)
-#define XPC_NOTIFY_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
-				XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
+#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
+					 XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"
 
-static void *xpc_activate_mq_uv;
-static void *xpc_notify_mq_uv;
+static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
+static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
 
 static int
 xpc_setup_partitions_sn_uv(void)
@@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void)
 	return 0;
 }
 
-static void *
-xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
+static int
+xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
+{
+#if defined CONFIG_X86_64
+	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
+	if (mq->irq < 0) {
+		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
+			mq->irq);
+	}
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	int mmr_pnode;
+	unsigned long mmr_value;
+
+	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
+		mq->irq = SGI_XPC_ACTIVATE;
+	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
+		mq->irq = SGI_XPC_NOTIFY;
+	else
+		return -EINVAL;
+
+	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+	mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
+
+	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+	#error not a supported configuration
+#endif
+
+	return 0;
+}
+
+static void
+xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
+{
+#if defined CONFIG_X86_64
+	uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	int mmr_pnode;
+	unsigned long mmr_value;
+
+	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+	mmr_value = 1UL << 16;
+
+	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+	#error not a supported configuration
+#endif
+}
+
+static int
+xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
+{
+	int ret;
+
+#if defined CONFIG_X86_64
+	ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
+					 &mq->mmr_offset);
+	if (ret < 0) {
+		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+			"ret=%d\n", ret);
+		return ret;
+	}
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	ret = sn_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
+				    &mq->mmr_offset);
+	if (ret < 0) {
+		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
+			ret);
+		return -EBUSY;
+	}
+#else
+	#error not a supported configuration
+#endif
+
+	mq->watchlist_num = ret;
+	return 0;
+}
+
+static void
+xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
+{
+	int ret;
+
+#if defined CONFIG_X86_64
+	ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+	BUG_ON(ret != BIOS_STATUS_SUCCESS);
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+	BUG_ON(ret != SALRET_OK);
+#else
+	#error not a supported configuration
+#endif
+}
+
+static struct xpc_gru_mq_uv *
+xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
 		     irq_handler_t irq_handler)
 {
+	enum xp_retval xp_ret;
 	int ret;
 	int nid;
-	int mq_order;
+	int pg_order;
 	struct page *page;
-	void *mq;
+	struct xpc_gru_mq_uv *mq;
+
+	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
+	if (mq == NULL) {
+		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
+			"a xpc_gru_mq_uv structure\n");
+		ret = -ENOMEM;
+		goto out_1;
+	}
+
+	pg_order = get_order(mq_size);
+	mq->order = pg_order + PAGE_SHIFT;
+	mq_size = 1UL << mq->order;
+
+	mq->mmr_blade = uv_cpu_to_blade_id(cpu);
 
-	nid = cpu_to_node(cpuid);
-	mq_order = get_order(mq_size);
+	nid = cpu_to_node(cpu);
 	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
-				mq_order);
+				pg_order);
 	if (page == NULL) {
 		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
 			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
-		return NULL;
+		ret = -ENOMEM;
+		goto out_2;
 	}
+	mq->address = page_address(page);
 
-	mq = page_address(page);
-	ret = gru_create_message_queue(mq, mq_size);
+	ret = gru_create_message_queue(mq->address, mq_size);
 	if (ret != 0) {
 		dev_err(xpc_part, "gru_create_message_queue() returned "
 			"error=%d\n", ret);
-		free_pages((unsigned long)mq, mq_order);
-		return NULL;
+		ret = -EINVAL;
+		goto out_3;
 	}
 
-	/* !!! Need to do some other things to set up IRQ */
+	/* enable generation of irq when GRU mq operation occurs to this mq */
+	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
+	if (ret != 0)
+		goto out_3;
 
-	ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
+	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
+	if (ret != 0)
+		goto out_4;
+
+	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
 	if (ret != 0) {
 		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
-			irq, ret);
-		free_pages((unsigned long)mq, mq_order);
-		return NULL;
+			mq->irq, ret);
+		goto out_5;
 	}
 
-	/* !!! enable generation of irq when GRU mq op occurs to this mq */
-
-	/* ??? allow other partitions to access GRU mq? */
+	/* allow other partitions to access this GRU mq */
+	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
+	if (xp_ret != xpSuccess) {
+		ret = -EACCES;
+		goto out_6;
+	}
 
 	return mq;
+
+	/* something went wrong */
+out_6:
+	free_irq(mq->irq, NULL);
+out_5:
+	xpc_release_gru_mq_irq_uv(mq);
+out_4:
+	xpc_gru_mq_watchlist_free_uv(mq);
+out_3:
+	free_pages((unsigned long)mq->address, pg_order);
+out_2:
+	kfree(mq);
+out_1:
+	return ERR_PTR(ret);
 }
 
 static void
-xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
+xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
 {
-	/* ??? disallow other partitions to access GRU mq? */
+	unsigned int mq_size;
+	int pg_order;
+	int ret;
+
+	/* disallow other partitions to access GRU mq */
+	mq_size = 1UL << mq->order;
+	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
+	BUG_ON(ret != xpSuccess);
 
-	/* !!! disable generation of irq when GRU mq op occurs to this mq */
+	/* unregister irq handler and release mq irq/vector mapping */
+	free_irq(mq->irq, NULL);
+	xpc_release_gru_mq_irq_uv(mq);
 
-	free_irq(irq, NULL);
+	/* disable generation of irq when GRU mq op occurs to this mq */
+	xpc_gru_mq_watchlist_free_uv(mq);
 
-	free_pages((unsigned long)mq, get_order(mq_size));
+	pg_order = mq->order - PAGE_SHIFT;
+	free_pages((unsigned long)mq->address, pg_order);
+
+	kfree(mq);
 }
 
 static enum xp_retval
@@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
 	struct xpc_partition *part;
 	int wakeup_hb_checker = 0;
 
-	while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
+	while (1) {
+		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address);
+		if (msg_hdr == NULL)
+			break;
 
 		partid = msg_hdr->partid;
 		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
@@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
 			}
 		}
 
-		gru_free_message(xpc_activate_mq_uv, msg_hdr);
+		gru_free_message(xpc_activate_mq_uv->address, msg_hdr);
 	}
 
 	if (wakeup_hb_checker)
@@ -482,7 +642,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
 	struct xpc_partition_uv *part_uv = &part->sn.uv;
 
 	/*
-	 * !!! Make our side think that the remote parition sent an activate
+	 * !!! Make our side think that the remote partition sent an activate
 	 * !!! message our way by doing what the activate IRQ handler would
 	 * !!! do had one really been sent.
 	 */
@@ -500,14 +660,39 @@ static enum xp_retval
 xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
 				  size_t *len)
 {
-	/* !!! call the UV version of sn_partition_reserved_page_pa() */
-	return xpUnsupported;
+	s64 status;
+	enum xp_retval ret;
+
+#if defined CONFIG_X86_64
+	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
+					  (u64 *)len);
+	if (status == BIOS_STATUS_SUCCESS)
+		ret = xpSuccess;
+	else if (status == BIOS_STATUS_MORE_PASSES)
+		ret = xpNeedMoreInfo;
+	else
+		ret = xpBiosError;
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
+	if (status == SALRET_OK)
+		ret = xpSuccess;
+	else if (status == SALRET_MORE_PASSES)
+		ret = xpNeedMoreInfo;
+	else
+		ret = xpSalError;
+
+#else
+	#error not a supported configuration
+#endif
+
+	return ret;
 }
 
 static int
 xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
 {
-	rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
+	rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address);
 	return 0;
 }
 
@@ -1411,22 +1596,18 @@ xpc_init_uv(void)
 		return -E2BIG;
 	}
 
-	/* ??? The cpuid argument's value is 0, is that what we want? */
-	/* !!! The irq argument's value isn't correct. */
-	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
+	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
+						  XPC_ACTIVATE_IRQ_NAME,
 						  xpc_handle_activate_IRQ_uv);
-	if (xpc_activate_mq_uv == NULL)
-		return -ENOMEM;
+	if (IS_ERR(xpc_activate_mq_uv))
+		return PTR_ERR(xpc_activate_mq_uv);
 
-	/* ??? The cpuid argument's value is 0, is that what we want? */
-	/* !!! The irq argument's value isn't correct. */
-	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
+	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
+						XPC_NOTIFY_IRQ_NAME,
 						xpc_handle_notify_IRQ_uv);
-	if (xpc_notify_mq_uv == NULL) {
-		/* !!! The irq argument's value isn't correct. */
-		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv,
-				      XPC_ACTIVATE_MQ_SIZE_UV, 0);
-		return -ENOMEM;
+	if (IS_ERR(xpc_notify_mq_uv)) {
+		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+		return PTR_ERR(xpc_notify_mq_uv);
 	}
 
 	return 0;
@@ -1435,9 +1616,6 @@ xpc_init_uv(void)
 void
 xpc_exit_uv(void)
 {
-	/* !!! The irq argument's value isn't correct. */
-	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0);
-
-	/* !!! The irq argument's value isn't correct. */
-	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
+	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
+	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
 }