author     Linus Torvalds <torvalds@linux-foundation.org>  2008-04-22 19:47:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-22 19:47:54 -0400
commit     aca239b793a4006db0d92ad0e43846ab6b54d816 (patch)
tree       48cc2e4ce1e954b8f3d3ee8fb8a53bb46f5c79aa
parent     16abef0e9e79643827fd5a2a14a07bced851ae72 (diff)
parent     2c2b94f93f4732c3b9703ce62627e6187e7d6128 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] run drivers/misc/sgi-xp through scripts/checkpatch.pl
  [IA64] run rest drivers/misc/sgi-xp through scripts/Lindent
  [IA64] run some drivers/misc/sgi-xp through scripts/Lindent
  [IA64] move XP and XPC to drivers/misc/sgi-xp
  [IA64] minor irq handler cleanups
  [IA64] simplify notify hooks in mca.c
  [IA64] do notify DIE_MCA_MONARCH_PROCESS for each monarchs
  [IA64] disable interrupts on exit of ia64_trace_syscall
-rw-r--r--  arch/ia64/Kconfig                                                                    11
-rw-r--r--  arch/ia64/kernel/crash.c                                                              4
-rw-r--r--  arch/ia64/kernel/entry.S                                                              1
-rw-r--r--  arch/ia64/kernel/mca.c                                                               77
-rw-r--r--  arch/ia64/kernel/perfmon.c                                                            4
-rw-r--r--  arch/ia64/sn/kernel/Makefile                                                          7
-rw-r--r--  arch/ia64/sn/kernel/huberror.c                                                        4
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c                                                     6
-rw-r--r--  drivers/misc/Kconfig                                                                 12
-rw-r--r--  drivers/misc/Makefile                                                                 1
-rw-r--r--  drivers/misc/sgi-xp/Makefile                                                         11
-rw-r--r--  drivers/misc/sgi-xp/xp.h (renamed from include/asm-ia64/sn/xp.h)                     94
-rw-r--r--  drivers/misc/sgi-xp/xp_main.c (renamed from arch/ia64/sn/kernel/xp_main.c)          141
-rw-r--r--  drivers/misc/sgi-xp/xp_nofault.S (renamed from arch/ia64/sn/kernel/xp_nofault.S)      3
-rw-r--r--  drivers/misc/sgi-xp/xpc.h (renamed from include/asm-ia64/sn/xpc.h)                  500
-rw-r--r--  drivers/misc/sgi-xp/xpc_channel.c (renamed from arch/ia64/sn/kernel/xpc_channel.c)  528
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c (renamed from arch/ia64/sn/kernel/xpc_main.c)        432
-rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c (renamed from arch/ia64/sn/kernel/xpc_partition.c)  409
-rw-r--r--  drivers/misc/sgi-xp/xpnet.c (renamed from arch/ia64/sn/kernel/xpnet.c)              139
-rw-r--r--  include/asm-ia64/mca.h                                                                1
20 files changed, 965 insertions, 1420 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ed21737a00c5..cd13e138bd03 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -266,17 +266,6 @@ config IOSAPIC
 	depends on !IA64_HP_SIM
 	default y
 
-config IA64_SGI_SN_XP
-	tristate "Support communication between SGI SSIs"
-	depends on IA64_GENERIC || IA64_SGI_SN2
-	select IA64_UNCACHED_ALLOCATOR
-	help
-	  An SGI machine can be divided into multiple Single System
-	  Images which act independently of each other and have
-	  hardware based memory protection from the others. Enabling
-	  this feature will allow for direct communication between SSIs
-	  based on a network adapter and DMA messaging.
-
 config FORCE_MAX_ZONEORDER
 	int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE
 	range 11 17 if !HUGETLB_PAGE
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 90ef338cf46f..f065093f8e9b 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -194,8 +194,8 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_MONARCH_LEAVE:
-		/* die_register->signr indicate if MCA is recoverable */
-		if (kdump_on_fatal_mca && !args->signr) {
+		/* *(nd->data) indicate if MCA is recoverable */
+		if (kdump_on_fatal_mca && !(*(nd->data))) {
 			atomic_set(&kdump_in_progress, 1);
 			*(nd->monarch_cpu) = -1;
 			machine_kdump_on_init();
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index b0be4a280174..e49ad8c5dc69 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -570,6 +570,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
 .ret3:
 (pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
+(pUStk)	rsm psr.i				// disable interrupts
 	br.cond.sptk .work_pending_syscall_end
 
 strace_error:
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index e51bced3b0fa..705176b434b3 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -109,6 +109,20 @@
 # define IA64_MCA_DEBUG(fmt...)
 #endif
 
+#define NOTIFY_INIT(event, regs, arg, spin)				\
+do {									\
+	if ((notify_die((event), "INIT", (regs), (arg), 0, 0)		\
+	    == NOTIFY_STOP) && ((spin) == 1))				\
+		ia64_mca_spin(__func__);				\
+} while (0)
+
+#define NOTIFY_MCA(event, regs, arg, spin)				\
+do {									\
+	if ((notify_die((event), "MCA", (regs), (arg), 0, 0)		\
+	    == NOTIFY_STOP) && ((spin) == 1))				\
+		ia64_mca_spin(__func__);				\
+} while (0)
+
 /* Used by mca_asm.S */
 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
@@ -766,9 +780,8 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
 	/* Mask all interrupts */
 	local_irq_save(flags);
-	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
 	/* Register with the SAL monarch that the slave has
@@ -776,17 +789,13 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 	 */
 	ia64_sal_mc_rendez();
 
-	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
 
 	/* Wait for the monarch cpu to exit. */
 	while (monarch_cpu != -1)
 		cpu_relax();	/* spin until monarch leaves */
 
-	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	/* Enable all interrupts */
@@ -1256,7 +1265,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	int recover, cpu = smp_processor_id();
 	struct task_struct *previous_current;
 	struct ia64_mca_notify_die nd =
-		{ .sos = sos, .monarch_cpu = &monarch_cpu };
+		{ .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
 	static atomic_t mca_count;
 	static cpumask_t mca_cpu;
 
@@ -1272,9 +1281,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 
-	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
-	    == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 	if (sos->monarch) {
@@ -1288,13 +1295,12 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		 * does not work.
 		 */
 		ia64_mca_wakeup_all();
-		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
-		    == NOTIFY_STOP)
-			ia64_mca_spin(__func__);
 	} else {
 		while (cpu_isset(cpu, mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
 	}
+
+	NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
 
 	/* Get the MCA error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -1320,9 +1326,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		mca_insert_tr(0x2); /*Reload dynamic itrs*/
 	}
 
-	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
-	    == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
 
 	if (atomic_dec_return(&mca_count) > 0) {
 		int i;
@@ -1643,7 +1647,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
-	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+	NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
 
 	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
 		sos->proc_state_param, cpu, sos->monarch);
@@ -1680,17 +1684,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
 		while (monarch_cpu == -1)
 			cpu_relax();	/* spin until monarch enters */
-		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
-		    == NOTIFY_STOP)
-			ia64_mca_spin(__func__);
-		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-		    == NOTIFY_STOP)
-			ia64_mca_spin(__func__);
+
+		NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
+		NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
+
 		while (monarch_cpu != -1)
 			cpu_relax();	/* spin until monarch leaves */
-		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-		    == NOTIFY_STOP)
-			ia64_mca_spin(__func__);
+
+		NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
+
 		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
 		set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1699,9 +1701,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	}
 
 	monarch_cpu = cpu;
-	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
-	    == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
 
 	/*
 	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1716,12 +1716,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * to default_monarch_init_process() above and just print all the
 	 * tasks.
 	 */
-	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-	    == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
-	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-	    == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
+	NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
+
 	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
 	set_curr_task(cpu, previous_current);
@@ -1953,7 +1950,7 @@ ia64_mca_init(void)
 			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
 				"%ld to %ld milliseconds\n", timeout, isrv.v0);
 			timeout = isrv.v0;
-			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
+			NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
 			continue;
 		}
 		printk(KERN_ERR "Failed to register rendezvous interrupt "
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index d1d24f4598da..c8e403752a0c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5511,7 +5511,7 @@ stop_monitoring:
 }
 
 static int
-pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
 {
 	struct task_struct *task;
 	pfm_context_t *ctx;
@@ -5591,7 +5591,7 @@ pfm_interrupt_handler(int irq, void *arg)
 
 	start_cycles = ia64_get_itc();
 
-	ret = pfm_do_interrupt_handler(irq, arg, regs);
+	ret = pfm_do_interrupt_handler(arg, regs);
 
 	total_cycles = ia64_get_itc();
 
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 688a3c27e0f6..0591038735af 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -4,7 +4,7 @@
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.
 #
-# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc.  All Rights Reserved.
+# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc.  All Rights Reserved.
 #
 
 EXTRA_CFLAGS += -Iarch/ia64/sn/include
@@ -15,9 +15,4 @@ obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
 	 sn2/
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
 obj-$(CONFIG_SGI_TIOCX) += tiocx.o
-obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o
-xp-y := xp_main.o xp_nofault.o
-obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o
-xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
-obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o
 obj-$(CONFIG_PCI_MSI) += msi_sn.o
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 0101c7924a4d..08b0d9bb62ec 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -187,8 +187,8 @@ void hub_error_init(struct hubdev_info *hubdev_info)
 {
 
 	if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
-			"SN_hub_error", (void *)hubdev_info)) {
-		printk("hub_error_init: Failed to request_irq for 0x%p\n",
+			"SN_hub_error", hubdev_info)) {
+		printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
 			hubdev_info);
 		return;
 	}
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 9b3c11373022..94e584527f48 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -655,7 +655,8 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
  *
  * Simply call tioce_do_dma_map() to create a map with the barrier bit set
  * in the address.
- */ static u64
+ */
+static u64
 tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
 	return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
@@ -668,7 +669,8 @@ tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma
  *
  * Handle a CE error interrupt.  Simply a wrapper around a SAL call which
  * defers processing to the SGI prom.
- */ static irqreturn_t
+ */
+static irqreturn_t
 tioce_error_intr_handler(int irq, void *arg)
 {
 	struct tioce_common *soft = arg;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index bb94ce78a6d0..297a48f85446 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -360,4 +360,16 @@ config ENCLOSURE_SERVICES
 	  driver (SCSI/ATA) which supports enclosures
 	  or a SCSI enclosure device (SES) to use these services.
 
+config SGI_XP
+	tristate "Support communication between SGI SSIs"
+	depends on IA64_GENERIC || IA64_SGI_SN2
+	select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	---help---
+	  An SGI machine can be divided into multiple Single System
+	  Images which act independently of each other and have
+	  hardware based memory protection from the others.  Enabling
+	  this feature will allow for direct communication between SSIs
+	  based on a network adapter and DMA messaging.
+
 endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 4581b2533111..5914da434854 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
 obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
+obj-$(CONFIG_SGI_XP) += sgi-xp/
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
new file mode 100644
index 000000000000..b6e40a7958ce
--- /dev/null
+++ b/drivers/misc/sgi-xp/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for SGI's XP devices.
+#
+
+obj-$(CONFIG_SGI_XP) += xp.o
+xp-y := xp_main.o xp_nofault.o
+
+obj-$(CONFIG_SGI_XP) += xpc.o
+xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
+
+obj-$(CONFIG_SGI_XP) += xpnet.o
diff --git a/include/asm-ia64/sn/xp.h b/drivers/misc/sgi-xp/xp.h
index f7711b308e48..5515234be86a 100644
--- a/include/asm-ia64/sn/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -3,18 +3,15 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-
 /*
  * External Cross Partition (XP) structures and defines.
  */
 
-
-#ifndef _ASM_IA64_SN_XP_H
-#define _ASM_IA64_SN_XP_H
-
+#ifndef _DRIVERS_MISC_SGIXP_XP_H
+#define _DRIVERS_MISC_SGIXP_XP_H
 
 #include <linux/cache.h>
 #include <linux/hardirq.h>
@@ -22,14 +19,12 @@
 #include <asm/sn/types.h>
 #include <asm/sn/bte.h>
 
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition)	BUG_ON(condition)
 #else
 #define DBUG_ON(condition)
 #endif
 
-
 /*
  * Define the maximum number of logically defined partitions the system
  * can support. It is constrained by the maximum number of hardware
@@ -43,7 +38,6 @@
  */
 #define XP_MAX_PARTITIONS	64
 
-
 /*
  * Define the number of u64s required to represent all the C-brick nasids
  * as a bitmap.  The cross-partition kernel modules deal only with
@@ -54,7 +48,6 @@
 #define XP_NASID_MASK_BYTES	((XP_MAX_PHYSNODE_ID + 7) / 8)
 #define XP_NASID_MASK_WORDS	((XP_MAX_PHYSNODE_ID + 63) / 64)
 
-
 /*
  * Wrapper for bte_copy() that should it return a failure status will retry
  * the bte_copy() once in the hope that the failure was due to a temporary
@@ -74,7 +67,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 	bte_result_t ret;
 	u64 pdst = ia64_tpa(vdst);
 
-
 	/*
 	 * Ensure that the physically mapped memory is contiguous.
 	 *
@@ -87,16 +79,15 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 
 	ret = bte_copy(src, pdst, len, mode, notification);
 	if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
-		if (!in_interrupt()) {
+		if (!in_interrupt())
 			cond_resched();
-		}
+
 		ret = bte_copy(src, pdst, len, mode, notification);
 	}
 
 	return ret;
 }
 
-
 /*
  * XPC establishes channel connections between the local partition and any
  * other partition that is currently up. Over these channels, kernel-level
@@ -122,7 +113,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 #error	XPC_NCHANNELS exceeds MAXIMUM allowed.
 #endif
 
-
 /*
  * The format of an XPC message is as follows:
  *
@@ -160,12 +150,10 @@ struct xpc_msg {
 	u64 payload;		/* user defined portion of message */
 };
 
-
 #define XPC_MSG_PAYLOAD_OFFSET	(u64) (&((struct xpc_msg *)0)->payload)
 #define XPC_MSG_SIZE(_payload_size) \
 		L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
 
-
 /*
  * Define the return values and values passed to user's callout functions.
  * (It is important to add new value codes at the end just preceding
@@ -267,10 +255,9 @@ enum xpc_retval {
 	/* 115: BTE end */
 	xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
 
-	xpcUnknownReason	/* 116: unknown reason -- must be last in list */
+	xpcUnknownReason	/* 116: unknown reason - must be last in enum */
 };
 
-
 /*
  * Define the callout function types used by XPC to update the user on
  * connection activity and state changes (via the user function registered by
@@ -375,12 +362,11 @@ enum xpc_retval {
  * =====================+================================+=====================
  */
 
-typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
-		int ch_number, void *data, void *key);
-
-typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
-		int ch_number, void *key);
+typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid,
+				  int ch_number, void *data, void *key);
 
+typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid,
+				 int ch_number, void *key);
 
 /*
  * The following is a registration entry. There is a global array of these,
@@ -398,50 +384,45 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
  */
 struct xpc_registration {
 	struct mutex mutex;
 	xpc_channel_func func;	/* function to call */
 	void *key;		/* pointer to user's key */
 	u16 nentries;		/* #of msg entries in local msg queue */
 	u16 msg_size;		/* message queue's message size */
 	u32 assigned_limit;	/* limit on #of assigned kthreads */
 	u32 idle_limit;		/* limit on #of idle kthreads */
 } ____cacheline_aligned;
 
-
 #define XPC_CHANNEL_REGISTERED(_c)	(xpc_registrations[_c].func != NULL)
 
-
 /* the following are valid xpc_allocate() flags */
 #define XPC_WAIT	0	/* wait flag */
 #define XPC_NOWAIT	1	/* no wait flag */
-
 
 struct xpc_interface {
-	void (*connect)(int);
-	void (*disconnect)(int);
-	enum xpc_retval (*allocate)(partid_t, int, u32, void **);
-	enum xpc_retval (*send)(partid_t, int, void *);
-	enum xpc_retval (*send_notify)(partid_t, int, void *,
-						xpc_notify_func, void *);
-	void (*received)(partid_t, int, void *);
-	enum xpc_retval (*partid_to_nasids)(partid_t, void *);
+	void (*connect) (int);
+	void (*disconnect) (int);
+	enum xpc_retval (*allocate) (partid_t, int, u32, void **);
+	enum xpc_retval (*send) (partid_t, int, void *);
+	enum xpc_retval (*send_notify) (partid_t, int, void *,
+					xpc_notify_func, void *);
+	void (*received) (partid_t, int, void *);
+	enum xpc_retval (*partid_to_nasids) (partid_t, void *);
 };
 
-
 extern struct xpc_interface xpc_interface;
 
 extern void xpc_set_interface(void (*)(int),
 			      void (*)(int),
 			      enum xpc_retval (*)(partid_t, int, u32, void **),
 			      enum xpc_retval (*)(partid_t, int, void *),
-			      enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
-						  void *),
+			      enum xpc_retval (*)(partid_t, int, void *,
+						  xpc_notify_func, void *),
 			      void (*)(partid_t, int, void *),
 			      enum xpc_retval (*)(partid_t, void *));
 extern void xpc_clear_interface(void);
 
-
 extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
 				   u16, u32, u32);
 extern void xpc_disconnect(int);
 
 static inline enum xpc_retval
@@ -458,7 +439,7 @@ xpc_send(partid_t partid, int ch_number, void *payload)
 
 static inline enum xpc_retval
 xpc_send_notify(partid_t partid, int ch_number, void *payload,
-		xpc_notify_func func, void *key)
+		      xpc_notify_func func, void *key)
 {
 	return xpc_interface.send_notify(partid, ch_number, payload, func, key);
 }
@@ -475,11 +456,8 @@ xpc_partid_to_nasids(partid_t partid, void *nasids)
 	return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
-
 extern u64 xp_nofault_PIOR_target;
 extern int xp_nofault_PIOR(void *);
 extern int xp_error_PIOR(void);
 
-
-#endif /* _ASM_IA64_SN_XP_H */
-
+#endif /* _DRIVERS_MISC_SGIXP_XP_H */
diff --git a/arch/ia64/sn/kernel/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index b7ea46645e12..1fbf99bae963 100644
--- a/arch/ia64/sn/kernel/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition (XP) base.
  *
@@ -15,58 +14,64 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 
 /*
- * Target of nofault PIO read.
+ * The export of xp_nofault_PIOR needs to happen here since it is defined
+ * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
+ * defined here.
  */
-u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
 
+u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
 
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
  */
 struct xpc_registration xpc_registrations[XPC_NCHANNELS];
-
+EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
  * Initialize the XPC interface to indicate that XPC isn't loaded.
  */
-static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
+static enum xpc_retval
+xpc_notloaded(void)
+{
+	return xpcNotLoaded;
+}
 
 struct xpc_interface xpc_interface = {
-	(void (*)(int)) xpc_notloaded,
-	(void (*)(int)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
+	(void (*)(int))xpc_notloaded,
+	(void (*)(int))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *))
 	    xpc_notloaded,
-	(void (*)(partid_t, int, void *)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
+	(void (*)(partid_t, int, void *))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, void *))xpc_notloaded
 };
-
+EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
  * XPC calls this when it (the XPC module) has been loaded.
  */
 void
-xpc_set_interface(void (*connect)(int),
-		  void (*disconnect)(int),
-		  enum xpc_retval (*allocate)(partid_t, int, u32, void **),
-		  enum xpc_retval (*send)(partid_t, int, void *),
-		  enum xpc_retval (*send_notify)(partid_t, int, void *,
+xpc_set_interface(void (*connect) (int),
+		  void (*disconnect) (int),
+		  enum xpc_retval (*allocate) (partid_t, int, u32, void **),
+		  enum xpc_retval (*send) (partid_t, int, void *),
+		  enum xpc_retval (*send_notify) (partid_t, int, void *,
 						  xpc_notify_func, void *),
-		  void (*received)(partid_t, int, void *),
-		  enum xpc_retval (*partid_to_nasids)(partid_t, void *))
+		  void (*received) (partid_t, int, void *),
+		  enum xpc_retval (*partid_to_nasids) (partid_t, void *))
 {
 	xpc_interface.connect = connect;
 	xpc_interface.disconnect = disconnect;
@@ -76,7 +81,7 @@ xpc_set_interface(void (*connect)(int),
 	xpc_interface.received = received;
 	xpc_interface.partid_to_nasids = partid_to_nasids;
 }
-
+EXPORT_SYMBOL_GPL(xpc_set_interface);
 
 /*
  * XPC calls this when it (the XPC module) is being unloaded.
@@ -84,20 +89,21 @@ xpc_set_interface(void (*connect)(int),
 void
 xpc_clear_interface(void)
 {
-	xpc_interface.connect = (void (*)(int)) xpc_notloaded;
-	xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
-	xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
-					void **)) xpc_notloaded;
-	xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
-					xpc_notloaded;
-	xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
-					xpc_notify_func, void *)) xpc_notloaded;
-	xpc_interface.received = (void (*)(partid_t, int, void *))
-					xpc_notloaded;
-	xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
-					xpc_notloaded;
+	xpc_interface.connect = (void (*)(int))xpc_notloaded;
+	xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
+	xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32,
+						     void **))xpc_notloaded;
+	xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *))
+	    xpc_notloaded;
+	xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *,
+							xpc_notify_func,
+							void *))xpc_notloaded;
+	xpc_interface.received = (void (*)(partid_t, int, void *))
+	    xpc_notloaded;
+	xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
+	    xpc_notloaded;
 }
-
+EXPORT_SYMBOL_GPL(xpc_clear_interface);
 
 /*
  * Register for automatic establishment of a channel connection whenever
@@ -125,11 +131,10 @@ xpc_clear_interface(void)
  */
 enum xpc_retval
 xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 	    u16 nentries, u32 assigned_limit, u32 idle_limit)
 {
 	struct xpc_registration *registration;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 	DBUG_ON(payload_size == 0 || nentries == 0);
 	DBUG_ON(func == NULL);
@@ -137,9 +142,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
 	registration = &xpc_registrations[ch_number];
 
-	if (mutex_lock_interruptible(&registration->mutex) != 0) {
+	if (mutex_lock_interruptible(&registration->mutex) != 0)
 		return xpcInterrupted;
-	}
 
 	/* if XPC_CHANNEL_REGISTERED(ch_number) */
 	if (registration->func != NULL) {
@@ -161,7 +165,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
 	return xpcSuccess;
 }
-
+EXPORT_SYMBOL_GPL(xpc_connect);
 
 /*
  * Remove the registration for automatic connection of the specified channel
@@ -181,7 +185,6 @@ xpc_disconnect(int ch_number)
 {
 	struct xpc_registration *registration;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 
 	registration = &xpc_registrations[ch_number];
@@ -213,19 +216,17 @@ xpc_disconnect(int ch_number)
 
 	return;
 }
-
+EXPORT_SYMBOL_GPL(xpc_disconnect);
 
 int __init
 xp_init(void)
 {
 	int ret, ch_number;
-	u64 func_addr = *(u64 *) xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *) xp_error_PIOR;
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
-
-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
-	}
 
 	/*
 	 * Register a nofault code region which performs a cross-partition
@@ -236,55 +237,43 @@ xp_init(void)
 	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
 	 * work around).
 	 */
-	if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
-			err_func_addr, 1, 1)) != 0) {
+	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
+				       1, 1);
+	if (ret != 0) {
 		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
 		       ret);
 	}
 	/*
 	 * Setup the nofault PIO read target. (There is no special reason why
 	 * SH_IPI_ACCESS was selected.)
 	 */
-	if (is_shub2()) {
+	if (is_shub2())
 		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
-	} else {
+	else
 		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
-	}
 
 	/* initialize the connection registration mutex */
-	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
+	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
 		mutex_init(&xpc_registrations[ch_number].mutex);
-	}
 
 	return 0;
 }
-module_init(xp_init);
 
+module_init(xp_init);
 
 void __exit
 xp_exit(void)
 {
-	u64 func_addr = *(u64 *) xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
 	/* unregister the PIO read nofault code region */
-	(void) sn_register_nofault_code(func_addr, err_func_addr,
-					err_func_addr, 1, 0);
+	(void)sn_register_nofault_code(func_addr, err_func_addr,
+				       err_func_addr, 1, 0);
 }
-module_exit(xp_exit);
 
+module_exit(xp_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition (XP) base");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(xp_nofault_PIOR);
-EXPORT_SYMBOL(xp_nofault_PIOR_target);
-EXPORT_SYMBOL(xpc_registrations);
-EXPORT_SYMBOL(xpc_interface);
-EXPORT_SYMBOL(xpc_clear_interface);
-EXPORT_SYMBOL(xpc_set_interface);
-EXPORT_SYMBOL(xpc_connect);
-EXPORT_SYMBOL(xpc_disconnect);
-
diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/drivers/misc/sgi-xp/xp_nofault.S
index 98e7c7dbfdd8..e38d43319429 100644
--- a/arch/ia64/sn/kernel/xp_nofault.S
+++ b/drivers/misc/sgi-xp/xp_nofault.S
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * The xp_nofault_PIOR function takes a pointer to a remote PIO register
  * and attempts to load and consume a value from it. This function
diff --git a/include/asm-ia64/sn/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 3c0900ab8003..9eb6d4a3269c 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -3,17 +3,15 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) structures and macros.
  */
 
-#ifndef _ASM_IA64_SN_XPC_H
-#define _ASM_IA64_SN_XPC_H
-
+#ifndef _DRIVERS_MISC_SGIXP_XPC_H
+#define _DRIVERS_MISC_SGIXP_XPC_H
 
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
@@ -27,8 +25,7 @@
 #include <asm/sn/addrs.h>
 #include <asm/sn/mspec.h>
 #include <asm/sn/shub_mmr.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 
 /*
  * XPC Version numbers consist of a major and minor number.  XPC can always
@@ -39,7 +36,6 @@
 #define XPC_VERSION_MAJOR(_v)		((_v) >> 4)
 #define XPC_VERSION_MINOR(_v)		((_v) & 0xf)
 
-
 /*
  * The next macros define word or bit representations for given
  * C-brick nasid in either the SAL provided bit array representing
@@ -67,7 +63,6 @@
 /* define the process name of the discovery thread */
 #define XPC_DISCOVERY_THREAD_NAME	"xpc_discovery"
 
-
 /*
  * the reserved page
  *
@@ -115,16 +110,16 @@ struct xpc_rsvd_page {
 	u8 partid;		/* SAL: partition ID */
 	u8 version;
 	u8 pad1[6];		/* align to next u64 in cacheline */
-	volatile u64 vars_pa;
+	u64 vars_pa;		/* physical address of struct xpc_vars */
 	struct timespec stamp;	/* time when reserved page was setup by XPC */
 	u64 pad2[9];		/* align to last u64 in cacheline */
 	u64 nasids_size;	/* SAL: size of each nasid mask in bytes */
 };
 
-#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(1, 1) /* version 1.1 of the reserved page */
 
 #define XPC_SUPPORTS_RP_STAMP(_version) \
-			(_version >= _XPC_VERSION(1,1))
+			(_version >= _XPC_VERSION(1, 1))
 
 /*
  * compare stamps - the return value is:
@@ -138,14 +133,13 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
 {
 	int ret;
 
-
-	if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
+	ret = stamp1->tv_sec - stamp2->tv_sec;
+	if (ret == 0)
 		ret = stamp1->tv_nsec - stamp2->tv_nsec;
-	}
+
 	return ret;
 }
 
-
 /*
  * Define the structures by which XPC variables can be exported to other
  * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
@@ -172,11 +166,10 @@ struct xpc_vars {
 	AMO_t *amos_page;	/* vaddr of page of AMOs from MSPEC driver */
 };
 
-#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
+#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
 
 #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
-			(_version >= _XPC_VERSION(3,1))
-
+			(_version >= _XPC_VERSION(3, 1))
 
 static inline int
 xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
@@ -193,7 +186,7 @@ xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
 		old_mask = vars->heartbeating_to_mask;
 		new_mask = (old_mask | (1UL << partid));
 	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-							old_mask);
+		 old_mask);
 }
 
 static inline void
@@ -205,10 +198,9 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
 		old_mask = vars->heartbeating_to_mask;
 		new_mask = (old_mask & ~(1UL << partid));
 	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-							old_mask);
+		 old_mask);
 }
 
-
 /*
  * The AMOs page consists of a number of AMO variables which are divided into
  * four groups, The first two groups are used to identify an IRQ's sender.
@@ -222,7 +214,6 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
 #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
 #define XPC_DISENGAGE_REQUEST_AMO  (XPC_ENGAGED_PARTITIONS_AMO + 1)
 
-
 /*
  * The following structure describes the per partition specific variables.
  *
@@ -234,7 +225,7 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
  * occupies half a cacheline.
  */
 struct xpc_vars_part {
-	volatile u64 magic;
+	u64 magic;
 
 	u64 openclose_args_pa;	/* physical address of open and close args */
 	u64 GPs_pa;		/* physical address of Get/Put values */
@@ -257,20 +248,20 @@ struct xpc_vars_part {
  * MAGIC2 indicates that this partition has pulled the remote partititions
  * per partition variables that pertain to this partition.
  */
 #define XPC_VP_MAGIC1	0x0053524156435058L   /* 'XPCVARS\0'L (little endian) */
 #define XPC_VP_MAGIC2	0x0073726176435058L   /* 'XPCvars\0'L (little endian) */
-
 
 /* the reserved page sizes and offsets */
 
 #define XPC_RP_HEADER_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
 #define XPC_RP_VARS_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_vars))
 
-#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
+#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE))
 #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS(_rp)	((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS_PART(_rp)	(struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
-
+#define XPC_RP_VARS(_rp)	((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
+				    xp_nasid_mask_words))
+#define XPC_RP_VARS_PART(_rp)	((struct xpc_vars_part *) \
+				    ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE))
 
 /*
  * Functions registered by add_timer() or called by kernel_thread() only
@@ -285,21 +276,17 @@ struct xpc_vars_part {
 #define XPC_UNPACK_ARG1(_args)	(((u64) _args) & 0xffffffff)
 #define XPC_UNPACK_ARG2(_args)	((((u64) _args) >> 32) & 0xffffffff)
 
-
-
 /*
  * Define a Get/Put value pair (pointers) used with a message queue.
  */
 struct xpc_gp {
-	volatile s64 get;	/* Get value */
-	volatile s64 put;	/* Put value */
+	s64 get;	/* Get value */
+	s64 put;	/* Put value */
 };
 
 #define XPC_GP_SIZE \
 		L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
 
-
-
 /*
  * Define a structure that contains arguments associated with opening and
  * closing a channel.
@@ -315,20 +302,15 @@ struct xpc_openclose_args {
 #define XPC_OPENCLOSE_ARGS_SIZE \
 	      L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
 
-
-
 /* struct xpc_msg flags */
 
 #define	XPC_M_DONE		0x01	/* msg has been received/consumed */
 #define	XPC_M_READY		0x02	/* msg is ready to be sent */
 #define	XPC_M_INTERRUPT		0x04	/* send interrupt when msg consumed */
 
-
 #define XPC_MSG_ADDRESS(_payload) \
 		((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
 
-
-
 /*
  * Defines notify entry.
  *
@@ -336,19 +318,17 @@ struct xpc_openclose_args {
  * and consumed by the intended recipient.
  */
 struct xpc_notify {
-	volatile u8 type;	/* type of notification */
+	u8 type;		/* type of notification */
 
 	/* the following two fields are only used if type == XPC_N_CALL */
 	xpc_notify_func func;	/* user's notify function */
 	void *key;		/* pointer to user's key */
 };
 
 /* struct xpc_notify type of notification */
 
 #define	XPC_N_CALL		0x01	/* notify function provided by user */
 
-
-
 /*
  * Define the structure that manages all the stuff required by a channel. In
  * particular, they are used to manage the messages sent across the channel.
@@ -428,48 +408,48 @@ struct xpc_notify {
  * messages.
  */
 struct xpc_channel {
 	partid_t partid;	/* ID of remote partition connected */
 	spinlock_t lock;	/* lock for updating this structure */
 	u32 flags;		/* general flags */
 
 	enum xpc_retval reason;	/* reason why channel is disconnect'g */
 	int reason_line;	/* line# disconnect initiated from */
 
 	u16 number;		/* channel # */
 
 	u16 msg_size;		/* sizeof each msg entry */
 	u16 local_nentries;	/* #of msg entries in local msg queue */
-	u16 remote_nentries;	/* #of msg entries in remote msg queue*/
+	u16 remote_nentries;	/* #of msg entries in remote msg queue */
 
 	void *local_msgqueue_base;	/* base address of kmalloc'd space */
 	struct xpc_msg *local_msgqueue;	/* local message queue */
 	void *remote_msgqueue_base;	/* base address of kmalloc'd space */
-	struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
+	struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
 					/* local message queue */
 	u64 remote_msgqueue_pa;	/* phys addr of remote partition's */
 				/* local message queue */
 
 	atomic_t references;	/* #of external references to queues */
 
 	atomic_t n_on_msg_allocate_wq;	/* #on msg allocation wait queue */
 	wait_queue_head_t msg_allocate_wq;	/* msg allocation wait queue */
 
 	u8 delayed_IPI_flags;	/* IPI flags received, but delayed */
 				/* action until channel disconnected */
 
 	/* queue of msg senders who want to be notified when msg received */
 
 	atomic_t n_to_notify;	/* #of msg senders to notify */
-	struct xpc_notify *notify_queue;/* notify queue for messages sent */
+	struct xpc_notify *notify_queue; /* notify queue for messages sent */
 
 	xpc_channel_func func;	/* user's channel function */
 	void *key;		/* pointer to user's key */
 
 	struct mutex msg_to_pull_mutex;	/* next msg to pull serialization */
 	struct completion wdisconnect_wait;	/* wait for channel disconnect */
 
 	struct xpc_openclose_args *local_openclose_args; /* args passed on */
472 /* opening or closing of channel */ 452 /* opening or closing of channel */
473 453
474 /* various flavors of local and remote Get/Put values */ 454 /* various flavors of local and remote Get/Put values */
475 455
@@ -477,56 +457,48 @@ struct xpc_channel {
477 struct xpc_gp remote_GP; /* remote Get/Put values */ 457 struct xpc_gp remote_GP; /* remote Get/Put values */
478 struct xpc_gp w_local_GP; /* working local Get/Put values */ 458 struct xpc_gp w_local_GP; /* working local Get/Put values */
479 struct xpc_gp w_remote_GP; /* working remote Get/Put values */ 459 struct xpc_gp w_remote_GP; /* working remote Get/Put values */
480 s64 next_msg_to_pull; /* Put value of next msg to pull */ 460 s64 next_msg_to_pull; /* Put value of next msg to pull */
481 461
482 /* kthread management related fields */ 462 /* kthread management related fields */
483 463
484// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
485// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
486// >>> dependent on activity over the last interval of time
487 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ 464 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
488 u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ 465 u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
489 atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ 466 atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
490 u32 kthreads_idle_limit; /* limit on #of kthreads idle */ 467 u32 kthreads_idle_limit; /* limit on #of kthreads idle */
491 atomic_t kthreads_active; /* #of kthreads actively working */ 468 atomic_t kthreads_active; /* #of kthreads actively working */
492 // >>> following field is temporary
493 u32 kthreads_created; /* total #of kthreads created */
494 469
495 wait_queue_head_t idle_wq; /* idle kthread wait queue */ 470 wait_queue_head_t idle_wq; /* idle kthread wait queue */
496 471
497} ____cacheline_aligned; 472} ____cacheline_aligned;
498 473
499
500/* struct xpc_channel flags */ 474/* struct xpc_channel flags */
501 475
502#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ 476#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
503 477
504#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ 478#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
505#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ 479#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
506#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ 480#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
507#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ 481#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
508 482
509#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ 483#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
510#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */ 484#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */
511#define XPC_C_CONNECTEDCALLOUT_MADE \ 485#define XPC_C_CONNECTEDCALLOUT_MADE \
512 0x00000080 /* connected callout completed */ 486 0x00000080 /* connected callout completed */
513#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */ 487#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */
514#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */ 488#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */
515 489
516#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */ 490#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */
517#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */ 491#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */
518#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */ 492#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */
519#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */ 493#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */
520 494
521#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */ 495#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */
522#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */ 496#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */
523#define XPC_C_DISCONNECTINGCALLOUT \ 497#define XPC_C_DISCONNECTINGCALLOUT \
524 0x00010000 /* disconnecting callout initiated */ 498 0x00010000 /* disconnecting callout initiated */
525#define XPC_C_DISCONNECTINGCALLOUT_MADE \ 499#define XPC_C_DISCONNECTINGCALLOUT_MADE \
526 0x00020000 /* disconnecting callout completed */ 500 0x00020000 /* disconnecting callout completed */
527#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ 501#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
528
529
530 502
531/* 503/*
532 * Manages channels on a partition basis. There is one of these structures 504 * Manages channels on a partition basis. There is one of these structures
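
The XPC_C_* values above are independent bits in the channel's 32-bit flags word, so connection and disconnection state can be tested with simple mask operations. A small illustrative check (channel_usable() is hypothetical, not an XPC helper):

#include <stdint.h>

#define XPC_C_CONNECTED      0x00000100
#define XPC_C_DISCONNECTING  0x00008000

static int channel_usable(uint32_t flags)
{
        /* usable only while connected and not already being torn down */
        return (flags & XPC_C_CONNECTED) && !(flags & XPC_C_DISCONNECTING);
}

int main(void)
{
        uint32_t flags = XPC_C_CONNECTED;

        return channel_usable(flags) ? 0 : 1;   /* exits 0: usable */
}
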
@@ -537,33 +509,31 @@ struct xpc_partition {
537 509
538 /* XPC HB infrastructure */ 510 /* XPC HB infrastructure */
539 511
540 u8 remote_rp_version; /* version# of partition's rsvd pg */ 512 u8 remote_rp_version; /* version# of partition's rsvd pg */
541 struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */ 513 struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */
542 u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ 514 u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
543 u64 remote_vars_pa; /* phys addr of partition's vars */ 515 u64 remote_vars_pa; /* phys addr of partition's vars */
544 u64 remote_vars_part_pa; /* phys addr of partition's vars part */ 516 u64 remote_vars_part_pa; /* phys addr of partition's vars part */
545 u64 last_heartbeat; /* HB at last read */ 517 u64 last_heartbeat; /* HB at last read */
546 u64 remote_amos_page_pa; /* phys addr of partition's amos page */ 518 u64 remote_amos_page_pa; /* phys addr of partition's amos page */
547 int remote_act_nasid; /* active part's act/deact nasid */ 519 int remote_act_nasid; /* active part's act/deact nasid */
548 int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */ 520 int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
549 u32 act_IRQ_rcvd; /* IRQs since activation */ 521 u32 act_IRQ_rcvd; /* IRQs since activation */
550 spinlock_t act_lock; /* protect updating of act_state */ 522 spinlock_t act_lock; /* protect updating of act_state */
551 u8 act_state; /* from XPC HB viewpoint */ 523 u8 act_state; /* from XPC HB viewpoint */
552 u8 remote_vars_version; /* version# of partition's vars */ 524 u8 remote_vars_version; /* version# of partition's vars */
553 enum xpc_retval reason; /* reason partition is deactivating */ 525 enum xpc_retval reason; /* reason partition is deactivating */
554 int reason_line; /* line# deactivation initiated from */ 526 int reason_line; /* line# deactivation initiated from */
555 int reactivate_nasid; /* nasid in partition to reactivate */ 527 int reactivate_nasid; /* nasid in partition to reactivate */
556 528
557 unsigned long disengage_request_timeout; /* timeout in jiffies */ 529 unsigned long disengage_request_timeout; /* timeout in jiffies */
558 struct timer_list disengage_request_timer; 530 struct timer_list disengage_request_timer;
559 531
560
561 /* XPC infrastructure referencing and teardown control */ 532 /* XPC infrastructure referencing and teardown control */
562 533
563 volatile u8 setup_state; /* infrastructure setup state */ 534 u8 setup_state; /* infrastructure setup state */
564 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ 535 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
565 atomic_t references; /* #of references to infrastructure */ 536 atomic_t references; /* #of references to infrastructure */
566
567 537
568 /* 538 /*
569 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN 539 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
@@ -572,53 +542,48 @@ struct xpc_partition {
572 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.) 542 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
573 */ 543 */
574 544
575 545 u8 nchannels; /* #of defined channels supported */
576 u8 nchannels; /* #of defined channels supported */ 546 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
577 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ 547 atomic_t nchannels_engaged; /* #of channels engaged with remote part */
578 atomic_t nchannels_engaged;/* #of channels engaged with remote part */ 548 struct xpc_channel *channels; /* array of channel structures */
579 struct xpc_channel *channels;/* array of channel structures */ 549
580 550 void *local_GPs_base; /* base address of kmalloc'd space */
581 void *local_GPs_base; /* base address of kmalloc'd space */ 551 struct xpc_gp *local_GPs; /* local Get/Put values */
582 struct xpc_gp *local_GPs; /* local Get/Put values */ 552 void *remote_GPs_base; /* base address of kmalloc'd space */
583 void *remote_GPs_base; /* base address of kmalloc'd space */ 553 struct xpc_gp *remote_GPs; /* copy of remote partition's local */
584 struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */ 554 /* Get/Put values */
585 /* values */ 555 u64 remote_GPs_pa; /* phys address of remote partition's local */
586 u64 remote_GPs_pa; /* phys address of remote partition's local */ 556 /* Get/Put values */
587 /* Get/Put values */
588
589 557
590 /* fields used to pass args when opening or closing a channel */ 558 /* fields used to pass args when opening or closing a channel */
591 559
592 void *local_openclose_args_base; /* base address of kmalloc'd space */ 560 void *local_openclose_args_base; /* base address of kmalloc'd space */
593 struct xpc_openclose_args *local_openclose_args; /* local's args */ 561 struct xpc_openclose_args *local_openclose_args; /* local's args */
594 void *remote_openclose_args_base; /* base address of kmalloc'd space */ 562 void *remote_openclose_args_base; /* base address of kmalloc'd space */
595 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ 563 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
596 /* args */ 564 /* args */
597 u64 remote_openclose_args_pa; /* phys addr of remote's args */ 565 u64 remote_openclose_args_pa; /* phys addr of remote's args */
598
599 566
600 /* IPI sending, receiving and handling related fields */ 567 /* IPI sending, receiving and handling related fields */
601 568
602 int remote_IPI_nasid; /* nasid of where to send IPIs */ 569 int remote_IPI_nasid; /* nasid of where to send IPIs */
603 int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */ 570 int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
604 AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */ 571 AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
605
606 AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
607 u64 local_IPI_amo; /* IPI amo flags yet to be handled */
608 char IPI_owner[8]; /* IPI owner's name */
609 struct timer_list dropped_IPI_timer; /* dropped IPI timer */
610 572
611 spinlock_t IPI_lock; /* IPI handler lock */ 573 AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
574 u64 local_IPI_amo; /* IPI amo flags yet to be handled */
575 char IPI_owner[8]; /* IPI owner's name */
576 struct timer_list dropped_IPI_timer; /* dropped IPI timer */
612 577
578 spinlock_t IPI_lock; /* IPI handler lock */
613 579
614 /* channel manager related fields */ 580 /* channel manager related fields */
615 581
616 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ 582 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
617 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ 583 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
618 584
619} ____cacheline_aligned; 585} ____cacheline_aligned;
620 586
621
622/* struct xpc_partition act_state values (for XPC HB) */ 587/* struct xpc_partition act_state values (for XPC HB) */
623 588
624#define XPC_P_INACTIVE 0x00 /* partition is not active */ 589#define XPC_P_INACTIVE 0x00 /* partition is not active */
@@ -627,11 +592,9 @@ struct xpc_partition {
627#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ 592#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
628#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ 593#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */
629 594
630
631#define XPC_DEACTIVATE_PARTITION(_p, _reason) \ 595#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
632 xpc_deactivate_partition(__LINE__, (_p), (_reason)) 596 xpc_deactivate_partition(__LINE__, (_p), (_reason))
633 597
634
635/* struct xpc_partition setup_state values */ 598/* struct xpc_partition setup_state values */
636 599
637#define XPC_P_UNSET 0x00 /* infrastructure was never setup */ 600#define XPC_P_UNSET 0x00 /* infrastructure was never setup */
@@ -639,8 +602,6 @@ struct xpc_partition {
639#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ 602#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
640#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */ 603#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */
641 604
642
643
644/* 605/*
645 * struct xpc_partition IPI_timer #of seconds to wait before checking for 606 * struct xpc_partition IPI_timer #of seconds to wait before checking for
646 * dropped IPIs. These occur whenever an IPI amo write doesn't complete until 607 * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
@@ -648,22 +609,17 @@ struct xpc_partition {
648 */ 609 */
649#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) 610#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
650 611
651
652/* number of seconds to wait for other partitions to disengage */ 612/* number of seconds to wait for other partitions to disengage */
653#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90 613#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90
654 614
655/* interval in seconds to print 'waiting disengagement' messages */ 615/* interval in seconds to print 'waiting disengagement' messages */
656#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10 616#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10
657 617
658
659#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0])) 618#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
660 619
661
662
663/* found in xp_main.c */ 620/* found in xp_main.c */
664extern struct xpc_registration xpc_registrations[]; 621extern struct xpc_registration xpc_registrations[];
665 622
666
667/* found in xpc_main.c */ 623/* found in xpc_main.c */
668extern struct device *xpc_part; 624extern struct device *xpc_part;
669extern struct device *xpc_chan; 625extern struct device *xpc_chan;
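
XPC_PARTID() above turns a pointer into the xpc_partitions[] array back into a partition ID by plain pointer subtraction, which C scales by the element size. A sketch of the same idea (demo_partition, NPARTITIONS and PARTID are illustrative names):

#include <stdio.h>

#define NPARTITIONS 64

struct demo_partition { int act_state; };

static struct demo_partition partitions[NPARTITIONS];

#define PARTID(_p) ((int)((_p) - &partitions[0]))

int main(void)
{
        struct demo_partition *part = &partitions[5];

        printf("partid=%d\n", PARTID(part));    /* prints 5 */
        return 0;
}
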
@@ -676,7 +632,6 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
676extern void xpc_create_kthreads(struct xpc_channel *, int, int); 632extern void xpc_create_kthreads(struct xpc_channel *, int, int);
677extern void xpc_disconnect_wait(int); 633extern void xpc_disconnect_wait(int);
678 634
679
680/* found in xpc_partition.c */ 635/* found in xpc_partition.c */
681extern int xpc_exiting; 636extern int xpc_exiting;
682extern struct xpc_vars *xpc_vars; 637extern struct xpc_vars *xpc_vars;
@@ -696,10 +651,9 @@ extern void xpc_mark_partition_inactive(struct xpc_partition *);
696extern void xpc_discovery(void); 651extern void xpc_discovery(void);
697extern void xpc_check_remote_hb(void); 652extern void xpc_check_remote_hb(void);
698extern void xpc_deactivate_partition(const int, struct xpc_partition *, 653extern void xpc_deactivate_partition(const int, struct xpc_partition *,
699 enum xpc_retval); 654 enum xpc_retval);
700extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); 655extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
701 656
702
703/* found in xpc_channel.c */ 657/* found in xpc_channel.c */
704extern void xpc_initiate_connect(int); 658extern void xpc_initiate_connect(int);
705extern void xpc_initiate_disconnect(int); 659extern void xpc_initiate_disconnect(int);
@@ -714,23 +668,18 @@ extern void xpc_process_channel_activity(struct xpc_partition *);
714extern void xpc_connected_callout(struct xpc_channel *); 668extern void xpc_connected_callout(struct xpc_channel *);
715extern void xpc_deliver_msg(struct xpc_channel *); 669extern void xpc_deliver_msg(struct xpc_channel *);
716extern void xpc_disconnect_channel(const int, struct xpc_channel *, 670extern void xpc_disconnect_channel(const int, struct xpc_channel *,
717 enum xpc_retval, unsigned long *); 671 enum xpc_retval, unsigned long *);
718extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval); 672extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
719extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval); 673extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
720extern void xpc_teardown_infrastructure(struct xpc_partition *); 674extern void xpc_teardown_infrastructure(struct xpc_partition *);
721 675
722
723
724static inline void 676static inline void
725xpc_wakeup_channel_mgr(struct xpc_partition *part) 677xpc_wakeup_channel_mgr(struct xpc_partition *part)
726{ 678{
727 if (atomic_inc_return(&part->channel_mgr_requests) == 1) { 679 if (atomic_inc_return(&part->channel_mgr_requests) == 1)
728 wake_up(&part->channel_mgr_wq); 680 wake_up(&part->channel_mgr_wq);
729 }
730} 681}
731 682
732
733
734/* 683/*
735 * These next two inlines are used to keep us from tearing down a channel's 684 * These next two inlines are used to keep us from tearing down a channel's
736 * msg queues while a thread may be referencing them. 685 * msg queues while a thread may be referencing them.
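
xpc_wakeup_channel_mgr() above coalesces wake-ups: every caller bumps channel_mgr_requests, but only the caller that takes the count from 0 to 1 actually wakes the channel manager. A user-space sketch of that pattern, with a pthread condition variable standing in for the kernel wait queue and a simplified manager-side reset:

#include <pthread.h>
#include <stdatomic.h>

static atomic_int requests;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t mgr_wq = PTHREAD_COND_INITIALIZER;

static void wakeup_channel_mgr(void)
{
        /* mirrors atomic_inc_return(&...) == 1: first requester wakes mgr */
        if (atomic_fetch_add(&requests, 1) + 1 == 1) {
                pthread_mutex_lock(&lock);
                pthread_cond_signal(&mgr_wq);
                pthread_mutex_unlock(&lock);
        }
}

static void channel_mgr_wait(void)
{
        pthread_mutex_lock(&lock);
        while (atomic_load(&requests) == 0)
                pthread_cond_wait(&mgr_wq, &lock);
        pthread_mutex_unlock(&lock);
        atomic_store(&requests, 0);     /* consume all pending requests */
}

int main(void)
{
        wakeup_channel_mgr();
        channel_mgr_wait();
        return 0;
}
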
@@ -747,17 +696,13 @@ xpc_msgqueue_deref(struct xpc_channel *ch)
747 s32 refs = atomic_dec_return(&ch->references); 696 s32 refs = atomic_dec_return(&ch->references);
748 697
749 DBUG_ON(refs < 0); 698 DBUG_ON(refs < 0);
750 if (refs == 0) { 699 if (refs == 0)
751 xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]); 700 xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
752 }
753} 701}
754 702
755
756
757#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ 703#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
758 xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) 704 xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
759 705
760
761/* 706/*
762 * These two inlines are used to keep us from tearing down a partition's 707 * These two inlines are used to keep us from tearing down a partition's
763 * setup infrastructure while a thread may be referencing it. 708 * setup infrastructure while a thread may be referencing it.
@@ -767,11 +712,9 @@ xpc_part_deref(struct xpc_partition *part)
767{ 712{
768 s32 refs = atomic_dec_return(&part->references); 713 s32 refs = atomic_dec_return(&part->references);
769 714
770
771 DBUG_ON(refs < 0); 715 DBUG_ON(refs < 0);
772 if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) { 716 if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN)
773 wake_up(&part->teardown_wq); 717 wake_up(&part->teardown_wq);
774 }
775} 718}
776 719
777static inline int 720static inline int
@@ -779,17 +722,14 @@ xpc_part_ref(struct xpc_partition *part)
779{ 722{
780 int setup; 723 int setup;
781 724
782
783 atomic_inc(&part->references); 725 atomic_inc(&part->references);
784 setup = (part->setup_state == XPC_P_SETUP); 726 setup = (part->setup_state == XPC_P_SETUP);
785 if (!setup) { 727 if (!setup)
786 xpc_part_deref(part); 728 xpc_part_deref(part);
787 } 729
788 return setup; 730 return setup;
789} 731}
790 732
791
792
793/* 733/*
794 * The following macro is to be used for the setting of the reason and 734 * The following macro is to be used for the setting of the reason and
795 * reason_line fields in both the struct xpc_channel and struct xpc_partition 735 * reason_line fields in both the struct xpc_channel and struct xpc_partition
@@ -801,8 +741,6 @@ xpc_part_ref(struct xpc_partition *part)
801 (_p)->reason_line = _line; \ 741 (_p)->reason_line = _line; \
802 } 742 }
803 743
804
805
806/* 744/*
807 * This next set of inlines are used to keep track of when a partition is 745 * This next set of inlines are used to keep track of when a partition is
808 * potentially engaged in accessing memory belonging to another partition. 746 * potentially engaged in accessing memory belonging to another partition.
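
xpc_part_ref() and xpc_part_deref() above implement a small reference/teardown handshake: a reader keeps its reference only while the infrastructure is in the SETUP state, and the last reference dropped while a teardown is pending wakes the thread waiting on teardown_wq. A simplified sketch with C11 atomics (the states and the wake-up bookkeeping are stand-ins):

#include <stdatomic.h>
#include <assert.h>

enum { P_UNSET, P_SETUP, P_WTEARDOWN, P_TORNDOWN };

struct demo_part {
        atomic_int references;
        atomic_int setup_state;
        int teardown_wakeups;           /* stands in for wake_up(&teardown_wq) */
};

static void part_deref(struct demo_part *part)
{
        int refs = atomic_fetch_sub(&part->references, 1) - 1;

        assert(refs >= 0);
        if (refs == 0 && atomic_load(&part->setup_state) == P_WTEARDOWN)
                part->teardown_wakeups++;   /* let the teardown thread run */
}

static int part_ref(struct demo_part *part)
{
        int setup;

        atomic_fetch_add(&part->references, 1);
        setup = (atomic_load(&part->setup_state) == P_SETUP);
        if (!setup)
                part_deref(part);           /* back out: infra is going away */
        return setup;
}

int main(void)
{
        struct demo_part part = { 0, P_SETUP, 0 };

        if (part_ref(&part)) {
                /* ...safe to use the partition's infrastructure here... */
                part_deref(&part);
        }
        return 0;
}
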
@@ -812,23 +750,24 @@ static inline void
812xpc_mark_partition_engaged(struct xpc_partition *part) 750xpc_mark_partition_engaged(struct xpc_partition *part)
813{ 751{
814 unsigned long irq_flags; 752 unsigned long irq_flags;
815 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 753 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
816 (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t))); 754 (XPC_ENGAGED_PARTITIONS_AMO *
817 755 sizeof(AMO_t)));
818 756
819 local_irq_save(irq_flags); 757 local_irq_save(irq_flags);
820 758
821 /* set bit corresponding to our partid in remote partition's AMO */ 759 /* set bit corresponding to our partid in remote partition's AMO */
822 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, 760 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
823 (1UL << sn_partition_id)); 761 (1UL << sn_partition_id));
824 /* 762 /*
825 * We must always use the nofault function regardless of whether we 763 * We must always use the nofault function regardless of whether we
826 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 764 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
827 * didn't, we'd never know that the other partition is down and would 765 * didn't, we'd never know that the other partition is down and would
828 * keep sending IPIs and AMOs to it until the heartbeat times out. 766 * keep sending IPIs and AMOs to it until the heartbeat times out.
829 */ 767 */
830 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 768 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
831 variable), xp_nofault_PIOR_target)); 769 variable),
770 xp_nofault_PIOR_target));
832 771
833 local_irq_restore(irq_flags); 772 local_irq_restore(irq_flags);
834} 773}
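
xpc_mark_partition_engaged() above sets the bit for the local partition ID in the remote side's engaged-partitions AMO word, and its counterpart clears it; the nofault PIO read only exists to notice a dead partition. A sketch with an ordinary atomic 64-bit word standing in for the hardware AMO (my_partid and the helper names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint_least64_t engaged_partitions_amo;
static const int my_partid = 3;          /* would be sn_partition_id */

static void mark_engaged(void)
{
        atomic_fetch_or(&engaged_partitions_amo, UINT64_C(1) << my_partid);
}

static void mark_disengaged(void)
{
        atomic_fetch_and(&engaged_partitions_amo,
                         ~(UINT64_C(1) << my_partid));
}

static uint64_t partitions_engaged(uint64_t partid_mask)
{
        /* the AMO word ANDed with partid_mask, as in xpc_partition_engaged() */
        return atomic_load(&engaged_partitions_amo) & partid_mask;
}

int main(void)
{
        mark_engaged();
        uint64_t busy = partitions_engaged(UINT64_C(1) << my_partid);
        mark_disengaged();
        return busy ? 0 : 1;             /* 0: partition 3 was engaged */
}
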
@@ -837,23 +776,24 @@ static inline void
837xpc_mark_partition_disengaged(struct xpc_partition *part) 776xpc_mark_partition_disengaged(struct xpc_partition *part)
838{ 777{
839 unsigned long irq_flags; 778 unsigned long irq_flags;
840 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 779 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
841 (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t))); 780 (XPC_ENGAGED_PARTITIONS_AMO *
842 781 sizeof(AMO_t)));
843 782
844 local_irq_save(irq_flags); 783 local_irq_save(irq_flags);
845 784
846 /* clear bit corresponding to our partid in remote partition's AMO */ 785 /* clear bit corresponding to our partid in remote partition's AMO */
847 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 786 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
848 ~(1UL << sn_partition_id)); 787 ~(1UL << sn_partition_id));
849 /* 788 /*
850 * We must always use the nofault function regardless of whether we 789 * We must always use the nofault function regardless of whether we
851 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 790 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
852 * didn't, we'd never know that the other partition is down and would 791 * didn't, we'd never know that the other partition is down and would
853 * keep sending IPIs and AMOs to it until the heartbeat times out. 792 * keep sending IPIs and AMOs to it until the heartbeat times out.
854 */ 793 */
855 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 794 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
856 variable), xp_nofault_PIOR_target)); 795 variable),
796 xp_nofault_PIOR_target));
857 797
858 local_irq_restore(irq_flags); 798 local_irq_restore(irq_flags);
859} 799}
@@ -862,23 +802,23 @@ static inline void
862xpc_request_partition_disengage(struct xpc_partition *part) 802xpc_request_partition_disengage(struct xpc_partition *part)
863{ 803{
864 unsigned long irq_flags; 804 unsigned long irq_flags;
865 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 805 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
866 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); 806 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
867
868 807
869 local_irq_save(irq_flags); 808 local_irq_save(irq_flags);
870 809
871 /* set bit corresponding to our partid in remote partition's AMO */ 810 /* set bit corresponding to our partid in remote partition's AMO */
872 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, 811 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
873 (1UL << sn_partition_id)); 812 (1UL << sn_partition_id));
874 /* 813 /*
875 * We must always use the nofault function regardless of whether we 814 * We must always use the nofault function regardless of whether we
876 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 815 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
877 * didn't, we'd never know that the other partition is down and would 816 * didn't, we'd never know that the other partition is down and would
878 * keep sending IPIs and AMOs to it until the heartbeat times out. 817 * keep sending IPIs and AMOs to it until the heartbeat times out.
879 */ 818 */
880 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 819 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
881 variable), xp_nofault_PIOR_target)); 820 variable),
821 xp_nofault_PIOR_target));
882 822
883 local_irq_restore(irq_flags); 823 local_irq_restore(irq_flags);
884} 824}
@@ -887,23 +827,23 @@ static inline void
887xpc_cancel_partition_disengage_request(struct xpc_partition *part) 827xpc_cancel_partition_disengage_request(struct xpc_partition *part)
888{ 828{
889 unsigned long irq_flags; 829 unsigned long irq_flags;
890 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 830 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
891 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); 831 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
892
893 832
894 local_irq_save(irq_flags); 833 local_irq_save(irq_flags);
895 834
896 /* clear bit corresponding to our partid in remote partition's AMO */ 835 /* clear bit corresponding to our partid in remote partition's AMO */
897 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 836 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
898 ~(1UL << sn_partition_id)); 837 ~(1UL << sn_partition_id));
899 /* 838 /*
900 * We must always use the nofault function regardless of whether we 839 * We must always use the nofault function regardless of whether we
901 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 840 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
902 * didn't, we'd never know that the other partition is down and would 841 * didn't, we'd never know that the other partition is down and would
903 * keep sending IPIs and AMOs to it until the heartbeat times out. 842 * keep sending IPIs and AMOs to it until the heartbeat times out.
904 */ 843 */
905 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 844 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
906 variable), xp_nofault_PIOR_target)); 845 variable),
846 xp_nofault_PIOR_target));
907 847
908 local_irq_restore(irq_flags); 848 local_irq_restore(irq_flags);
909} 849}
@@ -913,10 +853,9 @@ xpc_partition_engaged(u64 partid_mask)
913{ 853{
914 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; 854 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
915 855
916
917 /* return our partition's AMO variable ANDed with partid_mask */ 856 /* return our partition's AMO variable ANDed with partid_mask */
918 return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) & 857 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
919 partid_mask); 858 partid_mask);
920} 859}
921 860
922static inline u64 861static inline u64
@@ -924,10 +863,9 @@ xpc_partition_disengage_requested(u64 partid_mask)
924{ 863{
925 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; 864 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
926 865
927
928 /* return our partition's AMO variable ANDed with partid_mask */ 866 /* return our partition's AMO variable ANDed with partid_mask */
929 return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) & 867 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
930 partid_mask); 868 partid_mask);
931} 869}
932 870
933static inline void 871static inline void
@@ -935,10 +873,9 @@ xpc_clear_partition_engaged(u64 partid_mask)
935{ 873{
936 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; 874 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
937 875
938
939 /* clear bit(s) based on partid_mask in our partition's AMO */ 876 /* clear bit(s) based on partid_mask in our partition's AMO */
940 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 877 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
941 ~partid_mask); 878 ~partid_mask);
942} 879}
943 880
944static inline void 881static inline void
@@ -946,14 +883,11 @@ xpc_clear_partition_disengage_request(u64 partid_mask)
946{ 883{
947 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; 884 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
948 885
949
950 /* clear bit(s) based on partid_mask in our partition's AMO */ 886 /* clear bit(s) based on partid_mask in our partition's AMO */
951 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 887 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
952 ~partid_mask); 888 ~partid_mask);
953} 889}
954 890
955
956
957/* 891/*
958 * The following set of macros and inlines are used for the sending and 892 * The following set of macros and inlines are used for the sending and
959 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, 893 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
@@ -964,20 +898,18 @@ xpc_clear_partition_disengage_request(u64 partid_mask)
964static inline u64 898static inline u64
965xpc_IPI_receive(AMO_t *amo) 899xpc_IPI_receive(AMO_t *amo)
966{ 900{
967 return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR); 901 return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
968} 902}
969 903
970
971static inline enum xpc_retval 904static inline enum xpc_retval
972xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) 905xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
973{ 906{
974 int ret = 0; 907 int ret = 0;
975 unsigned long irq_flags; 908 unsigned long irq_flags;
976 909
977
978 local_irq_save(irq_flags); 910 local_irq_save(irq_flags);
979 911
980 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag); 912 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
981 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); 913 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
982 914
983 /* 915 /*
@@ -986,15 +918,14 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
986 * didn't, we'd never know that the other partition is down and would 918 * didn't, we'd never know that the other partition is down and would
987 * keep sending IPIs and AMOs to it until the heartbeat times out. 919 * keep sending IPIs and AMOs to it until the heartbeat times out.
988 */ 920 */
989 ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), 921 ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
990 xp_nofault_PIOR_target)); 922 xp_nofault_PIOR_target));
991 923
992 local_irq_restore(irq_flags); 924 local_irq_restore(irq_flags);
993 925
994 return ((ret == 0) ? xpcSuccess : xpcPioReadError); 926 return ((ret == 0) ? xpcSuccess : xpcPioReadError);
995} 927}
996 928
997
998/* 929/*
999 * IPIs associated with SGI_XPC_ACTIVATE IRQ. 930 * IPIs associated with SGI_XPC_ACTIVATE IRQ.
1000 */ 931 */
@@ -1004,47 +935,47 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
1004 */ 935 */
1005static inline void 936static inline void
1006xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid, 937xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
1007 int to_phys_cpuid) 938 int to_phys_cpuid)
1008{ 939{
1009 int w_index = XPC_NASID_W_INDEX(from_nasid); 940 int w_index = XPC_NASID_W_INDEX(from_nasid);
1010 int b_index = XPC_NASID_B_INDEX(from_nasid); 941 int b_index = XPC_NASID_B_INDEX(from_nasid);
1011 AMO_t *amos = (AMO_t *) __va(amos_page_pa + 942 AMO_t *amos = (AMO_t *)__va(amos_page_pa +
1012 (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t))); 943 (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
1013 944
1014 945 (void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
1015 (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, 946 to_phys_cpuid, SGI_XPC_ACTIVATE);
1016 to_phys_cpuid, SGI_XPC_ACTIVATE);
1017} 947}
1018 948
1019static inline void 949static inline void
1020xpc_IPI_send_activate(struct xpc_vars *vars) 950xpc_IPI_send_activate(struct xpc_vars *vars)
1021{ 951{
1022 xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), 952 xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
1023 vars->act_nasid, vars->act_phys_cpuid); 953 vars->act_nasid, vars->act_phys_cpuid);
1024} 954}
1025 955
1026static inline void 956static inline void
1027xpc_IPI_send_activated(struct xpc_partition *part) 957xpc_IPI_send_activated(struct xpc_partition *part)
1028{ 958{
1029 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), 959 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
1030 part->remote_act_nasid, part->remote_act_phys_cpuid); 960 part->remote_act_nasid,
961 part->remote_act_phys_cpuid);
1031} 962}
1032 963
1033static inline void 964static inline void
1034xpc_IPI_send_reactivate(struct xpc_partition *part) 965xpc_IPI_send_reactivate(struct xpc_partition *part)
1035{ 966{
1036 xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, 967 xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
1037 xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); 968 xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
1038} 969}
1039 970
1040static inline void 971static inline void
1041xpc_IPI_send_disengage(struct xpc_partition *part) 972xpc_IPI_send_disengage(struct xpc_partition *part)
1042{ 973{
1043 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), 974 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
1044 part->remote_act_nasid, part->remote_act_phys_cpuid); 975 part->remote_act_nasid,
976 part->remote_act_phys_cpuid);
1045} 977}
1046 978
1047
1048/* 979/*
1049 * IPIs associated with SGI_XPC_NOTIFY IRQ. 980 * IPIs associated with SGI_XPC_NOTIFY IRQ.
1050 */ 981 */
@@ -1058,33 +989,28 @@ xpc_IPI_send_disengage(struct xpc_partition *part)
1058 989
1059static inline void 990static inline void
1060xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, 991xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
1061 unsigned long *irq_flags) 992 unsigned long *irq_flags)
1062{ 993{
1063 struct xpc_partition *part = &xpc_partitions[ch->partid]; 994 struct xpc_partition *part = &xpc_partitions[ch->partid];
1064 enum xpc_retval ret; 995 enum xpc_retval ret;
1065 996
1066
1067 if (likely(part->act_state != XPC_P_DEACTIVATING)) { 997 if (likely(part->act_state != XPC_P_DEACTIVATING)) {
1068 ret = xpc_IPI_send(part->remote_IPI_amo_va, 998 ret = xpc_IPI_send(part->remote_IPI_amo_va,
1069 (u64) ipi_flag << (ch->number * 8), 999 (u64)ipi_flag << (ch->number * 8),
1070 part->remote_IPI_nasid, 1000 part->remote_IPI_nasid,
1071 part->remote_IPI_phys_cpuid, 1001 part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
1072 SGI_XPC_NOTIFY);
1073 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", 1002 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
1074 ipi_flag_string, ch->partid, ch->number, ret); 1003 ipi_flag_string, ch->partid, ch->number, ret);
1075 if (unlikely(ret != xpcSuccess)) { 1004 if (unlikely(ret != xpcSuccess)) {
1076 if (irq_flags != NULL) { 1005 if (irq_flags != NULL)
1077 spin_unlock_irqrestore(&ch->lock, *irq_flags); 1006 spin_unlock_irqrestore(&ch->lock, *irq_flags);
1078 }
1079 XPC_DEACTIVATE_PARTITION(part, ret); 1007 XPC_DEACTIVATE_PARTITION(part, ret);
1080 if (irq_flags != NULL) { 1008 if (irq_flags != NULL)
1081 spin_lock_irqsave(&ch->lock, *irq_flags); 1009 spin_lock_irqsave(&ch->lock, *irq_flags);
1082 }
1083 } 1010 }
1084 } 1011 }
1085} 1012}
1086 1013
1087
1088/* 1014/*
1089 * Make it look like the remote partition, which is associated with the 1015 * Make it look like the remote partition, which is associated with the
1090 * specified channel, sent us an IPI. This faked IPI will be handled 1016 * specified channel, sent us an IPI. This faked IPI will be handled
@@ -1095,18 +1021,16 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
1095 1021
1096static inline void 1022static inline void
1097xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, 1023xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
1098 char *ipi_flag_string) 1024 char *ipi_flag_string)
1099{ 1025{
1100 struct xpc_partition *part = &xpc_partitions[ch->partid]; 1026 struct xpc_partition *part = &xpc_partitions[ch->partid];
1101 1027
1102 1028 FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable),
1103 FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable), 1029 FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
1104 FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
1105 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", 1030 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
1106 ipi_flag_string, ch->partid, ch->number); 1031 ipi_flag_string, ch->partid, ch->number);
1107} 1032}
1108 1033
1109
1110/* 1034/*
1111 * The sending and receiving of IPIs includes the setting of an AMO variable 1035 * The sending and receiving of IPIs includes the setting of an AMO variable
1112 * to indicate the reason the IPI was sent. The 64-bit variable is divided 1036 * to indicate the reason the IPI was sent. The 64-bit variable is divided
@@ -1121,21 +1045,18 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
1121#define XPC_IPI_OPENREPLY 0x08 1045#define XPC_IPI_OPENREPLY 0x08
1122#define XPC_IPI_MSGREQUEST 0x10 1046#define XPC_IPI_MSGREQUEST 0x10
1123 1047
1124
1125/* given an AMO variable and a channel#, get its associated IPI flags */ 1048/* given an AMO variable and a channel#, get its associated IPI flags */
1126#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) 1049#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
1127#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) 1050#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
1128 1051
1129#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f)) 1052#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
1130#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010)) 1053#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010UL)
1131
1132 1054
1133static inline void 1055static inline void
1134xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) 1056xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
1135{ 1057{
1136 struct xpc_openclose_args *args = ch->local_openclose_args; 1058 struct xpc_openclose_args *args = ch->local_openclose_args;
1137 1059
1138
1139 args->reason = ch->reason; 1060 args->reason = ch->reason;
1140 1061
1141 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); 1062 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
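
The macros above pack each channel's IPI flags into one byte of a 64-bit AMO word, so eight channels share a single variable, and the 0x0f0f.../0x1010... masks answer in one AND whether any channel has an open/close or message request pending. A self-contained sketch of the packing (the macro and flag names are local stand-ins for the XPC_IPI_* definitions):

#include <stdint.h>
#include <stdio.h>

#define IPI_CLOSEREQUEST 0x01
#define IPI_OPENREQUEST  0x04
#define IPI_MSGREQUEST   0x10

#define GET_IPI_FLAGS(_amo, _c)     ((uint8_t)(((_amo) >> ((_c) * 8)) & 0xff))
#define SET_IPI_FLAGS(_amo, _c, _f) ((_amo) |= ((uint64_t)(_f) << ((_c) * 8)))

#define ANY_OPENCLOSE_SET(_amo) ((_amo) & UINT64_C(0x0f0f0f0f0f0f0f0f))
#define ANY_MSG_SET(_amo)       ((_amo) & UINT64_C(0x1010101010101010))

int main(void)
{
        uint64_t amo = 0;

        SET_IPI_FLAGS(amo, 2, IPI_OPENREQUEST);   /* channel 2 wants to open */
        SET_IPI_FLAGS(amo, 5, IPI_MSGREQUEST);    /* channel 5 has new msgs  */

        printf("ch2 flags=0x%02x\n", GET_IPI_FLAGS(amo, 2));
        printf("openclose pending=%d msg pending=%d\n",
               ANY_OPENCLOSE_SET(amo) != 0, ANY_MSG_SET(amo) != 0);
        return 0;
}
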
@@ -1152,7 +1073,6 @@ xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
1152{ 1073{
1153 struct xpc_openclose_args *args = ch->local_openclose_args; 1074 struct xpc_openclose_args *args = ch->local_openclose_args;
1154 1075
1155
1156 args->msg_size = ch->msg_size; 1076 args->msg_size = ch->msg_size;
1157 args->local_nentries = ch->local_nentries; 1077 args->local_nentries = ch->local_nentries;
1158 1078
@@ -1164,7 +1084,6 @@ xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
1164{ 1084{
1165 struct xpc_openclose_args *args = ch->local_openclose_args; 1085 struct xpc_openclose_args *args = ch->local_openclose_args;
1166 1086
1167
1168 args->remote_nentries = ch->remote_nentries; 1087 args->remote_nentries = ch->remote_nentries;
1169 args->local_nentries = ch->local_nentries; 1088 args->local_nentries = ch->local_nentries;
1170 args->local_msgqueue_pa = __pa(ch->local_msgqueue); 1089 args->local_msgqueue_pa = __pa(ch->local_msgqueue);
@@ -1184,7 +1103,6 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
1184 XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); 1103 XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
1185} 1104}
1186 1105
1187
1188/* 1106/*
1189 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These 1107 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
1190 * pages are located in the lowest granule. The lowest granule uses 4k pages 1108 * pages are located in the lowest granule. The lowest granule uses 4k pages
@@ -1201,13 +1119,10 @@ xpc_IPI_init(int index)
1201{ 1119{
1202 AMO_t *amo = xpc_vars->amos_page + index; 1120 AMO_t *amo = xpc_vars->amos_page + index;
1203 1121
1204 1122 (void)xpc_IPI_receive(amo); /* clear AMO variable */
1205 (void) xpc_IPI_receive(amo); /* clear AMO variable */
1206 return amo; 1123 return amo;
1207} 1124}
1208 1125
1209
1210
1211static inline enum xpc_retval 1126static inline enum xpc_retval
1212xpc_map_bte_errors(bte_result_t error) 1127xpc_map_bte_errors(bte_result_t error)
1213{ 1128{
@@ -1220,22 +1135,31 @@ xpc_map_bte_errors(bte_result_t error)
1220 return xpcBteUnmappedError; 1135 return xpcBteUnmappedError;
1221 } 1136 }
1222 switch (error) { 1137 switch (error) {
1223 case BTE_SUCCESS: return xpcSuccess; 1138 case BTE_SUCCESS:
1224 case BTEFAIL_DIR: return xpcBteDirectoryError; 1139 return xpcSuccess;
1225 case BTEFAIL_POISON: return xpcBtePoisonError; 1140 case BTEFAIL_DIR:
1226 case BTEFAIL_WERR: return xpcBteWriteError; 1141 return xpcBteDirectoryError;
1227 case BTEFAIL_ACCESS: return xpcBteAccessError; 1142 case BTEFAIL_POISON:
1228 case BTEFAIL_PWERR: return xpcBtePWriteError; 1143 return xpcBtePoisonError;
1229 case BTEFAIL_PRERR: return xpcBtePReadError; 1144 case BTEFAIL_WERR:
1230 case BTEFAIL_TOUT: return xpcBteTimeOutError; 1145 return xpcBteWriteError;
1231 case BTEFAIL_XTERR: return xpcBteXtalkError; 1146 case BTEFAIL_ACCESS:
1232 case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable; 1147 return xpcBteAccessError;
1233 default: return xpcBteUnmappedError; 1148 case BTEFAIL_PWERR:
1149 return xpcBtePWriteError;
1150 case BTEFAIL_PRERR:
1151 return xpcBtePReadError;
1152 case BTEFAIL_TOUT:
1153 return xpcBteTimeOutError;
1154 case BTEFAIL_XTERR:
1155 return xpcBteXtalkError;
1156 case BTEFAIL_NOTAVAIL:
1157 return xpcBteNotAvailable;
1158 default:
1159 return xpcBteUnmappedError;
1234 } 1160 }
1235} 1161}
1236 1162
1237
1238
1239/* 1163/*
1240 * Check to see if there is any channel activity to/from the specified 1164 * Check to see if there is any channel activity to/from the specified
1241 * partition. 1165 * partition.
@@ -1246,11 +1170,9 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
1246 u64 IPI_amo; 1170 u64 IPI_amo;
1247 unsigned long irq_flags; 1171 unsigned long irq_flags;
1248 1172
1249
1250 IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); 1173 IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
1251 if (IPI_amo == 0) { 1174 if (IPI_amo == 0)
1252 return; 1175 return;
1253 }
1254 1176
1255 spin_lock_irqsave(&part->IPI_lock, irq_flags); 1177 spin_lock_irqsave(&part->IPI_lock, irq_flags);
1256 part->local_IPI_amo |= IPI_amo; 1178 part->local_IPI_amo |= IPI_amo;
@@ -1262,6 +1184,4 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
1262 xpc_wakeup_channel_mgr(part); 1184 xpc_wakeup_channel_mgr(part);
1263} 1185}
1264 1186
1265 1187#endif /* _DRIVERS_MISC_SGIXP_XPC_H */
1266#endif /* _ASM_IA64_SN_XPC_H */
1267
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 44ccc0d789c9..bfcb9ea968e9 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -3,10 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Communication (XPC) channel support. 10 * Cross Partition Communication (XPC) channel support.
12 * 11 *
@@ -15,7 +14,6 @@
15 * 14 *
16 */ 15 */
17 16
18
19#include <linux/kernel.h> 17#include <linux/kernel.h>
20#include <linux/init.h> 18#include <linux/init.h>
21#include <linux/sched.h> 19#include <linux/sched.h>
@@ -25,8 +23,7 @@
25#include <linux/completion.h> 23#include <linux/completion.h>
26#include <asm/sn/bte.h> 24#include <asm/sn/bte.h>
27#include <asm/sn/sn_sal.h> 25#include <asm/sn/sn_sal.h>
28#include <asm/sn/xpc.h> 26#include "xpc.h"
29
30 27
31/* 28/*
32 * Guarantee that the kzalloc'd memory is cacheline aligned. 29 * Guarantee that the kzalloc'd memory is cacheline aligned.
@@ -36,22 +33,21 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
36{ 33{
37 /* see if kzalloc will give us cachline aligned memory by default */ 34 /* see if kzalloc will give us cachline aligned memory by default */
38 *base = kzalloc(size, flags); 35 *base = kzalloc(size, flags);
39 if (*base == NULL) { 36 if (*base == NULL)
40 return NULL; 37 return NULL;
41 } 38
42 if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { 39 if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
43 return *base; 40 return *base;
44 } 41
45 kfree(*base); 42 kfree(*base);
46 43
47 /* nope, we'll have to do it ourselves */ 44 /* nope, we'll have to do it ourselves */
48 *base = kzalloc(size + L1_CACHE_BYTES, flags); 45 *base = kzalloc(size + L1_CACHE_BYTES, flags);
49 if (*base == NULL) { 46 if (*base == NULL)
50 return NULL; 47 return NULL;
51 }
52 return (void *) L1_CACHE_ALIGN((u64) *base);
53}
54 48
49 return (void *)L1_CACHE_ALIGN((u64)*base);
50}
55 51
56/* 52/*
57 * Set up the initial values for the XPartition Communication channels. 53 * Set up the initial values for the XPartition Communication channels.
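
xpc_kzalloc_cacheline_aligned() above first hopes the allocator hands back cacheline-aligned memory and, failing that, over-allocates by one cache line and rounds the pointer up, returning the raw allocation through *base so it can be freed later. A user-space sketch of the same trick (calloc() stands in for kzalloc(), CACHE_BYTES for L1_CACHE_BYTES):

#include <stdint.h>
#include <stdlib.h>

#define CACHE_BYTES 128
#define CACHE_ALIGN(x) (((x) + CACHE_BYTES - 1) & ~((uintptr_t)CACHE_BYTES - 1))

static void *zalloc_cacheline_aligned(size_t size, void **base)
{
        /* see if the allocator already hands back aligned memory */
        *base = calloc(1, size);
        if (*base == NULL)
                return NULL;
        if ((uintptr_t)*base == CACHE_ALIGN((uintptr_t)*base))
                return *base;
        free(*base);

        /* nope: over-allocate and round the start up to the next line */
        *base = calloc(1, size + CACHE_BYTES);
        if (*base == NULL)
                return NULL;
        return (void *)CACHE_ALIGN((uintptr_t)*base);
}

int main(void)
{
        void *base;
        void *buf = zalloc_cacheline_aligned(1000, &base);

        free(base);             /* always free the base, never buf */
        return buf == NULL;
}
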
@@ -62,7 +58,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
62 int ch_number; 58 int ch_number;
63 struct xpc_channel *ch; 59 struct xpc_channel *ch;
64 60
65
66 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { 61 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
67 ch = &part->channels[ch_number]; 62 ch = &part->channels[ch_number];
68 63
@@ -72,7 +67,7 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
72 67
73 ch->local_GP = &part->local_GPs[ch_number]; 68 ch->local_GP = &part->local_GPs[ch_number];
74 ch->local_openclose_args = 69 ch->local_openclose_args =
75 &part->local_openclose_args[ch_number]; 70 &part->local_openclose_args[ch_number];
76 71
77 atomic_set(&ch->kthreads_assigned, 0); 72 atomic_set(&ch->kthreads_assigned, 0);
78 atomic_set(&ch->kthreads_idle, 0); 73 atomic_set(&ch->kthreads_idle, 0);
@@ -91,7 +86,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
91 } 86 }
92} 87}
93 88
94
95/* 89/*
96 * Setup the infrastructure necessary to support XPartition Communication 90 * Setup the infrastructure necessary to support XPartition Communication
97 * between the specified remote partition and the local one. 91 * between the specified remote partition and the local one.
@@ -103,7 +97,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
103 struct timer_list *timer; 97 struct timer_list *timer;
104 partid_t partid = XPC_PARTID(part); 98 partid_t partid = XPC_PARTID(part);
105 99
106
107 /* 100 /*
108 * Zero out MOST of the entry for this partition. Only the fields 101 * Zero out MOST of the entry for this partition. Only the fields
109 * starting with `nchannels' will be zeroed. The preceding fields must 102 * starting with `nchannels' will be zeroed. The preceding fields must
@@ -111,14 +104,14 @@ xpc_setup_infrastructure(struct xpc_partition *part)
111 * referenced during this memset() operation. 104 * referenced during this memset() operation.
112 */ 105 */
113 memset(&part->nchannels, 0, sizeof(struct xpc_partition) - 106 memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
114 offsetof(struct xpc_partition, nchannels)); 107 offsetof(struct xpc_partition, nchannels));
115 108
116 /* 109 /*
117 * Allocate all of the channel structures as a contiguous chunk of 110 * Allocate all of the channel structures as a contiguous chunk of
118 * memory. 111 * memory.
119 */ 112 */
120 part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS, 113 part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
121 GFP_KERNEL); 114 GFP_KERNEL);
122 if (part->channels == NULL) { 115 if (part->channels == NULL) {
123 dev_err(xpc_chan, "can't get memory for channels\n"); 116 dev_err(xpc_chan, "can't get memory for channels\n");
124 return xpcNoMemory; 117 return xpcNoMemory;
@@ -126,11 +119,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
126 119
127 part->nchannels = XPC_NCHANNELS; 120 part->nchannels = XPC_NCHANNELS;
128 121
129
130 /* allocate all the required GET/PUT values */ 122 /* allocate all the required GET/PUT values */
131 123
132 part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, 124 part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
133 GFP_KERNEL, &part->local_GPs_base); 125 GFP_KERNEL,
126 &part->local_GPs_base);
134 if (part->local_GPs == NULL) { 127 if (part->local_GPs == NULL) {
135 kfree(part->channels); 128 kfree(part->channels);
136 part->channels = NULL; 129 part->channels = NULL;
@@ -140,7 +133,9 @@ xpc_setup_infrastructure(struct xpc_partition *part)
140 } 133 }
141 134
142 part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, 135 part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
143 GFP_KERNEL, &part->remote_GPs_base); 136 GFP_KERNEL,
137 &part->
138 remote_GPs_base);
144 if (part->remote_GPs == NULL) { 139 if (part->remote_GPs == NULL) {
145 dev_err(xpc_chan, "can't get memory for remote get/put " 140 dev_err(xpc_chan, "can't get memory for remote get/put "
146 "values\n"); 141 "values\n");
@@ -151,12 +146,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
151 return xpcNoMemory; 146 return xpcNoMemory;
152 } 147 }
153 148
154
155 /* allocate all the required open and close args */ 149 /* allocate all the required open and close args */
156 150
157 part->local_openclose_args = xpc_kzalloc_cacheline_aligned( 151 part->local_openclose_args =
158 XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, 152 xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
159 &part->local_openclose_args_base); 153 &part->local_openclose_args_base);
160 if (part->local_openclose_args == NULL) { 154 if (part->local_openclose_args == NULL) {
161 dev_err(xpc_chan, "can't get memory for local connect args\n"); 155 dev_err(xpc_chan, "can't get memory for local connect args\n");
162 kfree(part->remote_GPs_base); 156 kfree(part->remote_GPs_base);
@@ -168,9 +162,9 @@ xpc_setup_infrastructure(struct xpc_partition *part)
168 return xpcNoMemory; 162 return xpcNoMemory;
169 } 163 }
170 164
171 part->remote_openclose_args = xpc_kzalloc_cacheline_aligned( 165 part->remote_openclose_args =
172 XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, 166 xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
173 &part->remote_openclose_args_base); 167 &part->remote_openclose_args_base);
174 if (part->remote_openclose_args == NULL) { 168 if (part->remote_openclose_args == NULL) {
175 dev_err(xpc_chan, "can't get memory for remote connect args\n"); 169 dev_err(xpc_chan, "can't get memory for remote connect args\n");
176 kfree(part->local_openclose_args_base); 170 kfree(part->local_openclose_args_base);
@@ -184,13 +178,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
184 return xpcNoMemory; 178 return xpcNoMemory;
185 } 179 }
186 180
187
188 xpc_initialize_channels(part, partid); 181 xpc_initialize_channels(part, partid);
189 182
190 atomic_set(&part->nchannels_active, 0); 183 atomic_set(&part->nchannels_active, 0);
191 atomic_set(&part->nchannels_engaged, 0); 184 atomic_set(&part->nchannels_engaged, 0);
192 185
193
194 /* local_IPI_amo were set to 0 by an earlier memset() */ 186 /* local_IPI_amo were set to 0 by an earlier memset() */
195 187
196 /* Initialize this partitions AMO_t structure */ 188 /* Initialize this partitions AMO_t structure */
@@ -203,7 +195,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
203 195
204 sprintf(part->IPI_owner, "xpc%02d", partid); 196 sprintf(part->IPI_owner, "xpc%02d", partid);
205 ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED, 197 ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
206 part->IPI_owner, (void *) (u64) partid); 198 part->IPI_owner, (void *)(u64)partid);
207 if (ret != 0) { 199 if (ret != 0) {
208 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " 200 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
209 "errno=%d\n", -ret); 201 "errno=%d\n", -ret);
@@ -223,8 +215,8 @@ xpc_setup_infrastructure(struct xpc_partition *part)
223 /* Setup a timer to check for dropped IPIs */ 215 /* Setup a timer to check for dropped IPIs */
224 timer = &part->dropped_IPI_timer; 216 timer = &part->dropped_IPI_timer;
225 init_timer(timer); 217 init_timer(timer);
226 timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check; 218 timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check;
227 timer->data = (unsigned long) part; 219 timer->data = (unsigned long)part;
228 timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; 220 timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
229 add_timer(timer); 221 add_timer(timer);
230 222
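For reference, the timer configured above is just a callback, an opaque data word and an expiry time. A small stand-alone C stand-in of that shape, using illustrative types rather than the kernel timer API:

#include <stdint.h>

/* Illustrative stand-in for the dropped-IPI timer above: a callback taking
 * an opaque 'data' word, plus an expiry tick and a helper that fires the
 * callback once that tick has passed. */
struct simple_timer {
    void (*function)(unsigned long data);
    unsigned long data;
    uint64_t expires;
};

static void run_if_expired(struct simple_timer *t, uint64_t now)
{
    if (now >= t->expires)
        t->function(t->data);
}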
@@ -234,7 +226,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
234 */ 226 */
235 part->setup_state = XPC_P_SETUP; 227 part->setup_state = XPC_P_SETUP;
236 228
237
238 /* 229 /*
239 * Setup the per partition specific variables required by the 230 * Setup the per partition specific variables required by the
240 * remote partition to establish channel connections with us. 231 * remote partition to establish channel connections with us.
@@ -244,7 +235,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
244 */ 235 */
245 xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); 236 xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
246 xpc_vars_part[partid].openclose_args_pa = 237 xpc_vars_part[partid].openclose_args_pa =
247 __pa(part->local_openclose_args); 238 __pa(part->local_openclose_args);
248 xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); 239 xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
249 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ 240 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
250 xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid); 241 xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
@@ -255,7 +246,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
255 return xpcSuccess; 246 return xpcSuccess;
256} 247}
257 248
258
259/* 249/*
260 * Create a wrapper that hides the underlying mechanism for pulling a cacheline 250 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
261 * (or multiple cachelines) from a remote partition. 251 * (or multiple cachelines) from a remote partition.
@@ -266,24 +256,21 @@ xpc_setup_infrastructure(struct xpc_partition *part)
266 */ 256 */
267static enum xpc_retval 257static enum xpc_retval
268xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, 258xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
269 const void *src, size_t cnt) 259 const void *src, size_t cnt)
270{ 260{
271 bte_result_t bte_ret; 261 bte_result_t bte_ret;
272 262
273 263 DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
274 DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src)); 264 DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
275 DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
276 DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); 265 DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
277 266
278 if (part->act_state == XPC_P_DEACTIVATING) { 267 if (part->act_state == XPC_P_DEACTIVATING)
279 return part->reason; 268 return part->reason;
280 }
281 269
282 bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt, 270 bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
283 (BTE_NORMAL | BTE_WACQUIRE), NULL); 271 (BTE_NORMAL | BTE_WACQUIRE), NULL);
284 if (bte_ret == BTE_SUCCESS) { 272 if (bte_ret == BTE_SUCCESS)
285 return xpcSuccess; 273 return xpcSuccess;
286 }
287 274
288 dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", 275 dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
289 XPC_PARTID(part), bte_ret); 276 XPC_PARTID(part), bte_ret);
@@ -291,7 +278,6 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
291 return xpc_map_bte_errors(bte_ret); 278 return xpc_map_bte_errors(bte_ret);
292} 279}
293 280
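The DBUG_ON() lines above require the source, destination and length of the block transfer to all be cache-line aligned. The same precondition in plain C, assuming a 128-byte line purely for illustration:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define CACHELINE 128   /* assumed line size standing in for L1_CACHE_BYTES */

/* Round up to the next line boundary, as L1_CACHE_ALIGN() does. */
static inline uintptr_t cacheline_align(uintptr_t v)
{
    return (v + CACHELINE - 1) & ~(uintptr_t)(CACHELINE - 1);
}

/* An address or length is acceptable only if aligning it changes nothing,
 * which is what the DBUG_ON() checks above assert. */
static void check_copy_args(const void *src, void *dst, size_t cnt)
{
    assert((uintptr_t)src == cacheline_align((uintptr_t)src));
    assert((uintptr_t)dst == cacheline_align((uintptr_t)dst));
    assert((uintptr_t)cnt == cacheline_align((uintptr_t)cnt));
}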
294
295/* 281/*
296 * Pull the remote per partition specific variables from the specified 282 * Pull the remote per partition specific variables from the specified
297 * partition. 283 * partition.
@@ -301,41 +287,40 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
301{ 287{
302 u8 buffer[L1_CACHE_BYTES * 2]; 288 u8 buffer[L1_CACHE_BYTES * 2];
303 struct xpc_vars_part *pulled_entry_cacheline = 289 struct xpc_vars_part *pulled_entry_cacheline =
304 (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer); 290 (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer);
305 struct xpc_vars_part *pulled_entry; 291 struct xpc_vars_part *pulled_entry;
306 u64 remote_entry_cacheline_pa, remote_entry_pa; 292 u64 remote_entry_cacheline_pa, remote_entry_pa;
307 partid_t partid = XPC_PARTID(part); 293 partid_t partid = XPC_PARTID(part);
308 enum xpc_retval ret; 294 enum xpc_retval ret;
309 295
310
311 /* pull the cacheline that contains the variables we're interested in */ 296 /* pull the cacheline that contains the variables we're interested in */
312 297
313 DBUG_ON(part->remote_vars_part_pa != 298 DBUG_ON(part->remote_vars_part_pa !=
314 L1_CACHE_ALIGN(part->remote_vars_part_pa)); 299 L1_CACHE_ALIGN(part->remote_vars_part_pa));
315 DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2); 300 DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
316 301
317 remote_entry_pa = part->remote_vars_part_pa + 302 remote_entry_pa = part->remote_vars_part_pa +
318 sn_partition_id * sizeof(struct xpc_vars_part); 303 sn_partition_id * sizeof(struct xpc_vars_part);
319 304
320 remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); 305 remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
321 306
322 pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline + 307 pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline +
323 (remote_entry_pa & (L1_CACHE_BYTES - 1))); 308 (remote_entry_pa &
309 (L1_CACHE_BYTES - 1)));
324 310
325 ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, 311 ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
326 (void *) remote_entry_cacheline_pa, 312 (void *)remote_entry_cacheline_pa,
327 L1_CACHE_BYTES); 313 L1_CACHE_BYTES);
328 if (ret != xpcSuccess) { 314 if (ret != xpcSuccess) {
329 dev_dbg(xpc_chan, "failed to pull XPC vars_part from " 315 dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
330 "partition %d, ret=%d\n", partid, ret); 316 "partition %d, ret=%d\n", partid, ret);
331 return ret; 317 return ret;
332 } 318 }
333 319
334
335 /* see if they've been set up yet */ 320 /* see if they've been set up yet */
336 321
337 if (pulled_entry->magic != XPC_VP_MAGIC1 && 322 if (pulled_entry->magic != XPC_VP_MAGIC1 &&
338 pulled_entry->magic != XPC_VP_MAGIC2) { 323 pulled_entry->magic != XPC_VP_MAGIC2) {
339 324
340 if (pulled_entry->magic != 0) { 325 if (pulled_entry->magic != 0) {
341 dev_dbg(xpc_chan, "partition %d's XPC vars_part for " 326 dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
@@ -353,8 +338,8 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
353 /* validate the variables */ 338 /* validate the variables */
354 339
355 if (pulled_entry->GPs_pa == 0 || 340 if (pulled_entry->GPs_pa == 0 ||
356 pulled_entry->openclose_args_pa == 0 || 341 pulled_entry->openclose_args_pa == 0 ||
357 pulled_entry->IPI_amo_pa == 0) { 342 pulled_entry->IPI_amo_pa == 0) {
358 343
359 dev_err(xpc_chan, "partition %d's XPC vars_part for " 344 dev_err(xpc_chan, "partition %d's XPC vars_part for "
360 "partition %d are not valid\n", partid, 345 "partition %d are not valid\n", partid,
@@ -366,29 +351,26 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
366 351
367 part->remote_GPs_pa = pulled_entry->GPs_pa; 352 part->remote_GPs_pa = pulled_entry->GPs_pa;
368 part->remote_openclose_args_pa = 353 part->remote_openclose_args_pa =
369 pulled_entry->openclose_args_pa; 354 pulled_entry->openclose_args_pa;
370 part->remote_IPI_amo_va = 355 part->remote_IPI_amo_va =
371 (AMO_t *) __va(pulled_entry->IPI_amo_pa); 356 (AMO_t *)__va(pulled_entry->IPI_amo_pa);
372 part->remote_IPI_nasid = pulled_entry->IPI_nasid; 357 part->remote_IPI_nasid = pulled_entry->IPI_nasid;
373 part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; 358 part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
374 359
375 if (part->nchannels > pulled_entry->nchannels) { 360 if (part->nchannels > pulled_entry->nchannels)
376 part->nchannels = pulled_entry->nchannels; 361 part->nchannels = pulled_entry->nchannels;
377 }
378 362
379 /* let the other side know that we've pulled their variables */ 363 /* let the other side know that we've pulled their variables */
380 364
381 xpc_vars_part[partid].magic = XPC_VP_MAGIC2; 365 xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
382 } 366 }
383 367
384 if (pulled_entry->magic == XPC_VP_MAGIC1) { 368 if (pulled_entry->magic == XPC_VP_MAGIC1)
385 return xpcRetry; 369 return xpcRetry;
386 }
387 370
388 return xpcSuccess; 371 return xpcSuccess;
389} 372}
390 373
391
392/* 374/*
393 * Get the IPI flags and pull the openclose args and/or remote GPs as needed. 375 * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
394 */ 376 */
@@ -399,23 +381,23 @@ xpc_get_IPI_flags(struct xpc_partition *part)
399 u64 IPI_amo; 381 u64 IPI_amo;
400 enum xpc_retval ret; 382 enum xpc_retval ret;
401 383
402
403 /* 384 /*
404 * See if there are any IPI flags to be handled. 385 * See if there are any IPI flags to be handled.
405 */ 386 */
406 387
407 spin_lock_irqsave(&part->IPI_lock, irq_flags); 388 spin_lock_irqsave(&part->IPI_lock, irq_flags);
408 if ((IPI_amo = part->local_IPI_amo) != 0) { 389 IPI_amo = part->local_IPI_amo;
390 if (IPI_amo != 0)
409 part->local_IPI_amo = 0; 391 part->local_IPI_amo = 0;
410 }
411 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
412 392
393 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
413 394
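The hunk above moves the assignment out of the if-condition while keeping the behaviour: the pending flag word is read and cleared within one locked section. A userspace sketch of that fetch-and-clear pattern, with a pthread mutex standing in for the spinlock and illustrative names:

#include <pthread.h>

static pthread_mutex_t ipi_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long pending_flags;   /* stand-in for part->local_IPI_amo */

/* Read the flag word and clear it atomically with respect to other takers;
 * the assignment stays on its own line, as in the cleaned-up code above. */
static unsigned long long take_pending_flags(void)
{
    unsigned long long flags;

    pthread_mutex_lock(&ipi_lock);
    flags = pending_flags;
    if (flags != 0)
        pending_flags = 0;
    pthread_mutex_unlock(&ipi_lock);

    return flags;
}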
414 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { 395 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
415 ret = xpc_pull_remote_cachelines(part, 396 ret = xpc_pull_remote_cachelines(part,
416 part->remote_openclose_args, 397 part->remote_openclose_args,
417 (void *) part->remote_openclose_args_pa, 398 (void *)part->
418 XPC_OPENCLOSE_ARGS_SIZE); 399 remote_openclose_args_pa,
400 XPC_OPENCLOSE_ARGS_SIZE);
419 if (ret != xpcSuccess) { 401 if (ret != xpcSuccess) {
420 XPC_DEACTIVATE_PARTITION(part, ret); 402 XPC_DEACTIVATE_PARTITION(part, ret);
421 403
@@ -430,8 +412,8 @@ xpc_get_IPI_flags(struct xpc_partition *part)
430 412
431 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { 413 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
432 ret = xpc_pull_remote_cachelines(part, part->remote_GPs, 414 ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
433 (void *) part->remote_GPs_pa, 415 (void *)part->remote_GPs_pa,
434 XPC_GP_SIZE); 416 XPC_GP_SIZE);
435 if (ret != xpcSuccess) { 417 if (ret != xpcSuccess) {
436 XPC_DEACTIVATE_PARTITION(part, ret); 418 XPC_DEACTIVATE_PARTITION(part, ret);
437 419
@@ -446,7 +428,6 @@ xpc_get_IPI_flags(struct xpc_partition *part)
446 return IPI_amo; 428 return IPI_amo;
447} 429}
448 430
449
450/* 431/*
451 * Allocate the local message queue and the notify queue. 432 * Allocate the local message queue and the notify queue.
452 */ 433 */
@@ -457,20 +438,14 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
457 int nentries; 438 int nentries;
458 size_t nbytes; 439 size_t nbytes;
459 440
460
461 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
462 // >>> iterations of the for-loop, bail if set?
463
464 // >>> should we impose a minimum #of entries? like 4 or 8?
465 for (nentries = ch->local_nentries; nentries > 0; nentries--) { 441 for (nentries = ch->local_nentries; nentries > 0; nentries--) {
466 442
467 nbytes = nentries * ch->msg_size; 443 nbytes = nentries * ch->msg_size;
468 ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, 444 ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
469 GFP_KERNEL, 445 GFP_KERNEL,
470 &ch->local_msgqueue_base); 446 &ch->local_msgqueue_base);
471 if (ch->local_msgqueue == NULL) { 447 if (ch->local_msgqueue == NULL)
472 continue; 448 continue;
473 }
474 449
475 nbytes = nentries * sizeof(struct xpc_notify); 450 nbytes = nentries * sizeof(struct xpc_notify);
476 ch->notify_queue = kzalloc(nbytes, GFP_KERNEL); 451 ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
@@ -497,7 +472,6 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
497 return xpcNoMemory; 472 return xpcNoMemory;
498} 473}
499 474
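The loop above asks for the largest message queue first and retries with fewer entries until an allocation succeeds. The same best-effort strategy in a few lines of plain C, with calloc() standing in for the driver's helpers and illustrative names:

#include <stdlib.h>

/* Try to allocate a queue of up to 'want' fixed-size slots, settling for
 * fewer when memory is tight; returns the slot count obtained, 0 on failure. */
static int alloc_queue_best_effort(size_t slot_size, int want, void **queue)
{
    int nentries;

    for (nentries = want; nentries > 0; nentries--) {
        *queue = calloc((size_t)nentries, slot_size);
        if (*queue != NULL)
            return nentries;        /* settled for this many entries */
    }
    return 0;                       /* not even one slot could be allocated */
}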
500
501/* 475/*
502 * Allocate the cached remote message queue. 476 * Allocate the cached remote message queue.
503 */ 477 */
@@ -508,22 +482,16 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
508 int nentries; 482 int nentries;
509 size_t nbytes; 483 size_t nbytes;
510 484
511
512 DBUG_ON(ch->remote_nentries <= 0); 485 DBUG_ON(ch->remote_nentries <= 0);
513 486
514 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
515 // >>> iterations of the for-loop, bail if set?
516
517 // >>> should we impose a minimum #of entries? like 4 or 8?
518 for (nentries = ch->remote_nentries; nentries > 0; nentries--) { 487 for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
519 488
520 nbytes = nentries * ch->msg_size; 489 nbytes = nentries * ch->msg_size;
521 ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, 490 ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
522 GFP_KERNEL, 491 GFP_KERNEL,
523 &ch->remote_msgqueue_base); 492 &ch->remote_msgqueue_base);
524 if (ch->remote_msgqueue == NULL) { 493 if (ch->remote_msgqueue == NULL)
525 continue; 494 continue;
526 }
527 495
528 spin_lock_irqsave(&ch->lock, irq_flags); 496 spin_lock_irqsave(&ch->lock, irq_flags);
529 if (nentries < ch->remote_nentries) { 497 if (nentries < ch->remote_nentries) {
@@ -542,7 +510,6 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
542 return xpcNoMemory; 510 return xpcNoMemory;
543} 511}
544 512
545
546/* 513/*
547 * Allocate message queues and other stuff associated with a channel. 514 * Allocate message queues and other stuff associated with a channel.
548 * 515 *
@@ -554,14 +521,14 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
554 unsigned long irq_flags; 521 unsigned long irq_flags;
555 enum xpc_retval ret; 522 enum xpc_retval ret;
556 523
557
558 DBUG_ON(ch->flags & XPC_C_SETUP); 524 DBUG_ON(ch->flags & XPC_C_SETUP);
559 525
560 if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) { 526 ret = xpc_allocate_local_msgqueue(ch);
527 if (ret != xpcSuccess)
561 return ret; 528 return ret;
562 }
563 529
564 if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) { 530 ret = xpc_allocate_remote_msgqueue(ch);
531 if (ret != xpcSuccess) {
565 kfree(ch->local_msgqueue_base); 532 kfree(ch->local_msgqueue_base);
566 ch->local_msgqueue = NULL; 533 ch->local_msgqueue = NULL;
567 kfree(ch->notify_queue); 534 kfree(ch->notify_queue);
@@ -576,7 +543,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
576 return xpcSuccess; 543 return xpcSuccess;
577} 544}
578 545
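xpc_allocate_msgqueues() above now checks each return value on its own line, and a failure in the second allocation releases everything the first one set up. A compact sketch of that unwind shape, with illustrative names and plain calloc()/free():

#include <stdlib.h>

/* Allocate two queues; if the second fails, release the first so the caller
 * never sees a half-built pair, echoing the unwind in the hunk above. */
static int setup_two_queues(void **local_q, void **remote_q, size_t nbytes)
{
    *local_q = calloc(1, nbytes);
    if (*local_q == NULL)
        return -1;

    *remote_q = calloc(1, nbytes);
    if (*remote_q == NULL) {
        free(*local_q);             /* undo the first allocation */
        *local_q = NULL;
        return -1;
    }
    return 0;
}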
579
580/* 546/*
581 * Process a connect message from a remote partition. 547 * Process a connect message from a remote partition.
582 * 548 *
@@ -588,11 +554,10 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
588{ 554{
589 enum xpc_retval ret; 555 enum xpc_retval ret;
590 556
591
592 DBUG_ON(!spin_is_locked(&ch->lock)); 557 DBUG_ON(!spin_is_locked(&ch->lock));
593 558
594 if (!(ch->flags & XPC_C_OPENREQUEST) || 559 if (!(ch->flags & XPC_C_OPENREQUEST) ||
595 !(ch->flags & XPC_C_ROPENREQUEST)) { 560 !(ch->flags & XPC_C_ROPENREQUEST)) {
596 /* nothing more to do for now */ 561 /* nothing more to do for now */
597 return; 562 return;
598 } 563 }
@@ -603,12 +568,11 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
603 ret = xpc_allocate_msgqueues(ch); 568 ret = xpc_allocate_msgqueues(ch);
604 spin_lock_irqsave(&ch->lock, *irq_flags); 569 spin_lock_irqsave(&ch->lock, *irq_flags);
605 570
606 if (ret != xpcSuccess) { 571 if (ret != xpcSuccess)
607 XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); 572 XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
608 } 573
609 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) { 574 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
610 return; 575 return;
611 }
612 576
613 DBUG_ON(!(ch->flags & XPC_C_SETUP)); 577 DBUG_ON(!(ch->flags & XPC_C_SETUP));
614 DBUG_ON(ch->local_msgqueue == NULL); 578 DBUG_ON(ch->local_msgqueue == NULL);
@@ -620,23 +584,21 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
620 xpc_IPI_send_openreply(ch, irq_flags); 584 xpc_IPI_send_openreply(ch, irq_flags);
621 } 585 }
622 586
623 if (!(ch->flags & XPC_C_ROPENREPLY)) { 587 if (!(ch->flags & XPC_C_ROPENREPLY))
624 return; 588 return;
625 }
626 589
627 DBUG_ON(ch->remote_msgqueue_pa == 0); 590 DBUG_ON(ch->remote_msgqueue_pa == 0);
628 591
629 ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ 592 ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
630 593
631 dev_info(xpc_chan, "channel %d to partition %d connected\n", 594 dev_info(xpc_chan, "channel %d to partition %d connected\n",
632 ch->number, ch->partid); 595 ch->number, ch->partid);
633 596
634 spin_unlock_irqrestore(&ch->lock, *irq_flags); 597 spin_unlock_irqrestore(&ch->lock, *irq_flags);
635 xpc_create_kthreads(ch, 1, 0); 598 xpc_create_kthreads(ch, 1, 0);
636 spin_lock_irqsave(&ch->lock, *irq_flags); 599 spin_lock_irqsave(&ch->lock, *irq_flags);
637} 600}
638 601
639
640/* 602/*
641 * Notify those who wanted to be notified upon delivery of their message. 603 * Notify those who wanted to be notified upon delivery of their message.
642 */ 604 */
@@ -647,7 +609,6 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
647 u8 notify_type; 609 u8 notify_type;
648 s64 get = ch->w_remote_GP.get - 1; 610 s64 get = ch->w_remote_GP.get - 1;
649 611
650
651 while (++get < put && atomic_read(&ch->n_to_notify) > 0) { 612 while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
652 613
653 notify = &ch->notify_queue[get % ch->local_nentries]; 614 notify = &ch->notify_queue[get % ch->local_nentries];
@@ -660,8 +621,7 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
660 */ 621 */
661 notify_type = notify->type; 622 notify_type = notify->type;
662 if (notify_type == 0 || 623 if (notify_type == 0 ||
663 cmpxchg(&notify->type, notify_type, 0) != 624 cmpxchg(&notify->type, notify_type, 0) != notify_type) {
664 notify_type) {
665 continue; 625 continue;
666 } 626 }
667 627
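The cmpxchg() above lets exactly one path claim a notify entry: whoever swaps the non-zero type to 0 runs the callout, everyone else skips it. A stand-alone version of that claim-once step using C11 atomics, as a hypothetical helper rather than the driver's code:

#include <stdatomic.h>
#include <stdint.h>

/* Returns nonzero only for the single caller that clears 'type' itself;
 * a zero type, or losing the compare-exchange race, means skip the callout. */
static int claim_notify(_Atomic uint8_t *type)
{
    uint8_t expected = atomic_load(type);

    if (expected == 0)
        return 0;
    return atomic_compare_exchange_strong(type, &expected, 0);
}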
@@ -672,20 +632,19 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
672 if (notify->func != NULL) { 632 if (notify->func != NULL) {
673 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, " 633 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
674 "msg_number=%ld, partid=%d, channel=%d\n", 634 "msg_number=%ld, partid=%d, channel=%d\n",
675 (void *) notify, get, ch->partid, ch->number); 635 (void *)notify, get, ch->partid, ch->number);
676 636
677 notify->func(reason, ch->partid, ch->number, 637 notify->func(reason, ch->partid, ch->number,
678 notify->key); 638 notify->key);
679 639
680 dev_dbg(xpc_chan, "notify->func() returned, " 640 dev_dbg(xpc_chan, "notify->func() returned, "
681 "notify=0x%p, msg_number=%ld, partid=%d, " 641 "notify=0x%p, msg_number=%ld, partid=%d, "
682 "channel=%d\n", (void *) notify, get, 642 "channel=%d\n", (void *)notify, get,
683 ch->partid, ch->number); 643 ch->partid, ch->number);
684 } 644 }
685 } 645 }
686} 646}
687 647
688
689/* 648/*
690 * Free up message queues and other stuff that were allocated for the specified 649 * Free up message queues and other stuff that were allocated for the specified
691 * channel. 650 * channel.
@@ -733,7 +692,6 @@ xpc_free_msgqueues(struct xpc_channel *ch)
733 } 692 }
734} 693}
735 694
736
737/* 695/*
738 * spin_lock_irqsave() is expected to be held on entry. 696 * spin_lock_irqsave() is expected to be held on entry.
739 */ 697 */
@@ -743,46 +701,41 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
743 struct xpc_partition *part = &xpc_partitions[ch->partid]; 701 struct xpc_partition *part = &xpc_partitions[ch->partid];
744 u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED); 702 u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
745 703
746
747 DBUG_ON(!spin_is_locked(&ch->lock)); 704 DBUG_ON(!spin_is_locked(&ch->lock));
748 705
749 if (!(ch->flags & XPC_C_DISCONNECTING)) { 706 if (!(ch->flags & XPC_C_DISCONNECTING))
750 return; 707 return;
751 }
752 708
753 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); 709 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
754 710
755 /* make sure all activity has settled down first */ 711 /* make sure all activity has settled down first */
756 712
757 if (atomic_read(&ch->kthreads_assigned) > 0 || 713 if (atomic_read(&ch->kthreads_assigned) > 0 ||
758 atomic_read(&ch->references) > 0) { 714 atomic_read(&ch->references) > 0) {
759 return; 715 return;
760 } 716 }
761 DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 717 DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
762 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE)); 718 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
763 719
764 if (part->act_state == XPC_P_DEACTIVATING) { 720 if (part->act_state == XPC_P_DEACTIVATING) {
765 /* can't proceed until the other side disengages from us */ 721 /* can't proceed until the other side disengages from us */
766 if (xpc_partition_engaged(1UL << ch->partid)) { 722 if (xpc_partition_engaged(1UL << ch->partid))
767 return; 723 return;
768 }
769 724
770 } else { 725 } else {
771 726
772 /* as long as the other side is up do the full protocol */ 727 /* as long as the other side is up do the full protocol */
773 728
774 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { 729 if (!(ch->flags & XPC_C_RCLOSEREQUEST))
775 return; 730 return;
776 }
777 731
778 if (!(ch->flags & XPC_C_CLOSEREPLY)) { 732 if (!(ch->flags & XPC_C_CLOSEREPLY)) {
779 ch->flags |= XPC_C_CLOSEREPLY; 733 ch->flags |= XPC_C_CLOSEREPLY;
780 xpc_IPI_send_closereply(ch, irq_flags); 734 xpc_IPI_send_closereply(ch, irq_flags);
781 } 735 }
782 736
783 if (!(ch->flags & XPC_C_RCLOSEREPLY)) { 737 if (!(ch->flags & XPC_C_RCLOSEREPLY))
784 return; 738 return;
785 }
786 } 739 }
787 740
788 /* wake those waiting for notify completion */ 741 /* wake those waiting for notify completion */
@@ -809,7 +762,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
809 762
810 if (channel_was_connected) { 763 if (channel_was_connected) {
811 dev_info(xpc_chan, "channel %d to partition %d disconnected, " 764 dev_info(xpc_chan, "channel %d to partition %d disconnected, "
812 "reason=%d\n", ch->number, ch->partid, ch->reason); 765 "reason=%d\n", ch->number, ch->partid, ch->reason);
813 } 766 }
814 767
815 if (ch->flags & XPC_C_WDISCONNECT) { 768 if (ch->flags & XPC_C_WDISCONNECT) {
@@ -820,35 +773,32 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
820 /* time to take action on any delayed IPI flags */ 773 /* time to take action on any delayed IPI flags */
821 spin_lock(&part->IPI_lock); 774 spin_lock(&part->IPI_lock);
822 XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number, 775 XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
823 ch->delayed_IPI_flags); 776 ch->delayed_IPI_flags);
824 spin_unlock(&part->IPI_lock); 777 spin_unlock(&part->IPI_lock);
825 } 778 }
826 ch->delayed_IPI_flags = 0; 779 ch->delayed_IPI_flags = 0;
827 } 780 }
828} 781}
829 782
830
831/* 783/*
832 * Process a change in the channel's remote connection state. 784 * Process a change in the channel's remote connection state.
833 */ 785 */
834static void 786static void
835xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, 787xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
836 u8 IPI_flags) 788 u8 IPI_flags)
837{ 789{
838 unsigned long irq_flags; 790 unsigned long irq_flags;
839 struct xpc_openclose_args *args = 791 struct xpc_openclose_args *args =
840 &part->remote_openclose_args[ch_number]; 792 &part->remote_openclose_args[ch_number];
841 struct xpc_channel *ch = &part->channels[ch_number]; 793 struct xpc_channel *ch = &part->channels[ch_number];
842 enum xpc_retval reason; 794 enum xpc_retval reason;
843 795
844
845
846 spin_lock_irqsave(&ch->lock, irq_flags); 796 spin_lock_irqsave(&ch->lock, irq_flags);
847 797
848again: 798again:
849 799
850 if ((ch->flags & XPC_C_DISCONNECTED) && 800 if ((ch->flags & XPC_C_DISCONNECTED) &&
851 (ch->flags & XPC_C_WDISCONNECT)) { 801 (ch->flags & XPC_C_WDISCONNECT)) {
852 /* 802 /*
853 * Delay processing IPI flags until thread waiting disconnect 803 * Delay processing IPI flags until thread waiting disconnect
854 * has had a chance to see that the channel is disconnected. 804 * has had a chance to see that the channel is disconnected.
@@ -858,7 +808,6 @@ again:
858 return; 808 return;
859 } 809 }
860 810
861
862 if (IPI_flags & XPC_IPI_CLOSEREQUEST) { 811 if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
863 812
864 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received " 813 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
@@ -890,13 +839,14 @@ again:
890 if (ch->flags & XPC_C_DISCONNECTED) { 839 if (ch->flags & XPC_C_DISCONNECTED) {
891 if (!(IPI_flags & XPC_IPI_OPENREQUEST)) { 840 if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
892 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, 841 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
893 ch_number) & XPC_IPI_OPENREQUEST)) { 842 ch_number) &
843 XPC_IPI_OPENREQUEST)) {
894 844
895 DBUG_ON(ch->delayed_IPI_flags != 0); 845 DBUG_ON(ch->delayed_IPI_flags != 0);
896 spin_lock(&part->IPI_lock); 846 spin_lock(&part->IPI_lock);
897 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 847 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
898 ch_number, 848 ch_number,
899 XPC_IPI_CLOSEREQUEST); 849 XPC_IPI_CLOSEREQUEST);
900 spin_unlock(&part->IPI_lock); 850 spin_unlock(&part->IPI_lock);
901 } 851 }
902 spin_unlock_irqrestore(&ch->lock, irq_flags); 852 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -921,11 +871,10 @@ again:
921 871
922 if (!(ch->flags & XPC_C_DISCONNECTING)) { 872 if (!(ch->flags & XPC_C_DISCONNECTING)) {
923 reason = args->reason; 873 reason = args->reason;
924 if (reason <= xpcSuccess || reason > xpcUnknownReason) { 874 if (reason <= xpcSuccess || reason > xpcUnknownReason)
925 reason = xpcUnknownReason; 875 reason = xpcUnknownReason;
926 } else if (reason == xpcUnregistering) { 876 else if (reason == xpcUnregistering)
927 reason = xpcOtherUnregistering; 877 reason = xpcOtherUnregistering;
928 }
929 878
930 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); 879 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
931 880
@@ -937,7 +886,6 @@ again:
937 xpc_process_disconnect(ch, &irq_flags); 886 xpc_process_disconnect(ch, &irq_flags);
938 } 887 }
939 888
940
941 if (IPI_flags & XPC_IPI_CLOSEREPLY) { 889 if (IPI_flags & XPC_IPI_CLOSEREPLY) {
942 890
943 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d," 891 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
@@ -953,12 +901,13 @@ again:
953 901
954 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { 902 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
955 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number) 903 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
956 & XPC_IPI_CLOSEREQUEST)) { 904 & XPC_IPI_CLOSEREQUEST)) {
957 905
958 DBUG_ON(ch->delayed_IPI_flags != 0); 906 DBUG_ON(ch->delayed_IPI_flags != 0);
959 spin_lock(&part->IPI_lock); 907 spin_lock(&part->IPI_lock);
960 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 908 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
961 ch_number, XPC_IPI_CLOSEREPLY); 909 ch_number,
910 XPC_IPI_CLOSEREPLY);
962 spin_unlock(&part->IPI_lock); 911 spin_unlock(&part->IPI_lock);
963 } 912 }
964 spin_unlock_irqrestore(&ch->lock, irq_flags); 913 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -973,7 +922,6 @@ again:
973 } 922 }
974 } 923 }
975 924
976
977 if (IPI_flags & XPC_IPI_OPENREQUEST) { 925 if (IPI_flags & XPC_IPI_OPENREQUEST) {
978 926
979 dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, " 927 dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
@@ -982,7 +930,7 @@ again:
982 ch->partid, ch->number); 930 ch->partid, ch->number);
983 931
984 if (part->act_state == XPC_P_DEACTIVATING || 932 if (part->act_state == XPC_P_DEACTIVATING ||
985 (ch->flags & XPC_C_ROPENREQUEST)) { 933 (ch->flags & XPC_C_ROPENREQUEST)) {
986 spin_unlock_irqrestore(&ch->lock, irq_flags); 934 spin_unlock_irqrestore(&ch->lock, irq_flags);
987 return; 935 return;
988 } 936 }
@@ -993,9 +941,9 @@ again:
993 return; 941 return;
994 } 942 }
995 DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | 943 DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
996 XPC_C_OPENREQUEST))); 944 XPC_C_OPENREQUEST)));
997 DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | 945 DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
998 XPC_C_OPENREPLY | XPC_C_CONNECTED)); 946 XPC_C_OPENREPLY | XPC_C_CONNECTED));
999 947
1000 /* 948 /*
1001 * The meaningful OPENREQUEST connection state fields are: 949 * The meaningful OPENREQUEST connection state fields are:
@@ -1011,11 +959,10 @@ again:
1011 ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); 959 ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
1012 ch->remote_nentries = args->local_nentries; 960 ch->remote_nentries = args->local_nentries;
1013 961
1014
1015 if (ch->flags & XPC_C_OPENREQUEST) { 962 if (ch->flags & XPC_C_OPENREQUEST) {
1016 if (args->msg_size != ch->msg_size) { 963 if (args->msg_size != ch->msg_size) {
1017 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, 964 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
1018 &irq_flags); 965 &irq_flags);
1019 spin_unlock_irqrestore(&ch->lock, irq_flags); 966 spin_unlock_irqrestore(&ch->lock, irq_flags);
1020 return; 967 return;
1021 } 968 }
@@ -1031,7 +978,6 @@ again:
1031 xpc_process_connect(ch, &irq_flags); 978 xpc_process_connect(ch, &irq_flags);
1032 } 979 }
1033 980
1034
1035 if (IPI_flags & XPC_IPI_OPENREPLY) { 981 if (IPI_flags & XPC_IPI_OPENREPLY) {
1036 982
1037 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, " 983 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
@@ -1046,7 +992,7 @@ again:
1046 } 992 }
1047 if (!(ch->flags & XPC_C_OPENREQUEST)) { 993 if (!(ch->flags & XPC_C_OPENREQUEST)) {
1048 XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError, 994 XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
1049 &irq_flags); 995 &irq_flags);
1050 spin_unlock_irqrestore(&ch->lock, irq_flags); 996 spin_unlock_irqrestore(&ch->lock, irq_flags);
1051 return; 997 return;
1052 } 998 }
@@ -1057,7 +1003,7 @@ again:
1057 /* 1003 /*
1058 * The meaningful OPENREPLY connection state fields are: 1004 * The meaningful OPENREPLY connection state fields are:
1059 * local_msgqueue_pa = physical address of remote 1005 * local_msgqueue_pa = physical address of remote
1060 * partition's local_msgqueue 1006 * partition's local_msgqueue
1061 * local_nentries = remote partition's local_nentries 1007 * local_nentries = remote partition's local_nentries
1062 * remote_nentries = remote partition's remote_nentries 1008 * remote_nentries = remote partition's remote_nentries
1063 */ 1009 */
@@ -1093,7 +1039,6 @@ again:
1093 spin_unlock_irqrestore(&ch->lock, irq_flags); 1039 spin_unlock_irqrestore(&ch->lock, irq_flags);
1094} 1040}
1095 1041
1096
1097/* 1042/*
1098 * Attempt to establish a channel connection to a remote partition. 1043 * Attempt to establish a channel connection to a remote partition.
1099 */ 1044 */
@@ -1103,10 +1048,8 @@ xpc_connect_channel(struct xpc_channel *ch)
1103 unsigned long irq_flags; 1048 unsigned long irq_flags;
1104 struct xpc_registration *registration = &xpc_registrations[ch->number]; 1049 struct xpc_registration *registration = &xpc_registrations[ch->number];
1105 1050
1106 1051 if (mutex_trylock(&registration->mutex) == 0)
1107 if (mutex_trylock(&registration->mutex) == 0) {
1108 return xpcRetry; 1052 return xpcRetry;
1109 }
1110 1053
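Above, a failed mutex_trylock() simply reports xpcRetry instead of sleeping on the registration mutex. The same non-blocking shape with pthreads, noting that pthread_mutex_trylock() inverts the kernel convention and returns 0 on success; names are illustrative:

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t registration_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Take the mutex only if it is free; a busy mutex means "try again later",
 * the moral equivalent of returning xpcRetry above. */
static int try_connect(void)
{
    if (pthread_mutex_trylock(&registration_mutex) != 0)
        return EAGAIN;

    /* ... channel connection setup would happen here ... */

    pthread_mutex_unlock(&registration_mutex);
    return 0;
}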
1111 if (!XPC_CHANNEL_REGISTERED(ch->number)) { 1054 if (!XPC_CHANNEL_REGISTERED(ch->number)) {
1112 mutex_unlock(&registration->mutex); 1055 mutex_unlock(&registration->mutex);
@@ -1124,7 +1067,6 @@ xpc_connect_channel(struct xpc_channel *ch)
1124 return ch->reason; 1067 return ch->reason;
1125 } 1068 }
1126 1069
1127
1128 /* add info from the channel connect registration to the channel */ 1070 /* add info from the channel connect registration to the channel */
1129 1071
1130 ch->kthreads_assigned_limit = registration->assigned_limit; 1072 ch->kthreads_assigned_limit = registration->assigned_limit;
@@ -1154,7 +1096,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1154 */ 1096 */
1155 mutex_unlock(&registration->mutex); 1097 mutex_unlock(&registration->mutex);
1156 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, 1098 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
1157 &irq_flags); 1099 &irq_flags);
1158 spin_unlock_irqrestore(&ch->lock, irq_flags); 1100 spin_unlock_irqrestore(&ch->lock, irq_flags);
1159 return xpcUnequalMsgSizes; 1101 return xpcUnequalMsgSizes;
1160 } 1102 }
@@ -1169,7 +1111,6 @@ xpc_connect_channel(struct xpc_channel *ch)
1169 1111
1170 mutex_unlock(&registration->mutex); 1112 mutex_unlock(&registration->mutex);
1171 1113
1172
1173 /* initiate the connection */ 1114 /* initiate the connection */
1174 1115
1175 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); 1116 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
@@ -1182,7 +1123,6 @@ xpc_connect_channel(struct xpc_channel *ch)
1182 return xpcSuccess; 1123 return xpcSuccess;
1183} 1124}
1184 1125
1185
1186/* 1126/*
1187 * Clear some of the msg flags in the local message queue. 1127 * Clear some of the msg flags in the local message queue.
1188 */ 1128 */
@@ -1192,16 +1132,15 @@ xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
1192 struct xpc_msg *msg; 1132 struct xpc_msg *msg;
1193 s64 get; 1133 s64 get;
1194 1134
1195
1196 get = ch->w_remote_GP.get; 1135 get = ch->w_remote_GP.get;
1197 do { 1136 do {
1198 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + 1137 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1199 (get % ch->local_nentries) * ch->msg_size); 1138 (get % ch->local_nentries) *
1139 ch->msg_size);
1200 msg->flags = 0; 1140 msg->flags = 0;
1201 } while (++get < (volatile s64) ch->remote_GP.get); 1141 } while (++get < ch->remote_GP.get);
1202} 1142}
1203 1143
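The address arithmetic above maps an ever-growing GET value onto a fixed ring of message slots: base address plus (index modulo entries) times message size. As a one-function sketch with illustrative names:

#include <stdint.h>
#include <stddef.h>

/* Slot addressing used throughout the queues above: sequence numbers grow
 * without bound and only the modulo folds them back into the ring. */
static void *ring_slot(void *queue, int64_t seq, int nentries, size_t msg_size)
{
    return (uint8_t *)queue + (size_t)(seq % nentries) * msg_size;
}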
1204
1205/* 1144/*
1206 * Clear some of the msg flags in the remote message queue. 1145 * Clear some of the msg flags in the remote message queue.
1207 */ 1146 */
@@ -1211,43 +1150,39 @@ xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
1211 struct xpc_msg *msg; 1150 struct xpc_msg *msg;
1212 s64 put; 1151 s64 put;
1213 1152
1214
1215 put = ch->w_remote_GP.put; 1153 put = ch->w_remote_GP.put;
1216 do { 1154 do {
1217 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + 1155 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
1218 (put % ch->remote_nentries) * ch->msg_size); 1156 (put % ch->remote_nentries) *
1157 ch->msg_size);
1219 msg->flags = 0; 1158 msg->flags = 0;
1220 } while (++put < (volatile s64) ch->remote_GP.put); 1159 } while (++put < ch->remote_GP.put);
1221} 1160}
1222 1161
1223
1224static void 1162static void
1225xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) 1163xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1226{ 1164{
1227 struct xpc_channel *ch = &part->channels[ch_number]; 1165 struct xpc_channel *ch = &part->channels[ch_number];
1228 int nmsgs_sent; 1166 int nmsgs_sent;
1229 1167
1230
1231 ch->remote_GP = part->remote_GPs[ch_number]; 1168 ch->remote_GP = part->remote_GPs[ch_number];
1232 1169
1233
1234 /* See what, if anything, has changed for each connected channel */ 1170 /* See what, if anything, has changed for each connected channel */
1235 1171
1236 xpc_msgqueue_ref(ch); 1172 xpc_msgqueue_ref(ch);
1237 1173
1238 if (ch->w_remote_GP.get == ch->remote_GP.get && 1174 if (ch->w_remote_GP.get == ch->remote_GP.get &&
1239 ch->w_remote_GP.put == ch->remote_GP.put) { 1175 ch->w_remote_GP.put == ch->remote_GP.put) {
1240 /* nothing changed since GPs were last pulled */ 1176 /* nothing changed since GPs were last pulled */
1241 xpc_msgqueue_deref(ch); 1177 xpc_msgqueue_deref(ch);
1242 return; 1178 return;
1243 } 1179 }
1244 1180
1245 if (!(ch->flags & XPC_C_CONNECTED)){ 1181 if (!(ch->flags & XPC_C_CONNECTED)) {
1246 xpc_msgqueue_deref(ch); 1182 xpc_msgqueue_deref(ch);
1247 return; 1183 return;
1248 } 1184 }
1249 1185
1250
1251 /* 1186 /*
1252 * First check to see if messages recently sent by us have been 1187 * First check to see if messages recently sent by us have been
1253 * received by the other side. (The remote GET value will have 1188 * received by the other side. (The remote GET value will have
@@ -1269,7 +1204,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1269 * received and delivered by the other side. 1204 * received and delivered by the other side.
1270 */ 1205 */
1271 xpc_notify_senders(ch, xpcMsgDelivered, 1206 xpc_notify_senders(ch, xpcMsgDelivered,
1272 ch->remote_GP.get); 1207 ch->remote_GP.get);
1273 } 1208 }
1274 1209
1275 /* 1210 /*
@@ -1288,12 +1223,10 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1288 * If anyone was waiting for message queue entries to become 1223 * If anyone was waiting for message queue entries to become
1289 * available, wake them up. 1224 * available, wake them up.
1290 */ 1225 */
1291 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { 1226 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
1292 wake_up(&ch->msg_allocate_wq); 1227 wake_up(&ch->msg_allocate_wq);
1293 }
1294 } 1228 }
1295 1229
1296
1297 /* 1230 /*
1298 * Now check for newly sent messages by the other side. (The remote 1231 * Now check for newly sent messages by the other side. (The remote
1299 * PUT value will have changed since we last looked at it.) 1232 * PUT value will have changed since we last looked at it.)
@@ -1318,16 +1251,14 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1318 "delivered=%d, partid=%d, channel=%d\n", 1251 "delivered=%d, partid=%d, channel=%d\n",
1319 nmsgs_sent, ch->partid, ch->number); 1252 nmsgs_sent, ch->partid, ch->number);
1320 1253
1321 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { 1254 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
1322 xpc_activate_kthreads(ch, nmsgs_sent); 1255 xpc_activate_kthreads(ch, nmsgs_sent);
1323 }
1324 } 1256 }
1325 } 1257 }
1326 1258
1327 xpc_msgqueue_deref(ch); 1259 xpc_msgqueue_deref(ch);
1328} 1260}
1329 1261
1330
1331void 1262void
1332xpc_process_channel_activity(struct xpc_partition *part) 1263xpc_process_channel_activity(struct xpc_partition *part)
1333{ 1264{
@@ -1337,7 +1268,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
1337 int ch_number; 1268 int ch_number;
1338 u32 ch_flags; 1269 u32 ch_flags;
1339 1270
1340
1341 IPI_amo = xpc_get_IPI_flags(part); 1271 IPI_amo = xpc_get_IPI_flags(part);
1342 1272
1343 /* 1273 /*
@@ -1350,7 +1280,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
1350 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { 1280 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
1351 ch = &part->channels[ch_number]; 1281 ch = &part->channels[ch_number];
1352 1282
1353
1354 /* 1283 /*
1355 * Process any open or close related IPI flags, and then deal 1284 * Process any open or close related IPI flags, and then deal
1356 * with connecting or disconnecting the channel as required. 1285 * with connecting or disconnecting the channel as required.
@@ -1358,9 +1287,8 @@ xpc_process_channel_activity(struct xpc_partition *part)
1358 1287
1359 IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); 1288 IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
1360 1289
1361 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) { 1290 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags))
1362 xpc_process_openclose_IPI(part, ch_number, IPI_flags); 1291 xpc_process_openclose_IPI(part, ch_number, IPI_flags);
1363 }
1364 1292
1365 ch_flags = ch->flags; /* need an atomic snapshot of flags */ 1293 ch_flags = ch->flags; /* need an atomic snapshot of flags */
1366 1294
@@ -1371,14 +1299,13 @@ xpc_process_channel_activity(struct xpc_partition *part)
1371 continue; 1299 continue;
1372 } 1300 }
1373 1301
1374 if (part->act_state == XPC_P_DEACTIVATING) { 1302 if (part->act_state == XPC_P_DEACTIVATING)
1375 continue; 1303 continue;
1376 }
1377 1304
1378 if (!(ch_flags & XPC_C_CONNECTED)) { 1305 if (!(ch_flags & XPC_C_CONNECTED)) {
1379 if (!(ch_flags & XPC_C_OPENREQUEST)) { 1306 if (!(ch_flags & XPC_C_OPENREQUEST)) {
1380 DBUG_ON(ch_flags & XPC_C_SETUP); 1307 DBUG_ON(ch_flags & XPC_C_SETUP);
1381 (void) xpc_connect_channel(ch); 1308 (void)xpc_connect_channel(ch);
1382 } else { 1309 } else {
1383 spin_lock_irqsave(&ch->lock, irq_flags); 1310 spin_lock_irqsave(&ch->lock, irq_flags);
1384 xpc_process_connect(ch, &irq_flags); 1311 xpc_process_connect(ch, &irq_flags);
@@ -1387,20 +1314,17 @@ xpc_process_channel_activity(struct xpc_partition *part)
1387 continue; 1314 continue;
1388 } 1315 }
1389 1316
1390
1391 /* 1317 /*
1392 * Process any message related IPI flags, this may involve the 1318 * Process any message related IPI flags, this may involve the
1393 * activation of kthreads to deliver any pending messages sent 1319 * activation of kthreads to deliver any pending messages sent
1394 * from the other partition. 1320 * from the other partition.
1395 */ 1321 */
1396 1322
1397 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) { 1323 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags))
1398 xpc_process_msg_IPI(part, ch_number); 1324 xpc_process_msg_IPI(part, ch_number);
1399 }
1400 } 1325 }
1401} 1326}
1402 1327
1403
1404/* 1328/*
1405 * XPC's heartbeat code calls this function to inform XPC that a partition is 1329 * XPC's heartbeat code calls this function to inform XPC that a partition is
1406 * going down. XPC responds by tearing down the XPartition Communication 1330 * going down. XPC responds by tearing down the XPartition Communication
@@ -1417,7 +1341,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
1417 int ch_number; 1341 int ch_number;
1418 struct xpc_channel *ch; 1342 struct xpc_channel *ch;
1419 1343
1420
1421 dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n", 1344 dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
1422 XPC_PARTID(part), reason); 1345 XPC_PARTID(part), reason);
1423 1346
@@ -1426,7 +1349,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
1426 return; 1349 return;
1427 } 1350 }
1428 1351
1429
1430 /* disconnect channels associated with the partition going down */ 1352 /* disconnect channels associated with the partition going down */
1431 1353
1432 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { 1354 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
@@ -1446,7 +1368,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
1446 xpc_part_deref(part); 1368 xpc_part_deref(part);
1447} 1369}
1448 1370
1449
1450/* 1371/*
1451 * Teardown the infrastructure necessary to support XPartition Communication 1372 * Teardown the infrastructure necessary to support XPartition Communication
1452 * between the specified remote partition and the local one. 1373 * between the specified remote partition and the local one.
@@ -1456,7 +1377,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
1456{ 1377{
1457 partid_t partid = XPC_PARTID(part); 1378 partid_t partid = XPC_PARTID(part);
1458 1379
1459
1460 /* 1380 /*
1461 * We start off by making this partition inaccessible to local 1381 * We start off by making this partition inaccessible to local
1462 * processes by marking it as no longer setup. Then we make it 1382 * processes by marking it as no longer setup. Then we make it
@@ -1473,9 +1393,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
1473 1393
1474 xpc_vars_part[partid].magic = 0; 1394 xpc_vars_part[partid].magic = 0;
1475 1395
1476 1396 free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
1477 free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
1478
1479 1397
1480 /* 1398 /*
1481 * Before proceeding with the teardown we have to wait until all 1399 * Before proceeding with the teardown we have to wait until all
@@ -1483,7 +1401,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
1483 */ 1401 */
1484 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); 1402 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
1485 1403
1486
1487 /* now we can begin tearing down the infrastructure */ 1404 /* now we can begin tearing down the infrastructure */
1488 1405
1489 part->setup_state = XPC_P_TORNDOWN; 1406 part->setup_state = XPC_P_TORNDOWN;
@@ -1504,7 +1421,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
1504 part->local_IPI_amo_va = NULL; 1421 part->local_IPI_amo_va = NULL;
1505} 1422}
1506 1423
1507
1508/* 1424/*
1509 * Called by XP at the time of channel connection registration to cause 1425 * Called by XP at the time of channel connection registration to cause
1510 * XPC to establish connections to all currently active partitions. 1426 * XPC to establish connections to all currently active partitions.
@@ -1516,7 +1432,6 @@ xpc_initiate_connect(int ch_number)
1516 struct xpc_partition *part; 1432 struct xpc_partition *part;
1517 struct xpc_channel *ch; 1433 struct xpc_channel *ch;
1518 1434
1519
1520 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 1435 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
1521 1436
1522 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1437 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
@@ -1535,7 +1450,6 @@ xpc_initiate_connect(int ch_number)
1535 } 1450 }
1536} 1451}
1537 1452
1538
1539void 1453void
1540xpc_connected_callout(struct xpc_channel *ch) 1454xpc_connected_callout(struct xpc_channel *ch)
1541{ 1455{
@@ -1546,14 +1460,13 @@ xpc_connected_callout(struct xpc_channel *ch)
1546 "partid=%d, channel=%d\n", ch->partid, ch->number); 1460 "partid=%d, channel=%d\n", ch->partid, ch->number);
1547 1461
1548 ch->func(xpcConnected, ch->partid, ch->number, 1462 ch->func(xpcConnected, ch->partid, ch->number,
1549 (void *) (u64) ch->local_nentries, ch->key); 1463 (void *)(u64)ch->local_nentries, ch->key);
1550 1464
1551 dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, " 1465 dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
1552 "partid=%d, channel=%d\n", ch->partid, ch->number); 1466 "partid=%d, channel=%d\n", ch->partid, ch->number);
1553 } 1467 }
1554} 1468}
1555 1469
1556
1557/* 1470/*
1558 * Called by XP at the time of channel connection unregistration to cause 1471 * Called by XP at the time of channel connection unregistration to cause
1559 * XPC to teardown all current connections for the specified channel. 1472 * XPC to teardown all current connections for the specified channel.
@@ -1575,7 +1488,6 @@ xpc_initiate_disconnect(int ch_number)
1575 struct xpc_partition *part; 1488 struct xpc_partition *part;
1576 struct xpc_channel *ch; 1489 struct xpc_channel *ch;
1577 1490
1578
1579 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 1491 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
1580 1492
1581 /* initiate the channel disconnect for every active partition */ 1493 /* initiate the channel disconnect for every active partition */
@@ -1592,7 +1504,7 @@ xpc_initiate_disconnect(int ch_number)
1592 ch->flags |= XPC_C_WDISCONNECT; 1504 ch->flags |= XPC_C_WDISCONNECT;
1593 1505
1594 XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, 1506 XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
1595 &irq_flags); 1507 &irq_flags);
1596 } 1508 }
1597 1509
1598 spin_unlock_irqrestore(&ch->lock, irq_flags); 1510 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1605,7 +1517,6 @@ xpc_initiate_disconnect(int ch_number)
1605 xpc_disconnect_wait(ch_number); 1517 xpc_disconnect_wait(ch_number);
1606} 1518}
1607 1519
1608
1609/* 1520/*
1610 * To disconnect a channel, and reflect it back to all who may be waiting. 1521 * To disconnect a channel, and reflect it back to all who may be waiting.
1611 * 1522 *
@@ -1617,16 +1528,15 @@ xpc_initiate_disconnect(int ch_number)
1617 */ 1528 */
1618void 1529void
1619xpc_disconnect_channel(const int line, struct xpc_channel *ch, 1530xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1620 enum xpc_retval reason, unsigned long *irq_flags) 1531 enum xpc_retval reason, unsigned long *irq_flags)
1621{ 1532{
1622 u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); 1533 u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
1623 1534
1624
1625 DBUG_ON(!spin_is_locked(&ch->lock)); 1535 DBUG_ON(!spin_is_locked(&ch->lock));
1626 1536
1627 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { 1537 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
1628 return; 1538 return;
1629 } 1539
1630 DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); 1540 DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
1631 1541
1632 dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", 1542 dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
@@ -1637,14 +1547,13 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1637 ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); 1547 ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
1638 /* some of these may not have been set */ 1548 /* some of these may not have been set */
1639 ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | 1549 ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
1640 XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | 1550 XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
1641 XPC_C_CONNECTING | XPC_C_CONNECTED); 1551 XPC_C_CONNECTING | XPC_C_CONNECTED);
1642 1552
1643 xpc_IPI_send_closerequest(ch, irq_flags); 1553 xpc_IPI_send_closerequest(ch, irq_flags);
1644 1554
1645 if (channel_was_connected) { 1555 if (channel_was_connected)
1646 ch->flags |= XPC_C_WASCONNECTED; 1556 ch->flags |= XPC_C_WASCONNECTED;
1647 }
1648 1557
1649 spin_unlock_irqrestore(&ch->lock, *irq_flags); 1558 spin_unlock_irqrestore(&ch->lock, *irq_flags);
1650 1559
@@ -1653,20 +1562,18 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1653 wake_up_all(&ch->idle_wq); 1562 wake_up_all(&ch->idle_wq);
1654 1563
1655 } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 1564 } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
1656 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { 1565 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
1657 /* start a kthread that will do the xpcDisconnecting callout */ 1566 /* start a kthread that will do the xpcDisconnecting callout */
1658 xpc_create_kthreads(ch, 1, 1); 1567 xpc_create_kthreads(ch, 1, 1);
1659 } 1568 }
1660 1569
1661 /* wake those waiting to allocate an entry from the local msg queue */ 1570 /* wake those waiting to allocate an entry from the local msg queue */
1662 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { 1571 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
1663 wake_up(&ch->msg_allocate_wq); 1572 wake_up(&ch->msg_allocate_wq);
1664 }
1665 1573
1666 spin_lock_irqsave(&ch->lock, *irq_flags); 1574 spin_lock_irqsave(&ch->lock, *irq_flags);
1667} 1575}
1668 1576
1669
1670void 1577void
1671xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason) 1578xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
1672{ 1579{
@@ -1687,7 +1594,6 @@ xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
1687 } 1594 }
1688} 1595}
1689 1596
1690
1691/* 1597/*
1692 * Wait for a message entry to become available for the specified channel, 1598 * Wait for a message entry to become available for the specified channel,
1693 * but don't wait any longer than 1 jiffy. 1599 * but don't wait any longer than 1 jiffy.
@@ -1697,9 +1603,8 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
1697{ 1603{
1698 enum xpc_retval ret; 1604 enum xpc_retval ret;
1699 1605
1700
1701 if (ch->flags & XPC_C_DISCONNECTING) { 1606 if (ch->flags & XPC_C_DISCONNECTING) {
1702 DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? 1607 DBUG_ON(ch->reason == xpcInterrupted);
1703 return ch->reason; 1608 return ch->reason;
1704 } 1609 }
1705 1610
@@ -1709,7 +1614,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
1709 1614
1710 if (ch->flags & XPC_C_DISCONNECTING) { 1615 if (ch->flags & XPC_C_DISCONNECTING) {
1711 ret = ch->reason; 1616 ret = ch->reason;
1712 DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? 1617 DBUG_ON(ch->reason == xpcInterrupted);
1713 } else if (ret == 0) { 1618 } else if (ret == 0) {
1714 ret = xpcTimeout; 1619 ret = xpcTimeout;
1715 } else { 1620 } else {
@@ -1719,20 +1624,18 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
1719 return ret; 1624 return ret;
1720} 1625}
1721 1626
1722
1723/* 1627/*
1724 * Allocate an entry for a message from the message queue associated with the 1628 * Allocate an entry for a message from the message queue associated with the
1725 * specified channel. 1629 * specified channel.
1726 */ 1630 */
1727static enum xpc_retval 1631static enum xpc_retval
1728xpc_allocate_msg(struct xpc_channel *ch, u32 flags, 1632xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1729 struct xpc_msg **address_of_msg) 1633 struct xpc_msg **address_of_msg)
1730{ 1634{
1731 struct xpc_msg *msg; 1635 struct xpc_msg *msg;
1732 enum xpc_retval ret; 1636 enum xpc_retval ret;
1733 s64 put; 1637 s64 put;
1734 1638
1735
1736 /* this reference will be dropped in xpc_send_msg() */ 1639 /* this reference will be dropped in xpc_send_msg() */
1737 xpc_msgqueue_ref(ch); 1640 xpc_msgqueue_ref(ch);
1738 1641
@@ -1745,7 +1648,6 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1745 return xpcNotConnected; 1648 return xpcNotConnected;
1746 } 1649 }
1747 1650
1748
1749 /* 1651 /*
1750 * Get the next available message entry from the local message queue. 1652 * Get the next available message entry from the local message queue.
1751 * If none are available, we'll make sure that we grab the latest 1653 * If none are available, we'll make sure that we grab the latest
@@ -1755,25 +1657,23 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1755 1657
1756 while (1) { 1658 while (1) {
1757 1659
1758 put = (volatile s64) ch->w_local_GP.put; 1660 put = ch->w_local_GP.put;
1759 if (put - (volatile s64) ch->w_remote_GP.get < 1661 rmb(); /* guarantee that .put loads before .get */
1760 ch->local_nentries) { 1662 if (put - ch->w_remote_GP.get < ch->local_nentries) {
1761 1663
1762 /* There are available message entries. We need to try 1664 /* There are available message entries. We need to try
1763 * to secure one for ourselves. We'll do this by trying 1665 * to secure one for ourselves. We'll do this by trying
1764 * to increment w_local_GP.put as long as someone else 1666 * to increment w_local_GP.put as long as someone else
1765 * doesn't beat us to it. If they do, we'll have to 1667 * doesn't beat us to it. If they do, we'll have to
1766 * try again. 1668 * try again.
1767 */ 1669 */
1768 if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == 1670 if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) {
1769 put) {
1770 /* we got the entry referenced by put */ 1671 /* we got the entry referenced by put */
1771 break; 1672 break;
1772 } 1673 }
1773 continue; /* try again */ 1674 continue; /* try again */
1774 } 1675 }
1775 1676
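The rewritten loop above loads the cached PUT, issues rmb() so the GET read cannot be reordered ahead of it, and then advances PUT with cmpxchg() so concurrent senders never hand out the same slot. A userspace sketch of that reservation step using C11 atomics; the acquire fence plays the role of rmb(), and the names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Reserve one slot in a ring of 'nentries' messages.  On success '*slot'
 * holds the sequence number this caller now owns; on failure the caller
 * retries (ring full, or lost the compare-and-swap race). */
static bool reserve_slot(_Atomic int64_t *put, _Atomic int64_t *get,
                         int nentries, int64_t *slot)
{
    int64_t p = atomic_load_explicit(put, memory_order_relaxed);

    atomic_thread_fence(memory_order_acquire);      /* rmb() equivalent */
    if (p - atomic_load_explicit(get, memory_order_relaxed) >= nentries)
        return false;                               /* ring currently full */

    if (atomic_compare_exchange_strong(put, &p, p + 1)) {
        *slot = p;                                  /* we own slot 'p' */
        return true;
    }
    return false;                                   /* raced; caller retries */
}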
1776
1777 /* 1677 /*
1778 * There aren't any available msg entries at this time. 1678 * There aren't any available msg entries at this time.
1779 * 1679 *
@@ -1783,9 +1683,8 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1783 * that will cause the IPI handler to fetch the latest 1683 * that will cause the IPI handler to fetch the latest
1784 * GP values as if an IPI was sent by the other side. 1684 * GP values as if an IPI was sent by the other side.
1785 */ 1685 */
1786 if (ret == xpcTimeout) { 1686 if (ret == xpcTimeout)
1787 xpc_IPI_send_local_msgrequest(ch); 1687 xpc_IPI_send_local_msgrequest(ch);
1788 }
1789 1688
1790 if (flags & XPC_NOWAIT) { 1689 if (flags & XPC_NOWAIT) {
1791 xpc_msgqueue_deref(ch); 1690 xpc_msgqueue_deref(ch);
@@ -1799,25 +1698,22 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1799 } 1698 }
1800 } 1699 }
1801 1700
1802
1803 /* get the message's address and initialize it */ 1701 /* get the message's address and initialize it */
1804 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + 1702 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1805 (put % ch->local_nentries) * ch->msg_size); 1703 (put % ch->local_nentries) * ch->msg_size);
1806
1807 1704
1808 DBUG_ON(msg->flags != 0); 1705 DBUG_ON(msg->flags != 0);
1809 msg->number = put; 1706 msg->number = put;
1810 1707
1811 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " 1708 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
1812 "msg_number=%ld, partid=%d, channel=%d\n", put + 1, 1709 "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
1813 (void *) msg, msg->number, ch->partid, ch->number); 1710 (void *)msg, msg->number, ch->partid, ch->number);
1814 1711
1815 *address_of_msg = msg; 1712 *address_of_msg = msg;
1816 1713
1817 return xpcSuccess; 1714 return xpcSuccess;
1818} 1715}
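
The allocation path above is a lock-free reservation on a bounded ring: read the cached Put, order it before the cached Get with rmb(), check for a free entry, then claim the slot with cmpxchg() and retry on contention. A minimal userspace sketch of that reservation step, using C11 atomics in place of the kernel primitives (names and sizes are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdint.h>

struct ring {
	_Atomic int64_t put;	/* next entry the local side will allocate */
	_Atomic int64_t get;	/* last entry the remote side has consumed */
	int nentries;		/* capacity of the message queue */
};

/* Claim the next free slot; returns its index, or -1 if the ring is full. */
static int64_t ring_reserve(struct ring *r)
{
	for (;;) {
		int64_t put = atomic_load(&r->put);
		int64_t get = atomic_load(&r->get);

		if (put - get >= r->nentries)
			return -1;		/* no free entries right now */

		/* claim entry 'put' unless another producer beats us to it */
		if (atomic_compare_exchange_weak(&r->put, &put, put + 1))
			return put % r->nentries;

		/* lost the race; retry with refreshed values */
	}
}

The kernel version additionally falls back to xpc_allocate_msg_wait() when the ring is full, and on timeout nudges itself with a local msgrequest IPI so the cached GP values get refreshed.
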
1819 1716
1820
1821/* 1717/*
1822 * Allocate an entry for a message from the message queue associated with the 1718 * Allocate an entry for a message from the message queue associated with the
1823 * specified channel. NOTE that this routine can sleep waiting for a message 1719 * specified channel. NOTE that this routine can sleep waiting for a message
@@ -1838,7 +1734,6 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
1838 enum xpc_retval ret = xpcUnknownReason; 1734 enum xpc_retval ret = xpcUnknownReason;
1839 struct xpc_msg *msg = NULL; 1735 struct xpc_msg *msg = NULL;
1840 1736
1841
1842 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 1737 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
1843 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 1738 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1844 1739
@@ -1848,15 +1743,13 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
1848 ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); 1743 ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
1849 xpc_part_deref(part); 1744 xpc_part_deref(part);
1850 1745
1851 if (msg != NULL) { 1746 if (msg != NULL)
1852 *payload = &msg->payload; 1747 *payload = &msg->payload;
1853 }
1854 } 1748 }
1855 1749
1856 return ret; 1750 return ret;
1857} 1751}
1858 1752
1859
1860/* 1753/*
1861 * Now we actually send the messages that are ready to be sent by advancing 1754 * Now we actually send the messages that are ready to be sent by advancing
1862 * the local message queue's Put value and then send an IPI to the recipient 1755 * the local message queue's Put value and then send an IPI to the recipient
@@ -1869,20 +1762,18 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1869 s64 put = initial_put + 1; 1762 s64 put = initial_put + 1;
1870 int send_IPI = 0; 1763 int send_IPI = 0;
1871 1764
1872
1873 while (1) { 1765 while (1) {
1874 1766
1875 while (1) { 1767 while (1) {
1876 if (put == (volatile s64) ch->w_local_GP.put) { 1768 if (put == ch->w_local_GP.put)
1877 break; 1769 break;
1878 }
1879 1770
1880 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + 1771 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1881 (put % ch->local_nentries) * ch->msg_size); 1772 (put % ch->local_nentries) *
1773 ch->msg_size);
1882 1774
1883 if (!(msg->flags & XPC_M_READY)) { 1775 if (!(msg->flags & XPC_M_READY))
1884 break; 1776 break;
1885 }
1886 1777
1887 put++; 1778 put++;
1888 } 1779 }
@@ -1893,9 +1784,9 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1893 } 1784 }
1894 1785
1895 if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != 1786 if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
1896 initial_put) { 1787 initial_put) {
1897 /* someone else beat us to it */ 1788 /* someone else beat us to it */
1898 DBUG_ON((volatile s64) ch->local_GP->put < initial_put); 1789 DBUG_ON(ch->local_GP->put < initial_put);
1899 break; 1790 break;
1900 } 1791 }
1901 1792
@@ -1914,12 +1805,10 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1914 initial_put = put; 1805 initial_put = put;
1915 } 1806 }
1916 1807
1917 if (send_IPI) { 1808 if (send_IPI)
1918 xpc_IPI_send_msgrequest(ch); 1809 xpc_IPI_send_msgrequest(ch);
1919 }
1920} 1810}
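
The Put advance above is deliberately a release operation (cmpxchg_rel()), so the remote side can never observe the new Put before the XPC_M_READY messages it covers. A hedged sketch of that publish step in the same userspace style (ENTRY_READY, queue_flush_ready and the sizes are made-up names, not the driver's):

#include <stdatomic.h>
#include <stdint.h>

#define ENTRY_READY	0x1
#define NENTRIES	16

struct entry {
	_Atomic int flags;	/* ENTRY_READY once the payload is filled in */
	char payload[64];
};

struct queue {
	struct entry slots[NENTRIES];
	_Atomic int64_t put;	/* index shared with the consumer */
};

/* Advance the shared Put past every consecutive slot already marked READY. */
static void queue_flush_ready(struct queue *q, int64_t initial_put,
			      int64_t claimed_put)
{
	int64_t put = initial_put;

	while (put != claimed_put &&
	       (atomic_load_explicit(&q->slots[put % NENTRIES].flags,
				     memory_order_relaxed) & ENTRY_READY))
		put++;

	if (put == initial_put)
		return;		/* nothing new to publish */

	/*
	 * Release: slot contents become visible before the new index does.
	 * If the compare-and-swap fails, another producer already advanced
	 * the shared Put and will publish our slots for us.
	 */
	atomic_compare_exchange_strong_explicit(&q->put, &initial_put, put,
						memory_order_release,
						memory_order_relaxed);
}
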
1921 1811
1922
1923/* 1812/*
1924 * Common code that does the actual sending of the message by advancing the 1813 * Common code that does the actual sending of the message by advancing the
1925 * local message queue's Put value and sends an IPI to the partition the 1814 * local message queue's Put value and sends an IPI to the partition the
@@ -1927,16 +1816,15 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1927 */ 1816 */
1928static enum xpc_retval 1817static enum xpc_retval
1929xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, 1818xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1930 xpc_notify_func func, void *key) 1819 xpc_notify_func func, void *key)
1931{ 1820{
1932 enum xpc_retval ret = xpcSuccess; 1821 enum xpc_retval ret = xpcSuccess;
1933 struct xpc_notify *notify = notify; 1822 struct xpc_notify *notify = notify;
1934 s64 put, msg_number = msg->number; 1823 s64 put, msg_number = msg->number;
1935 1824
1936
1937 DBUG_ON(notify_type == XPC_N_CALL && func == NULL); 1825 DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
1938 DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) != 1826 DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) !=
1939 msg_number % ch->local_nentries); 1827 msg_number % ch->local_nentries);
1940 DBUG_ON(msg->flags & XPC_M_READY); 1828 DBUG_ON(msg->flags & XPC_M_READY);
1941 1829
1942 if (ch->flags & XPC_C_DISCONNECTING) { 1830 if (ch->flags & XPC_C_DISCONNECTING) {
@@ -1959,7 +1847,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1959 notify->key = key; 1847 notify->key = key;
1960 notify->type = notify_type; 1848 notify->type = notify_type;
1961 1849
1962 // >>> is a mb() needed here? 1850 /* >>> is a mb() needed here? */
1963 1851
1964 if (ch->flags & XPC_C_DISCONNECTING) { 1852 if (ch->flags & XPC_C_DISCONNECTING) {
1965 /* 1853 /*
@@ -1970,7 +1858,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1970 * the notify entry. 1858 * the notify entry.
1971 */ 1859 */
1972 if (cmpxchg(&notify->type, notify_type, 0) == 1860 if (cmpxchg(&notify->type, notify_type, 0) ==
1973 notify_type) { 1861 notify_type) {
1974 atomic_dec(&ch->n_to_notify); 1862 atomic_dec(&ch->n_to_notify);
1975 ret = ch->reason; 1863 ret = ch->reason;
1976 } 1864 }
@@ -1992,16 +1880,14 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1992 /* see if the message is next in line to be sent, if so send it */ 1880 /* see if the message is next in line to be sent, if so send it */
1993 1881
1994 put = ch->local_GP->put; 1882 put = ch->local_GP->put;
1995 if (put == msg_number) { 1883 if (put == msg_number)
1996 xpc_send_msgs(ch, put); 1884 xpc_send_msgs(ch, put);
1997 }
1998 1885
1999 /* drop the reference grabbed in xpc_allocate_msg() */ 1886 /* drop the reference grabbed in xpc_allocate_msg() */
2000 xpc_msgqueue_deref(ch); 1887 xpc_msgqueue_deref(ch);
2001 return ret; 1888 return ret;
2002} 1889}
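
The cmpxchg() on notify->type above settles a race between the send path and a concurrent disconnect: whichever side manages to clear the type field first owns the pending notification, so the caller's callback is invoked (or cancelled) exactly once. A small sketch of that claim-to-cancel idiom (struct pending and its fields are illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct pending {
	_Atomic int type;		/* nonzero while a callback is still owed */
	void (*func)(void *key);	/* what to call when the message completes */
	void *key;
};

/*
 * Called by both the completion path and the teardown path; only the
 * caller that wins the compare-and-swap may act on the notification.
 * Here the winner simply runs the callback; the driver's disconnect
 * path instead drops its bookkeeping and reports ch->reason.
 */
static int pending_claim(struct pending *p, int expected_type)
{
	if (!atomic_compare_exchange_strong(&p->type, &expected_type, 0))
		return 0;	/* the other path already consumed it */

	if (p->func)
		p->func(p->key);
	return 1;
}
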
2003 1890
2004
2005/* 1891/*
2006 * Send a message previously allocated using xpc_initiate_allocate() on the 1892 * Send a message previously allocated using xpc_initiate_allocate() on the
2007 * specified channel connected to the specified partition. 1893 * specified channel connected to the specified partition.
@@ -2029,8 +1915,7 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
2029 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 1915 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2030 enum xpc_retval ret; 1916 enum xpc_retval ret;
2031 1917
2032 1918 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
2033 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
2034 partid, ch_number); 1919 partid, ch_number);
2035 1920
2036 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 1921 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -2042,7 +1927,6 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
2042 return ret; 1927 return ret;
2043} 1928}
2044 1929
2045
2046/* 1930/*
2047 * Send a message previously allocated using xpc_initiate_allocate on the 1931 * Send a message previously allocated using xpc_initiate_allocate on the
2048 * specified channel connected to the specified partition. 1932 * specified channel connected to the specified partition.
@@ -2075,14 +1959,13 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
2075 */ 1959 */
2076enum xpc_retval 1960enum xpc_retval
2077xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, 1961xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
2078 xpc_notify_func func, void *key) 1962 xpc_notify_func func, void *key)
2079{ 1963{
2080 struct xpc_partition *part = &xpc_partitions[partid]; 1964 struct xpc_partition *part = &xpc_partitions[partid];
2081 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 1965 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2082 enum xpc_retval ret; 1966 enum xpc_retval ret;
2083 1967
2084 1968 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
2085 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
2086 partid, ch_number); 1969 partid, ch_number);
2087 1970
2088 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 1971 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -2091,11 +1974,10 @@ xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
2091 DBUG_ON(func == NULL); 1974 DBUG_ON(func == NULL);
2092 1975
2093 ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, 1976 ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
2094 func, key); 1977 func, key);
2095 return ret; 1978 return ret;
2096} 1979}
2097 1980
2098
2099static struct xpc_msg * 1981static struct xpc_msg *
2100xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) 1982xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2101{ 1983{
@@ -2105,7 +1987,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2105 u64 msg_offset; 1987 u64 msg_offset;
2106 enum xpc_retval ret; 1988 enum xpc_retval ret;
2107 1989
2108
2109 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { 1990 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
2110 /* we were interrupted by a signal */ 1991 /* we were interrupted by a signal */
2111 return NULL; 1992 return NULL;
@@ -2117,23 +1998,21 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2117 1998
2118 msg_index = ch->next_msg_to_pull % ch->remote_nentries; 1999 msg_index = ch->next_msg_to_pull % ch->remote_nentries;
2119 2000
2120 DBUG_ON(ch->next_msg_to_pull >= 2001 DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put);
2121 (volatile s64) ch->w_remote_GP.put); 2002 nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;
2122 nmsgs = (volatile s64) ch->w_remote_GP.put -
2123 ch->next_msg_to_pull;
2124 if (msg_index + nmsgs > ch->remote_nentries) { 2003 if (msg_index + nmsgs > ch->remote_nentries) {
2125 /* ignore the ones that wrap the msg queue for now */ 2004 /* ignore the ones that wrap the msg queue for now */
2126 nmsgs = ch->remote_nentries - msg_index; 2005 nmsgs = ch->remote_nentries - msg_index;
2127 } 2006 }
2128 2007
2129 msg_offset = msg_index * ch->msg_size; 2008 msg_offset = msg_index * ch->msg_size;
2130 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + 2009 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
2131 msg_offset); 2010 remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
2132 remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa + 2011 msg_offset);
2133 msg_offset);
2134 2012
2135 if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg, 2013 ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
2136 nmsgs * ch->msg_size)) != xpcSuccess) { 2014 nmsgs * ch->msg_size);
2015 if (ret != xpcSuccess) {
2137 2016
2138 dev_dbg(xpc_chan, "failed to pull %d msgs starting with" 2017 dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
2139 " msg %ld from partition %d, channel=%d, " 2018 " msg %ld from partition %d, channel=%d, "
@@ -2146,8 +2025,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2146 return NULL; 2025 return NULL;
2147 } 2026 }
2148 2027
2149 mb(); /* >>> this may not be needed, we're not sure */
2150
2151 ch->next_msg_to_pull += nmsgs; 2028 ch->next_msg_to_pull += nmsgs;
2152 } 2029 }
2153 2030
@@ -2155,12 +2032,11 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2155 2032
2156 /* return the message we were looking for */ 2033 /* return the message we were looking for */
2157 msg_offset = (get % ch->remote_nentries) * ch->msg_size; 2034 msg_offset = (get % ch->remote_nentries) * ch->msg_size;
2158 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset); 2035 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
2159 2036
2160 return msg; 2037 return msg;
2161} 2038}
2162 2039
2163
2164/* 2040/*
2165 * Get a message to be delivered. 2041 * Get a message to be delivered.
2166 */ 2042 */
@@ -2170,23 +2046,21 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
2170 struct xpc_msg *msg = NULL; 2046 struct xpc_msg *msg = NULL;
2171 s64 get; 2047 s64 get;
2172 2048
2173
2174 do { 2049 do {
2175 if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) { 2050 if (ch->flags & XPC_C_DISCONNECTING)
2176 break; 2051 break;
2177 }
2178 2052
2179 get = (volatile s64) ch->w_local_GP.get; 2053 get = ch->w_local_GP.get;
2180 if (get == (volatile s64) ch->w_remote_GP.put) { 2054 rmb(); /* guarantee that .get loads before .put */
2055 if (get == ch->w_remote_GP.put)
2181 break; 2056 break;
2182 }
2183 2057
2184 /* There are messages waiting to be pulled and delivered. 2058 /* There are messages waiting to be pulled and delivered.
2185 * We need to try to secure one for ourselves. We'll do this 2059 * We need to try to secure one for ourselves. We'll do this
2186 * by trying to increment w_local_GP.get and hope that no one 2060 * else beats us to it. If they do, we'll simply have
2187 * else beats us to it. If they do, we'll simply have 2061 * to try again for the next one.
2188 * to try again for the next one. 2062 * to try again for the next one.
2189 */ 2063 */
2190 2064
2191 if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { 2065 if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
2192 /* we got the entry referenced by get */ 2066 /* we got the entry referenced by get */
@@ -2211,7 +2085,6 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
2211 return msg; 2085 return msg;
2212} 2086}
2213 2087
2214
2215/* 2088/*
2216 * Deliver a message to its intended recipient. 2089 * Deliver a message to its intended recipient.
2217 */ 2090 */
@@ -2220,8 +2093,8 @@ xpc_deliver_msg(struct xpc_channel *ch)
2220{ 2093{
2221 struct xpc_msg *msg; 2094 struct xpc_msg *msg;
2222 2095
2223 2096 msg = xpc_get_deliverable_msg(ch);
2224 if ((msg = xpc_get_deliverable_msg(ch)) != NULL) { 2097 if (msg != NULL) {
2225 2098
2226 /* 2099 /*
2227 * This ref is taken to protect the payload itself from being 2100 * This ref is taken to protect the payload itself from being
@@ -2235,16 +2108,16 @@ xpc_deliver_msg(struct xpc_channel *ch)
2235 if (ch->func != NULL) { 2108 if (ch->func != NULL) {
2236 dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " 2109 dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
2237 "msg_number=%ld, partid=%d, channel=%d\n", 2110 "msg_number=%ld, partid=%d, channel=%d\n",
2238 (void *) msg, msg->number, ch->partid, 2111 (void *)msg, msg->number, ch->partid,
2239 ch->number); 2112 ch->number);
2240 2113
2241 /* deliver the message to its intended recipient */ 2114 /* deliver the message to its intended recipient */
2242 ch->func(xpcMsgReceived, ch->partid, ch->number, 2115 ch->func(xpcMsgReceived, ch->partid, ch->number,
2243 &msg->payload, ch->key); 2116 &msg->payload, ch->key);
2244 2117
2245 dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " 2118 dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
2246 "msg_number=%ld, partid=%d, channel=%d\n", 2119 "msg_number=%ld, partid=%d, channel=%d\n",
2247 (void *) msg, msg->number, ch->partid, 2120 (void *)msg, msg->number, ch->partid,
2248 ch->number); 2121 ch->number);
2249 } 2122 }
2250 2123
@@ -2252,7 +2125,6 @@ xpc_deliver_msg(struct xpc_channel *ch)
2252 } 2125 }
2253} 2126}
2254 2127
2255
2256/* 2128/*
2257 * Now we actually acknowledge the messages that have been delivered and ack'd 2129 * Now we actually acknowledge the messages that have been delivered and ack'd
2258 * by advancing the cached remote message queue's Get value and if requested 2130 * by advancing the cached remote message queue's Get value and if requested
@@ -2265,20 +2137,18 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2265 s64 get = initial_get + 1; 2137 s64 get = initial_get + 1;
2266 int send_IPI = 0; 2138 int send_IPI = 0;
2267 2139
2268
2269 while (1) { 2140 while (1) {
2270 2141
2271 while (1) { 2142 while (1) {
2272 if (get == (volatile s64) ch->w_local_GP.get) { 2143 if (get == ch->w_local_GP.get)
2273 break; 2144 break;
2274 }
2275 2145
2276 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + 2146 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
2277 (get % ch->remote_nentries) * ch->msg_size); 2147 (get % ch->remote_nentries) *
2148 ch->msg_size);
2278 2149
2279 if (!(msg->flags & XPC_M_DONE)) { 2150 if (!(msg->flags & XPC_M_DONE))
2280 break; 2151 break;
2281 }
2282 2152
2283 msg_flags |= msg->flags; 2153 msg_flags |= msg->flags;
2284 get++; 2154 get++;
@@ -2290,10 +2160,9 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2290 } 2160 }
2291 2161
2292 if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != 2162 if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
2293 initial_get) { 2163 initial_get) {
2294 /* someone else beat us to it */ 2164 /* someone else beat us to it */
2295 DBUG_ON((volatile s64) ch->local_GP->get <= 2165 DBUG_ON(ch->local_GP->get <= initial_get);
2296 initial_get);
2297 break; 2166 break;
2298 } 2167 }
2299 2168
@@ -2312,12 +2181,10 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2312 initial_get = get; 2181 initial_get = get;
2313 } 2182 }
2314 2183
2315 if (send_IPI) { 2184 if (send_IPI)
2316 xpc_IPI_send_msgrequest(ch); 2185 xpc_IPI_send_msgrequest(ch);
2317 }
2318} 2186}
2319 2187
2320
2321/* 2188/*
2322 * Acknowledge receipt of a delivered message. 2189 * Acknowledge receipt of a delivered message.
2323 * 2190 *
@@ -2343,17 +2210,16 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
2343 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 2210 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2344 s64 get, msg_number = msg->number; 2211 s64 get, msg_number = msg->number;
2345 2212
2346
2347 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 2213 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
2348 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 2214 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
2349 2215
2350 ch = &part->channels[ch_number]; 2216 ch = &part->channels[ch_number];
2351 2217
2352 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", 2218 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
2353 (void *) msg, msg_number, ch->partid, ch->number); 2219 (void *)msg, msg_number, ch->partid, ch->number);
2354 2220
2355 DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) != 2221 DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
2356 msg_number % ch->remote_nentries); 2222 msg_number % ch->remote_nentries);
2357 DBUG_ON(msg->flags & XPC_M_DONE); 2223 DBUG_ON(msg->flags & XPC_M_DONE);
2358 2224
2359 msg->flags |= XPC_M_DONE; 2225 msg->flags |= XPC_M_DONE;
@@ -2369,11 +2235,9 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
2369 * been delivered. 2235 * been delivered.
2370 */ 2236 */
2371 get = ch->local_GP->get; 2237 get = ch->local_GP->get;
2372 if (get == msg_number) { 2238 if (get == msg_number)
2373 xpc_acknowledge_msgs(ch, get, msg->flags); 2239 xpc_acknowledge_msgs(ch, get, msg->flags);
2374 }
2375 2240
2376 /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ 2241 /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
2377 xpc_msgqueue_deref(ch); 2242 xpc_msgqueue_deref(ch);
2378} 2243}
2379
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 9e0b164da9c2..f673ba90eb0e 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -3,10 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Communication (XPC) support - standard version. 10 * Cross Partition Communication (XPC) support - standard version.
12 * 11 *
@@ -44,23 +43,20 @@
44 * 43 *
45 */ 44 */
46 45
47
48#include <linux/kernel.h> 46#include <linux/kernel.h>
49#include <linux/module.h> 47#include <linux/module.h>
50#include <linux/init.h> 48#include <linux/init.h>
51#include <linux/sched.h>
52#include <linux/syscalls.h>
53#include <linux/cache.h> 49#include <linux/cache.h>
54#include <linux/interrupt.h> 50#include <linux/interrupt.h>
55#include <linux/delay.h> 51#include <linux/delay.h>
56#include <linux/reboot.h> 52#include <linux/reboot.h>
57#include <linux/completion.h> 53#include <linux/completion.h>
58#include <linux/kdebug.h> 54#include <linux/kdebug.h>
55#include <linux/kthread.h>
56#include <linux/uaccess.h>
59#include <asm/sn/intr.h> 57#include <asm/sn/intr.h>
60#include <asm/sn/sn_sal.h> 58#include <asm/sn/sn_sal.h>
61#include <asm/uaccess.h> 59#include "xpc.h"
62#include <asm/sn/xpc.h>
63
64 60
65/* define two XPC debug device structures to be used with dev_dbg() et al */ 61/* define two XPC debug device structures to be used with dev_dbg() et al */
66 62
@@ -81,10 +77,8 @@ struct device xpc_chan_dbg_subname = {
81struct device *xpc_part = &xpc_part_dbg_subname; 77struct device *xpc_part = &xpc_part_dbg_subname;
82struct device *xpc_chan = &xpc_chan_dbg_subname; 78struct device *xpc_chan = &xpc_chan_dbg_subname;
83 79
84
85static int xpc_kdebug_ignore; 80static int xpc_kdebug_ignore;
86 81
87
88/* systune related variables for /proc/sys directories */ 82/* systune related variables for /proc/sys directories */
89 83
90static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; 84static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
@@ -96,61 +90,56 @@ static int xpc_hb_check_min_interval = 10;
96static int xpc_hb_check_max_interval = 120; 90static int xpc_hb_check_max_interval = 120;
97 91
98int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT; 92int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
99static int xpc_disengage_request_min_timelimit = 0; 93static int xpc_disengage_request_min_timelimit; /* = 0 */
100static int xpc_disengage_request_max_timelimit = 120; 94static int xpc_disengage_request_max_timelimit = 120;
101 95
102static ctl_table xpc_sys_xpc_hb_dir[] = { 96static ctl_table xpc_sys_xpc_hb_dir[] = {
103 { 97 {
104 .ctl_name = CTL_UNNUMBERED, 98 .ctl_name = CTL_UNNUMBERED,
105 .procname = "hb_interval", 99 .procname = "hb_interval",
106 .data = &xpc_hb_interval, 100 .data = &xpc_hb_interval,
107 .maxlen = sizeof(int), 101 .maxlen = sizeof(int),
108 .mode = 0644, 102 .mode = 0644,
109 .proc_handler = &proc_dointvec_minmax, 103 .proc_handler = &proc_dointvec_minmax,
110 .strategy = &sysctl_intvec, 104 .strategy = &sysctl_intvec,
111 .extra1 = &xpc_hb_min_interval, 105 .extra1 = &xpc_hb_min_interval,
112 .extra2 = &xpc_hb_max_interval 106 .extra2 = &xpc_hb_max_interval},
113 },
114 { 107 {
115 .ctl_name = CTL_UNNUMBERED, 108 .ctl_name = CTL_UNNUMBERED,
116 .procname = "hb_check_interval", 109 .procname = "hb_check_interval",
117 .data = &xpc_hb_check_interval, 110 .data = &xpc_hb_check_interval,
118 .maxlen = sizeof(int), 111 .maxlen = sizeof(int),
119 .mode = 0644, 112 .mode = 0644,
120 .proc_handler = &proc_dointvec_minmax, 113 .proc_handler = &proc_dointvec_minmax,
121 .strategy = &sysctl_intvec, 114 .strategy = &sysctl_intvec,
122 .extra1 = &xpc_hb_check_min_interval, 115 .extra1 = &xpc_hb_check_min_interval,
123 .extra2 = &xpc_hb_check_max_interval 116 .extra2 = &xpc_hb_check_max_interval},
124 },
125 {} 117 {}
126}; 118};
127static ctl_table xpc_sys_xpc_dir[] = { 119static ctl_table xpc_sys_xpc_dir[] = {
128 { 120 {
129 .ctl_name = CTL_UNNUMBERED, 121 .ctl_name = CTL_UNNUMBERED,
130 .procname = "hb", 122 .procname = "hb",
131 .mode = 0555, 123 .mode = 0555,
132 .child = xpc_sys_xpc_hb_dir 124 .child = xpc_sys_xpc_hb_dir},
133 },
134 { 125 {
135 .ctl_name = CTL_UNNUMBERED, 126 .ctl_name = CTL_UNNUMBERED,
136 .procname = "disengage_request_timelimit", 127 .procname = "disengage_request_timelimit",
137 .data = &xpc_disengage_request_timelimit, 128 .data = &xpc_disengage_request_timelimit,
138 .maxlen = sizeof(int), 129 .maxlen = sizeof(int),
139 .mode = 0644, 130 .mode = 0644,
140 .proc_handler = &proc_dointvec_minmax, 131 .proc_handler = &proc_dointvec_minmax,
141 .strategy = &sysctl_intvec, 132 .strategy = &sysctl_intvec,
142 .extra1 = &xpc_disengage_request_min_timelimit, 133 .extra1 = &xpc_disengage_request_min_timelimit,
143 .extra2 = &xpc_disengage_request_max_timelimit 134 .extra2 = &xpc_disengage_request_max_timelimit},
144 },
145 {} 135 {}
146}; 136};
147static ctl_table xpc_sys_dir[] = { 137static ctl_table xpc_sys_dir[] = {
148 { 138 {
149 .ctl_name = CTL_UNNUMBERED, 139 .ctl_name = CTL_UNNUMBERED,
150 .procname = "xpc", 140 .procname = "xpc",
151 .mode = 0555, 141 .mode = 0555,
152 .child = xpc_sys_xpc_dir 142 .child = xpc_sys_xpc_dir},
153 },
154 {} 143 {}
155}; 144};
156static struct ctl_table_header *xpc_sysctl; 145static struct ctl_table_header *xpc_sysctl;
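
These ctl_table arrays only describe the /proc/sys/xpc hierarchy; they take effect once handed to the sysctl core, and the driver keeps its handle in the xpc_sysctl pointer declared above. A sketch of the registration/unregistration pairing as it looked in kernels of this vintage (the wrapper names are made up, not the driver's):

#include <linux/sysctl.h>
#include <linux/errno.h>

/* register the tree rooted at xpc_sys_dir; entries appear under /proc/sys/xpc */
static int xpc_register_sysctl(void)
{
	xpc_sysctl = register_sysctl_table(xpc_sys_dir);
	return xpc_sysctl ? 0 : -ENOMEM;
}

static void xpc_unregister_sysctl(void)
{
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);
}
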
@@ -172,13 +161,10 @@ static DECLARE_COMPLETION(xpc_hb_checker_exited);
172/* notification that the xpc_discovery thread has exited */ 161/* notification that the xpc_discovery thread has exited */
173static DECLARE_COMPLETION(xpc_discovery_exited); 162static DECLARE_COMPLETION(xpc_discovery_exited);
174 163
175
176static struct timer_list xpc_hb_timer; 164static struct timer_list xpc_hb_timer;
177 165
178
179static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); 166static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
180 167
181
182static int xpc_system_reboot(struct notifier_block *, unsigned long, void *); 168static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
183static struct notifier_block xpc_reboot_notifier = { 169static struct notifier_block xpc_reboot_notifier = {
184 .notifier_call = xpc_system_reboot, 170 .notifier_call = xpc_system_reboot,
@@ -189,25 +175,22 @@ static struct notifier_block xpc_die_notifier = {
189 .notifier_call = xpc_system_die, 175 .notifier_call = xpc_system_die,
190}; 176};
191 177
192
193/* 178/*
194 * Timer function to enforce the timelimit on the partition disengage request. 179 * Timer function to enforce the timelimit on the partition disengage request.
195 */ 180 */
196static void 181static void
197xpc_timeout_partition_disengage_request(unsigned long data) 182xpc_timeout_partition_disengage_request(unsigned long data)
198{ 183{
199 struct xpc_partition *part = (struct xpc_partition *) data; 184 struct xpc_partition *part = (struct xpc_partition *)data;
200
201 185
202 DBUG_ON(time_before(jiffies, part->disengage_request_timeout)); 186 DBUG_ON(time_before(jiffies, part->disengage_request_timeout));
203 187
204 (void) xpc_partition_disengaged(part); 188 (void)xpc_partition_disengaged(part);
205 189
206 DBUG_ON(part->disengage_request_timeout != 0); 190 DBUG_ON(part->disengage_request_timeout != 0);
207 DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0); 191 DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
208} 192}
209 193
210
211/* 194/*
212 * Notify the heartbeat check thread that an IRQ has been received. 195 * Notify the heartbeat check thread that an IRQ has been received.
213 */ 196 */
@@ -219,7 +202,6 @@ xpc_act_IRQ_handler(int irq, void *dev_id)
219 return IRQ_HANDLED; 202 return IRQ_HANDLED;
220} 203}
221 204
222
223/* 205/*
224 * Timer to produce the heartbeat. The timer structure's function is 206 * already set when this is initially called. A tunable is used to
225 * already set when this is initially called. A tunable is used to 207 * already set when this is initially called. A tunable is used to
@@ -230,15 +212,13 @@ xpc_hb_beater(unsigned long dummy)
230{ 212{
231 xpc_vars->heartbeat++; 213 xpc_vars->heartbeat++;
232 214
233 if (time_after_eq(jiffies, xpc_hb_check_timeout)) { 215 if (time_after_eq(jiffies, xpc_hb_check_timeout))
234 wake_up_interruptible(&xpc_act_IRQ_wq); 216 wake_up_interruptible(&xpc_act_IRQ_wq);
235 }
236 217
237 xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); 218 xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
238 add_timer(&xpc_hb_timer); 219 add_timer(&xpc_hb_timer);
239} 220}
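
xpc_hb_beater() is the classic self-rearming timer: the callback does its work and re-queues itself one interval out. For reference, the same shape with the timer API of this era, using setup_timer()/mod_timer() (all names here are illustrative):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_timer_fn(unsigned long data)
{
	/* ... periodic work, e.g. bump a heartbeat counter ... */

	/* re-arm for one interval from now */
	mod_timer(&demo_timer, jiffies + HZ);
}

static void demo_timer_start(void)
{
	setup_timer(&demo_timer, demo_timer_fn, 0);
	mod_timer(&demo_timer, jiffies + HZ);
}

static void demo_timer_stop(void)
{
	/* wait for a running callback to finish before tearing down */
	del_timer_sync(&demo_timer);
}
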
240 221
241
242/* 222/*
243 * This thread is responsible for nearly all of the partition 223 * This thread is responsible for nearly all of the partition
244 * activation/deactivation. 224 * activation/deactivation.
@@ -248,27 +228,23 @@ xpc_hb_checker(void *ignore)
248{ 228{
249 int last_IRQ_count = 0; 229 int last_IRQ_count = 0;
250 int new_IRQ_count; 230 int new_IRQ_count;
251 int force_IRQ=0; 231 int force_IRQ = 0;
252
253 232
254 /* this thread was marked active by xpc_hb_init() */ 233 /* this thread was marked active by xpc_hb_init() */
255 234
256 daemonize(XPC_HB_CHECK_THREAD_NAME);
257
258 set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); 235 set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
259 236
260 /* set our heartbeating to other partitions into motion */ 237 /* set our heartbeating to other partitions into motion */
261 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); 238 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
262 xpc_hb_beater(0); 239 xpc_hb_beater(0);
263 240
264 while (!(volatile int) xpc_exiting) { 241 while (!xpc_exiting) {
265 242
266 dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " 243 dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
267 "been received\n", 244 "been received\n",
268 (int) (xpc_hb_check_timeout - jiffies), 245 (int)(xpc_hb_check_timeout - jiffies),
269 atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); 246 atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
270 247
271
272 /* checking of remote heartbeats is skewed by IRQ handling */ 248 /* checking of remote heartbeats is skewed by IRQ handling */
273 if (time_after_eq(jiffies, xpc_hb_check_timeout)) { 249 if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
274 dev_dbg(xpc_part, "checking remote heartbeats\n"); 250 dev_dbg(xpc_part, "checking remote heartbeats\n");
@@ -282,7 +258,6 @@ xpc_hb_checker(void *ignore)
282 force_IRQ = 1; 258 force_IRQ = 1;
283 } 259 }
284 260
285
286 /* check for outstanding IRQs */ 261 /* check for outstanding IRQs */
287 new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); 262 new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
288 if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { 263 if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
@@ -294,30 +269,30 @@ xpc_hb_checker(void *ignore)
294 last_IRQ_count += xpc_identify_act_IRQ_sender(); 269 last_IRQ_count += xpc_identify_act_IRQ_sender();
295 if (last_IRQ_count < new_IRQ_count) { 270 if (last_IRQ_count < new_IRQ_count) {
296 /* retry once to help avoid missing AMO */ 271 /* retry once to help avoid missing AMO */
297 (void) xpc_identify_act_IRQ_sender(); 272 (void)xpc_identify_act_IRQ_sender();
298 } 273 }
299 last_IRQ_count = new_IRQ_count; 274 last_IRQ_count = new_IRQ_count;
300 275
301 xpc_hb_check_timeout = jiffies + 276 xpc_hb_check_timeout = jiffies +
302 (xpc_hb_check_interval * HZ); 277 (xpc_hb_check_interval * HZ);
303 } 278 }
304 279
305 /* wait for IRQ or timeout */ 280 /* wait for IRQ or timeout */
306 (void) wait_event_interruptible(xpc_act_IRQ_wq, 281 (void)wait_event_interruptible(xpc_act_IRQ_wq,
307 (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || 282 (last_IRQ_count <
308 time_after_eq(jiffies, xpc_hb_check_timeout) || 283 atomic_read(&xpc_act_IRQ_rcvd)
309 (volatile int) xpc_exiting)); 284 || time_after_eq(jiffies,
285 xpc_hb_check_timeout) ||
286 xpc_exiting));
310 } 287 }
311 288
312 dev_dbg(xpc_part, "heartbeat checker is exiting\n"); 289 dev_dbg(xpc_part, "heartbeat checker is exiting\n");
313 290
314
315 /* mark this thread as having exited */ 291 /* mark this thread as having exited */
316 complete(&xpc_hb_checker_exited); 292 complete(&xpc_hb_checker_exited);
317 return 0; 293 return 0;
318} 294}
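
The heartbeat checker's main loop above is the standard wait-queue idiom: sleep with wait_event_interruptible() until new activation IRQs have been counted, the check interval has expired, or the module is exiting, with the IRQ handler doing the matching wake_up_interruptible(). Reduced to its skeleton (the demo_* names are placeholders):

#include <linux/wait.h>
#include <asm/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static atomic_t demo_events = ATOMIC_INIT(0);
static int demo_exiting;

/* sleeper: returns once there is something to do (or a signal arrives) */
static void demo_wait_for_work(int events_seen)
{
	(void)wait_event_interruptible(demo_wq,
				       atomic_read(&demo_events) > events_seen ||
				       demo_exiting);
}

/* waker: typically called from interrupt context, as xpc_act_IRQ_handler() is */
static void demo_post_work(void)
{
	atomic_inc(&demo_events);
	wake_up_interruptible(&demo_wq);
}
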
319 295
320
321/* 296/*
322 * This thread will attempt to discover other partitions to activate 297 * This thread will attempt to discover other partitions to activate
323 * based on info provided by SAL. This new thread is short lived and 298 * based on info provided by SAL. This new thread is short lived and
@@ -326,8 +301,6 @@ xpc_hb_checker(void *ignore)
326static int 301static int
327xpc_initiate_discovery(void *ignore) 302xpc_initiate_discovery(void *ignore)
328{ 303{
329 daemonize(XPC_DISCOVERY_THREAD_NAME);
330
331 xpc_discovery(); 304 xpc_discovery();
332 305
333 dev_dbg(xpc_part, "discovery thread is exiting\n"); 306 dev_dbg(xpc_part, "discovery thread is exiting\n");
@@ -337,7 +310,6 @@ xpc_initiate_discovery(void *ignore)
337 return 0; 310 return 0;
338} 311}
339 312
340
341/* 313/*
342 * Establish first contact with the remote partition. This involves pulling 314 * the XPC per partition variables from the remote partition and waiting for
343 * the XPC per partition variables from the remote partition and waiting for 315 * the XPC per partition variables from the remote partition and waiting for
@@ -348,7 +320,6 @@ xpc_make_first_contact(struct xpc_partition *part)
348{ 320{
349 enum xpc_retval ret; 321 enum xpc_retval ret;
350 322
351
352 while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { 323 while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
353 if (ret != xpcRetry) { 324 if (ret != xpcRetry) {
354 XPC_DEACTIVATE_PARTITION(part, ret); 325 XPC_DEACTIVATE_PARTITION(part, ret);
@@ -359,17 +330,15 @@ xpc_make_first_contact(struct xpc_partition *part)
359 "partition %d\n", XPC_PARTID(part)); 330 "partition %d\n", XPC_PARTID(part));
360 331
361 /* wait a 1/4 of a second or so */ 332 /* wait a 1/4 of a second or so */
362 (void) msleep_interruptible(250); 333 (void)msleep_interruptible(250);
363 334
364 if (part->act_state == XPC_P_DEACTIVATING) { 335 if (part->act_state == XPC_P_DEACTIVATING)
365 return part->reason; 336 return part->reason;
366 }
367 } 337 }
368 338
369 return xpc_mark_partition_active(part); 339 return xpc_mark_partition_active(part);
370} 340}
371 341
372
373/* 342/*
374 * The first kthread assigned to a newly activated partition is the one 343 * The first kthread assigned to a newly activated partition is the one
375 * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to 344 * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
@@ -386,12 +355,11 @@ static void
386xpc_channel_mgr(struct xpc_partition *part) 355xpc_channel_mgr(struct xpc_partition *part)
387{ 356{
388 while (part->act_state != XPC_P_DEACTIVATING || 357 while (part->act_state != XPC_P_DEACTIVATING ||
389 atomic_read(&part->nchannels_active) > 0 || 358 atomic_read(&part->nchannels_active) > 0 ||
390 !xpc_partition_disengaged(part)) { 359 !xpc_partition_disengaged(part)) {
391 360
392 xpc_process_channel_activity(part); 361 xpc_process_channel_activity(part);
393 362
394
395 /* 363 /*
396 * Wait until we've been requested to activate kthreads or 364 * Wait until we've been requested to activate kthreads or
397 * all of the channel's message queues have been torn down or 365 * all of the channel's message queues have been torn down or
@@ -406,21 +374,16 @@ xpc_channel_mgr(struct xpc_partition *part)
406 * wake him up. 374 * wake him up.
407 */ 375 */
408 atomic_dec(&part->channel_mgr_requests); 376 atomic_dec(&part->channel_mgr_requests);
409 (void) wait_event_interruptible(part->channel_mgr_wq, 377 (void)wait_event_interruptible(part->channel_mgr_wq,
410 (atomic_read(&part->channel_mgr_requests) > 0 || 378 (atomic_read(&part->channel_mgr_requests) > 0 ||
411 (volatile u64) part->local_IPI_amo != 0 || 379 part->local_IPI_amo != 0 ||
412 ((volatile u8) part->act_state == 380 (part->act_state == XPC_P_DEACTIVATING &&
413 XPC_P_DEACTIVATING && 381 atomic_read(&part->nchannels_active) == 0 &&
414 atomic_read(&part->nchannels_active) == 0 && 382 xpc_partition_disengaged(part))));
415 xpc_partition_disengaged(part))));
416 atomic_set(&part->channel_mgr_requests, 1); 383 atomic_set(&part->channel_mgr_requests, 1);
417
418 // >>> Does it need to wakeup periodically as well? In case we
419 // >>> miscalculated the #of kthreads to wakeup or create?
420 } 384 }
421} 385}
422 386
423
424/* 387/*
425 * When XPC HB determines that a partition has come up, it will create a new 388 * When XPC HB determines that a partition has come up, it will create a new
426 * kthread and that kthread will call this function to attempt to set up the 389 * kthread and that kthread will call this function to attempt to set up the
@@ -443,9 +406,8 @@ xpc_partition_up(struct xpc_partition *part)
443 406
444 dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); 407 dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
445 408
446 if (xpc_setup_infrastructure(part) != xpcSuccess) { 409 if (xpc_setup_infrastructure(part) != xpcSuccess)
447 return; 410 return;
448 }
449 411
450 /* 412 /*
451 * The kthread that XPC HB called us with will become the 413 * The kthread that XPC HB called us with will become the
@@ -454,27 +416,22 @@ xpc_partition_up(struct xpc_partition *part)
454 * has been dismantled. 416 * has been dismantled.
455 */ 417 */
456 418
457 (void) xpc_part_ref(part); /* this will always succeed */ 419 (void)xpc_part_ref(part); /* this will always succeed */
458 420
459 if (xpc_make_first_contact(part) == xpcSuccess) { 421 if (xpc_make_first_contact(part) == xpcSuccess)
460 xpc_channel_mgr(part); 422 xpc_channel_mgr(part);
461 }
462 423
463 xpc_part_deref(part); 424 xpc_part_deref(part);
464 425
465 xpc_teardown_infrastructure(part); 426 xpc_teardown_infrastructure(part);
466} 427}
467 428
468
469static int 429static int
470xpc_activating(void *__partid) 430xpc_activating(void *__partid)
471{ 431{
472 partid_t partid = (u64) __partid; 432 partid_t partid = (u64)__partid;
473 struct xpc_partition *part = &xpc_partitions[partid]; 433 struct xpc_partition *part = &xpc_partitions[partid];
474 unsigned long irq_flags; 434 unsigned long irq_flags;
475 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
476 int ret;
477
478 435
479 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 436 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
480 437
@@ -496,21 +453,6 @@ xpc_activating(void *__partid)
496 453
497 dev_dbg(xpc_part, "bringing partition %d up\n", partid); 454 dev_dbg(xpc_part, "bringing partition %d up\n", partid);
498 455
499 daemonize("xpc%02d", partid);
500
501 /*
502 * This thread needs to run at a realtime priority to prevent a
503 * significant performance degradation.
504 */
505 ret = sched_setscheduler(current, SCHED_FIFO, &param);
506 if (ret != 0) {
507 dev_warn(xpc_part, "unable to set pid %d to a realtime "
508 "priority, ret=%d\n", current->pid, ret);
509 }
510
511 /* allow this thread and its children to run on any CPU */
512 set_cpus_allowed(current, CPU_MASK_ALL);
513
514 /* 456 /*
515 * Register the remote partition's AMOs with SAL so it can handle 457 * Register the remote partition's AMOs with SAL so it can handle
516 * and cleanup errors within that address range should the remote 458 * and cleanup errors within that address range should the remote
@@ -522,9 +464,9 @@ xpc_activating(void *__partid)
522 * reloads and system reboots. 464 * reloads and system reboots.
523 */ 465 */
524 if (sn_register_xp_addr_region(part->remote_amos_page_pa, 466 if (sn_register_xp_addr_region(part->remote_amos_page_pa,
525 PAGE_SIZE, 1) < 0) { 467 PAGE_SIZE, 1) < 0) {
526 dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " 468 dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
527 "xp_addr region\n", partid); 469 "xp_addr region\n", partid);
528 470
529 spin_lock_irqsave(&part->act_lock, irq_flags); 471 spin_lock_irqsave(&part->act_lock, irq_flags);
530 part->act_state = XPC_P_INACTIVE; 472 part->act_state = XPC_P_INACTIVE;
@@ -537,12 +479,11 @@ xpc_activating(void *__partid)
537 xpc_allow_hb(partid, xpc_vars); 479 xpc_allow_hb(partid, xpc_vars);
538 xpc_IPI_send_activated(part); 480 xpc_IPI_send_activated(part);
539 481
540
541 /* 482 /*
542 * xpc_partition_up() holds this thread and marks this partition as 483 * xpc_partition_up() holds this thread and marks this partition as
543 * XPC_P_ACTIVE by calling xpc_hb_mark_active(). 484 * XPC_P_ACTIVE by calling xpc_hb_mark_active().
544 */ 485 */
545 (void) xpc_partition_up(part); 486 (void)xpc_partition_up(part);
546 487
547 xpc_disallow_hb(partid, xpc_vars); 488 xpc_disallow_hb(partid, xpc_vars);
548 xpc_mark_partition_inactive(part); 489 xpc_mark_partition_inactive(part);
@@ -555,14 +496,12 @@ xpc_activating(void *__partid)
555 return 0; 496 return 0;
556} 497}
557 498
558
559void 499void
560xpc_activate_partition(struct xpc_partition *part) 500xpc_activate_partition(struct xpc_partition *part)
561{ 501{
562 partid_t partid = XPC_PARTID(part); 502 partid_t partid = XPC_PARTID(part);
563 unsigned long irq_flags; 503 unsigned long irq_flags;
564 pid_t pid; 504 struct task_struct *kthread;
565
566 505
567 spin_lock_irqsave(&part->act_lock, irq_flags); 506 spin_lock_irqsave(&part->act_lock, irq_flags);
568 507
@@ -573,9 +512,9 @@ xpc_activate_partition(struct xpc_partition *part)
573 512
574 spin_unlock_irqrestore(&part->act_lock, irq_flags); 513 spin_unlock_irqrestore(&part->act_lock, irq_flags);
575 514
576 pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0); 515 kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
577 516 partid);
578 if (unlikely(pid <= 0)) { 517 if (IS_ERR(kthread)) {
579 spin_lock_irqsave(&part->act_lock, irq_flags); 518 spin_lock_irqsave(&part->act_lock, irq_flags);
580 part->act_state = XPC_P_INACTIVE; 519 part->act_state = XPC_P_INACTIVE;
581 XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__); 520 XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
@@ -583,12 +522,11 @@ xpc_activate_partition(struct xpc_partition *part)
583 } 522 }
584} 523}
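
The change above is this file's main API migration: the old kernel_thread() + daemonize() + manual setup sequence is replaced by kthread_run(), which creates the thread, names it, and wakes it in one call, returning a task_struct (or an ERR_PTR on failure) instead of a pid. A minimal sketch of the pattern (worker_fn and the "demo%02d" name are placeholders, not the driver's):

#include <linux/kthread.h>
#include <linux/err.h>

static int worker_fn(void *arg)
{
	/*
	 * Runs as a proper kthread, so no daemonize() is needed; like the
	 * XPC threads above it simply returns when its work is done rather
	 * than waiting for kthread_stop().
	 */
	return 0;
}

static int start_worker(void *arg, int id)
{
	struct task_struct *kthread;

	/* kthread_run() = kthread_create() + wake_up_process() */
	kthread = kthread_run(worker_fn, arg, "demo%02d", id);
	if (IS_ERR(kthread))
		return PTR_ERR(kthread);	/* e.g. -ENOMEM */

	return 0;
}
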
585 524
586
587/* 525/*
588 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified 526 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
589 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more 527 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
590 * than one partition, we use an AMO_t structure per partition to indicate 528 * than one partition, we use an AMO_t structure per partition to indicate
591 * whether a partition has sent an IPI or not. >>> If it has, then wake up the 529 * whether a partition has sent an IPI or not. If it has, then wake up the
592 * associated kthread to handle it. 530 * associated kthread to handle it.
593 * 531 *
594 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC 532 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
@@ -603,10 +541,9 @@ xpc_activate_partition(struct xpc_partition *part)
603irqreturn_t 541irqreturn_t
604xpc_notify_IRQ_handler(int irq, void *dev_id) 542xpc_notify_IRQ_handler(int irq, void *dev_id)
605{ 543{
606 partid_t partid = (partid_t) (u64) dev_id; 544 partid_t partid = (partid_t) (u64)dev_id;
607 struct xpc_partition *part = &xpc_partitions[partid]; 545 struct xpc_partition *part = &xpc_partitions[partid];
608 546
609
610 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 547 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
611 548
612 if (xpc_part_ref(part)) { 549 if (xpc_part_ref(part)) {
@@ -617,7 +554,6 @@ xpc_notify_IRQ_handler(int irq, void *dev_id)
617 return IRQ_HANDLED; 554 return IRQ_HANDLED;
618} 555}
619 556
620
621/* 557/*
622 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor 558 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
623 * because the write to their associated IPI amo completed after the IRQ/IPI 559 * because the write to their associated IPI amo completed after the IRQ/IPI
@@ -630,13 +566,12 @@ xpc_dropped_IPI_check(struct xpc_partition *part)
630 xpc_check_for_channel_activity(part); 566 xpc_check_for_channel_activity(part);
631 567
632 part->dropped_IPI_timer.expires = jiffies + 568 part->dropped_IPI_timer.expires = jiffies +
633 XPC_P_DROPPED_IPI_WAIT; 569 XPC_P_DROPPED_IPI_WAIT;
634 add_timer(&part->dropped_IPI_timer); 570 add_timer(&part->dropped_IPI_timer);
635 xpc_part_deref(part); 571 xpc_part_deref(part);
636 } 572 }
637} 573}
638 574
639
640void 575void
641xpc_activate_kthreads(struct xpc_channel *ch, int needed) 576xpc_activate_kthreads(struct xpc_channel *ch, int needed)
642{ 577{
@@ -644,7 +579,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
644 int assigned = atomic_read(&ch->kthreads_assigned); 579 int assigned = atomic_read(&ch->kthreads_assigned);
645 int wakeup; 580 int wakeup;
646 581
647
648 DBUG_ON(needed <= 0); 582 DBUG_ON(needed <= 0);
649 583
650 if (idle > 0) { 584 if (idle > 0) {
@@ -658,16 +592,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
658 wake_up_nr(&ch->idle_wq, wakeup); 592 wake_up_nr(&ch->idle_wq, wakeup);
659 } 593 }
660 594
661 if (needed <= 0) { 595 if (needed <= 0)
662 return; 596 return;
663 }
664 597
665 if (needed + assigned > ch->kthreads_assigned_limit) { 598 if (needed + assigned > ch->kthreads_assigned_limit) {
666 needed = ch->kthreads_assigned_limit - assigned; 599 needed = ch->kthreads_assigned_limit - assigned;
667 // >>>should never be less than 0 600 if (needed <= 0)
668 if (needed <= 0) {
669 return; 601 return;
670 }
671 } 602 }
672 603
673 dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", 604 dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
@@ -676,7 +607,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
676 xpc_create_kthreads(ch, needed, 0); 607 xpc_create_kthreads(ch, needed, 0);
677} 608}
678 609
679
680/* 610/*
681 * This function is where XPC's kthreads wait for messages to deliver. 611 * This function is where XPC's kthreads wait for messages to deliver.
682 */ 612 */
@@ -686,15 +616,13 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
686 do { 616 do {
687 /* deliver messages to their intended recipients */ 617 /* deliver messages to their intended recipients */
688 618
689 while ((volatile s64) ch->w_local_GP.get < 619 while (ch->w_local_GP.get < ch->w_remote_GP.put &&
690 (volatile s64) ch->w_remote_GP.put && 620 !(ch->flags & XPC_C_DISCONNECTING)) {
691 !((volatile u32) ch->flags &
692 XPC_C_DISCONNECTING)) {
693 xpc_deliver_msg(ch); 621 xpc_deliver_msg(ch);
694 } 622 }
695 623
696 if (atomic_inc_return(&ch->kthreads_idle) > 624 if (atomic_inc_return(&ch->kthreads_idle) >
697 ch->kthreads_idle_limit) { 625 ch->kthreads_idle_limit) {
698 /* too many idle kthreads on this channel */ 626 /* too many idle kthreads on this channel */
699 atomic_dec(&ch->kthreads_idle); 627 atomic_dec(&ch->kthreads_idle);
700 break; 628 break;
@@ -703,20 +631,17 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
703 dev_dbg(xpc_chan, "idle kthread calling " 631 dev_dbg(xpc_chan, "idle kthread calling "
704 "wait_event_interruptible_exclusive()\n"); 632 "wait_event_interruptible_exclusive()\n");
705 633
706 (void) wait_event_interruptible_exclusive(ch->idle_wq, 634 (void)wait_event_interruptible_exclusive(ch->idle_wq,
707 ((volatile s64) ch->w_local_GP.get < 635 (ch->w_local_GP.get < ch->w_remote_GP.put ||
708 (volatile s64) ch->w_remote_GP.put || 636 (ch->flags & XPC_C_DISCONNECTING)));
709 ((volatile u32) ch->flags &
710 XPC_C_DISCONNECTING)));
711 637
712 atomic_dec(&ch->kthreads_idle); 638 atomic_dec(&ch->kthreads_idle);
713 639
714 } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING)); 640 } while (!(ch->flags & XPC_C_DISCONNECTING));
715} 641}
716 642
717
718static int 643static int
719xpc_daemonize_kthread(void *args) 644xpc_kthread_start(void *args)
720{ 645{
721 partid_t partid = XPC_UNPACK_ARG1(args); 646 partid_t partid = XPC_UNPACK_ARG1(args);
722 u16 ch_number = XPC_UNPACK_ARG2(args); 647 u16 ch_number = XPC_UNPACK_ARG2(args);
@@ -725,9 +650,6 @@ xpc_daemonize_kthread(void *args)
725 int n_needed; 650 int n_needed;
726 unsigned long irq_flags; 651 unsigned long irq_flags;
727 652
728
729 daemonize("xpc%02dc%d", partid, ch_number);
730
731 dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", 653 dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
732 partid, ch_number); 654 partid, ch_number);
733 655
@@ -756,10 +678,9 @@ xpc_daemonize_kthread(void *args)
756 * need one less than total #of messages to deliver. 678 * need one less than total #of messages to deliver.
757 */ 679 */
758 n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; 680 n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
759 if (n_needed > 0 && 681 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
760 !(ch->flags & XPC_C_DISCONNECTING)) {
761 xpc_activate_kthreads(ch, n_needed); 682 xpc_activate_kthreads(ch, n_needed);
762 } 683
763 } else { 684 } else {
764 spin_unlock_irqrestore(&ch->lock, irq_flags); 685 spin_unlock_irqrestore(&ch->lock, irq_flags);
765 } 686 }
@@ -771,7 +692,7 @@ xpc_daemonize_kthread(void *args)
771 692
772 spin_lock_irqsave(&ch->lock, irq_flags); 693 spin_lock_irqsave(&ch->lock, irq_flags);
773 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 694 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
774 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { 695 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
775 ch->flags |= XPC_C_DISCONNECTINGCALLOUT; 696 ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
776 spin_unlock_irqrestore(&ch->lock, irq_flags); 697 spin_unlock_irqrestore(&ch->lock, irq_flags);
777 698
@@ -798,7 +719,6 @@ xpc_daemonize_kthread(void *args)
798 return 0; 719 return 0;
799} 720}
800 721
801
802/* 722/*
803 * For each partition that XPC has established communications with, there is 723 * For each partition that XPC has established communications with, there is
804 * a minimum of one kernel thread assigned to perform any operation that 724 * a minimum of one kernel thread assigned to perform any operation that
@@ -813,13 +733,12 @@ xpc_daemonize_kthread(void *args)
813 */ 733 */
814void 734void
815xpc_create_kthreads(struct xpc_channel *ch, int needed, 735xpc_create_kthreads(struct xpc_channel *ch, int needed,
816 int ignore_disconnecting) 736 int ignore_disconnecting)
817{ 737{
818 unsigned long irq_flags; 738 unsigned long irq_flags;
819 pid_t pid;
820 u64 args = XPC_PACK_ARGS(ch->partid, ch->number); 739 u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
821 struct xpc_partition *part = &xpc_partitions[ch->partid]; 740 struct xpc_partition *part = &xpc_partitions[ch->partid];
822 741 struct task_struct *kthread;
823 742
824 while (needed-- > 0) { 743 while (needed-- > 0) {
825 744
@@ -832,7 +751,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
832 if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { 751 if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
833 /* kthreads assigned had gone to zero */ 752 /* kthreads assigned had gone to zero */
834 BUG_ON(!(ch->flags & 753 BUG_ON(!(ch->flags &
835 XPC_C_DISCONNECTINGCALLOUT_MADE)); 754 XPC_C_DISCONNECTINGCALLOUT_MADE));
836 break; 755 break;
837 } 756 }
838 757
@@ -843,11 +762,12 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
843 if (atomic_inc_return(&part->nchannels_engaged) == 1) 762 if (atomic_inc_return(&part->nchannels_engaged) == 1)
844 xpc_mark_partition_engaged(part); 763 xpc_mark_partition_engaged(part);
845 } 764 }
846 (void) xpc_part_ref(part); 765 (void)xpc_part_ref(part);
847 xpc_msgqueue_ref(ch); 766 xpc_msgqueue_ref(ch);
848 767
849 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); 768 kthread = kthread_run(xpc_kthread_start, (void *)args,
850 if (pid < 0) { 769 "xpc%02dc%d", ch->partid, ch->number);
770 if (IS_ERR(kthread)) {
851 /* the fork failed */ 771 /* the fork failed */
852 772
853 /* 773 /*
@@ -857,7 +777,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
857 * to this channel are blocked in the channel's 777 * to this channel are blocked in the channel's
858 * registerer, because the only thing that will unblock 778 * registerer, because the only thing that will unblock
859 * them is the xpcDisconnecting callout that this 779 * them is the xpcDisconnecting callout that this
860 * failed kernel_thread would have made. 780 * failed kthread_run() would have made.
861 */ 781 */
862 782
863 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && 783 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
@@ -869,7 +789,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
869 xpc_part_deref(part); 789 xpc_part_deref(part);
870 790
871 if (atomic_read(&ch->kthreads_assigned) < 791 if (atomic_read(&ch->kthreads_assigned) <
872 ch->kthreads_idle_limit) { 792 ch->kthreads_idle_limit) {
873 /* 793 /*
874 * Flag this as an error only if we have an 794 * Flag this as an error only if we have an
875 * insufficient #of kthreads for the channel 795 * insufficient #of kthreads for the channel
@@ -877,17 +797,14 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
877 */ 797 */
878 spin_lock_irqsave(&ch->lock, irq_flags); 798 spin_lock_irqsave(&ch->lock, irq_flags);
879 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, 799 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
880 &irq_flags); 800 &irq_flags);
881 spin_unlock_irqrestore(&ch->lock, irq_flags); 801 spin_unlock_irqrestore(&ch->lock, irq_flags);
882 } 802 }
883 break; 803 break;
884 } 804 }
885
886 ch->kthreads_created++; // >>> temporary debug only!!!
887 } 805 }
888} 806}
889 807
890
891void 808void
892xpc_disconnect_wait(int ch_number) 809xpc_disconnect_wait(int ch_number)
893{ 810{
@@ -897,14 +814,12 @@ xpc_disconnect_wait(int ch_number)
897 struct xpc_channel *ch; 814 struct xpc_channel *ch;
898 int wakeup_channel_mgr; 815 int wakeup_channel_mgr;
899 816
900
901 /* now wait for all callouts to the caller's function to cease */ 817 /* now wait for all callouts to the caller's function to cease */
902 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 818 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
903 part = &xpc_partitions[partid]; 819 part = &xpc_partitions[partid];
904 820
905 if (!xpc_part_ref(part)) { 821 if (!xpc_part_ref(part))
906 continue; 822 continue;
907 }
908 823
909 ch = &part->channels[ch_number]; 824 ch = &part->channels[ch_number];
910 825
@@ -923,7 +838,8 @@ xpc_disconnect_wait(int ch_number)
923 if (part->act_state != XPC_P_DEACTIVATING) { 838 if (part->act_state != XPC_P_DEACTIVATING) {
924 spin_lock(&part->IPI_lock); 839 spin_lock(&part->IPI_lock);
925 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 840 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
926 ch->number, ch->delayed_IPI_flags); 841 ch->number,
842 ch->delayed_IPI_flags);
927 spin_unlock(&part->IPI_lock); 843 spin_unlock(&part->IPI_lock);
928 wakeup_channel_mgr = 1; 844 wakeup_channel_mgr = 1;
929 } 845 }
@@ -933,15 +849,13 @@ xpc_disconnect_wait(int ch_number)
933 ch->flags &= ~XPC_C_WDISCONNECT; 849 ch->flags &= ~XPC_C_WDISCONNECT;
934 spin_unlock_irqrestore(&ch->lock, irq_flags); 850 spin_unlock_irqrestore(&ch->lock, irq_flags);
935 851
936 if (wakeup_channel_mgr) { 852 if (wakeup_channel_mgr)
937 xpc_wakeup_channel_mgr(part); 853 xpc_wakeup_channel_mgr(part);
938 }
939 854
940 xpc_part_deref(part); 855 xpc_part_deref(part);
941 } 856 }
942} 857}
943 858
944
945static void 859static void
946xpc_do_exit(enum xpc_retval reason) 860xpc_do_exit(enum xpc_retval reason)
947{ 861{
@@ -950,7 +864,6 @@ xpc_do_exit(enum xpc_retval reason)
950 struct xpc_partition *part; 864 struct xpc_partition *part;
951 unsigned long printmsg_time, disengage_request_timeout = 0; 865 unsigned long printmsg_time, disengage_request_timeout = 0;
952 866
953
954 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ 867 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
955 DBUG_ON(xpc_exiting == 1); 868 DBUG_ON(xpc_exiting == 1);
956 869
@@ -971,10 +884,8 @@ xpc_do_exit(enum xpc_retval reason)
971 /* wait for the heartbeat checker thread to exit */ 884 /* wait for the heartbeat checker thread to exit */
972 wait_for_completion(&xpc_hb_checker_exited); 885 wait_for_completion(&xpc_hb_checker_exited);
973 886
974
975 /* sleep for a 1/3 of a second or so */ 887 /* sleep for a 1/3 of a second or so */
976 (void) msleep_interruptible(300); 888 (void)msleep_interruptible(300);
977
978 889
979 /* wait for all partitions to become inactive */ 890 /* wait for all partitions to become inactive */
980 891
@@ -988,7 +899,7 @@ xpc_do_exit(enum xpc_retval reason)
988 part = &xpc_partitions[partid]; 899 part = &xpc_partitions[partid];
989 900
990 if (xpc_partition_disengaged(part) && 901 if (xpc_partition_disengaged(part) &&
991 part->act_state == XPC_P_INACTIVE) { 902 part->act_state == XPC_P_INACTIVE) {
992 continue; 903 continue;
993 } 904 }
994 905
@@ -997,47 +908,46 @@ xpc_do_exit(enum xpc_retval reason)
997 XPC_DEACTIVATE_PARTITION(part, reason); 908 XPC_DEACTIVATE_PARTITION(part, reason);
998 909
999 if (part->disengage_request_timeout > 910 if (part->disengage_request_timeout >
1000 disengage_request_timeout) { 911 disengage_request_timeout) {
1001 disengage_request_timeout = 912 disengage_request_timeout =
1002 part->disengage_request_timeout; 913 part->disengage_request_timeout;
1003 } 914 }
1004 } 915 }
1005 916
1006 if (xpc_partition_engaged(-1UL)) { 917 if (xpc_partition_engaged(-1UL)) {
1007 if (time_after(jiffies, printmsg_time)) { 918 if (time_after(jiffies, printmsg_time)) {
1008 dev_info(xpc_part, "waiting for remote " 919 dev_info(xpc_part, "waiting for remote "
1009 "partitions to disengage, timeout in " 920 "partitions to disengage, timeout in "
1010 "%ld seconds\n", 921 "%ld seconds\n",
1011 (disengage_request_timeout - jiffies) 922 (disengage_request_timeout - jiffies)
1012 / HZ); 923 / HZ);
1013 printmsg_time = jiffies + 924 printmsg_time = jiffies +
1014 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); 925 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
1015 printed_waiting_msg = 1; 926 printed_waiting_msg = 1;
1016 } 927 }
1017 928
1018 } else if (active_part_count > 0) { 929 } else if (active_part_count > 0) {
1019 if (printed_waiting_msg) { 930 if (printed_waiting_msg) {
1020 dev_info(xpc_part, "waiting for local partition" 931 dev_info(xpc_part, "waiting for local partition"
1021 " to disengage\n"); 932 " to disengage\n");
1022 printed_waiting_msg = 0; 933 printed_waiting_msg = 0;
1023 } 934 }
1024 935
1025 } else { 936 } else {
1026 if (!xpc_disengage_request_timedout) { 937 if (!xpc_disengage_request_timedout) {
1027 dev_info(xpc_part, "all partitions have " 938 dev_info(xpc_part, "all partitions have "
1028 "disengaged\n"); 939 "disengaged\n");
1029 } 940 }
1030 break; 941 break;
1031 } 942 }
1032 943
1033 /* sleep for a 1/3 of a second or so */ 944 /* sleep for a 1/3 of a second or so */
1034 (void) msleep_interruptible(300); 945 (void)msleep_interruptible(300);
1035 946
1036 } while (1); 947 } while (1);
1037 948
1038 DBUG_ON(xpc_partition_engaged(-1UL)); 949 DBUG_ON(xpc_partition_engaged(-1UL));
1039 950
1040
1041 /* indicate to others that our reserved page is uninitialized */ 951 /* indicate to others that our reserved page is uninitialized */
1042 xpc_rsvd_page->vars_pa = 0; 952 xpc_rsvd_page->vars_pa = 0;
1043 953
@@ -1047,27 +957,24 @@ xpc_do_exit(enum xpc_retval reason)
1047 957
1048 if (reason == xpcUnloading) { 958 if (reason == xpcUnloading) {
1049 /* take ourselves off of the reboot_notifier_list */ 959 /* take ourselves off of the reboot_notifier_list */
1050 (void) unregister_reboot_notifier(&xpc_reboot_notifier); 960 (void)unregister_reboot_notifier(&xpc_reboot_notifier);
1051 961
1052 /* take ourselves off of the die_notifier list */ 962 /* take ourselves off of the die_notifier list */
1053 (void) unregister_die_notifier(&xpc_die_notifier); 963 (void)unregister_die_notifier(&xpc_die_notifier);
1054 } 964 }
1055 965
1056 /* close down protections for IPI operations */ 966 /* close down protections for IPI operations */
1057 xpc_restrict_IPI_ops(); 967 xpc_restrict_IPI_ops();
1058 968
1059
1060 /* clear the interface to XPC's functions */ 969 /* clear the interface to XPC's functions */
1061 xpc_clear_interface(); 970 xpc_clear_interface();
1062 971
1063 if (xpc_sysctl) { 972 if (xpc_sysctl)
1064 unregister_sysctl_table(xpc_sysctl); 973 unregister_sysctl_table(xpc_sysctl);
1065 }
1066 974
1067 kfree(xpc_remote_copy_buffer_base); 975 kfree(xpc_remote_copy_buffer_base);
1068} 976}
1069 977
1070
1071/* 978/*
1072 * This function is called when the system is being rebooted. 979 * This function is called when the system is being rebooted.
1073 */ 980 */
@@ -1076,7 +983,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1076{ 983{
1077 enum xpc_retval reason; 984 enum xpc_retval reason;
1078 985
1079
1080 switch (event) { 986 switch (event) {
1081 case SYS_RESTART: 987 case SYS_RESTART:
1082 reason = xpcSystemReboot; 988 reason = xpcSystemReboot;
@@ -1095,7 +1001,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1095 return NOTIFY_DONE; 1001 return NOTIFY_DONE;
1096} 1002}
1097 1003
1098
1099/* 1004/*
1100 * Notify other partitions to disengage from all references to our memory. 1005 * Notify other partitions to disengage from all references to our memory.
1101 */ 1006 */
@@ -1107,17 +1012,16 @@ xpc_die_disengage(void)
1107 unsigned long engaged; 1012 unsigned long engaged;
1108 long time, printmsg_time, disengage_request_timeout; 1013 long time, printmsg_time, disengage_request_timeout;
1109 1014
1110
1111 /* keep xpc_hb_checker thread from doing anything (just in case) */ 1015 /* keep xpc_hb_checker thread from doing anything (just in case) */
1112 xpc_exiting = 1; 1016 xpc_exiting = 1;
1113 1017
1114 xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */ 1018 xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */
1115 1019
1116 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1020 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1117 part = &xpc_partitions[partid]; 1021 part = &xpc_partitions[partid];
1118 1022
1119 if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part-> 1023 if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
1120 remote_vars_version)) { 1024 remote_vars_version)) {
1121 1025
1122 /* just in case it was left set by an earlier XPC */ 1026 /* just in case it was left set by an earlier XPC */
1123 xpc_clear_partition_engaged(1UL << partid); 1027 xpc_clear_partition_engaged(1UL << partid);
@@ -1125,7 +1029,7 @@ xpc_die_disengage(void)
1125 } 1029 }
1126 1030
1127 if (xpc_partition_engaged(1UL << partid) || 1031 if (xpc_partition_engaged(1UL << partid) ||
1128 part->act_state != XPC_P_INACTIVE) { 1032 part->act_state != XPC_P_INACTIVE) {
1129 xpc_request_partition_disengage(part); 1033 xpc_request_partition_disengage(part);
1130 xpc_mark_partition_disengaged(part); 1034 xpc_mark_partition_disengaged(part);
1131 xpc_IPI_send_disengage(part); 1035 xpc_IPI_send_disengage(part);
@@ -1134,9 +1038,9 @@ xpc_die_disengage(void)
1134 1038
1135 time = rtc_time(); 1039 time = rtc_time();
1136 printmsg_time = time + 1040 printmsg_time = time +
1137 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); 1041 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
1138 disengage_request_timeout = time + 1042 disengage_request_timeout = time +
1139 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); 1043 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
1140 1044
1141 /* wait for all other partitions to disengage from us */ 1045 /* wait for all other partitions to disengage from us */
1142 1046
@@ -1152,8 +1056,8 @@ xpc_die_disengage(void)
1152 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1056 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1153 if (engaged & (1UL << partid)) { 1057 if (engaged & (1UL << partid)) {
1154 dev_info(xpc_part, "disengage from " 1058 dev_info(xpc_part, "disengage from "
1155 "remote partition %d timed " 1059 "remote partition %d timed "
1156 "out\n", partid); 1060 "out\n", partid);
1157 } 1061 }
1158 } 1062 }
1159 break; 1063 break;
@@ -1161,17 +1065,16 @@ xpc_die_disengage(void)
1161 1065
1162 if (time >= printmsg_time) { 1066 if (time >= printmsg_time) {
1163 dev_info(xpc_part, "waiting for remote partitions to " 1067 dev_info(xpc_part, "waiting for remote partitions to "
1164 "disengage, timeout in %ld seconds\n", 1068 "disengage, timeout in %ld seconds\n",
1165 (disengage_request_timeout - time) / 1069 (disengage_request_timeout - time) /
1166 sn_rtc_cycles_per_second); 1070 sn_rtc_cycles_per_second);
1167 printmsg_time = time + 1071 printmsg_time = time +
1168 (XPC_DISENGAGE_PRINTMSG_INTERVAL * 1072 (XPC_DISENGAGE_PRINTMSG_INTERVAL *
1169 sn_rtc_cycles_per_second); 1073 sn_rtc_cycles_per_second);
1170 } 1074 }
1171 } 1075 }
1172} 1076}
1173 1077
1174
1175/* 1078/*
1176 * This function is called when the system is being restarted or halted due 1079 * This function is called when the system is being restarted or halted due
1177 * to some sort of system failure. If this is the case we need to notify the 1080 * to some sort of system failure. If this is the case we need to notify the
@@ -1191,9 +1094,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1191 1094
1192 case DIE_KDEBUG_ENTER: 1095 case DIE_KDEBUG_ENTER:
1193 /* Should lack of heartbeat be ignored by other partitions? */ 1096 /* Should lack of heartbeat be ignored by other partitions? */
1194 if (!xpc_kdebug_ignore) { 1097 if (!xpc_kdebug_ignore)
1195 break; 1098 break;
1196 } 1099
1197 /* fall through */ 1100 /* fall through */
1198 case DIE_MCA_MONARCH_ENTER: 1101 case DIE_MCA_MONARCH_ENTER:
1199 case DIE_INIT_MONARCH_ENTER: 1102 case DIE_INIT_MONARCH_ENTER:
@@ -1203,9 +1106,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1203 1106
1204 case DIE_KDEBUG_LEAVE: 1107 case DIE_KDEBUG_LEAVE:
1205 /* Is lack of heartbeat being ignored by other partitions? */ 1108 /* Is lack of heartbeat being ignored by other partitions? */
1206 if (!xpc_kdebug_ignore) { 1109 if (!xpc_kdebug_ignore)
1207 break; 1110 break;
1208 } 1111
1209 /* fall through */ 1112 /* fall through */
1210 case DIE_MCA_MONARCH_LEAVE: 1113 case DIE_MCA_MONARCH_LEAVE:
1211 case DIE_INIT_MONARCH_LEAVE: 1114 case DIE_INIT_MONARCH_LEAVE:
@@ -1217,26 +1120,23 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1217 return NOTIFY_DONE; 1120 return NOTIFY_DONE;
1218} 1121}
1219 1122
1220
1221int __init 1123int __init
1222xpc_init(void) 1124xpc_init(void)
1223{ 1125{
1224 int ret; 1126 int ret;
1225 partid_t partid; 1127 partid_t partid;
1226 struct xpc_partition *part; 1128 struct xpc_partition *part;
1227 pid_t pid; 1129 struct task_struct *kthread;
1228 size_t buf_size; 1130 size_t buf_size;
1229 1131
1230 1132 if (!ia64_platform_is("sn2"))
1231 if (!ia64_platform_is("sn2")) {
1232 return -ENODEV; 1133 return -ENODEV;
1233 }
1234
1235 1134
1236 buf_size = max(XPC_RP_VARS_SIZE, 1135 buf_size = max(XPC_RP_VARS_SIZE,
1237 XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); 1136 XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
1238 xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, 1137 xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
1239 GFP_KERNEL, &xpc_remote_copy_buffer_base); 1138 GFP_KERNEL,
1139 &xpc_remote_copy_buffer_base);
1240 if (xpc_remote_copy_buffer == NULL) 1140 if (xpc_remote_copy_buffer == NULL)
1241 return -ENOMEM; 1141 return -ENOMEM;
1242 1142
@@ -1256,7 +1156,7 @@ xpc_init(void)
1256 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1156 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1257 part = &xpc_partitions[partid]; 1157 part = &xpc_partitions[partid];
1258 1158
1259 DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part)); 1159 DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
1260 1160
1261 part->act_IRQ_rcvd = 0; 1161 part->act_IRQ_rcvd = 0;
1262 spin_lock_init(&part->act_lock); 1162 spin_lock_init(&part->act_lock);
@@ -1265,8 +1165,8 @@ xpc_init(void)
1265 1165
1266 init_timer(&part->disengage_request_timer); 1166 init_timer(&part->disengage_request_timer);
1267 part->disengage_request_timer.function = 1167 part->disengage_request_timer.function =
1268 xpc_timeout_partition_disengage_request; 1168 xpc_timeout_partition_disengage_request;
1269 part->disengage_request_timer.data = (unsigned long) part; 1169 part->disengage_request_timer.data = (unsigned long)part;
1270 1170
1271 part->setup_state = XPC_P_UNSET; 1171 part->setup_state = XPC_P_UNSET;
1272 init_waitqueue_head(&part->teardown_wq); 1172 init_waitqueue_head(&part->teardown_wq);
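The hunk above keeps the 2.6-era kernel timer interface for part->disengage_request_timer: the callback is stored in .function, its argument is passed through the unsigned long .data field, and the timer is later cancelled with del_singleshot_timer_sync()/del_timer_sync(). A minimal sketch of that pattern follows; the example_* names are illustrative only and are not part of this commit.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_part {
	struct timer_list timer;	/* one timer per tracked object */
};

/* runs in timer (softirq) context once the timeout expires */
static void example_timeout(unsigned long data)
{
	struct example_part *p = (struct example_part *)data;

	(void)p;	/* deactivate / clean up 'p' here */
}

static void example_arm_timer(struct example_part *p, unsigned long seconds)
{
	init_timer(&p->timer);
	p->timer.function = example_timeout;
	p->timer.data = (unsigned long)p;
	p->timer.expires = jiffies + seconds * HZ;
	add_timer(&p->timer);
	/* cancel later with del_timer_sync(&p->timer) */
}

In xpc_init() this same pattern backs part->disengage_request_timer, whose expiry is what marks a disengage request as timed out.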
@@ -1292,16 +1192,15 @@ xpc_init(void)
1292 * but rather immediately process the interrupt. 1192 * but rather immediately process the interrupt.
1293 */ 1193 */
1294 ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, 1194 ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
1295 "xpc hb", NULL); 1195 "xpc hb", NULL);
1296 if (ret != 0) { 1196 if (ret != 0) {
1297 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " 1197 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
1298 "errno=%d\n", -ret); 1198 "errno=%d\n", -ret);
1299 1199
1300 xpc_restrict_IPI_ops(); 1200 xpc_restrict_IPI_ops();
1301 1201
1302 if (xpc_sysctl) { 1202 if (xpc_sysctl)
1303 unregister_sysctl_table(xpc_sysctl); 1203 unregister_sysctl_table(xpc_sysctl);
1304 }
1305 1204
1306 kfree(xpc_remote_copy_buffer_base); 1205 kfree(xpc_remote_copy_buffer_base);
1307 return -EBUSY; 1206 return -EBUSY;
@@ -1319,26 +1218,22 @@ xpc_init(void)
1319 free_irq(SGI_XPC_ACTIVATE, NULL); 1218 free_irq(SGI_XPC_ACTIVATE, NULL);
1320 xpc_restrict_IPI_ops(); 1219 xpc_restrict_IPI_ops();
1321 1220
1322 if (xpc_sysctl) { 1221 if (xpc_sysctl)
1323 unregister_sysctl_table(xpc_sysctl); 1222 unregister_sysctl_table(xpc_sysctl);
1324 }
1325 1223
1326 kfree(xpc_remote_copy_buffer_base); 1224 kfree(xpc_remote_copy_buffer_base);
1327 return -EBUSY; 1225 return -EBUSY;
1328 } 1226 }
1329 1227
1330
1331 /* add ourselves to the reboot_notifier_list */ 1228 /* add ourselves to the reboot_notifier_list */
1332 ret = register_reboot_notifier(&xpc_reboot_notifier); 1229 ret = register_reboot_notifier(&xpc_reboot_notifier);
1333 if (ret != 0) { 1230 if (ret != 0)
1334 dev_warn(xpc_part, "can't register reboot notifier\n"); 1231 dev_warn(xpc_part, "can't register reboot notifier\n");
1335 }
1336 1232
1337 /* add ourselves to the die_notifier list */ 1233 /* add ourselves to the die_notifier list */
1338 ret = register_die_notifier(&xpc_die_notifier); 1234 ret = register_die_notifier(&xpc_die_notifier);
1339 if (ret != 0) { 1235 if (ret != 0)
1340 dev_warn(xpc_part, "can't register die notifier\n"); 1236 dev_warn(xpc_part, "can't register die notifier\n");
1341 }
1342 1237
1343 init_timer(&xpc_hb_timer); 1238 init_timer(&xpc_hb_timer);
1344 xpc_hb_timer.function = xpc_hb_beater; 1239 xpc_hb_timer.function = xpc_hb_beater;
@@ -1347,39 +1242,38 @@ xpc_init(void)
1347 * The real work-horse behind xpc. This processes incoming 1242 * The real work-horse behind xpc. This processes incoming
1348 * interrupts and monitors remote heartbeats. 1243 * interrupts and monitors remote heartbeats.
1349 */ 1244 */
1350 pid = kernel_thread(xpc_hb_checker, NULL, 0); 1245 kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
1351 if (pid < 0) { 1246 if (IS_ERR(kthread)) {
1352 dev_err(xpc_part, "failed while forking hb check thread\n"); 1247 dev_err(xpc_part, "failed while forking hb check thread\n");
1353 1248
1354 /* indicate to others that our reserved page is uninitialized */ 1249 /* indicate to others that our reserved page is uninitialized */
1355 xpc_rsvd_page->vars_pa = 0; 1250 xpc_rsvd_page->vars_pa = 0;
1356 1251
1357 /* take ourselves off of the reboot_notifier_list */ 1252 /* take ourselves off of the reboot_notifier_list */
1358 (void) unregister_reboot_notifier(&xpc_reboot_notifier); 1253 (void)unregister_reboot_notifier(&xpc_reboot_notifier);
1359 1254
1360 /* take ourselves off of the die_notifier list */ 1255 /* take ourselves off of the die_notifier list */
1361 (void) unregister_die_notifier(&xpc_die_notifier); 1256 (void)unregister_die_notifier(&xpc_die_notifier);
1362 1257
1363 del_timer_sync(&xpc_hb_timer); 1258 del_timer_sync(&xpc_hb_timer);
1364 free_irq(SGI_XPC_ACTIVATE, NULL); 1259 free_irq(SGI_XPC_ACTIVATE, NULL);
1365 xpc_restrict_IPI_ops(); 1260 xpc_restrict_IPI_ops();
1366 1261
1367 if (xpc_sysctl) { 1262 if (xpc_sysctl)
1368 unregister_sysctl_table(xpc_sysctl); 1263 unregister_sysctl_table(xpc_sysctl);
1369 }
1370 1264
1371 kfree(xpc_remote_copy_buffer_base); 1265 kfree(xpc_remote_copy_buffer_base);
1372 return -EBUSY; 1266 return -EBUSY;
1373 } 1267 }
1374 1268
1375
1376 /* 1269 /*
1377 * Startup a thread that will attempt to discover other partitions to 1270 * Startup a thread that will attempt to discover other partitions to
1378 * activate based on info provided by SAL. This new thread is short 1271 * activate based on info provided by SAL. This new thread is short
1379 * lived and will exit once discovery is complete. 1272 * lived and will exit once discovery is complete.
1380 */ 1273 */
1381 pid = kernel_thread(xpc_initiate_discovery, NULL, 0); 1274 kthread = kthread_run(xpc_initiate_discovery, NULL,
1382 if (pid < 0) { 1275 XPC_DISCOVERY_THREAD_NAME);
1276 if (IS_ERR(kthread)) {
1383 dev_err(xpc_part, "failed while forking discovery thread\n"); 1277 dev_err(xpc_part, "failed while forking discovery thread\n");
1384 1278
1385 /* mark this new thread as a non-starter */ 1279 /* mark this new thread as a non-starter */
@@ -1389,7 +1283,6 @@ xpc_init(void)
1389 return -EBUSY; 1283 return -EBUSY;
1390 } 1284 }
1391 1285
1392
1393 /* set the interface to point at XPC's functions */ 1286 /* set the interface to point at XPC's functions */
1394 xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, 1287 xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
1395 xpc_initiate_allocate, xpc_initiate_send, 1288 xpc_initiate_allocate, xpc_initiate_send,
@@ -1398,16 +1291,16 @@ xpc_init(void)
1398 1291
1399 return 0; 1292 return 0;
1400} 1293}
1401module_init(xpc_init);
1402 1294
1295module_init(xpc_init);
1403 1296
1404void __exit 1297void __exit
1405xpc_exit(void) 1298xpc_exit(void)
1406{ 1299{
1407 xpc_do_exit(xpcUnloading); 1300 xpc_do_exit(xpcUnloading);
1408} 1301}
1409module_exit(xpc_exit);
1410 1302
1303module_exit(xpc_exit);
1411 1304
1412MODULE_AUTHOR("Silicon Graphics, Inc."); 1305MODULE_AUTHOR("Silicon Graphics, Inc.");
1413MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); 1306MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
@@ -1415,17 +1308,16 @@ MODULE_LICENSE("GPL");
1415 1308
1416module_param(xpc_hb_interval, int, 0); 1309module_param(xpc_hb_interval, int, 0);
1417MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " 1310MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
1418 "heartbeat increments."); 1311 "heartbeat increments.");
1419 1312
1420module_param(xpc_hb_check_interval, int, 0); 1313module_param(xpc_hb_check_interval, int, 0);
1421MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " 1314MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1422 "heartbeat checks."); 1315 "heartbeat checks.");
1423 1316
1424module_param(xpc_disengage_request_timelimit, int, 0); 1317module_param(xpc_disengage_request_timelimit, int, 0);
1425MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " 1318MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
1426 "for disengage request to complete."); 1319 "for disengage request to complete.");
1427 1320
1428module_param(xpc_kdebug_ignore, int, 0); 1321module_param(xpc_kdebug_ignore, int, 0);
1429MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " 1322MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
1430 "other partitions when dropping into kdebug."); 1323 "other partitions when dropping into kdebug.");
1431
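The xpc_main.c changes above convert every raw kernel_thread()/pid check to the kthread API: kthread_run() spawns the thread, returns a struct task_struct *, and reports failure as an ERR_PTR value rather than a negative pid. A bare-bones sketch of that conversion pattern; the example_* names are illustrative only and do not appear in the commit.

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static int example_thread_fn(void *data)
{
	/* run until someone calls kthread_stop() on this thread */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static int example_start_thread(void)
{
	struct task_struct *kthread;

	kthread = kthread_run(example_thread_fn, NULL, "example%d", 0);
	if (IS_ERR(kthread))
		return PTR_ERR(kthread);	/* thread was never created */

	return 0;
}

The IS_ERR() test is what replaces the old 'if (pid < 0)' checks in xpc_create_kthreads() and xpc_init().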
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 9e97c2684832..27e200ec5826 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -3,10 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Communication (XPC) partition support. 10 * Cross Partition Communication (XPC) partition support.
12 * 11 *
@@ -16,7 +15,6 @@
16 * 15 *
17 */ 16 */
18 17
19
20#include <linux/kernel.h> 18#include <linux/kernel.h>
21#include <linux/sysctl.h> 19#include <linux/sysctl.h>
22#include <linux/cache.h> 20#include <linux/cache.h>
@@ -28,13 +26,11 @@
28#include <asm/sn/sn_sal.h> 26#include <asm/sn/sn_sal.h>
29#include <asm/sn/nodepda.h> 27#include <asm/sn/nodepda.h>
30#include <asm/sn/addrs.h> 28#include <asm/sn/addrs.h>
31#include <asm/sn/xpc.h> 29#include "xpc.h"
32
33 30
34/* XPC is exiting flag */ 31/* XPC is exiting flag */
35int xpc_exiting; 32int xpc_exiting;
36 33
37
38/* SH_IPI_ACCESS shub register value on startup */ 34/* SH_IPI_ACCESS shub register value on startup */
39static u64 xpc_sh1_IPI_access; 35static u64 xpc_sh1_IPI_access;
40static u64 xpc_sh2_IPI_access0; 36static u64 xpc_sh2_IPI_access0;
@@ -42,11 +38,9 @@ static u64 xpc_sh2_IPI_access1;
42static u64 xpc_sh2_IPI_access2; 38static u64 xpc_sh2_IPI_access2;
43static u64 xpc_sh2_IPI_access3; 39static u64 xpc_sh2_IPI_access3;
44 40
45
46/* original protection values for each node */ 41/* original protection values for each node */
47u64 xpc_prot_vec[MAX_NUMNODES]; 42u64 xpc_prot_vec[MAX_NUMNODES];
48 43
49
50/* this partition's reserved page pointers */ 44/* this partition's reserved page pointers */
51struct xpc_rsvd_page *xpc_rsvd_page; 45struct xpc_rsvd_page *xpc_rsvd_page;
52static u64 *xpc_part_nasids; 46static u64 *xpc_part_nasids;
@@ -57,7 +51,6 @@ struct xpc_vars_part *xpc_vars_part;
57static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */ 51static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */
58static int xp_nasid_mask_words; /* actual size in words of nasid mask */ 52static int xp_nasid_mask_words; /* actual size in words of nasid mask */
59 53
60
61/* 54/*
62 * For performance reasons, each entry of xpc_partitions[] is cacheline 55 * For performance reasons, each entry of xpc_partitions[] is cacheline
63 * aligned. And xpc_partitions[] is padded with an additional entry at the 56 * aligned. And xpc_partitions[] is padded with an additional entry at the
@@ -66,7 +59,6 @@ static int xp_nasid_mask_words; /* actual size in words of nasid mask */
66 */ 59 */
67struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; 60struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
68 61
69
70/* 62/*
71 * Generic buffer used to store a local copy of portions of a remote 63 * Generic buffer used to store a local copy of portions of a remote
72 * partition's reserved page (either its header and part_nasids mask, 64 * partition's reserved page (either its header and part_nasids mask,
@@ -75,7 +67,6 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
75char *xpc_remote_copy_buffer; 67char *xpc_remote_copy_buffer;
76void *xpc_remote_copy_buffer_base; 68void *xpc_remote_copy_buffer_base;
77 69
78
79/* 70/*
80 * Guarantee that the kmalloc'd memory is cacheline aligned. 71 * Guarantee that the kmalloc'd memory is cacheline aligned.
81 */ 72 */
@@ -84,22 +75,21 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
84{ 75{
85 /* see if kmalloc will give us cachline aligned memory by default */ 76 /* see if kmalloc will give us cachline aligned memory by default */
86 *base = kmalloc(size, flags); 77 *base = kmalloc(size, flags);
87 if (*base == NULL) { 78 if (*base == NULL)
88 return NULL; 79 return NULL;
89 } 80
90 if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { 81 if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
91 return *base; 82 return *base;
92 } 83
93 kfree(*base); 84 kfree(*base);
94 85
95 /* nope, we'll have to do it ourselves */ 86 /* nope, we'll have to do it ourselves */
96 *base = kmalloc(size + L1_CACHE_BYTES, flags); 87 *base = kmalloc(size + L1_CACHE_BYTES, flags);
97 if (*base == NULL) { 88 if (*base == NULL)
98 return NULL; 89 return NULL;
99 }
100 return (void *) L1_CACHE_ALIGN((u64) *base);
101}
102 90
91 return (void *)L1_CACHE_ALIGN((u64)*base);
92}
103 93
104/* 94/*
105 * Given a nasid, get the physical address of the partition's reserved page 95 * Given a nasid, get the physical address of the partition's reserved page
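The xpc_kmalloc_cacheline_aligned() hunk above shows how XPC guarantees L1-cacheline-aligned buffers: try a plain kmalloc() first, and if the result is not already aligned, over-allocate by L1_CACHE_BYTES and round the pointer up, handing the raw allocation back through *base so the caller can kfree(*base) later. A condensed sketch of just the over-allocation path, using an illustrative name rather than the driver's helper:

#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/types.h>

/*
 * Return 'size' bytes aligned to L1_CACHE_BYTES; '*base' receives the raw
 * allocation so the caller can later kfree(*base).
 */
static void *example_kmalloc_aligned(size_t size, gfp_t flags, void **base)
{
	*base = kmalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}

Callers such as xpc_get_rsvd_page_pa() and xpc_init() keep the *base pointer (e.g. xpc_remote_copy_buffer_base) solely so the buffer can be freed on teardown.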
@@ -117,25 +107,24 @@ xpc_get_rsvd_page_pa(int nasid)
117 u64 buf_len = 0; 107 u64 buf_len = 0;
118 void *buf_base = NULL; 108 void *buf_base = NULL;
119 109
120
121 while (1) { 110 while (1) {
122 111
123 status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, 112 status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
124 &len); 113 &len);
125 114
126 dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" 115 dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
127 "0x%016lx, address=0x%016lx, len=0x%016lx\n", 116 "0x%016lx, address=0x%016lx, len=0x%016lx\n",
128 status, cookie, rp_pa, len); 117 status, cookie, rp_pa, len);
129 118
130 if (status != SALRET_MORE_PASSES) { 119 if (status != SALRET_MORE_PASSES)
131 break; 120 break;
132 }
133 121
134 if (L1_CACHE_ALIGN(len) > buf_len) { 122 if (L1_CACHE_ALIGN(len) > buf_len) {
135 kfree(buf_base); 123 kfree(buf_base);
136 buf_len = L1_CACHE_ALIGN(len); 124 buf_len = L1_CACHE_ALIGN(len);
137 buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len, 125 buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
138 GFP_KERNEL, &buf_base); 126 GFP_KERNEL,
127 &buf_base);
139 if (buf_base == NULL) { 128 if (buf_base == NULL) {
140 dev_err(xpc_part, "unable to kmalloc " 129 dev_err(xpc_part, "unable to kmalloc "
141 "len=0x%016lx\n", buf_len); 130 "len=0x%016lx\n", buf_len);
@@ -145,7 +134,7 @@ xpc_get_rsvd_page_pa(int nasid)
145 } 134 }
146 135
147 bte_res = xp_bte_copy(rp_pa, buf, buf_len, 136 bte_res = xp_bte_copy(rp_pa, buf, buf_len,
148 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 137 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
149 if (bte_res != BTE_SUCCESS) { 138 if (bte_res != BTE_SUCCESS) {
150 dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); 139 dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
151 status = SALRET_ERROR; 140 status = SALRET_ERROR;
@@ -155,14 +144,13 @@ xpc_get_rsvd_page_pa(int nasid)
155 144
156 kfree(buf_base); 145 kfree(buf_base);
157 146
158 if (status != SALRET_OK) { 147 if (status != SALRET_OK)
159 rp_pa = 0; 148 rp_pa = 0;
160 } 149
161 dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa); 150 dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
162 return rp_pa; 151 return rp_pa;
163} 152}
164 153
165
166/* 154/*
167 * Fill the partition reserved page with the information needed by 155 * Fill the partition reserved page with the information needed by
168 * other partitions to discover we are alive and establish initial 156 * other partitions to discover we are alive and establish initial
@@ -176,7 +164,6 @@ xpc_rsvd_page_init(void)
176 u64 rp_pa, nasid_array = 0; 164 u64 rp_pa, nasid_array = 0;
177 int i, ret; 165 int i, ret;
178 166
179
180 /* get the local reserved page's address */ 167 /* get the local reserved page's address */
181 168
182 preempt_disable(); 169 preempt_disable();
@@ -186,7 +173,7 @@ xpc_rsvd_page_init(void)
186 dev_err(xpc_part, "SAL failed to locate the reserved page\n"); 173 dev_err(xpc_part, "SAL failed to locate the reserved page\n");
187 return NULL; 174 return NULL;
188 } 175 }
189 rp = (struct xpc_rsvd_page *) __va(rp_pa); 176 rp = (struct xpc_rsvd_page *)__va(rp_pa);
190 177
191 if (rp->partid != sn_partition_id) { 178 if (rp->partid != sn_partition_id) {
192 dev_err(xpc_part, "the reserved page's partid of %d should be " 179 dev_err(xpc_part, "the reserved page's partid of %d should be "
@@ -222,8 +209,9 @@ xpc_rsvd_page_init(void)
222 * on subsequent loads of XPC. This AMO page is never freed, and its 209 * on subsequent loads of XPC. This AMO page is never freed, and its
223 * memory protections are never restricted. 210 * memory protections are never restricted.
224 */ 211 */
225 if ((amos_page = xpc_vars->amos_page) == NULL) { 212 amos_page = xpc_vars->amos_page;
226 amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0)); 213 if (amos_page == NULL) {
214 amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0));
227 if (amos_page == NULL) { 215 if (amos_page == NULL) {
228 dev_err(xpc_part, "can't allocate page of AMOs\n"); 216 dev_err(xpc_part, "can't allocate page of AMOs\n");
229 return NULL; 217 return NULL;
@@ -234,30 +222,31 @@ xpc_rsvd_page_init(void)
234 * when xpc_allow_IPI_ops() is called via xpc_hb_init(). 222 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
235 */ 223 */
236 if (!enable_shub_wars_1_1()) { 224 if (!enable_shub_wars_1_1()) {
237 ret = sn_change_memprotect(ia64_tpa((u64) amos_page), 225 ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
238 PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1, 226 PAGE_SIZE,
239 &nasid_array); 227 SN_MEMPROT_ACCESS_CLASS_1,
228 &nasid_array);
240 if (ret != 0) { 229 if (ret != 0) {
241 dev_err(xpc_part, "can't change memory " 230 dev_err(xpc_part, "can't change memory "
242 "protections\n"); 231 "protections\n");
243 uncached_free_page(__IA64_UNCACHED_OFFSET | 232 uncached_free_page(__IA64_UNCACHED_OFFSET |
244 TO_PHYS((u64) amos_page)); 233 TO_PHYS((u64)amos_page));
245 return NULL; 234 return NULL;
246 } 235 }
247 } 236 }
248 } else if (!IS_AMO_ADDRESS((u64) amos_page)) { 237 } else if (!IS_AMO_ADDRESS((u64)amos_page)) {
249 /* 238 /*
250 * EFI's XPBOOT can also set amos_page in the reserved page, 239 * EFI's XPBOOT can also set amos_page in the reserved page,
251 * but it happens to leave it as an uncached physical address 240 * but it happens to leave it as an uncached physical address
252 * and we need it to be an uncached virtual, so we'll have to 241 * and we need it to be an uncached virtual, so we'll have to
253 * convert it. 242 * convert it.
254 */ 243 */
255 if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) { 244 if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) {
256 dev_err(xpc_part, "previously used amos_page address " 245 dev_err(xpc_part, "previously used amos_page address "
257 "is bad = 0x%p\n", (void *) amos_page); 246 "is bad = 0x%p\n", (void *)amos_page);
258 return NULL; 247 return NULL;
259 } 248 }
260 amos_page = (AMO_t *) TO_AMO((u64) amos_page); 249 amos_page = (AMO_t *)TO_AMO((u64)amos_page);
261 } 250 }
262 251
263 /* clear xpc_vars */ 252 /* clear xpc_vars */
@@ -267,22 +256,20 @@ xpc_rsvd_page_init(void)
267 xpc_vars->act_nasid = cpuid_to_nasid(0); 256 xpc_vars->act_nasid = cpuid_to_nasid(0);
268 xpc_vars->act_phys_cpuid = cpu_physical_id(0); 257 xpc_vars->act_phys_cpuid = cpu_physical_id(0);
269 xpc_vars->vars_part_pa = __pa(xpc_vars_part); 258 xpc_vars->vars_part_pa = __pa(xpc_vars_part);
270 xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page); 259 xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
271 xpc_vars->amos_page = amos_page; /* save for next load of XPC */ 260 xpc_vars->amos_page = amos_page; /* save for next load of XPC */
272
273 261
274 /* clear xpc_vars_part */ 262 /* clear xpc_vars_part */
275 memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) * 263 memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
276 XP_MAX_PARTITIONS); 264 XP_MAX_PARTITIONS);
277 265
278 /* initialize the activate IRQ related AMO variables */ 266 /* initialize the activate IRQ related AMO variables */
279 for (i = 0; i < xp_nasid_mask_words; i++) { 267 for (i = 0; i < xp_nasid_mask_words; i++)
280 (void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i); 268 (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
281 }
282 269
283 /* initialize the engaged remote partitions related AMO variables */ 270 /* initialize the engaged remote partitions related AMO variables */
284 (void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO); 271 (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
285 (void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO); 272 (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
286 273
287 /* timestamp of when reserved page was setup by XPC */ 274 /* timestamp of when reserved page was setup by XPC */
288 rp->stamp = CURRENT_TIME; 275 rp->stamp = CURRENT_TIME;
@@ -296,7 +283,6 @@ xpc_rsvd_page_init(void)
296 return rp; 283 return rp;
297} 284}
298 285
299
300/* 286/*
301 * Change protections to allow IPI operations (and AMO operations on 287 * Change protections to allow IPI operations (and AMO operations on
302 * Shub 1.1 systems). 288 * Shub 1.1 systems).
@@ -307,39 +293,38 @@ xpc_allow_IPI_ops(void)
307 int node; 293 int node;
308 int nasid; 294 int nasid;
309 295
310 296 /* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
311 // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
312 297
313 if (is_shub2()) { 298 if (is_shub2()) {
314 xpc_sh2_IPI_access0 = 299 xpc_sh2_IPI_access0 =
315 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); 300 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
316 xpc_sh2_IPI_access1 = 301 xpc_sh2_IPI_access1 =
317 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); 302 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
318 xpc_sh2_IPI_access2 = 303 xpc_sh2_IPI_access2 =
319 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); 304 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
320 xpc_sh2_IPI_access3 = 305 xpc_sh2_IPI_access3 =
321 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); 306 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
322 307
323 for_each_online_node(node) { 308 for_each_online_node(node) {
324 nasid = cnodeid_to_nasid(node); 309 nasid = cnodeid_to_nasid(node);
325 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), 310 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
326 -1UL); 311 -1UL);
327 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), 312 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
328 -1UL); 313 -1UL);
329 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), 314 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
330 -1UL); 315 -1UL);
331 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), 316 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
332 -1UL); 317 -1UL);
333 } 318 }
334 319
335 } else { 320 } else {
336 xpc_sh1_IPI_access = 321 xpc_sh1_IPI_access =
337 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); 322 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
338 323
339 for_each_online_node(node) { 324 for_each_online_node(node) {
340 nasid = cnodeid_to_nasid(node); 325 nasid = cnodeid_to_nasid(node);
341 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), 326 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
342 -1UL); 327 -1UL);
343 328
344 /* 329 /*
345 * Since the BIST collides with memory operations on 330 * Since the BIST collides with memory operations on
@@ -347,21 +332,23 @@ xpc_allow_IPI_ops(void)
347 */ 332 */
348 if (enable_shub_wars_1_1()) { 333 if (enable_shub_wars_1_1()) {
349 /* open up everything */ 334 /* open up everything */
350 xpc_prot_vec[node] = (u64) HUB_L((u64 *) 335 xpc_prot_vec[node] = (u64)HUB_L((u64 *)
351 GLOBAL_MMR_ADDR(nasid, 336 GLOBAL_MMR_ADDR
352 SH1_MD_DQLP_MMR_DIR_PRIVEC0)); 337 (nasid,
353 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 338 SH1_MD_DQLP_MMR_DIR_PRIVEC0));
354 SH1_MD_DQLP_MMR_DIR_PRIVEC0), 339 HUB_S((u64 *)
355 -1UL); 340 GLOBAL_MMR_ADDR(nasid,
356 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 341 SH1_MD_DQLP_MMR_DIR_PRIVEC0),
357 SH1_MD_DQRP_MMR_DIR_PRIVEC0), 342 -1UL);
358 -1UL); 343 HUB_S((u64 *)
344 GLOBAL_MMR_ADDR(nasid,
345 SH1_MD_DQRP_MMR_DIR_PRIVEC0),
346 -1UL);
359 } 347 }
360 } 348 }
361 } 349 }
362} 350}
363 351
364
365/* 352/*
366 * Restrict protections to disallow IPI operations (and AMO operations on 353 * Restrict protections to disallow IPI operations (and AMO operations on
367 * Shub 1.1 systems). 354 * Shub 1.1 systems).
@@ -372,43 +359,41 @@ xpc_restrict_IPI_ops(void)
372 int node; 359 int node;
373 int nasid; 360 int nasid;
374 361
375 362 /* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
376 // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
377 363
378 if (is_shub2()) { 364 if (is_shub2()) {
379 365
380 for_each_online_node(node) { 366 for_each_online_node(node) {
381 nasid = cnodeid_to_nasid(node); 367 nasid = cnodeid_to_nasid(node);
382 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), 368 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
383 xpc_sh2_IPI_access0); 369 xpc_sh2_IPI_access0);
384 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), 370 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
385 xpc_sh2_IPI_access1); 371 xpc_sh2_IPI_access1);
386 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), 372 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
387 xpc_sh2_IPI_access2); 373 xpc_sh2_IPI_access2);
388 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), 374 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
389 xpc_sh2_IPI_access3); 375 xpc_sh2_IPI_access3);
390 } 376 }
391 377
392 } else { 378 } else {
393 379
394 for_each_online_node(node) { 380 for_each_online_node(node) {
395 nasid = cnodeid_to_nasid(node); 381 nasid = cnodeid_to_nasid(node);
396 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), 382 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
397 xpc_sh1_IPI_access); 383 xpc_sh1_IPI_access);
398 384
399 if (enable_shub_wars_1_1()) { 385 if (enable_shub_wars_1_1()) {
400 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 386 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
401 SH1_MD_DQLP_MMR_DIR_PRIVEC0), 387 SH1_MD_DQLP_MMR_DIR_PRIVEC0),
402 xpc_prot_vec[node]); 388 xpc_prot_vec[node]);
403 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 389 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
404 SH1_MD_DQRP_MMR_DIR_PRIVEC0), 390 SH1_MD_DQRP_MMR_DIR_PRIVEC0),
405 xpc_prot_vec[node]); 391 xpc_prot_vec[node]);
406 } 392 }
407 } 393 }
408 } 394 }
409} 395}
410 396
411
412/* 397/*
413 * At periodic intervals, scan through all active partitions and ensure 398 * At periodic intervals, scan through all active partitions and ensure
414 * their heartbeat is still active. If not, the partition is deactivated. 399 * their heartbeat is still active. If not, the partition is deactivated.
@@ -421,34 +406,31 @@ xpc_check_remote_hb(void)
421 partid_t partid; 406 partid_t partid;
422 bte_result_t bres; 407 bte_result_t bres;
423 408
424 409 remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
425 remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
426 410
427 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 411 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
428 412
429 if (xpc_exiting) { 413 if (xpc_exiting)
430 break; 414 break;
431 }
432 415
433 if (partid == sn_partition_id) { 416 if (partid == sn_partition_id)
434 continue; 417 continue;
435 }
436 418
437 part = &xpc_partitions[partid]; 419 part = &xpc_partitions[partid];
438 420
439 if (part->act_state == XPC_P_INACTIVE || 421 if (part->act_state == XPC_P_INACTIVE ||
440 part->act_state == XPC_P_DEACTIVATING) { 422 part->act_state == XPC_P_DEACTIVATING) {
441 continue; 423 continue;
442 } 424 }
443 425
444 /* pull the remote_hb cache line */ 426 /* pull the remote_hb cache line */
445 bres = xp_bte_copy(part->remote_vars_pa, 427 bres = xp_bte_copy(part->remote_vars_pa,
446 (u64) remote_vars, 428 (u64)remote_vars,
447 XPC_RP_VARS_SIZE, 429 XPC_RP_VARS_SIZE,
448 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 430 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
449 if (bres != BTE_SUCCESS) { 431 if (bres != BTE_SUCCESS) {
450 XPC_DEACTIVATE_PARTITION(part, 432 XPC_DEACTIVATE_PARTITION(part,
451 xpc_map_bte_errors(bres)); 433 xpc_map_bte_errors(bres));
452 continue; 434 continue;
453 } 435 }
454 436
@@ -459,8 +441,8 @@ xpc_check_remote_hb(void)
459 remote_vars->heartbeating_to_mask); 441 remote_vars->heartbeating_to_mask);
460 442
461 if (((remote_vars->heartbeat == part->last_heartbeat) && 443 if (((remote_vars->heartbeat == part->last_heartbeat) &&
462 (remote_vars->heartbeat_offline == 0)) || 444 (remote_vars->heartbeat_offline == 0)) ||
463 !xpc_hb_allowed(sn_partition_id, remote_vars)) { 445 !xpc_hb_allowed(sn_partition_id, remote_vars)) {
464 446
465 XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); 447 XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
466 continue; 448 continue;
@@ -470,7 +452,6 @@ xpc_check_remote_hb(void)
470 } 452 }
471} 453}
472 454
473
474/* 455/*
475 * Get a copy of a portion of the remote partition's rsvd page. 456 * Get a copy of a portion of the remote partition's rsvd page.
476 * 457 *
@@ -480,59 +461,48 @@ xpc_check_remote_hb(void)
480 */ 461 */
481static enum xpc_retval 462static enum xpc_retval
482xpc_get_remote_rp(int nasid, u64 *discovered_nasids, 463xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
483 struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) 464 struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
484{ 465{
485 int bres, i; 466 int bres, i;
486 467
487
488 /* get the reserved page's physical address */ 468 /* get the reserved page's physical address */
489 469
490 *remote_rp_pa = xpc_get_rsvd_page_pa(nasid); 470 *remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
491 if (*remote_rp_pa == 0) { 471 if (*remote_rp_pa == 0)
492 return xpcNoRsvdPageAddr; 472 return xpcNoRsvdPageAddr;
493 }
494
495 473
496 /* pull over the reserved page header and part_nasids mask */ 474 /* pull over the reserved page header and part_nasids mask */
497 bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp, 475 bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp,
498 XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, 476 XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
499 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 477 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
500 if (bres != BTE_SUCCESS) { 478 if (bres != BTE_SUCCESS)
501 return xpc_map_bte_errors(bres); 479 return xpc_map_bte_errors(bres);
502 }
503
504 480
505 if (discovered_nasids != NULL) { 481 if (discovered_nasids != NULL) {
506 u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); 482 u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
507 483
508 484 for (i = 0; i < xp_nasid_mask_words; i++)
509 for (i = 0; i < xp_nasid_mask_words; i++) {
510 discovered_nasids[i] |= remote_part_nasids[i]; 485 discovered_nasids[i] |= remote_part_nasids[i];
511 }
512 } 486 }
513 487
514
515 /* check that the partid is for another partition */ 488 /* check that the partid is for another partition */
516 489
517 if (remote_rp->partid < 1 || 490 if (remote_rp->partid < 1 ||
518 remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { 491 remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
519 return xpcInvalidPartid; 492 return xpcInvalidPartid;
520 } 493 }
521 494
522 if (remote_rp->partid == sn_partition_id) { 495 if (remote_rp->partid == sn_partition_id)
523 return xpcLocalPartid; 496 return xpcLocalPartid;
524 }
525
526 497
527 if (XPC_VERSION_MAJOR(remote_rp->version) != 498 if (XPC_VERSION_MAJOR(remote_rp->version) !=
528 XPC_VERSION_MAJOR(XPC_RP_VERSION)) { 499 XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
529 return xpcBadVersion; 500 return xpcBadVersion;
530 } 501 }
531 502
532 return xpcSuccess; 503 return xpcSuccess;
533} 504}
534 505
535
536/* 506/*
537 * Get a copy of the remote partition's XPC variables from the reserved page. 507 * Get a copy of the remote partition's XPC variables from the reserved page.
538 * 508 *
@@ -544,34 +514,30 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
544{ 514{
545 int bres; 515 int bres;
546 516
547 517 if (remote_vars_pa == 0)
548 if (remote_vars_pa == 0) {
549 return xpcVarsNotSet; 518 return xpcVarsNotSet;
550 }
551 519
552 /* pull over the cross partition variables */ 520 /* pull over the cross partition variables */
553 bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE, 521 bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE,
554 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 522 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
555 if (bres != BTE_SUCCESS) { 523 if (bres != BTE_SUCCESS)
556 return xpc_map_bte_errors(bres); 524 return xpc_map_bte_errors(bres);
557 }
558 525
559 if (XPC_VERSION_MAJOR(remote_vars->version) != 526 if (XPC_VERSION_MAJOR(remote_vars->version) !=
560 XPC_VERSION_MAJOR(XPC_V_VERSION)) { 527 XPC_VERSION_MAJOR(XPC_V_VERSION)) {
561 return xpcBadVersion; 528 return xpcBadVersion;
562 } 529 }
563 530
564 return xpcSuccess; 531 return xpcSuccess;
565} 532}
566 533
567
568/* 534/*
569 * Update the remote partition's info. 535 * Update the remote partition's info.
570 */ 536 */
571static void 537static void
572xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, 538xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
573 struct timespec *remote_rp_stamp, u64 remote_rp_pa, 539 struct timespec *remote_rp_stamp, u64 remote_rp_pa,
574 u64 remote_vars_pa, struct xpc_vars *remote_vars) 540 u64 remote_vars_pa, struct xpc_vars *remote_vars)
575{ 541{
576 part->remote_rp_version = remote_rp_version; 542 part->remote_rp_version = remote_rp_version;
577 dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n", 543 dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
@@ -613,7 +579,6 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
613 part->remote_vars_version); 579 part->remote_vars_version);
614} 580}
615 581
616
617/* 582/*
618 * Prior code has determined the nasid which generated an IPI. Inspect 583 * Prior code has determined the nasid which generated an IPI. Inspect
619 * that nasid to determine if its partition needs to be activated or 584 * that nasid to determine if its partition needs to be activated or
@@ -643,54 +608,51 @@ xpc_identify_act_IRQ_req(int nasid)
643 struct xpc_partition *part; 608 struct xpc_partition *part;
644 enum xpc_retval ret; 609 enum xpc_retval ret;
645 610
646
647 /* pull over the reserved page structure */ 611 /* pull over the reserved page structure */
648 612
649 remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer; 613 remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
650 614
651 ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); 615 ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
652 if (ret != xpcSuccess) { 616 if (ret != xpcSuccess) {
653 dev_warn(xpc_part, "unable to get reserved page from nasid %d, " 617 dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
654 "which sent interrupt, reason=%d\n", nasid, ret); 618 "which sent interrupt, reason=%d\n", nasid, ret);
655 return; 619 return;
656 } 620 }
657 621
658 remote_vars_pa = remote_rp->vars_pa; 622 remote_vars_pa = remote_rp->vars_pa;
659 remote_rp_version = remote_rp->version; 623 remote_rp_version = remote_rp->version;
660 if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { 624 if (XPC_SUPPORTS_RP_STAMP(remote_rp_version))
661 remote_rp_stamp = remote_rp->stamp; 625 remote_rp_stamp = remote_rp->stamp;
662 } 626
663 partid = remote_rp->partid; 627 partid = remote_rp->partid;
664 part = &xpc_partitions[partid]; 628 part = &xpc_partitions[partid];
665 629
666
667 /* pull over the cross partition variables */ 630 /* pull over the cross partition variables */
668 631
669 remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; 632 remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
670 633
671 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); 634 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
672 if (ret != xpcSuccess) { 635 if (ret != xpcSuccess) {
673 636
674 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " 637 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
675 "which sent interrupt, reason=%d\n", nasid, ret); 638 "which sent interrupt, reason=%d\n", nasid, ret);
676 639
677 XPC_DEACTIVATE_PARTITION(part, ret); 640 XPC_DEACTIVATE_PARTITION(part, ret);
678 return; 641 return;
679 } 642 }
680 643
681
682 part->act_IRQ_rcvd++; 644 part->act_IRQ_rcvd++;
683 645
684 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " 646 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
685 "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd, 647 "%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
686 remote_vars->heartbeat, remote_vars->heartbeating_to_mask); 648 remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
687 649
688 if (xpc_partition_disengaged(part) && 650 if (xpc_partition_disengaged(part) &&
689 part->act_state == XPC_P_INACTIVE) { 651 part->act_state == XPC_P_INACTIVE) {
690 652
691 xpc_update_partition_info(part, remote_rp_version, 653 xpc_update_partition_info(part, remote_rp_version,
692 &remote_rp_stamp, remote_rp_pa, 654 &remote_rp_stamp, remote_rp_pa,
693 remote_vars_pa, remote_vars); 655 remote_vars_pa, remote_vars);
694 656
695 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { 657 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
696 if (xpc_partition_disengage_requested(1UL << partid)) { 658 if (xpc_partition_disengage_requested(1UL << partid)) {
@@ -714,16 +676,15 @@ xpc_identify_act_IRQ_req(int nasid)
714 676
715 if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) { 677 if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
716 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part-> 678 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
717 remote_vars_version)); 679 remote_vars_version));
718 680
719 if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { 681 if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
720 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> 682 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
721 version)); 683 version));
722 /* see if the other side rebooted */ 684 /* see if the other side rebooted */
723 if (part->remote_amos_page_pa == 685 if (part->remote_amos_page_pa ==
724 remote_vars->amos_page_pa && 686 remote_vars->amos_page_pa &&
725 xpc_hb_allowed(sn_partition_id, 687 xpc_hb_allowed(sn_partition_id, remote_vars)) {
726 remote_vars)) {
727 /* doesn't look that way, so ignore the IPI */ 688 /* doesn't look that way, so ignore the IPI */
728 return; 689 return;
729 } 690 }
@@ -735,8 +696,8 @@ xpc_identify_act_IRQ_req(int nasid)
735 */ 696 */
736 697
737 xpc_update_partition_info(part, remote_rp_version, 698 xpc_update_partition_info(part, remote_rp_version,
738 &remote_rp_stamp, remote_rp_pa, 699 &remote_rp_stamp, remote_rp_pa,
739 remote_vars_pa, remote_vars); 700 remote_vars_pa, remote_vars);
740 part->reactivate_nasid = nasid; 701 part->reactivate_nasid = nasid;
741 XPC_DEACTIVATE_PARTITION(part, xpcReactivating); 702 XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
742 return; 703 return;
@@ -756,15 +717,15 @@ xpc_identify_act_IRQ_req(int nasid)
756 xpc_clear_partition_disengage_request(1UL << partid); 717 xpc_clear_partition_disengage_request(1UL << partid);
757 718
758 xpc_update_partition_info(part, remote_rp_version, 719 xpc_update_partition_info(part, remote_rp_version,
759 &remote_rp_stamp, remote_rp_pa, 720 &remote_rp_stamp, remote_rp_pa,
760 remote_vars_pa, remote_vars); 721 remote_vars_pa, remote_vars);
761 reactivate = 1; 722 reactivate = 1;
762 723
763 } else { 724 } else {
764 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version)); 725 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
765 726
766 stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp, 727 stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
767 &remote_rp_stamp); 728 &remote_rp_stamp);
768 if (stamp_diff != 0) { 729 if (stamp_diff != 0) {
769 DBUG_ON(stamp_diff >= 0); 730 DBUG_ON(stamp_diff >= 0);
770 731
@@ -775,17 +736,18 @@ xpc_identify_act_IRQ_req(int nasid)
775 736
776 DBUG_ON(xpc_partition_engaged(1UL << partid)); 737 DBUG_ON(xpc_partition_engaged(1UL << partid));
777 DBUG_ON(xpc_partition_disengage_requested(1UL << 738 DBUG_ON(xpc_partition_disengage_requested(1UL <<
778 partid)); 739 partid));
779 740
780 xpc_update_partition_info(part, remote_rp_version, 741 xpc_update_partition_info(part, remote_rp_version,
781 &remote_rp_stamp, remote_rp_pa, 742 &remote_rp_stamp,
782 remote_vars_pa, remote_vars); 743 remote_rp_pa, remote_vars_pa,
744 remote_vars);
783 reactivate = 1; 745 reactivate = 1;
784 } 746 }
785 } 747 }
786 748
787 if (part->disengage_request_timeout > 0 && 749 if (part->disengage_request_timeout > 0 &&
788 !xpc_partition_disengaged(part)) { 750 !xpc_partition_disengaged(part)) {
789 /* still waiting on other side to disengage from us */ 751 /* still waiting on other side to disengage from us */
790 return; 752 return;
791 } 753 }
@@ -795,12 +757,11 @@ xpc_identify_act_IRQ_req(int nasid)
795 XPC_DEACTIVATE_PARTITION(part, xpcReactivating); 757 XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
796 758
797 } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) && 759 } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
798 xpc_partition_disengage_requested(1UL << partid)) { 760 xpc_partition_disengage_requested(1UL << partid)) {
799 XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown); 761 XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
800 } 762 }
801} 763}
802 764
803
804/* 765/*
805 * Loop through the activation AMO variables and process any bits 766 * Loop through the activation AMO variables and process any bits
806 * which are set. Each bit indicates a nasid sending a partition 767 * which are set. Each bit indicates a nasid sending a partition
@@ -813,20 +774,17 @@ xpc_identify_act_IRQ_sender(void)
813{ 774{
814 int word, bit; 775 int word, bit;
815 u64 nasid_mask; 776 u64 nasid_mask;
816 u64 nasid; /* remote nasid */ 777 u64 nasid; /* remote nasid */
817 int n_IRQs_detected = 0; 778 int n_IRQs_detected = 0;
818 AMO_t *act_amos; 779 AMO_t *act_amos;
819 780
820
821 act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS; 781 act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
822 782
823
824 /* scan through act AMO variable looking for non-zero entries */ 783 /* scan through act AMO variable looking for non-zero entries */
825 for (word = 0; word < xp_nasid_mask_words; word++) { 784 for (word = 0; word < xp_nasid_mask_words; word++) {
826 785
827 if (xpc_exiting) { 786 if (xpc_exiting)
828 break; 787 break;
829 }
830 788
831 nasid_mask = xpc_IPI_receive(&act_amos[word]); 789 nasid_mask = xpc_IPI_receive(&act_amos[word]);
832 if (nasid_mask == 0) { 790 if (nasid_mask == 0) {
@@ -837,7 +795,6 @@ xpc_identify_act_IRQ_sender(void)
837 dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, 795 dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
838 nasid_mask); 796 nasid_mask);
839 797
840
841 /* 798 /*
842 * If this nasid has been added to the machine since 799 * If this nasid has been added to the machine since
843 * our partition was reset, this will retain the 800 * our partition was reset, this will retain the
@@ -846,7 +803,6 @@ xpc_identify_act_IRQ_sender(void)
846 */ 803 */
847 xpc_mach_nasids[word] |= nasid_mask; 804 xpc_mach_nasids[word] |= nasid_mask;
848 805
849
850 /* locate the nasid(s) which sent interrupts */ 806 /* locate the nasid(s) which sent interrupts */
851 807
852 for (bit = 0; bit < (8 * sizeof(u64)); bit++) { 808 for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
@@ -862,7 +818,6 @@ xpc_identify_act_IRQ_sender(void)
862 return n_IRQs_detected; 818 return n_IRQs_detected;
863} 819}
864 820
865
866/* 821/*
867 * See if the other side has responded to a partition disengage request 822 * See if the other side has responded to a partition disengage request
868 * from us. 823 * from us.
@@ -873,11 +828,11 @@ xpc_partition_disengaged(struct xpc_partition *part)
873 partid_t partid = XPC_PARTID(part); 828 partid_t partid = XPC_PARTID(part);
874 int disengaged; 829 int disengaged;
875 830
876
877 disengaged = (xpc_partition_engaged(1UL << partid) == 0); 831 disengaged = (xpc_partition_engaged(1UL << partid) == 0);
878 if (part->disengage_request_timeout) { 832 if (part->disengage_request_timeout) {
879 if (!disengaged) { 833 if (!disengaged) {
880 if (time_before(jiffies, part->disengage_request_timeout)) { 834 if (time_before(jiffies,
835 part->disengage_request_timeout)) {
881 /* timelimit hasn't been reached yet */ 836 /* timelimit hasn't been reached yet */
882 return 0; 837 return 0;
883 } 838 }
@@ -888,7 +843,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
888 */ 843 */
889 844
890 dev_info(xpc_part, "disengage from remote partition %d " 845 dev_info(xpc_part, "disengage from remote partition %d "
891 "timed out\n", partid); 846 "timed out\n", partid);
892 xpc_disengage_request_timedout = 1; 847 xpc_disengage_request_timedout = 1;
893 xpc_clear_partition_engaged(1UL << partid); 848 xpc_clear_partition_engaged(1UL << partid);
894 disengaged = 1; 849 disengaged = 1;
@@ -898,23 +853,20 @@ xpc_partition_disengaged(struct xpc_partition *part)
898 /* cancel the timer function, provided it's not us */ 853 /* cancel the timer function, provided it's not us */
899 if (!in_interrupt()) { 854 if (!in_interrupt()) {
900 del_singleshot_timer_sync(&part-> 855 del_singleshot_timer_sync(&part->
901 disengage_request_timer); 856 disengage_request_timer);
902 } 857 }
903 858
904 DBUG_ON(part->act_state != XPC_P_DEACTIVATING && 859 DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
905 part->act_state != XPC_P_INACTIVE); 860 part->act_state != XPC_P_INACTIVE);
906 if (part->act_state != XPC_P_INACTIVE) { 861 if (part->act_state != XPC_P_INACTIVE)
907 xpc_wakeup_channel_mgr(part); 862 xpc_wakeup_channel_mgr(part);
908 }
909 863
910 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { 864 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version))
911 xpc_cancel_partition_disengage_request(part); 865 xpc_cancel_partition_disengage_request(part);
912 }
913 } 866 }
914 return disengaged; 867 return disengaged;
915} 868}
916 869
917
918/* 870/*
919 * Mark specified partition as active. 871 * Mark specified partition as active.
920 */ 872 */
@@ -924,7 +876,6 @@ xpc_mark_partition_active(struct xpc_partition *part)
924 unsigned long irq_flags; 876 unsigned long irq_flags;
925 enum xpc_retval ret; 877 enum xpc_retval ret;
926 878
927
928 dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); 879 dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
929 880
930 spin_lock_irqsave(&part->act_lock, irq_flags); 881 spin_lock_irqsave(&part->act_lock, irq_flags);
@@ -940,17 +891,15 @@ xpc_mark_partition_active(struct xpc_partition *part)
940 return ret; 891 return ret;
941} 892}
942 893
943
944/* 894/*
945 * Notify XPC that the partition is down. 895 * Notify XPC that the partition is down.
946 */ 896 */
947void 897void
948xpc_deactivate_partition(const int line, struct xpc_partition *part, 898xpc_deactivate_partition(const int line, struct xpc_partition *part,
949 enum xpc_retval reason) 899 enum xpc_retval reason)
950{ 900{
951 unsigned long irq_flags; 901 unsigned long irq_flags;
952 902
953
954 spin_lock_irqsave(&part->act_lock, irq_flags); 903 spin_lock_irqsave(&part->act_lock, irq_flags);
955 904
956 if (part->act_state == XPC_P_INACTIVE) { 905 if (part->act_state == XPC_P_INACTIVE) {
@@ -964,7 +913,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
964 } 913 }
965 if (part->act_state == XPC_P_DEACTIVATING) { 914 if (part->act_state == XPC_P_DEACTIVATING) {
966 if ((part->reason == xpcUnloading && reason != xpcUnloading) || 915 if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
967 reason == xpcReactivating) { 916 reason == xpcReactivating) {
968 XPC_SET_REASON(part, reason, line); 917 XPC_SET_REASON(part, reason, line);
969 } 918 }
970 spin_unlock_irqrestore(&part->act_lock, irq_flags); 919 spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -982,9 +931,9 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
982 931
983 /* set a timelimit on the disengage request */ 932 /* set a timelimit on the disengage request */
984 part->disengage_request_timeout = jiffies + 933 part->disengage_request_timeout = jiffies +
985 (xpc_disengage_request_timelimit * HZ); 934 (xpc_disengage_request_timelimit * HZ);
986 part->disengage_request_timer.expires = 935 part->disengage_request_timer.expires =
987 part->disengage_request_timeout; 936 part->disengage_request_timeout;
988 add_timer(&part->disengage_request_timer); 937 add_timer(&part->disengage_request_timer);
989 } 938 }
990 939
@@ -994,7 +943,6 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
994 xpc_partition_going_down(part, reason); 943 xpc_partition_going_down(part, reason);
995} 944}
996 945
997
998/* 946/*
999 * Mark specified partition as inactive. 947 * Mark specified partition as inactive.
1000 */ 948 */
@@ -1003,7 +951,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
1003{ 951{
1004 unsigned long irq_flags; 952 unsigned long irq_flags;
1005 953
1006
1007 dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", 954 dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
1008 XPC_PARTID(part)); 955 XPC_PARTID(part));
1009 956
@@ -1013,7 +960,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
1013 part->remote_rp_pa = 0; 960 part->remote_rp_pa = 0;
1014} 961}
1015 962
1016
1017/* 963/*
1018 * SAL has provided a partition and machine mask. The partition mask 964 * SAL has provided a partition and machine mask. The partition mask
1019 * contains a bit for each even nasid in our partition. The machine 965 * contains a bit for each even nasid in our partition. The machine
@@ -1041,24 +987,22 @@ xpc_discovery(void)
1041 u64 *discovered_nasids; 987 u64 *discovered_nasids;
1042 enum xpc_retval ret; 988 enum xpc_retval ret;
1043 989
1044
1045 remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + 990 remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
1046 xp_nasid_mask_bytes, 991 xp_nasid_mask_bytes,
1047 GFP_KERNEL, &remote_rp_base); 992 GFP_KERNEL, &remote_rp_base);
1048 if (remote_rp == NULL) { 993 if (remote_rp == NULL)
1049 return; 994 return;
1050 }
1051 remote_vars = (struct xpc_vars *) remote_rp;
1052 995
996 remote_vars = (struct xpc_vars *)remote_rp;
1053 997
1054 discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words, 998 discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
1055 GFP_KERNEL); 999 GFP_KERNEL);
1056 if (discovered_nasids == NULL) { 1000 if (discovered_nasids == NULL) {
1057 kfree(remote_rp_base); 1001 kfree(remote_rp_base);
1058 return; 1002 return;
1059 } 1003 }
1060 1004
1061 rp = (struct xpc_rsvd_page *) xpc_rsvd_page; 1005 rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
1062 1006
1063 /* 1007 /*
1064 * The term 'region' in this context refers to the minimum number of 1008 * The term 'region' in this context refers to the minimum number of
@@ -1081,23 +1025,19 @@ xpc_discovery(void)
1081 1025
1082 for (region = 0; region < max_regions; region++) { 1026 for (region = 0; region < max_regions; region++) {
1083 1027
1084 if ((volatile int) xpc_exiting) { 1028 if (xpc_exiting)
1085 break; 1029 break;
1086 }
1087 1030
1088 dev_dbg(xpc_part, "searching region %d\n", region); 1031 dev_dbg(xpc_part, "searching region %d\n", region);
1089 1032
1090 for (nasid = (region * region_size * 2); 1033 for (nasid = (region * region_size * 2);
1091 nasid < ((region + 1) * region_size * 2); 1034 nasid < ((region + 1) * region_size * 2); nasid += 2) {
1092 nasid += 2) {
1093 1035
1094 if ((volatile int) xpc_exiting) { 1036 if (xpc_exiting)
1095 break; 1037 break;
1096 }
1097 1038
1098 dev_dbg(xpc_part, "checking nasid %d\n", nasid); 1039 dev_dbg(xpc_part, "checking nasid %d\n", nasid);
1099 1040
1100
1101 if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) { 1041 if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
1102 dev_dbg(xpc_part, "PROM indicates Nasid %d is " 1042 dev_dbg(xpc_part, "PROM indicates Nasid %d is "
1103 "part of the local partition; skipping " 1043 "part of the local partition; skipping "
@@ -1119,19 +1059,18 @@ xpc_discovery(void)
1119 continue; 1059 continue;
1120 } 1060 }
1121 1061
1122
1123 /* pull over the reserved page structure */ 1062 /* pull over the reserved page structure */
1124 1063
1125 ret = xpc_get_remote_rp(nasid, discovered_nasids, 1064 ret = xpc_get_remote_rp(nasid, discovered_nasids,
1126 remote_rp, &remote_rp_pa); 1065 remote_rp, &remote_rp_pa);
1127 if (ret != xpcSuccess) { 1066 if (ret != xpcSuccess) {
1128 dev_dbg(xpc_part, "unable to get reserved page " 1067 dev_dbg(xpc_part, "unable to get reserved page "
1129 "from nasid %d, reason=%d\n", nasid, 1068 "from nasid %d, reason=%d\n", nasid,
1130 ret); 1069 ret);
1131 1070
1132 if (ret == xpcLocalPartid) { 1071 if (ret == xpcLocalPartid)
1133 break; 1072 break;
1134 } 1073
1135 continue; 1074 continue;
1136 } 1075 }
1137 1076
@@ -1140,7 +1079,6 @@ xpc_discovery(void)
1140 partid = remote_rp->partid; 1079 partid = remote_rp->partid;
1141 part = &xpc_partitions[partid]; 1080 part = &xpc_partitions[partid];
1142 1081
1143
1144 /* pull over the cross partition variables */ 1082 /* pull over the cross partition variables */
1145 1083
1146 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); 1084 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
@@ -1171,15 +1109,15 @@ xpc_discovery(void)
1171 * get the same page for remote_act_amos_pa after 1109 * get the same page for remote_act_amos_pa after
1172 * module reloads and system reboots. 1110 * module reloads and system reboots.
1173 */ 1111 */
1174 if (sn_register_xp_addr_region( 1112 if (sn_register_xp_addr_region
1175 remote_vars->amos_page_pa, 1113 (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) {
1176 PAGE_SIZE, 1) < 0) { 1114 dev_dbg(xpc_part,
1177 dev_dbg(xpc_part, "partition %d failed to " 1115 "partition %d failed to "
1178 "register xp_addr region 0x%016lx\n", 1116 "register xp_addr region 0x%016lx\n",
1179 partid, remote_vars->amos_page_pa); 1117 partid, remote_vars->amos_page_pa);
1180 1118
1181 XPC_SET_REASON(part, xpcPhysAddrRegFailed, 1119 XPC_SET_REASON(part, xpcPhysAddrRegFailed,
1182 __LINE__); 1120 __LINE__);
1183 break; 1121 break;
1184 } 1122 }
1185 1123
@@ -1195,9 +1133,9 @@ xpc_discovery(void)
1195 remote_vars->act_phys_cpuid); 1133 remote_vars->act_phys_cpuid);
1196 1134
1197 if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> 1135 if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
1198 version)) { 1136 version)) {
1199 part->remote_amos_page_pa = 1137 part->remote_amos_page_pa =
1200 remote_vars->amos_page_pa; 1138 remote_vars->amos_page_pa;
1201 xpc_mark_partition_disengaged(part); 1139 xpc_mark_partition_disengaged(part);
1202 xpc_cancel_partition_disengage_request(part); 1140 xpc_cancel_partition_disengage_request(part);
1203 } 1141 }
@@ -1209,7 +1147,6 @@ xpc_discovery(void)
1209 kfree(remote_rp_base); 1147 kfree(remote_rp_base);
1210} 1148}
1211 1149
1212
1213/* 1150/*
1214 * Given a partid, get the nasids owned by that partition from the 1151 * Given a partid, get the nasids owned by that partition from the
1215 * remote partition's reserved page. 1152 * remote partition's reserved page.
@@ -1221,19 +1158,17 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
1221 u64 part_nasid_pa; 1158 u64 part_nasid_pa;
1222 int bte_res; 1159 int bte_res;
1223 1160
1224
1225 part = &xpc_partitions[partid]; 1161 part = &xpc_partitions[partid];
1226 if (part->remote_rp_pa == 0) { 1162 if (part->remote_rp_pa == 0)
1227 return xpcPartitionDown; 1163 return xpcPartitionDown;
1228 }
1229 1164
1230 memset(nasid_mask, 0, XP_NASID_MASK_BYTES); 1165 memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
1231 1166
1232 part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa); 1167 part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);
1233 1168
1234 bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask, 1169 bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask,
1235 xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL); 1170 xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE),
1171 NULL);
1236 1172
1237 return xpc_map_bte_errors(bte_res); 1173 return xpc_map_bte_errors(bte_res);
1238} 1174}
1239
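
The xpc_partition.c hunks above keep the driver's jiffies-based disengage timelimit: xpc_deactivate_partition() arms part->disengage_request_timeout at jiffies + (xpc_disengage_request_timelimit * HZ), and xpc_partition_disengaged() later tests it with time_before(). A minimal kernel-style sketch of that pattern is below; the struct, field, and function names (foo, timeout, limit_secs) are hypothetical and only mirror the idiom, they are not the driver's real symbols.

#include <linux/jiffies.h>

/* hypothetical example of the timeout idiom used by
 * xpc_deactivate_partition()/xpc_partition_disengaged() above */
struct foo {
	unsigned long timeout;		/* absolute deadline, in jiffies */
};

static void foo_arm_timeout(struct foo *f, unsigned long limit_secs)
{
	/* deadline is 'limit_secs' seconds into the future */
	f->timeout = jiffies + limit_secs * HZ;
}

static int foo_timed_out(struct foo *f)
{
	/* time_before() copes with jiffies wraparound */
	return !time_before(jiffies, f->timeout);
}
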
diff --git a/arch/ia64/sn/kernel/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index a5df672d8392..a9543c65814d 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -3,10 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1999-2008 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Network Interface (XPNET) support 10 * Cross Partition Network Interface (XPNET) support
12 * 11 *
@@ -21,8 +20,8 @@
21 * 20 *
22 */ 21 */
23 22
24
25#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/types.h>
26#include <linux/kernel.h> 25#include <linux/kernel.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/ioport.h> 27#include <linux/ioport.h>
@@ -36,10 +35,8 @@
36#include <asm/sn/bte.h> 35#include <asm/sn/bte.h>
37#include <asm/sn/io.h> 36#include <asm/sn/io.h>
38#include <asm/sn/sn_sal.h> 37#include <asm/sn/sn_sal.h>
39#include <asm/types.h>
40#include <asm/atomic.h> 38#include <asm/atomic.h>
41#include <asm/sn/xp.h> 39#include "xp.h"
42
43 40
44/* 41/*
45 * The message payload transferred by XPC. 42 * The message payload transferred by XPC.
@@ -79,7 +76,6 @@ struct xpnet_message {
79#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE)) 76#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE))
80#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE) 77#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
81 78
82
83#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) 79#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
84#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) 80#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
85 81
@@ -91,9 +87,9 @@ struct xpnet_message {
91#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4) 87#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
92#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf) 88#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
93 89
94#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */ 90#define XPNET_VERSION _XPNET_VERSION(1, 0) /* version 1.0 */
95#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */ 91#define XPNET_VERSION_EMBED _XPNET_VERSION(1, 1) /* version 1.1 */
96#define XPNET_MAGIC 0x88786984 /* "XNET" */ 92#define XPNET_MAGIC 0x88786984 /* "XNET" */
97 93
98#define XPNET_VALID_MSG(_m) \ 94#define XPNET_VALID_MSG(_m) \
99 ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \ 95 ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
@@ -101,7 +97,6 @@ struct xpnet_message {
101 97
102#define XPNET_DEVICE_NAME "xp0" 98#define XPNET_DEVICE_NAME "xp0"
103 99
104
105/* 100/*
106 * When messages are queued with xpc_send_notify, a kmalloc'd buffer 101 * When messages are queued with xpc_send_notify, a kmalloc'd buffer
107 * of the following type is passed as a notification cookie. When the 102 * of the following type is passed as a notification cookie. When the
@@ -145,7 +140,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
145/* 32KB has been determined to be the ideal */ 140/* 32KB has been determined to be the ideal */
146#define XPNET_DEF_MTU (0x8000UL) 141#define XPNET_DEF_MTU (0x8000UL)
147 142
148
149/* 143/*
150 * The partition id is encapsulated in the MAC address. The following 144 * The partition id is encapsulated in the MAC address. The following
151 * define locates the octet the partid is in. 145 * define locates the octet the partid is in.
@@ -153,7 +147,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
153#define XPNET_PARTID_OCTET 1 147#define XPNET_PARTID_OCTET 1
154#define XPNET_LICENSE_OCTET 2 148#define XPNET_LICENSE_OCTET 2
155 149
156
157/* 150/*
158 * Define the XPNET debug device structure that is to be used with dev_dbg(), 151 * Define the XPNET debug device structure that is to be used with dev_dbg(),
159 * dev_err(), dev_warn(), and dev_info(). 152 * dev_err(), dev_warn(), and dev_info().
@@ -163,7 +156,7 @@ struct device_driver xpnet_dbg_name = {
163}; 156};
164 157
165struct device xpnet_dbg_subname = { 158struct device xpnet_dbg_subname = {
166 .bus_id = {0}, /* set to "" */ 159 .bus_id = {0}, /* set to "" */
167 .driver = &xpnet_dbg_name 160 .driver = &xpnet_dbg_name
168}; 161};
169 162
@@ -178,14 +171,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
178 struct sk_buff *skb; 171 struct sk_buff *skb;
179 bte_result_t bret; 172 bte_result_t bret;
180 struct xpnet_dev_private *priv = 173 struct xpnet_dev_private *priv =
181 (struct xpnet_dev_private *) xpnet_device->priv; 174 (struct xpnet_dev_private *)xpnet_device->priv;
182
183 175
184 if (!XPNET_VALID_MSG(msg)) { 176 if (!XPNET_VALID_MSG(msg)) {
185 /* 177 /*
186 * Packet with a different XPC version. Ignore. 178 * Packet with a different XPC version. Ignore.
187 */ 179 */
188 xpc_received(partid, channel, (void *) msg); 180 xpc_received(partid, channel, (void *)msg);
189 181
190 priv->stats.rx_errors++; 182 priv->stats.rx_errors++;
191 183
@@ -194,14 +186,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
194 dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, 186 dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
195 msg->leadin_ignore, msg->tailout_ignore); 187 msg->leadin_ignore, msg->tailout_ignore);
196 188
197
198 /* reserve an extra cache line */ 189 /* reserve an extra cache line */
199 skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); 190 skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
200 if (!skb) { 191 if (!skb) {
201 dev_err(xpnet, "failed on dev_alloc_skb(%d)\n", 192 dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
202 msg->size + L1_CACHE_BYTES); 193 msg->size + L1_CACHE_BYTES);
203 194
204 xpc_received(partid, channel, (void *) msg); 195 xpc_received(partid, channel, (void *)msg);
205 196
206 priv->stats.rx_errors++; 197 priv->stats.rx_errors++;
207 198
@@ -227,12 +218,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
227 * Move the data over from the other side. 218 * Move the data over from the other side.
228 */ 219 */
229 if ((XPNET_VERSION_MINOR(msg->version) == 1) && 220 if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
230 (msg->embedded_bytes != 0)) { 221 (msg->embedded_bytes != 0)) {
231 dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, " 222 dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
232 "%lu)\n", skb->data, &msg->data, 223 "%lu)\n", skb->data, &msg->data,
233 (size_t) msg->embedded_bytes); 224 (size_t)msg->embedded_bytes);
234 225
235 skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes); 226 skb_copy_to_linear_data(skb, &msg->data,
227 (size_t)msg->embedded_bytes);
236 } else { 228 } else {
237 dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" 229 dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
238 "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, 230 "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
@@ -244,16 +236,18 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
244 msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL); 236 msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
245 237
246 if (bret != BTE_SUCCESS) { 238 if (bret != BTE_SUCCESS) {
247 // >>> Need better way of cleaning skb. Currently skb 239 /*
248 // >>> appears in_use and we can't just call 240 * >>> Need better way of cleaning skb. Currently skb
249 // >>> dev_kfree_skb. 241 * >>> appears in_use and we can't just call
242 * >>> dev_kfree_skb.
243 */
250 dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " 244 dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
251 "error=0x%x\n", (void *)msg->buf_pa, 245 "error=0x%x\n", (void *)msg->buf_pa,
252 (void *)__pa((u64)skb->data & 246 (void *)__pa((u64)skb->data &
253 ~(L1_CACHE_BYTES - 1)), 247 ~(L1_CACHE_BYTES - 1)),
254 msg->size, bret); 248 msg->size, bret);
255 249
256 xpc_received(partid, channel, (void *) msg); 250 xpc_received(partid, channel, (void *)msg);
257 251
258 priv->stats.rx_errors++; 252 priv->stats.rx_errors++;
259 253
@@ -262,7 +256,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
262 } 256 }
263 257
264 dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 258 dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
265 "skb->end=0x%p skb->len=%d\n", (void *) skb->head, 259 "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
266 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), 260 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
267 skb->len); 261 skb->len);
268 262
@@ -275,16 +269,14 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
275 (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), 269 (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
276 skb_end_pointer(skb), skb->len); 270 skb_end_pointer(skb), skb->len);
277 271
278
279 xpnet_device->last_rx = jiffies; 272 xpnet_device->last_rx = jiffies;
280 priv->stats.rx_packets++; 273 priv->stats.rx_packets++;
281 priv->stats.rx_bytes += skb->len + ETH_HLEN; 274 priv->stats.rx_bytes += skb->len + ETH_HLEN;
282 275
283 netif_rx_ni(skb); 276 netif_rx_ni(skb);
284 xpc_received(partid, channel, (void *) msg); 277 xpc_received(partid, channel, (void *)msg);
285} 278}
286 279
287
288/* 280/*
289 * This is the handler which XPC calls during any sort of change in 281 * This is the handler which XPC calls during any sort of change in
290 * state or message reception on a connection. 282 * state or message reception on a connection.
@@ -295,20 +287,19 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
295{ 287{
296 long bp; 288 long bp;
297 289
298
299 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 290 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
300 DBUG_ON(channel != XPC_NET_CHANNEL); 291 DBUG_ON(channel != XPC_NET_CHANNEL);
301 292
302 switch(reason) { 293 switch (reason) {
303 case xpcMsgReceived: /* message received */ 294 case xpcMsgReceived: /* message received */
304 DBUG_ON(data == NULL); 295 DBUG_ON(data == NULL);
305 296
306 xpnet_receive(partid, channel, (struct xpnet_message *) data); 297 xpnet_receive(partid, channel, (struct xpnet_message *)data);
307 break; 298 break;
308 299
309 case xpcConnected: /* connection completed to a partition */ 300 case xpcConnected: /* connection completed to a partition */
310 spin_lock_bh(&xpnet_broadcast_lock); 301 spin_lock_bh(&xpnet_broadcast_lock);
311 xpnet_broadcast_partitions |= 1UL << (partid -1 ); 302 xpnet_broadcast_partitions |= 1UL << (partid - 1);
312 bp = xpnet_broadcast_partitions; 303 bp = xpnet_broadcast_partitions;
313 spin_unlock_bh(&xpnet_broadcast_lock); 304 spin_unlock_bh(&xpnet_broadcast_lock);
314 305
@@ -321,13 +312,12 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
321 312
322 default: 313 default:
323 spin_lock_bh(&xpnet_broadcast_lock); 314 spin_lock_bh(&xpnet_broadcast_lock);
324 xpnet_broadcast_partitions &= ~(1UL << (partid -1 )); 315 xpnet_broadcast_partitions &= ~(1UL << (partid - 1));
325 bp = xpnet_broadcast_partitions; 316 bp = xpnet_broadcast_partitions;
326 spin_unlock_bh(&xpnet_broadcast_lock); 317 spin_unlock_bh(&xpnet_broadcast_lock);
327 318
328 if (bp == 0) { 319 if (bp == 0)
329 netif_carrier_off(xpnet_device); 320 netif_carrier_off(xpnet_device);
330 }
331 321
332 dev_dbg(xpnet, "%s disconnected from partition %d; " 322 dev_dbg(xpnet, "%s disconnected from partition %d; "
333 "xpnet_broadcast_partitions=0x%lx\n", 323 "xpnet_broadcast_partitions=0x%lx\n",
@@ -337,13 +327,11 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
337 } 327 }
338} 328}
339 329
340
341static int 330static int
342xpnet_dev_open(struct net_device *dev) 331xpnet_dev_open(struct net_device *dev)
343{ 332{
344 enum xpc_retval ret; 333 enum xpc_retval ret;
345 334
346
347 dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " 335 dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
348 "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, 336 "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
349 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, 337 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
@@ -364,7 +352,6 @@ xpnet_dev_open(struct net_device *dev)
364 return 0; 352 return 0;
365} 353}
366 354
367
368static int 355static int
369xpnet_dev_stop(struct net_device *dev) 356xpnet_dev_stop(struct net_device *dev)
370{ 357{
@@ -375,7 +362,6 @@ xpnet_dev_stop(struct net_device *dev)
375 return 0; 362 return 0;
376} 363}
377 364
378
379static int 365static int
380xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) 366xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
381{ 367{
@@ -392,7 +378,6 @@ xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
392 return 0; 378 return 0;
393} 379}
394 380
395
396/* 381/*
397 * Required for the net_device structure. 382 * Required for the net_device structure.
398 */ 383 */
@@ -402,7 +387,6 @@ xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
402 return 0; 387 return 0;
403} 388}
404 389
405
406/* 390/*
407 * Return statistics to the caller. 391 * Return statistics to the caller.
408 */ 392 */
@@ -411,13 +395,11 @@ xpnet_dev_get_stats(struct net_device *dev)
411{ 395{
412 struct xpnet_dev_private *priv; 396 struct xpnet_dev_private *priv;
413 397
414 398 priv = (struct xpnet_dev_private *)dev->priv;
415 priv = (struct xpnet_dev_private *) dev->priv;
416 399
417 return &priv->stats; 400 return &priv->stats;
418} 401}
419 402
420
421/* 403/*
422 * Notification that the other end has received the message and 404 * Notification that the other end has received the message and
423 * DMA'd the skb information. At this point, they are done with 405 * DMA'd the skb information. At this point, they are done with
@@ -426,11 +408,9 @@ xpnet_dev_get_stats(struct net_device *dev)
426 */ 408 */
427static void 409static void
428xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, 410xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
429 void *__qm) 411 void *__qm)
430{ 412{
431 struct xpnet_pending_msg *queued_msg = 413 struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
432 (struct xpnet_pending_msg *) __qm;
433
434 414
435 DBUG_ON(queued_msg == NULL); 415 DBUG_ON(queued_msg == NULL);
436 416
@@ -439,14 +419,13 @@ xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
439 419
440 if (atomic_dec_return(&queued_msg->use_count) == 0) { 420 if (atomic_dec_return(&queued_msg->use_count) == 0) {
441 dev_dbg(xpnet, "all acks for skb->head=-x%p\n", 421 dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
442 (void *) queued_msg->skb->head); 422 (void *)queued_msg->skb->head);
443 423
444 dev_kfree_skb_any(queued_msg->skb); 424 dev_kfree_skb_any(queued_msg->skb);
445 kfree(queued_msg); 425 kfree(queued_msg);
446 } 426 }
447} 427}
448 428
449
450/* 429/*
451 * Network layer has formatted a packet (skb) and is ready to place it 430 * Network layer has formatted a packet (skb) and is ready to place it
452 * "on the wire". Prepare and send an xpnet_message to all partitions 431 * "on the wire". Prepare and send an xpnet_message to all partitions
@@ -469,16 +448,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
469 struct xpnet_dev_private *priv; 448 struct xpnet_dev_private *priv;
470 u16 embedded_bytes; 449 u16 embedded_bytes;
471 450
472 451 priv = (struct xpnet_dev_private *)dev->priv;
473 priv = (struct xpnet_dev_private *) dev->priv;
474
475 452
476 dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 453 dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
477 "skb->end=0x%p skb->len=%d\n", (void *) skb->head, 454 "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
478 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), 455 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
479 skb->len); 456 skb->len);
480 457
481
482 /* 458 /*
483 * The xpnet_pending_msg tracks how many outstanding 459 * The xpnet_pending_msg tracks how many outstanding
484 * xpc_send_notifies are relying on this skb. When none 460 * xpc_send_notifies are relying on this skb. When none
@@ -487,16 +463,15 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
487 queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); 463 queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
488 if (queued_msg == NULL) { 464 if (queued_msg == NULL) {
489 dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " 465 dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
490 "packet\n", sizeof(struct xpnet_pending_msg)); 466 "packet\n", sizeof(struct xpnet_pending_msg));
491 467
492 priv->stats.tx_errors++; 468 priv->stats.tx_errors++;
493 469
494 return -ENOMEM; 470 return -ENOMEM;
495 } 471 }
496 472
497
498 /* get the beginning of the first cacheline and end of last */ 473 /* get the beginning of the first cacheline and end of last */
499 start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1)); 474 start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
500 end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb)); 475 end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
501 476
502 /* calculate how many bytes to embed in the XPC message */ 477 /* calculate how many bytes to embed in the XPC message */
@@ -506,7 +481,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
506 embedded_bytes = skb->len; 481 embedded_bytes = skb->len;
507 } 482 }
508 483
509
510 /* 484 /*
511 * Since the send occurs asynchronously, we set the count to one 485 * Since the send occurs asynchronously, we set the count to one
512 * and begin sending. Any sends that happen to complete before 486 * and begin sending. Any sends that happen to complete before
@@ -517,14 +491,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
517 atomic_set(&queued_msg->use_count, 1); 491 atomic_set(&queued_msg->use_count, 1);
518 queued_msg->skb = skb; 492 queued_msg->skb = skb;
519 493
520
521 second_mac_octet = skb->data[XPNET_PARTID_OCTET]; 494 second_mac_octet = skb->data[XPNET_PARTID_OCTET];
522 if (second_mac_octet == 0xff) { 495 if (second_mac_octet == 0xff) {
523 /* we are being asked to broadcast to all partitions */ 496 /* we are being asked to broadcast to all partitions */
524 dp = xpnet_broadcast_partitions; 497 dp = xpnet_broadcast_partitions;
525 } else if (second_mac_octet != 0) { 498 } else if (second_mac_octet != 0) {
526 dp = xpnet_broadcast_partitions & 499 dp = xpnet_broadcast_partitions &
527 (1UL << (second_mac_octet - 1)); 500 (1UL << (second_mac_octet - 1));
528 } else { 501 } else {
529 /* 0 is an invalid partid. Ignore */ 502 /* 0 is an invalid partid. Ignore */
530 dp = 0; 503 dp = 0;
@@ -543,7 +516,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
543 for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS; 516 for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
544 dest_partid++) { 517 dest_partid++) {
545 518
546
547 if (!(dp & (1UL << (dest_partid - 1)))) { 519 if (!(dp & (1UL << (dest_partid - 1)))) {
548 /* not destined for this partition */ 520 /* not destined for this partition */
549 continue; 521 continue;
@@ -552,20 +524,18 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
552 /* remove this partition from the destinations mask */ 524 /* remove this partition from the destinations mask */
553 dp &= ~(1UL << (dest_partid - 1)); 525 dp &= ~(1UL << (dest_partid - 1));
554 526
555
556 /* found a partition to send to */ 527 /* found a partition to send to */
557 528
558 ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, 529 ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
559 XPC_NOWAIT, (void **)&msg); 530 XPC_NOWAIT, (void **)&msg);
560 if (unlikely(ret != xpcSuccess)) { 531 if (unlikely(ret != xpcSuccess))
561 continue; 532 continue;
562 }
563 533
564 msg->embedded_bytes = embedded_bytes; 534 msg->embedded_bytes = embedded_bytes;
565 if (unlikely(embedded_bytes != 0)) { 535 if (unlikely(embedded_bytes != 0)) {
566 msg->version = XPNET_VERSION_EMBED; 536 msg->version = XPNET_VERSION_EMBED;
567 dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", 537 dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
568 &msg->data, skb->data, (size_t) embedded_bytes); 538 &msg->data, skb->data, (size_t)embedded_bytes);
569 skb_copy_from_linear_data(skb, &msg->data, 539 skb_copy_from_linear_data(skb, &msg->data,
570 (size_t)embedded_bytes); 540 (size_t)embedded_bytes);
571 } else { 541 } else {
@@ -573,7 +543,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
573 } 543 }
574 msg->magic = XPNET_MAGIC; 544 msg->magic = XPNET_MAGIC;
575 msg->size = end_addr - start_addr; 545 msg->size = end_addr - start_addr;
576 msg->leadin_ignore = (u64) skb->data - start_addr; 546 msg->leadin_ignore = (u64)skb->data - start_addr;
577 msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); 547 msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
578 msg->buf_pa = __pa(start_addr); 548 msg->buf_pa = __pa(start_addr);
579 549
@@ -583,7 +553,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
583 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, 553 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
584 msg->leadin_ignore, msg->tailout_ignore); 554 msg->leadin_ignore, msg->tailout_ignore);
585 555
586
587 atomic_inc(&queued_msg->use_count); 556 atomic_inc(&queued_msg->use_count);
588 557
589 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, 558 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
@@ -592,14 +561,12 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
592 atomic_dec(&queued_msg->use_count); 561 atomic_dec(&queued_msg->use_count);
593 continue; 562 continue;
594 } 563 }
595
596 } 564 }
597 565
598 if (atomic_dec_return(&queued_msg->use_count) == 0) { 566 if (atomic_dec_return(&queued_msg->use_count) == 0) {
599 dev_dbg(xpnet, "no partitions to receive packet destined for " 567 dev_dbg(xpnet, "no partitions to receive packet destined for "
600 "%d\n", dest_partid); 568 "%d\n", dest_partid);
601 569
602
603 dev_kfree_skb(skb); 570 dev_kfree_skb(skb);
604 kfree(queued_msg); 571 kfree(queued_msg);
605 } 572 }
@@ -610,23 +577,20 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
610 return 0; 577 return 0;
611} 578}
612 579
613
614/* 580/*
615 * Deal with transmit timeouts coming from the network layer. 581 * Deal with transmit timeouts coming from the network layer.
616 */ 582 */
617static void 583static void
618xpnet_dev_tx_timeout (struct net_device *dev) 584xpnet_dev_tx_timeout(struct net_device *dev)
619{ 585{
620 struct xpnet_dev_private *priv; 586 struct xpnet_dev_private *priv;
621 587
622 588 priv = (struct xpnet_dev_private *)dev->priv;
623 priv = (struct xpnet_dev_private *) dev->priv;
624 589
625 priv->stats.tx_errors++; 590 priv->stats.tx_errors++;
626 return; 591 return;
627} 592}
628 593
629
630static int __init 594static int __init
631xpnet_init(void) 595xpnet_init(void)
632{ 596{
@@ -634,10 +598,8 @@ xpnet_init(void)
634 u32 license_num; 598 u32 license_num;
635 int result = -ENOMEM; 599 int result = -ENOMEM;
636 600
637 601 if (!ia64_platform_is("sn2"))
638 if (!ia64_platform_is("sn2")) {
639 return -ENODEV; 602 return -ENODEV;
640 }
641 603
642 dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); 604 dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
643 605
@@ -647,9 +609,8 @@ xpnet_init(void)
647 */ 609 */
648 xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private), 610 xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
649 XPNET_DEVICE_NAME, ether_setup); 611 XPNET_DEVICE_NAME, ether_setup);
650 if (xpnet_device == NULL) { 612 if (xpnet_device == NULL)
651 return -ENOMEM; 613 return -ENOMEM;
652 }
653 614
654 netif_carrier_off(xpnet_device); 615 netif_carrier_off(xpnet_device);
655 616
@@ -672,7 +633,7 @@ xpnet_init(void)
672 license_num = sn_partition_serial_number_val(); 633 license_num = sn_partition_serial_number_val();
673 for (i = 3; i >= 0; i--) { 634 for (i = 3; i >= 0; i--) {
674 xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] = 635 xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
675 license_num & 0xff; 636 license_num & 0xff;
676 license_num = license_num >> 8; 637 license_num = license_num >> 8;
677 } 638 }
678 639
@@ -690,29 +651,27 @@ xpnet_init(void)
690 xpnet_device->features = NETIF_F_NO_CSUM; 651 xpnet_device->features = NETIF_F_NO_CSUM;
691 652
692 result = register_netdev(xpnet_device); 653 result = register_netdev(xpnet_device);
693 if (result != 0) { 654 if (result != 0)
694 free_netdev(xpnet_device); 655 free_netdev(xpnet_device);
695 }
696 656
697 return result; 657 return result;
698} 658}
699module_init(xpnet_init);
700 659
660module_init(xpnet_init);
701 661
702static void __exit 662static void __exit
703xpnet_exit(void) 663xpnet_exit(void)
704{ 664{
705 dev_info(xpnet, "unregistering network device %s\n", 665 dev_info(xpnet, "unregistering network device %s\n",
706 xpnet_device[0].name); 666 xpnet_device[0].name);
707 667
708 unregister_netdev(xpnet_device); 668 unregister_netdev(xpnet_device);
709 669
710 free_netdev(xpnet_device); 670 free_netdev(xpnet_device);
711} 671}
712module_exit(xpnet_exit);
713 672
673module_exit(xpnet_exit);
714 674
715MODULE_AUTHOR("Silicon Graphics, Inc."); 675MODULE_AUTHOR("Silicon Graphics, Inc.");
716MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); 676MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
717MODULE_LICENSE("GPL"); 677MODULE_LICENSE("GPL");
718
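
As the xpnet.c hunks show, the destination partition rides in octet XPNET_PARTID_OCTET of the MAC address: 0xff broadcasts to every partition currently in xpnet_broadcast_partitions, 0 is an invalid partid and the frame is dropped, and any other value selects a single partition bit. A small standalone C sketch of that decode step follows; xpnet_dest_mask and bcast_mask are illustrative stand-ins (bcast_mask plays the role of xpnet_broadcast_partitions), and XP_MAX_PARTITIONS = 64 is assumed from xp.h.

#include <stdint.h>

#define XPNET_PARTID_OCTET	1
#define XP_MAX_PARTITIONS	64	/* assumed, as defined in xp.h */

/*
 * Return the destination-partition bitmask for one outgoing frame,
 * mirroring the decode done in xpnet_dev_hard_start_xmit() above.
 */
static uint64_t xpnet_dest_mask(const uint8_t *dest_mac, uint64_t bcast_mask)
{
	uint8_t octet = dest_mac[XPNET_PARTID_OCTET];

	if (octet == 0xff)		/* broadcast to all connected partitions */
		return bcast_mask;
	if (octet != 0 && octet < XP_MAX_PARTITIONS)	/* one specific partition */
		return bcast_mask & (1UL << (octet - 1));
	return 0;			/* partid 0 is invalid; drop the frame */
}
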
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index f1663aa94a52..18a4321349a3 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -157,6 +157,7 @@ extern void ia64_mca_printk(const char * fmt, ...)
157struct ia64_mca_notify_die { 157struct ia64_mca_notify_die {
158 struct ia64_sal_os_state *sos; 158 struct ia64_sal_os_state *sos;
159 int *monarch_cpu; 159 int *monarch_cpu;
160 int *data;
160}; 161};
161 162
162DECLARE_PER_CPU(u64, ia64_mca_pal_base); 163DECLARE_PER_CPU(u64, ia64_mca_pal_base);
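
The mca.h hunk adds an int *data member to struct ia64_mca_notify_die so the MCA notify hooks can pass extra per-event state to die-notifier callbacks. Below is a hedged sketch of how a callback might read it, assuming the existing ia64 convention (as in crash.c's kdump_init_notifier) of passing the structure through the die_args err slot; the notifier name is hypothetical and this is an illustration, not code from the commit.

#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <asm/mca.h>

static int example_mca_notifier(struct notifier_block *self,
				unsigned long val, void *arg)
{
	struct die_args *args = arg;
	struct ia64_mca_notify_die *nd =
		(struct ia64_mca_notify_die *)args->err;

	if (val == DIE_MCA_MONARCH_PROCESS && nd->data) {
		/* the new 'data' pointer carries additional MCA state
		 * supplied by the notify hooks in mca.c */
	}

	return NOTIFY_DONE;
}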