Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kernel/acpi.c                    | 53
-rw-r--r--  arch/ia64/kernel/entry.S                   |  4
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c              | 15
-rw-r--r--  arch/ia64/kernel/setup.c                   |  4
-rw-r--r--  arch/ia64/kernel/smpboot.c                 |  5
-rw-r--r--  arch/ia64/kernel/time.c                    | 39
-rw-r--r--  arch/ia64/kernel/traps.c                   |  8
-rw-r--r--  arch/ia64/sn/kernel/io_init.c              | 97
-rw-r--r--  arch/ia64/sn/kernel/setup.c                |  4
-rw-r--r--  arch/ia64/sn/kernel/sn2/prominfo_proc.c    | 25
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c          | 35
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_proc_fs.c       | 22
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer.c            | 19
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer_interrupt.c  |  7
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c                |  4
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c          |  8
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c             | 20
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                 | 16
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c         | 29
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c         | 14
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c    |  9
21 files changed, 235 insertions(+), 202 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d2702c419cf8..ecd44bdc8394 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -761,6 +761,59 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
 	return (0);
 }
 
+int additional_cpus __initdata = -1;
+
+static __init int setup_additional_cpus(char *s)
+{
+	if (s)
+		additional_cpus = simple_strtol(s, NULL, 0);
+
+	return 0;
+}
+
+early_param("additional_cpus", setup_additional_cpus);
+
+/*
+ * cpu_possible_map should be static, it cannot change as cpu's
+ * are onlined, or offlined. The reason is per-cpu data-structures
+ * are allocated by some modules at init time, and dont expect to
+ * do this dynamically on cpu arrival/departure.
+ * cpu_present_map on the other hand can change dynamically.
+ * In case when cpu_hotplug is not compiled, then we resort to current
+ * behaviour, which is cpu_possible == cpu_present.
+ * - Ashok Raj
+ *
+ * Three ways to find out the number of additional hotplug CPUs:
+ * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
+ * - The user can overwrite it with additional_cpus=NUM
+ * - Otherwise don't reserve additional CPUs.
+ */
+__init void prefill_possible_map(void)
+{
+	int i;
+	int possible, disabled_cpus;
+
+	disabled_cpus = total_cpus - available_cpus;
+
+	if (additional_cpus == -1) {
+		if (disabled_cpus > 0)
+			additional_cpus = disabled_cpus;
+		else
+			additional_cpus = 0;
+	}
+
+	possible = available_cpus + additional_cpus;
+
+	if (possible > NR_CPUS)
+		possible = NR_CPUS;
+
+	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+		possible, max((possible - available_cpus), 0));
+
+	for (i = 0; i < possible; i++)
+		cpu_set(i, cpu_possible_map);
+}
+
 int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 27b222c277e4..930fdfca6ddb 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -569,7 +569,9 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 .mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
 .mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
 	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-.ret3:	br.cond.sptk .work_pending_syscall_end
+.ret3:
+(pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
+	br.cond.sptk .work_pending_syscall_end
 
 strace_error:
 	ld8 r3=[r2]				// load pt_regs.r8
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index e72de580ebbf..bbcfd08378a6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -10,23 +10,8 @@
 
 #include <linux/string.h>
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);
 EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strcpy);
 EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strpbrk);
 
 #include <asm/checksum.h>
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 35f7835294a3..3258e09278d0 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -430,6 +430,7 @@ setup_arch (char **cmdline_p)
 	if (early_console_setup(*cmdline_p) == 0)
 		mark_bsp_online();
 
+	parse_early_param();
 #ifdef CONFIG_ACPI
 	/* Initialize the ACPI boot-time table parser */
 	acpi_table_init();
@@ -688,6 +689,9 @@ void
 setup_per_cpu_areas (void)
 {
 	/* start_kernel() requires this... */
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
 }
 
 /*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 8f44e7d2df66..b681ef34a86e 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -129,7 +129,7 @@ DEFINE_PER_CPU(int, cpu_state);
 /* Bitmasks of currently online, and possible CPUs */
 cpumask_t cpu_online_map;
 EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_possible_map);
 
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
@@ -506,9 +506,6 @@ smp_build_cpu_map (void)
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		ia64_cpu_to_sapicid[cpu] = -1;
-#ifdef CONFIG_HOTPLUG_CPU
-		cpu_set(cpu, cpu_possible_map);
-#endif
 	}
 
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index a094ec49ccfa..307d01e15b2e 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -250,32 +250,27 @@ time_init (void)
 	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
 }
 
-#define SMALLUSECS 100
-
-void
-udelay (unsigned long usecs)
+/*
+ * Generic udelay assumes that if preemption is allowed and the thread
+ * migrates to another CPU, that the ITC values are synchronized across
+ * all CPUs.
+ */
+static void
+ia64_itc_udelay (unsigned long usecs)
 {
-	unsigned long start;
-	unsigned long cycles;
-	unsigned long smallusecs;
+	unsigned long start = ia64_get_itc();
+	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;
 
-	/*
-	 * Execute the non-preemptible delay loop (because the ITC might
-	 * not be synchronized between CPUS) in relatively short time
-	 * chunks, allowing preemption between the chunks.
-	 */
-	while (usecs > 0) {
-		smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs;
-		preempt_disable();
-		cycles = smallusecs*local_cpu_data->cyc_per_usec;
-		start = ia64_get_itc();
+	while (time_before(ia64_get_itc(), end))
+		cpu_relax();
+}
 
-		while (ia64_get_itc() - start < cycles)
-			cpu_relax();
+void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
 
-		preempt_enable();
-		usecs -= smallusecs;
-	}
+void
+udelay (unsigned long usecs)
+{
+	(*ia64_udelay)(usecs);
 }
 EXPORT_SYMBOL(udelay);
 
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 55391901b013..dabd6c32641e 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -16,6 +16,7 @@
 #include <linux/module.h>       /* for EXPORT_SYMBOL */
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <linux/delay.h>		/* for ssleep() */
 
 #include <asm/fpswa.h>
 #include <asm/ia32.h>
@@ -116,6 +117,13 @@ die (const char *str, struct pt_regs *regs, long err)
 	bust_spinlocks(0);
 	die.lock_owner = -1;
 	spin_unlock_irq(&die.lock);
+
+	if (panic_on_oops) {
+		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+		ssleep(5);
+		panic("Fatal exception");
+	}
+
 	do_exit(SIGSEGV);
 }
 
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 3437c2390429..3edef0d32f86 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -23,6 +23,10 @@
 #include "xtalk/hubdev.h"
 #include "xtalk/xwidgetdev.h"
 
+
+extern void sn_init_cpei_timer(void);
+extern void register_sn_procfs(void);
+
 static struct list_head sn_sysdata_list;
 
 /* sysdata list struct */
@@ -40,12 +44,12 @@ struct brick {
 	struct slab_info slab_info[MAX_SLABS + 1];
 };
 
-int sn_ioif_inited = 0;		/* SN I/O infrastructure initialized? */
+int sn_ioif_inited;		/* SN I/O infrastructure initialized? */
 
 struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];	/* indexed by asic type */
 
-static int max_segment_number = 0; /* Default highest segment number */
+static int max_segment_number;	    /* Default highest segment number */
 static int max_pcibus_number = 255; /* Default highest pci bus number */
 
 /*
  * Hooks and struct for unsupported pci providers
@@ -84,7 +88,6 @@ static inline u64
 sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
 			     u64 address)
 {
-
 	struct ia64_sal_retval ret_stuff;
 	ret_stuff.status = 0;
 	ret_stuff.v0 = 0;
@@ -94,7 +97,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
 			(u64) nasid, (u64) widget_num,
 			(u64) device_num, (u64) address, 0, 0, 0);
 	return ret_stuff.status;
-
 }
 
 /*
@@ -102,7 +104,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
  */
 static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
 {
-
 	struct ia64_sal_retval ret_stuff;
 	ret_stuff.status = 0;
 	ret_stuff.v0 = 0;
@@ -118,7 +119,6 @@ static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
  */
 static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
 {
-
 	struct ia64_sal_retval ret_stuff;
 	ret_stuff.status = 0;
 	ret_stuff.v0 = 0;
@@ -215,7 +215,7 @@ static void __init sn_fixup_ionodes(void)
 	struct hubdev_info *hubdev;
 	u64 status;
 	u64 nasid;
-	int i, widget, device;
+	int i, widget, device, size;
 
 	/*
 	 * Get SGI Specific HUB chipset information.
@@ -251,48 +251,37 @@ static void __init sn_fixup_ionodes(void)
 		if (!hubdev->hdi_flush_nasid_list.widget_p)
 			continue;
 
+		size = (HUB_WIDGET_ID_MAX + 1) *
+			sizeof(struct sn_flush_device_kernel *);
 		hubdev->hdi_flush_nasid_list.widget_p =
-		    kmalloc((HUB_WIDGET_ID_MAX + 1) *
-			    sizeof(struct sn_flush_device_kernel *),
-			    GFP_KERNEL);
-		memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
-		       (HUB_WIDGET_ID_MAX + 1) *
-		       sizeof(struct sn_flush_device_kernel *));
+			kzalloc(size, GFP_KERNEL);
+		if (!hubdev->hdi_flush_nasid_list.widget_p)
+			BUG();
 
 		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
-			sn_flush_device_kernel = kmalloc(DEV_PER_WIDGET *
-						  sizeof(struct
-						     sn_flush_device_kernel),
-							 GFP_KERNEL);
+			size = DEV_PER_WIDGET *
+				sizeof(struct sn_flush_device_kernel);
+			sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
 			if (!sn_flush_device_kernel)
 				BUG();
-			memset(sn_flush_device_kernel, 0x0,
-			       DEV_PER_WIDGET *
-			       sizeof(struct sn_flush_device_kernel));
 
 			dev_entry = sn_flush_device_kernel;
 			for (device = 0; device < DEV_PER_WIDGET;
 			     device++,dev_entry++) {
-				dev_entry->common = kmalloc(sizeof(struct
-						     sn_flush_device_common),
-							    GFP_KERNEL);
+				size = sizeof(struct sn_flush_device_common);
+				dev_entry->common = kzalloc(size, GFP_KERNEL);
 				if (!dev_entry->common)
 					BUG();
-				memset(dev_entry->common, 0x0, sizeof(struct
-						     sn_flush_device_common));
 
 				if (sn_prom_feature_available(
 							PRF_DEVICE_FLUSH_LIST))
 					status = sal_get_device_dmaflush_list(
-							nasid,
-							widget,
-							device,
-							(u64)(dev_entry->common));
+						     nasid, widget, device,
+						     (u64)(dev_entry->common));
 				else
 					status = sn_device_fixup_war(nasid,
-								     widget,
-								     device,
+							widget, device,
 							dev_entry->common);
 				if (status != SALRET_OK)
 					panic("SAL call failed: %s\n",
 					      ia64_sal_strerror(status));
@@ -383,13 +372,12 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
 
 	pci_dev_get(dev); /* for the sysdata pointer */
 	pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
-	if (pcidev_info <= 0)
+	if (!pcidev_info)
 		BUG();		/* Cannot afford to run out of memory */
 
-	sn_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
-	if (sn_irq_info <= 0)
+	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+	if (!sn_irq_info)
 		BUG();		/* Cannot afford to run out of memory */
-	memset(sn_irq_info, 0, sizeof(struct sn_irq_info));
 
 	/* Call to retrieve pci device information needed by kernel. */
 	status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
@@ -482,13 +470,13 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
  */
 void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
 {
-	int status = 0;
+	int status;
 	int nasid, cnode;
 	struct pci_controller *controller;
 	struct sn_pci_controller *sn_controller;
 	struct pcibus_bussoft *prom_bussoft_ptr;
 	struct hubdev_info *hubdev_info;
-	void *provider_soft = NULL;
+	void *provider_soft;
 	struct sn_pcibus_provider *provider;
 
 	status = sal_get_pcibus_info((u64) segment, (u64) busnum,
@@ -535,6 +523,8 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
 	bus->sysdata = controller;
 	if (provider->bus_fixup)
 		provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
+	else
+		provider_soft = NULL;
 
 	if (provider_soft == NULL) {
 		/* fixup failed or not applicable */
@@ -638,13 +628,8 @@ void sn_bus_free_sysdata(void)
 
 static int __init sn_pci_init(void)
 {
-	int i = 0;
-	int j = 0;
+	int i, j;
 	struct pci_dev *pci_dev = NULL;
-	extern void sn_init_cpei_timer(void);
-#ifdef CONFIG_PROC_FS
-	extern void register_sn_procfs(void);
-#endif
 
 	if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
 		return 0;
@@ -700,32 +685,29 @@ static int __init sn_pci_init(void)
  */
 void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
 {
-
 	struct hubdev_info *hubdev_info;
+	int size;
+	pg_data_t *pg;
+
+	size = sizeof(struct hubdev_info);
 
 	if (node >= num_online_nodes())	/* Headless/memless IO nodes */
-		hubdev_info =
-		    (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(0),
-							      sizeof(struct
-								     hubdev_info));
+		pg = NODE_DATA(0);
 	else
-		hubdev_info =
-		    (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(node),
-							      sizeof(struct
-								     hubdev_info));
-	npda->pdinfo = (void *)hubdev_info;
+		pg = NODE_DATA(node);
 
+	hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
+
+	npda->pdinfo = (void *)hubdev_info;
 }
 
 geoid_t
 cnodeid_get_geoid(cnodeid_t cnode)
 {
-
 	struct hubdev_info *hubdev;
 
 	hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
 	return hubdev->hdi_geoid;
-
 }
 
 subsys_initcall(sn_pci_init);
@@ -734,3 +716,4 @@ EXPORT_SYMBOL(sn_pci_unfixup_slot);
 EXPORT_SYMBOL(sn_pci_controller_fixup);
 EXPORT_SYMBOL(sn_bus_store_sysdata);
 EXPORT_SYMBOL(sn_bus_free_sysdata);
+EXPORT_SYMBOL(sn_pcidev_info_get);
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 48645ac120fc..5b84836c2171 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
 
-DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
 
 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
@@ -317,6 +317,7 @@ struct pcdp_vga_device {
 #define PCDP_PCI_TRANS_IOPORT	0x02
 #define PCDP_PCI_TRANS_MMIO	0x01
 
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
 static void
 sn_scan_pcdp(void)
 {
@@ -358,6 +359,7 @@ sn_scan_pcdp(void)
 			break; /* once we find the primary, we're done */
 	}
 }
+#endif
 
 static unsigned long sn2_rtc_initial;
 
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
index 81c63b2f8ae9..6ae276d5d50c 100644
--- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 1999,2001-2004, 2006 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * Module to export the system's Firmware Interface Tables, including
  * PROM revision numbers and banners, in /proc
@@ -190,7 +190,7 @@ static int
 read_version_entry(char *page, char **start, off_t off, int count, int *eof,
 		   void *data)
 {
-	int len = 0;
+	int len;
 
 	/* data holds the NASID of the node */
 	len = dump_version(page, (unsigned long)data);
@@ -202,7 +202,7 @@ static int
 read_fit_entry(char *page, char **start, off_t off, int count, int *eof,
 	       void *data)
 {
-	int len = 0;
+	int len;
 
 	/* data holds the NASID of the node */
 	len = dump_fit(page, (unsigned long)data);
@@ -229,13 +229,16 @@ int __init prominfo_init(void)
 	struct proc_dir_entry *p;
 	cnodeid_t cnodeid;
 	unsigned long nasid;
+	int size;
 	char name[NODE_NAME_LEN];
 
 	if (!ia64_platform_is("sn2"))
 		return 0;
 
-	proc_entries = kmalloc(num_online_nodes() * sizeof(struct proc_dir_entry *),
-			       GFP_KERNEL);
+	size = num_online_nodes() * sizeof(struct proc_dir_entry *);
+	proc_entries = kzalloc(size, GFP_KERNEL);
+	if (!proc_entries)
+		return -ENOMEM;
 
 	sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);
 
@@ -244,14 +247,12 @@ int __init prominfo_init(void)
 		sprintf(name, "node%d", cnodeid);
 		*entp = proc_mkdir(name, sgi_prominfo_entry);
 		nasid = cnodeid_to_nasid(cnodeid);
-		p = create_proc_read_entry(
-			"fit", 0, *entp, read_fit_entry,
-			(void *)nasid);
+		p = create_proc_read_entry("fit", 0, *entp, read_fit_entry,
+					   (void *)nasid);
 		if (p)
 			p->owner = THIS_MODULE;
-		p = create_proc_read_entry(
-			"version", 0, *entp, read_version_entry,
-			(void *)nasid);
+		p = create_proc_read_entry("version", 0, *entp,
+					   read_version_entry, (void *)nasid);
 		if (p)
 			p->owner = THIS_MODULE;
 		entp++;
@@ -263,7 +264,7 @@ int __init prominfo_init(void)
 void __exit prominfo_exit(void)
 {
 	struct proc_dir_entry **entp;
-	unsigned cnodeid;
+	unsigned int cnodeid;
 	char name[NODE_NAME_LEN];
 
 	entp = proc_entries;
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index f153a4c35c70..24eefb2fc55f 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -46,8 +46,14 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats);
 
 static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
 
-void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned long *, unsigned long,
-	volatile unsigned long *, unsigned long);
+extern unsigned long
+sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
+			       volatile unsigned long *, unsigned long,
+			       volatile unsigned long *, unsigned long);
+void
+sn2_ptc_deadlock_recovery(short *, short, short, int,
+			  volatile unsigned long *, unsigned long,
+			  volatile unsigned long *, unsigned long);
 
 /*
  * Note: some is the following is captured here to make degugging easier
@@ -59,16 +65,6 @@ void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned lon
 #define reset_max_active_on_deadlock()	1
 #define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
 
-static inline void ptc_lock(int sh1, unsigned long *flagp)
-{
-	spin_lock_irqsave(PTC_LOCK(sh1), *flagp);
-}
-
-static inline void ptc_unlock(int sh1, unsigned long flags)
-{
-	spin_unlock_irqrestore(PTC_LOCK(sh1), flags);
-}
-
 struct ptc_stats {
 	unsigned long ptc_l;
 	unsigned long change_rid;
@@ -82,6 +78,8 @@ struct ptc_stats {
 	unsigned long shub_ptc_flushes_not_my_mm;
 };
 
+#define sn2_ptctest	0
+
 static inline unsigned long wait_piowc(void)
 {
 	volatile unsigned long *piows;
@@ -200,7 +198,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 	max_active = max_active_pio(shub1);
 
 	itc = ia64_get_itc();
-	ptc_lock(shub1, &flags);
+	spin_lock_irqsave(PTC_LOCK(shub1), flags);
 	itc2 = ia64_get_itc();
 
 	__get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
@@ -258,7 +256,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		ia64_srlz_d();
 	}
 
-	ptc_unlock(shub1, flags);
+	spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
 
 	preempt_enable();
 }
@@ -270,11 +268,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
  * TLB flush transaction.  The recovery sequence is somewhat tricky & is
  * coded in assembly language.
  */
-void sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, volatile unsigned long *ptc0, unsigned long data0,
-	volatile unsigned long *ptc1, unsigned long data1)
+
+void
+sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
+			  volatile unsigned long *ptc0, unsigned long data0,
+			  volatile unsigned long *ptc1, unsigned long data1)
 {
-	extern unsigned long sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
-		volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
 	short nasid, i;
 	unsigned long *piows, zeroval, n;
 
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
index a06719d752a0..c686d9c12f7b 100644
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -6,11 +6,11 @@
  * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 #include <linux/config.h>
-#include <asm/uaccess.h>
 
 #ifdef CONFIG_PROC_FS
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <asm/uaccess.h>
 #include <asm/sn/sn_sal.h>
 
 static int partition_id_show(struct seq_file *s, void *p)
@@ -90,10 +90,10 @@ static int coherence_id_open(struct inode *inode, struct file *file)
 	return single_open(file, coherence_id_show, NULL);
 }
 
-static struct proc_dir_entry *sn_procfs_create_entry(
-	const char *name, struct proc_dir_entry *parent,
-	int (*openfunc)(struct inode *, struct file *),
-	int (*releasefunc)(struct inode *, struct file *))
+static struct proc_dir_entry
+*sn_procfs_create_entry(const char *name, struct proc_dir_entry *parent,
+			int (*openfunc)(struct inode *, struct file *),
+			int (*releasefunc)(struct inode *, struct file *))
 {
 	struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
 
@@ -126,24 +126,24 @@ void register_sn_procfs(void)
 		return;
 
 	sn_procfs_create_entry("partition_id", sgi_proc_dir,
-		partition_id_open, single_release);
+			       partition_id_open, single_release);
 
 	sn_procfs_create_entry("system_serial_number", sgi_proc_dir,
-		system_serial_number_open, single_release);
+			       system_serial_number_open, single_release);
 
 	sn_procfs_create_entry("licenseID", sgi_proc_dir,
-		licenseID_open, single_release);
+			       licenseID_open, single_release);
 
 	e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir,
-		sn_force_interrupt_open, single_release);
+				   sn_force_interrupt_open, single_release);
 	if (e)
 		e->proc_fops->write = sn_force_interrupt_write_proc;
 
 	sn_procfs_create_entry("coherence_id", sgi_proc_dir,
-		coherence_id_open, single_release);
+			       coherence_id_open, single_release);
 
 	sn_procfs_create_entry("sn_topology", sgi_proc_dir,
-		sn_topology_open, sn_topology_release);
+			       sn_topology_open, sn_topology_release);
 }
 
 #endif /* CONFIG_PROC_FS */
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index deb9baf4d473..56a88b6df4b4 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -14,6 +14,7 @@
 
 #include <asm/hw_irq.h>
 #include <asm/system.h>
+#include <asm/timex.h>
 
 #include <asm/sn/leds.h>
 #include <asm/sn/shub_mmr.h>
@@ -28,9 +29,27 @@ static struct time_interpolator sn2_interpolator = {
 	.source = TIME_SOURCE_MMIO64
 };
 
+/*
+ * sn udelay uses the RTC instead of the ITC because the ITC is not
+ * synchronized across all CPUs, and the thread may migrate to another CPU
+ * if preemption is enabled.
+ */
+static void
+ia64_sn_udelay (unsigned long usecs)
+{
+	unsigned long start = rtc_time();
+	unsigned long end = start +
+			usecs * sn_rtc_cycles_per_second / 1000000;
+
+	while (time_before((unsigned long)rtc_time(), end))
+		cpu_relax();
+}
+
 void __init sn_timer_init(void)
 {
 	sn2_interpolator.frequency = sn_rtc_cycles_per_second;
 	sn2_interpolator.addr = RTC_COUNTER_ADDR;
 	register_time_interpolator(&sn2_interpolator);
+
+	ia64_udelay = &ia64_sn_udelay;
 }
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
index adf5db2e2afe..fa7f69945917 100644
--- a/arch/ia64/sn/kernel/sn2/timer_interrupt.c
+++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
@@ -1,7 +1,7 @@
 /*
  *
  *
- * Copyright (c) 2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2005, 2006 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -22,11 +22,6 @@
  * License along with this program; if not, write the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
  *
- * Contact information:  Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
  * For further information regarding this notice, see:
  *
  * http://oss.sgi.com/projects/GenInfo/NoticeExplan
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index d263d3e8fbb9..8a56f8b5ffa2 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -284,12 +284,10 @@ struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq,
 	if ((nasid & 1) == 0)
 		return NULL;
 
-	sn_irq_info = kmalloc(sn_irq_size, GFP_KERNEL);
+	sn_irq_info = kzalloc(sn_irq_size, GFP_KERNEL);
 	if (sn_irq_info == NULL)
 		return NULL;
 
-	memset(sn_irq_info, 0x0, sn_irq_size);
-
 	status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq,
 				  req_nasid, slice);
 	if (status) {
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 36e5437a0fb6..cdf6856ce089 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -738,7 +738,9 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
 	/* make sure all activity has settled down first */
 
-	if (atomic_read(&ch->references) > 0) {
+	if (atomic_read(&ch->references) > 0 ||
+			((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) {
 		return;
 	}
 	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
@@ -775,7 +777,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
 	/* both sides are disconnected now */
 
-	if (ch->flags & XPC_C_CONNECTCALLOUT) {
+	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
 		spin_unlock_irqrestore(&ch->lock, *irq_flags);
 		xpc_disconnect_callout(ch, xpcDisconnected);
 		spin_lock_irqsave(&ch->lock, *irq_flags);
@@ -1300,7 +1302,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 			"delivered=%d, partid=%d, channel=%d\n",
 			nmsgs_sent, ch->partid, ch->number);
 
-		if (ch->flags & XPC_C_CONNECTCALLOUT) {
+		if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
 			xpc_activate_kthreads(ch, nmsgs_sent);
 		}
 	}
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 9cd460dfe27e..8cbf16432570 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -750,12 +750,16 @@ xpc_daemonize_kthread(void *args)
 		/* let registerer know that connection has been established */
 
 		spin_lock_irqsave(&ch->lock, irq_flags);
-		if (!(ch->flags & XPC_C_CONNECTCALLOUT)) {
-			ch->flags |= XPC_C_CONNECTCALLOUT;
+		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
+			ch->flags |= XPC_C_CONNECTEDCALLOUT;
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 
 			xpc_connected_callout(ch);
 
+			spin_lock_irqsave(&ch->lock, irq_flags);
+			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+
 			/*
 			 * It is possible that while the callout was being
 			 * made that the remote partition sent some messages.
@@ -777,15 +781,17 @@ xpc_daemonize_kthread(void *args)
 
 	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
 		spin_lock_irqsave(&ch->lock, irq_flags);
-		if ((ch->flags & XPC_C_CONNECTCALLOUT) &&
-				!(ch->flags & XPC_C_DISCONNECTCALLOUT)) {
-			ch->flags |= XPC_C_DISCONNECTCALLOUT;
+		if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+				!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+			ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 
 			xpc_disconnect_callout(ch, xpcDisconnecting);
-		} else {
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+			spin_lock_irqsave(&ch->lock, irq_flags);
+			ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
 		}
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
 		if (atomic_dec_return(&part->nchannels_engaged) == 0) {
 			xpc_mark_partition_disengaged(part);
 			xpc_IPI_send_disengage(part);
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 5a36292388eb..b4b84c269210 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -335,10 +335,10 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
 	 */
 
 	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
-		pci_domain_nr(bus), bus->number,
-		0, /* io */
-		0, /* read */
-		port, size, __pa(val));
+		 pci_domain_nr(bus), bus->number,
+		 0, /* io */
+		 0, /* read */
+		 port, size, __pa(val));
 
 	if (isrv.status == 0)
 		return size;
@@ -381,10 +381,10 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 	 */
 
 	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
-		pci_domain_nr(bus), bus->number,
-		0, /* io */
-		1, /* write */
-		port, size, __pa(&val));
+		 pci_domain_nr(bus), bus->number,
+		 0, /* io */
+		 1, /* write */
+		 port, size, __pa(&val));
 
 	if (isrv.status == 0)
 		return size;
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index aa3fa5152a32..1f0253bfe0a0 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/types.h>
@@ -12,7 +12,7 @@
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/pcidev.h>
 
-int pcibr_invalidate_ate = 0;	/* by default don't invalidate ATE on free */
+int pcibr_invalidate_ate;	/* by default don't invalidate ATE on free */
 
 /*
  * mark_ate: Mark the ate as either free or inuse.
@@ -20,14 +20,12 @@ int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
 static void mark_ate(struct ate_resource *ate_resource, int start, int number,
 		     u64 value)
 {
-
 	u64 *ate = ate_resource->ate;
 	int index;
 	int length = 0;
 
 	for (index = start; length < number; index++, length++)
 		ate[index] = value;
-
 }
 
 /*
@@ -37,7 +35,6 @@ static void mark_ate(struct ate_resource *ate_resource, int start, int number,
 static int find_free_ate(struct ate_resource *ate_resource, int start,
 			 int count)
 {
-
 	u64 *ate = ate_resource->ate;
 	int index;
 	int start_free;
@@ -70,12 +67,10 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
 static inline void free_ate_resource(struct ate_resource *ate_resource,
 				     int start)
 {
-
 	mark_ate(ate_resource, start, ate_resource->ate[start], 0);
 	if ((ate_resource->lowest_free_index > start) ||
 	    (ate_resource->lowest_free_index < 0))
 		ate_resource->lowest_free_index = start;
-
 }
 
 /*
@@ -84,7 +79,6 @@ static inline void free_ate_resource(struct ate_resource *ate_resource,
 static inline int alloc_ate_resource(struct ate_resource *ate_resource,
 				     int ate_needed)
 {
-
 	int start_index;
 
 	/*
@@ -118,19 +112,12 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
  */
 int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
 {
-	int status = 0;
-	u64 flag;
+	int status;
+	unsigned long flags;
 
-	flag = pcibr_lock(pcibus_info);
+	spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
 	status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
-
-	if (status < 0) {
-		/* Failed to allocate */
-		pcibr_unlock(pcibus_info, flag);
-		return -1;
-	}
-
-	pcibr_unlock(pcibus_info, flag);
+	spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
 
 	return status;
 }
@@ -182,7 +169,7 @@ void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
 		ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
 	}
 
-	flags = pcibr_lock(pcibus_info);
+	spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
 	free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
-	pcibr_unlock(pcibus_info, flags);
+	spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
 }
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 54ce5b7ceed2..9f86bb6519aa 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -137,14 +137,12 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
 		pci_addr |= PCI64_ATTR_VIRTUAL;
 
 	return pci_addr;
-
 }
 
 static dma_addr_t
 pcibr_dmatrans_direct32(struct pcidev_info * info,
 			u64 paddr, size_t req_size, u64 flags)
 {
-
 	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
 	struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
 	    pdi_pcibus_info;
@@ -171,7 +169,6 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
 	}
 
 	return PCI32_DIRECT_BASE | offset;
-
 }
 
 /*
@@ -218,9 +215,8 @@ void sn_dma_flush(u64 addr)
 	u64 flags;
 	u64 itte;
 	struct hubdev_info *hubinfo;
-	volatile struct sn_flush_device_kernel *p;
-	volatile struct sn_flush_device_common *common;
-
+	struct sn_flush_device_kernel *p;
+	struct sn_flush_device_common *common;
 	struct sn_flush_nasid_entry *flush_nasid_list;
 
 	if (!sn_ioif_inited)
@@ -310,8 +306,7 @@ void sn_dma_flush(u64 addr)
 					(common->sfdl_slot - 1));
 		}
 	} else {
-		spin_lock_irqsave((spinlock_t *)&p->sfdl_flush_lock,
-				  flags);
+		spin_lock_irqsave(&p->sfdl_flush_lock, flags);
 		*common->sfdl_flush_addr = 0;
 
 		/* force an interrupt. */
@@ -322,8 +317,7 @@ void sn_dma_flush(u64 addr)
 			cpu_relax();
 
 		/* okay, everything is synched up. */
-		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock,
-				       flags);
+		spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
 	}
 	return;
 }
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 2fac27049bf6..98f716bd92f0 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -163,9 +163,12 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 	/* Setup the PMU ATE map */
 	soft->pbi_int_ate_resource.lowest_free_index = 0;
 	soft->pbi_int_ate_resource.ate =
-	    kmalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
-	memset(soft->pbi_int_ate_resource.ate, 0,
-	       (soft->pbi_int_ate_size * sizeof(u64)));
+		kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
+
+	if (!soft->pbi_int_ate_resource.ate) {
+		kfree(soft);
+		return NULL;
+	}
 
 	if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
 		/* TIO PCI Bridge: find nearest node with CPUs */