author    Dave Jones <davej@redhat.com>  2006-09-05 17:20:21 -0400
committer Dave Jones <davej@redhat.com>  2006-09-05 17:20:21 -0400
commit    115b384cf87249d76adb0b21aca11ee22128927d (patch)
tree      f39a2a54863e9d82d1196906f92c82ab5991c6af /arch/ia64
parent    8eb7925f93af75e66a240d148efdec212f95bcb7 (diff)
parent    c336923b668fdcf0312efbec3b44895d713f4d81 (diff)
Merge ../linus
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                    |  4
-rw-r--r--  arch/ia64/configs/sn2_defconfig      |  2
-rw-r--r--  arch/ia64/defconfig                  |  2
-rw-r--r--  arch/ia64/hp/sim/simscsi.c           |  5
-rw-r--r--  arch/ia64/kernel/Makefile            |  3
-rw-r--r--  arch/ia64/kernel/acpi.c              |  2
-rw-r--r--  arch/ia64/kernel/efi.c               |  6
-rw-r--r--  arch/ia64/kernel/gate.lds.S          |  1
-rw-r--r--  arch/ia64/kernel/head.S              |  2
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c        |  2
-rw-r--r--  arch/ia64/kernel/kprobes.c           |  9
-rw-r--r--  arch/ia64/kernel/pal.S               | 18
-rw-r--r--  arch/ia64/kernel/palinfo.c           | 47
-rw-r--r--  arch/ia64/kernel/topology.c          |  8
-rw-r--r--  arch/ia64/kernel/traps.c             |  5
-rw-r--r--  arch/ia64/kernel/uncached.c          | 86
-rw-r--r--  arch/ia64/lib/Makefile               |  2
-rw-r--r--  arch/ia64/mm/contig.c                | 16
-rw-r--r--  arch/ia64/mm/discontig.c             | 68
-rw-r--r--  arch/ia64/mm/init.c                  | 55
-rw-r--r--  arch/ia64/mm/ioremap.c               |  6
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c    |  4
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c       | 30
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c  | 24
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c    |  6
25 files changed, 218 insertions(+), 195 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 47de9ee6bcd6..674de8943478 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -258,7 +258,7 @@ config NR_CPUS
 	int "Maximum number of CPUs (2-1024)"
 	range 2 1024
 	depends on SMP
-	default "64"
+	default "1024"
 	help
 	  You should set this to the number of CPUs in your system, but
 	  keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but
@@ -354,7 +354,7 @@ config NUMA
 config NODES_SHIFT
 	int "Max num nodes shift(3-10)"
 	range 3 10
-	default "8"
+	default "10"
 	depends on NEED_MULTIPLE_NODES
 	help
 	  This option specifies the maximum number of nodes in your SSI system.
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 9ea35398e10d..0f14a82b856e 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -363,7 +363,7 @@ CONFIG_BLK_DEV_IDECD=y
 #
 CONFIG_IDE_GENERIC=y
 CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
+CONFIG_IDEPCI_SHARE_IRQ=y
 # CONFIG_BLK_DEV_OFFBOARD is not set
 # CONFIG_BLK_DEV_GENERIC is not set
 # CONFIG_BLK_DEV_OPTI621 is not set
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 6cba55da572a..9001b3fbaa32 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -366,7 +366,7 @@ CONFIG_BLK_DEV_IDESCSI=m
 # CONFIG_IDE_GENERIC is not set
 # CONFIG_BLK_DEV_IDEPNP is not set
 CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
+CONFIG_IDEPCI_SHARE_IRQ=y
 # CONFIG_BLK_DEV_OFFBOARD is not set
 CONFIG_BLK_DEV_GENERIC=y
 # CONFIG_BLK_DEV_OPTI621 is not set
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index a3fe97531134..8f0a16a79a67 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -151,7 +151,7 @@ static void
 simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
 {
 	int list_len = sc->use_sg;
-	struct scatterlist *sl = (struct scatterlist *)sc->buffer;
+	struct scatterlist *sl = (struct scatterlist *)sc->request_buffer;
 	struct disk_stat stat;
 	struct disk_req req;
 
@@ -244,7 +244,8 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
 
 	if (scatterlen == 0)
 		memcpy(sc->request_buffer, buf, len);
-	else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+	else for (slp = (struct scatterlist *)sc->request_buffer;
+		  scatterlen-- > 0 && len > 0; slp++) {
 		unsigned thislen = min(len, slp->length);
 
 		memcpy(page_address(slp->page) + slp->offset, buf, thislen);
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 0e4553f320bf..ad8215a3c586 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -45,7 +45,8 @@ CPPFLAGS_gate.lds := -P -C -U$(ARCH)
 quiet_cmd_gate = GATE $@
       cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
 
-GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1
+GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
+		     $(call ld-option, -Wl$(comma)--hash-style=sysv)
 $(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
 	$(call if_changed,gate)
 
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 99761b81db44..0176556aeecc 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -55,7 +55,7 @@
 
 #define BAD_MADT_ENTRY(entry, end) (                                        \
 		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
-		((acpi_table_entry_header *)entry)->length != sizeof(*entry))
+		((acpi_table_entry_header *)entry)->length < sizeof(*entry))
 
 #define PREFIX			"ACPI: "
 
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index e4bfa9dafbce..bb8770a177b5 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -632,7 +632,7 @@ kern_memory_descriptor (unsigned long phys_addr)
 		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
 			 return md;
 	}
-	return 0;
+	return NULL;
 }
 
 static efi_memory_desc_t *
@@ -652,7 +652,7 @@ efi_memory_descriptor (unsigned long phys_addr)
 		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
 			 return md;
 	}
-	return 0;
+	return NULL;
 }
 
 u32
@@ -923,7 +923,7 @@ find_memmap_space (void)
 void
 efi_memmap_init(unsigned long *s, unsigned long *e)
 {
-	struct kern_memdesc *k, *prev = 0;
+	struct kern_memdesc *k, *prev = NULL;
 	u64	contig_low=0, contig_high=0;
 	u64	as, ae, lim;
 	void *efi_map_start, *efi_map_end, *p, *q;
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index cc35cddfd4cf..6d198339bf85 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -12,6 +12,7 @@ SECTIONS
   . = GATE_ADDR + SIZEOF_HEADERS;
 
   .hash				: { *(.hash) }			:readable
+  .gnu.hash			: { *(.gnu.hash) }
   .dynsym			: { *(.dynsym) }
   .dynstr			: { *(.dynstr) }
   .gnu.version			: { *(.gnu.version) }
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 561b8f1d3bc7..29236f0c62b5 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -853,7 +853,6 @@ END(__ia64_init_fpu)
  */
 GLOBAL_ENTRY(ia64_switch_mode_phys)
 	{
-	alloc r2=ar.pfs,0,0,0,0
 	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
 	mov r15=ip
 	}
@@ -902,7 +901,6 @@ END(ia64_switch_mode_phys)
  */
 GLOBAL_ENTRY(ia64_switch_mode_virt)
 	{
-	alloc r2=ar.pfs,0,0,0,0
 	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
 	mov r15=ip
 	}
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b7cf651ceb14..3ead20fb6f4b 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(__udivdi3);
 EXPORT_SYMBOL(__moddi3);
 EXPORT_SYMBOL(__umoddi3);
 
-#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
 extern void xor_ia64_2(void);
 extern void xor_ia64_3(void);
 extern void xor_ia64_4(void);
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 00d9c83b8020..781960f80b6f 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -448,11 +448,20 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	return 0;
 }
 
+void __kprobes flush_insn_slot(struct kprobe *p)
+{
+	unsigned long arm_addr;
+
+	arm_addr = ((unsigned long)&p->opcode.bundle) & ~0xFULL;
+	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long)p->addr;
 	unsigned long arm_addr = addr & ~0xFULL;
 
+	flush_insn_slot(p);
 	memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
 	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }
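The kprobes hunk above adds flush_insn_slot() so the saved copy of the original bundle is flushed from the instruction cache before the breakpoint is armed. IA-64 instructions live in 16-byte bundles, which is why both functions mask the address with ~0xF. A minimal standalone sketch of that alignment idiom, with stand-in names (flush_one_bundle, BUNDLE_SIZE) rather than the kernel API:

#include <stdint.h>

#define BUNDLE_SIZE 16	/* IA-64 instruction bundles are 16 bytes */

/* assumed flush primitive in the style of flush_icache_range() */
extern void flush_icache_range(unsigned long start, unsigned long end);

static void flush_one_bundle(const void *slot)
{
	/* clear the low 4 bits to land on the enclosing bundle boundary */
	unsigned long arm_addr = (unsigned long)slot & ~0xFUL;

	flush_icache_range(arm_addr, arm_addr + BUNDLE_SIZE);
}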
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 5018c7f2e7a8..ebaf1e685f5e 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -217,12 +217,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	.body
 	;;
 	ld8 loc2 = [loc2]		// loc2 <- entry point
-	mov out0 = in0		// first argument
-	mov out1 = in1		// copy arg2
-	mov out2 = in2		// copy arg3
-	mov out3 = in3		// copy arg3
-	;;
-	mov loc3 = psr		// save psr
+	mov loc3 = psr		// save psr
 	;;
 	mov loc4=ar.rsc			// save RSE configuration
 	dep.z loc2=loc2,0,61		// convert pal entry point to physical
@@ -236,18 +231,23 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	;;
 	andcm r16=loc3,r16		// removes bits to clear from psr
 	br.call.sptk.many rp=ia64_switch_mode_phys
-.ret6:
+
+	mov out0 = in0		// first argument
+	mov out1 = in1		// copy arg2
+	mov out2 = in2		// copy arg3
+	mov out3 = in3		// copy arg3
 	mov loc5 = r19
 	mov loc6 = r20
+
 	br.call.sptk.many rp=b7		// now make the call
-.ret7:
+
 	mov ar.rsc=0		// put RSE in enforced lazy, LE mode
 	mov r16=loc3		// r16= original psr
 	mov r19=loc5
 	mov r20=loc6
 	br.call.sptk.many rp=ia64_switch_mode_virt	// return to virtual mode
 
-.ret8:	mov psr.l = loc3		// restore init PSR
+	mov psr.l = loc3		// restore init PSR
 	mov ar.pfs = loc1
 	mov rp = loc0
 	;;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 3f5bac59209a..0b546e2b36ac 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -566,29 +566,23 @@ version_info(char *page)
 	pal_version_u_t min_ver, cur_ver;
 	char *p = page;
 
-	/* The PAL_VERSION call is advertised as being able to support
-	 * both physical and virtual mode calls. This seems to be a documentation
-	 * bug rather than firmware bug. In fact, it does only support physical mode.
-	 * So now the code reflects this fact and the pal_version() has been updated
-	 * accordingly.
-	 */
-	if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
+	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
+		return 0;
 
 	p += sprintf(p,
 		     "PAL_vendor : 0x%02x (min=0x%02x)\n"
-		     "PAL_A      : %x.%x.%x (min=%x.%x.%x)\n"
-		     "PAL_B      : %x.%x.%x (min=%x.%x.%x)\n",
-		     cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,
-
-		     cur_ver.pal_version_s.pv_pal_a_model>>4,
-		     cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
-		     min_ver.pal_version_s.pv_pal_a_model>>4,
-		     min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,
-
-		     cur_ver.pal_version_s.pv_pal_b_model>>4,
-		     cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
-		     min_ver.pal_version_s.pv_pal_b_model>>4,
-		     min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
+		     "PAL_A      : %02x.%02x (min=%02x.%02x)\n"
+		     "PAL_B      : %02x.%02x (min=%02x.%02x)\n",
+		     cur_ver.pal_version_s.pv_pal_vendor,
+		     min_ver.pal_version_s.pv_pal_vendor,
+		     cur_ver.pal_version_s.pv_pal_a_model,
+		     cur_ver.pal_version_s.pv_pal_a_rev,
+		     min_ver.pal_version_s.pv_pal_a_model,
+		     min_ver.pal_version_s.pv_pal_a_rev,
+		     cur_ver.pal_version_s.pv_pal_b_model,
+		     cur_ver.pal_version_s.pv_pal_b_rev,
+		     min_ver.pal_version_s.pv_pal_b_model,
+		     min_ver.pal_version_s.pv_pal_b_rev);
 	return p - page;
 }
 
@@ -958,9 +952,9 @@ remove_palinfo_proc_entries(unsigned int hcpu)
 	}
 }
 
-static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
-							unsigned long action,
-							void *hcpu)
+#ifdef CONFIG_HOTPLUG_CPU
+static int palinfo_cpu_callback(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
 {
 	unsigned int hotcpu = (unsigned long)hcpu;
 
@@ -968,20 +962,19 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 		create_palinfo_proc_entries(hotcpu);
 		break;
-#ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 		remove_palinfo_proc_entries(hotcpu);
 		break;
-#endif
 	}
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
+static struct notifier_block palinfo_cpu_notifier =
 {
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
 };
+#endif
 
 static int __init
 palinfo_init(void)
@@ -1020,7 +1013,7 @@ palinfo_exit(void)
 	/*
 	 * Unregister from cpu notifier callbacks
 	 */
-	unregister_cpu_notifier(&palinfo_cpu_notifier);
+	unregister_hotcpu_notifier(&palinfo_cpu_notifier);
 }
 
 module_init(palinfo_init);
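The palinfo changes swap the bare CPU notifier for the hotplug variants, so the callback and notifier_block compile away entirely when CONFIG_HOTPLUG_CPU is off, and unregister_hotcpu_notifier() degrades to a no-op. A trimmed sketch of the pattern, with an illustrative name (example_cpu_callback) in place of the palinfo one:

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

#ifdef CONFIG_HOTPLUG_CPU
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		printk(KERN_INFO "setting up state for cpu %u\n", cpu);
		break;
	case CPU_DEAD:
		printk(KERN_INFO "tearing down state for cpu %u\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};
#endif

/* register_hotcpu_notifier(&example_cpu_notifier) and the matching
 * unregister call are no-ops when CONFIG_HOTPLUG_CPU is not set. */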
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index b146f1cfad31..f648c610b10c 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -67,10 +67,8 @@ static int __init topology_init(void)
 #endif
 
 	sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
-	if (!sysfs_cpus) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!sysfs_cpus)
+		panic("kzalloc in topology_init failed - NR_CPUS too big?");
 
 	for_each_present_cpu(i) {
 		if((err = arch_register_cpu(i)))
@@ -435,7 +433,7 @@ static int __cpuinit cache_sysfs_init(void)
 							(void *)(long)i);
 	}
 
-	register_cpu_notifier(&cache_cpu_notifier);
+	register_hotcpu_notifier(&cache_cpu_notifier);
 
 	return 0;
 }
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e7bbb0f40aa2..fffa9e0826bc 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -117,11 +117,8 @@ die (const char *str, struct pt_regs *regs, long err)
 	die.lock_owner = -1;
 	spin_unlock_irq(&die.lock);
 
-	if (panic_on_oops) {
-		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
-		ssleep(5);
+	if (panic_on_oops)
 		panic("Fatal exception");
-	}
 
 	do_exit(SIGSEGV);
 }
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 5f03b9e524dd..4c73a6763669 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -32,32 +32,38 @@
 
 extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
 
-#define MAX_UNCACHED_GRANULES	5
-static int allocated_granules;
+struct uncached_pool {
+	struct gen_pool *pool;
+	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
+	int nchunks_added;		/* #of converted chunks added to pool */
+	atomic_t status;		/* smp called function's return status*/
+};
+
+#define MAX_CONVERTED_CHUNKS_PER_NODE	2
 
-struct gen_pool *uncached_pool[MAX_NUMNODES];
+struct uncached_pool uncached_pools[MAX_NUMNODES];
 
 
 static void uncached_ipi_visibility(void *data)
 {
 	int status;
+	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if ((status != PAL_VISIBILITY_OK) &&
 	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
-		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
-		       "CPU %i\n", status, raw_smp_processor_id());
+		atomic_inc(&uc_pool->status);
 }
 
 
 static void uncached_ipi_mc_drain(void *data)
 {
 	int status;
+	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
 	status = ia64_pal_mc_drain();
-	if (status)
-		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
-		       "CPU %i\n", status, raw_smp_processor_id());
+	if (status != PAL_STATUS_SUCCESS)
+		atomic_inc(&uc_pool->status);
 }
 
 
@@ -70,21 +76,34 @@ static void uncached_ipi_mc_drain(void *data)
  * This is accomplished by first allocating a granule of cached memory pages
  * and then converting them to uncached memory pages.
  */
-static int uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 {
 	struct page *page;
-	int status, i;
+	int status, i, nchunks_added = uc_pool->nchunks_added;
 	unsigned long c_addr, uc_addr;
 
-	if (allocated_granules >= MAX_UNCACHED_GRANULES)
+	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
+		return -1;	/* interrupted by a signal */
+
+	if (uc_pool->nchunks_added > nchunks_added) {
+		/* someone added a new chunk while we were waiting */
+		mutex_unlock(&uc_pool->add_chunk_mutex);
+		return 0;
+	}
+
+	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
+		mutex_unlock(&uc_pool->add_chunk_mutex);
 		return -1;
+	}
 
 	/* attempt to allocate a granule's worth of cached memory pages */
 
 	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
-	if (!page)
+	if (!page) {
+		mutex_unlock(&uc_pool->add_chunk_mutex);
 		return -1;
+	}
 
 	/* convert the memory pages from cached to uncached */
 
@@ -102,11 +121,14 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
 	flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-	if (!status) {
-		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
-		if (status)
+	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+		atomic_set(&uc_pool->status, 0);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool,
+					   0, 1);
+		if (status || atomic_read(&uc_pool->status))
 			goto failed;
-	}
+	} else if (status != PAL_VISIBILITY_OK)
+		goto failed;
 
 	preempt_disable();
 
@@ -120,20 +142,24 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
 
 	preempt_enable();
 
-	ia64_pal_mc_drain();
-	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
-	if (status)
+	status = ia64_pal_mc_drain();
+	if (status != PAL_STATUS_SUCCESS)
+		goto failed;
+	atomic_set(&uc_pool->status, 0);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	if (status || atomic_read(&uc_pool->status))
 		goto failed;
 
 	/*
 	 * The chunk of memory pages has been converted to uncached so now we
 	 * can add it to the pool.
 	 */
-	status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
 	if (status)
 		goto failed;
 
-	allocated_granules++;
+	uc_pool->nchunks_added++;
+	mutex_unlock(&uc_pool->add_chunk_mutex);
 	return 0;
 
 	/* failed to convert or add the chunk so give it back to the kernel */
@@ -142,6 +168,7 @@ failed:
 		ClearPageUncached(&page[i]);
 
 	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+	mutex_unlock(&uc_pool->add_chunk_mutex);
 	return -1;
 }
 
@@ -158,7 +185,7 @@ failed:
 unsigned long uncached_alloc_page(int starting_nid)
 {
 	unsigned long uc_addr;
-	struct gen_pool *pool;
+	struct uncached_pool *uc_pool;
 	int nid;
 
 	if (unlikely(starting_nid >= MAX_NUMNODES))
@@ -171,14 +198,14 @@ unsigned long uncached_alloc_page(int starting_nid)
 	do {
 		if (!node_online(nid))
 			continue;
-		pool = uncached_pool[nid];
-		if (pool == NULL)
+		uc_pool = &uncached_pools[nid];
+		if (uc_pool->pool == NULL)
 			continue;
 		do {
-			uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+			uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
 			if (uc_addr != 0)
 				return uc_addr;
-		} while (uncached_add_chunk(pool, nid) == 0);
+		} while (uncached_add_chunk(uc_pool, nid) == 0);
 
 	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
 
@@ -197,7 +224,7 @@ EXPORT_SYMBOL(uncached_alloc_page);
 void uncached_free_page(unsigned long uc_addr)
 {
 	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
-	struct gen_pool *pool = uncached_pool[nid];
+	struct gen_pool *pool = uncached_pools[nid].pool;
 
 	if (unlikely(pool == NULL))
 		return;
@@ -224,7 +251,7 @@ static int __init uncached_build_memmap(unsigned long uc_start,
 					 unsigned long uc_end, void *arg)
 {
 	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
-	struct gen_pool *pool = uncached_pool[nid];
+	struct gen_pool *pool = uncached_pools[nid].pool;
 	size_t size = uc_end - uc_start;
 
 	touch_softlockup_watchdog();
@@ -242,7 +269,8 @@ static int __init uncached_init(void)
 	int nid;
 
 	for_each_online_node(nid) {
-		uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
+		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
+		mutex_init(&uncached_pools[nid].add_chunk_mutex);
 	}
 
 	efi_memmap_walk_uc(uncached_build_memmap, NULL);
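The uncached allocator rework above replaces a global granule counter with a per-node uncached_pool whose add_chunk_mutex serializes the expensive cached-to-uncached conversion; the snapshot of nchunks_added taken before blocking lets a waiter notice that another thread already added a chunk and simply retry the allocation. A userspace sketch of that recheck-after-lock idiom, using pthreads and a made-up pool type:

#include <pthread.h>

struct pool {
	pthread_mutex_t lock;
	int nchunks;		/* chunks added so far */
	int max_chunks;		/* analogous to MAX_CONVERTED_CHUNKS_PER_NODE */
};

/* Returns 0 if the pool grew (or someone else grew it), -1 otherwise. */
static int pool_add_chunk(struct pool *p)
{
	int seen = p->nchunks;	/* snapshot before blocking on the lock */

	pthread_mutex_lock(&p->lock);
	if (p->nchunks > seen) {
		/* another thread added a chunk while we waited: reuse it */
		pthread_mutex_unlock(&p->lock);
		return 0;
	}
	if (p->nchunks >= p->max_chunks) {
		pthread_mutex_unlock(&p->lock);
		return -1;
	}
	/* ... allocate and convert a chunk here, under the lock ... */
	p->nchunks++;
	pthread_mutex_unlock(&p->lock);
	return 0;
}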
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index d8536a2c22a9..38fa6e49e791 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -14,7 +14,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
 lib-$(CONFIG_ITANIUM)	+= copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
-lib-$(CONFIG_MD_RAID5)	+= xor.o
+lib-$(CONFIG_MD_RAID456) += xor.o
 
 AFLAGS___divdi3.o	=
 AFLAGS___udivdi3.o	= -DUNSIGNED
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2a88cdd6d924..e004143ba86b 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -27,6 +27,7 @@
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 static unsigned long num_dma_physpages;
+static unsigned long max_gap;
 #endif
 
 /**
@@ -45,9 +46,15 @@ show_mem (void)
 
 	printk("Free swap:  %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
+	for (i = 0; i < max_mapnr; i++) {
+		if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+			if (max_gap < LARGE_GAP)
+				continue;
+			i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
 			continue;
+		}
 		total++;
 		if (PageReserved(mem_map+i))
 			reserved++;
@@ -234,7 +241,6 @@ paging_init (void)
 	unsigned long zones_size[MAX_NR_ZONES];
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 	unsigned long zholes_size[MAX_NR_ZONES];
-	unsigned long max_gap;
 #endif
 
 	/* initialize mem_map[] */
@@ -266,7 +272,6 @@ paging_init (void)
 		}
 	}
 
-	max_gap = 0;
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
@@ -277,7 +282,8 @@ paging_init (void)
 
 		/* allocate virtual_mem_map */
 
-		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+			sizeof(struct page));
 		vmalloc_end -= map_size;
 		vmem_map = (struct page *) vmalloc_end;
 		efi_memmap_walk(create_mem_map_page_table, NULL);
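The map_size change above rounds max_low_pfn up to a MAX_ORDER_NR_PAGES boundary (the same rounding appears in the discontig.c hunk below), so a trailing partial MAX_ORDER block still has struct page entries backing it, as the buddy allocator assumes. A tiny standalone illustration of the round-up, with made-up numbers:

#include <stdio.h>

/* standard power-of-two round-up, reproduced here for illustration */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long max_low_pfn = 1000000;		/* example pfn count */
	unsigned long max_order_nr_pages = 1024;	/* example block size */

	/* prints 1000448: the last, partial MAX_ORDER block is included */
	printf("%lu\n", ALIGN(max_low_pfn, max_order_nr_pages));
	return 0;
}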
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 99bd9e30db96..d260bffa01ab 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -534,68 +534,6 @@ void __cpuinit *per_cpu_init(void)
 }
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-	unsigned long end_address, hole_next_pfn;
-	unsigned long stop_address;
-
-	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
-	end_address = PAGE_ALIGN(end_address);
-
-	stop_address = (unsigned long) &vmem_map[
-		pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
-	do {
-		pgd_t *pgd;
-		pud_t *pud;
-		pmd_t *pmd;
-		pte_t *pte;
-
-		pgd = pgd_offset_k(end_address);
-		if (pgd_none(*pgd)) {
-			end_address += PGDIR_SIZE;
-			continue;
-		}
-
-		pud = pud_offset(pgd, end_address);
-		if (pud_none(*pud)) {
-			end_address += PUD_SIZE;
-			continue;
-		}
-
-		pmd = pmd_offset(pud, end_address);
-		if (pmd_none(*pmd)) {
-			end_address += PMD_SIZE;
-			continue;
-		}
-
-		pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
-		if (pte_none(*pte)) {
-			end_address += PAGE_SIZE;
-			pte++;
-			if ((end_address < stop_address) &&
-			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
-				goto retry_pte;
-			continue;
-		}
-		/* Found next valid vmem_map page */
-		break;
-	} while (end_address < stop_address);
-
-	end_address = min(end_address, stop_address);
-	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
-	hole_next_pfn = end_address / sizeof(struct page);
-	return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-	return i + 1;
-}
-#endif
-
 /**
  * show_mem - give short summary of memory stats
  *
@@ -625,7 +563,8 @@ void show_mem(void)
 			if (pfn_valid(pgdat->node_start_pfn + i))
 				page = pfn_to_page(pgdat->node_start_pfn + i);
 			else {
-				i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+					i) - 1;
 				continue;
 			}
 			if (PageReserved(page))
@@ -751,7 +690,8 @@ void __init paging_init(void)
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+		sizeof(struct page));
 	vmem_map = (struct page *) vmalloc_end;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2f50c064513c..30617ccb4f7e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -415,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data)
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+	unsigned long end_address, hole_next_pfn;
+	unsigned long stop_address;
+	pg_data_t *pgdat = NODE_DATA(node);
+
+	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+	end_address = PAGE_ALIGN(end_address);
+
+	stop_address = (unsigned long) &vmem_map[
+		pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+	do {
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pgd = pgd_offset_k(end_address);
+		if (pgd_none(*pgd)) {
+			end_address += PGDIR_SIZE;
+			continue;
+		}
+
+		pud = pud_offset(pgd, end_address);
+		if (pud_none(*pud)) {
+			end_address += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, end_address);
+		if (pmd_none(*pmd)) {
+			end_address += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+		if (pte_none(*pte)) {
+			end_address += PAGE_SIZE;
+			pte++;
+			if ((end_address < stop_address) &&
+			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+				goto retry_pte;
+			continue;
+		}
+		/* Found next valid vmem_map page */
+		break;
+	} while (end_address < stop_address);
+
+	end_address = min(end_address, stop_address);
+	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+	hole_next_pfn = end_address / sizeof(struct page);
+	return hole_next_pfn - pgdat->node_start_pfn;
+}
 
 int __init
 create_mem_map_page_table (u64 start, u64 end, void *arg)
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 07bd02b6c372..4280c074d64e 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -32,7 +32,7 @@ ioremap (unsigned long offset, unsigned long size)
 	 */
 	attr = kern_mem_attribute(offset, size);
 	if (attr & EFI_MEMORY_WB)
-		return phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(offset);
 	else if (attr & EFI_MEMORY_UC)
 		return __ioremap(offset, size);
 
@@ -43,7 +43,7 @@ ioremap (unsigned long offset, unsigned long size)
 	gran_base = GRANULEROUNDDOWN(offset);
 	gran_size = GRANULEROUNDUP(offset + size) - gran_base;
 	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
-		return phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(offset);
 
 	return __ioremap(offset, size);
 }
@@ -53,7 +53,7 @@ void __iomem *
 ioremap_nocache (unsigned long offset, unsigned long size)
 {
 	if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
-		return 0;
+		return NULL;
 
 	return __ioremap(offset, size);
 }
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index c2f69f7942af..1f3540826e68 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -279,8 +279,8 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
 		return part->reason;
 	}
 
-	bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
-				(u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
+	bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt,
+				(BTE_NORMAL | BTE_WACQUIRE), NULL);
 	if (bte_ret == BTE_SUCCESS) {
 		return xpcSuccess;
 	}
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 99b123a6421a..4d026f9dd98b 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -480,7 +480,7 @@ xpc_activating(void *__partid)
 	partid_t partid = (u64) __partid;
 	struct xpc_partition *part = &xpc_partitions[partid];
 	unsigned long irq_flags;
-	struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 };
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 	int ret;
 
 
@@ -1052,6 +1052,8 @@ xpc_do_exit(enum xpc_retval reason)
 	if (xpc_sysctl) {
 		unregister_sysctl_table(xpc_sysctl);
 	}
+
+	kfree(xpc_remote_copy_buffer_base);
 }
 
 
@@ -1212,24 +1214,20 @@ xpc_init(void)
 	partid_t partid;
 	struct xpc_partition *part;
 	pid_t pid;
+	size_t buf_size;
 
 
 	if (!ia64_platform_is("sn2")) {
 		return -ENODEV;
 	}
 
-	/*
-	 * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
-	 * various portions of a partition's reserved page. Its size is based
-	 * on the size of the reserved page header and part_nasids mask. So we
-	 * need to ensure that the other items will fit as well.
-	 */
-	if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) {
-		dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
-		return -EPERM;
-	}
-	DBUG_ON((u64) xpc_remote_copy_buffer !=
-				L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
+
+	buf_size = max(XPC_RP_VARS_SIZE,
+				XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
+	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
+				     GFP_KERNEL, &xpc_remote_copy_buffer_base);
+	if (xpc_remote_copy_buffer == NULL)
+		return -ENOMEM;
 
 	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
 	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
@@ -1293,6 +1291,8 @@ xpc_init(void)
 		if (xpc_sysctl) {
 			unregister_sysctl_table(xpc_sysctl);
 		}
+
+		kfree(xpc_remote_copy_buffer_base);
 		return -EBUSY;
 	}
 
@@ -1311,6 +1311,8 @@ xpc_init(void)
 		if (xpc_sysctl) {
 			unregister_sysctl_table(xpc_sysctl);
 		}
+
+		kfree(xpc_remote_copy_buffer_base);
 		return -EBUSY;
 	}
 
@@ -1362,6 +1364,8 @@ xpc_init(void)
 		if (xpc_sysctl) {
 			unregister_sysctl_table(xpc_sysctl);
 		}
+
+		kfree(xpc_remote_copy_buffer_base);
 		return -EBUSY;
 	}
 
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 2a89cfce4954..57c723f5cba4 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -71,19 +71,15 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
  * Generic buffer used to store a local copy of portions of a remote
  * partition's reserved page (either its header and part_nasids mask,
  * or its vars).
- *
- * xpc_discovery runs only once and is a seperate thread that is
- * very likely going to be processing in parallel with receiving
- * interrupts.
  */
-char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
-							XP_NASID_MASK_BYTES];
+char *xpc_remote_copy_buffer;
+void *xpc_remote_copy_buffer_base;
 
 
 /*
  * Guarantee that the kmalloc'd memory is cacheline aligned.
  */
-static void *
+void *
 xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 {
 	/* see if kmalloc will give us cachline aligned memory by default */
@@ -148,7 +144,7 @@ xpc_get_rsvd_page_pa(int nasid)
 		}
 	}
 
-	bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len,
+	bte_res = xp_bte_copy(rp_pa, buf, buf_len,
 			      (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 	if (bte_res != BTE_SUCCESS) {
 		dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
@@ -447,7 +443,7 @@ xpc_check_remote_hb(void)
 
 		/* pull the remote_hb cache line */
 		bres = xp_bte_copy(part->remote_vars_pa,
-					ia64_tpa((u64) remote_vars),
+					(u64) remote_vars,
 					XPC_RP_VARS_SIZE,
 					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
 		if (bres != BTE_SUCCESS) {
@@ -498,8 +494,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
 
 
 	/* pull over the reserved page header and part_nasids mask */
-
-	bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
+	bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp,
 			   XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
 			   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 	if (bres != BTE_SUCCESS) {
@@ -554,11 +549,8 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
 		return xpcVarsNotSet;
 	}
 
-
 	/* pull over the cross partition variables */
-
-	bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
-			   XPC_RP_VARS_SIZE,
+	bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE,
 			   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 	if (bres != BTE_SUCCESS) {
 		return xpc_map_bte_errors(bres);
@@ -1239,7 +1231,7 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
 
 	part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
 
-	bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
+	bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask,
 			xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 
 	return xpc_map_bte_errors(bte_res);
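With xpc_remote_copy_buffer now allocated at init time, xpc_kmalloc_cacheline_aligned() loses its static qualifier so xpc_main.c can call it. The underlying trick is the usual over-allocate-and-align: grab one extra cacheline, return the rounded-up pointer, and keep the raw pointer in *base for the eventual kfree(). A userspace sketch of that pattern, with plain malloc and an assumed 128-byte line size:

#include <stdint.h>
#include <stdlib.h>

#define L1_CACHE_BYTES 128	/* ia64 L1 cache line size (assumed here) */

static void *malloc_cacheline_aligned(size_t size, void **base)
{
	/* over-allocate by one full line so an aligned start always fits */
	*base = malloc(size + L1_CACHE_BYTES);
	if (*base == NULL)
		return NULL;

	/* round up to the next line boundary; free(*base) releases it */
	return (void *)(((uintptr_t)*base + L1_CACHE_BYTES - 1) &
			~((uintptr_t)L1_CACHE_BYTES - 1));
}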
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 17cd34284886..af7171adcd2c 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -74,7 +74,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr)
 		else
 			mmr_war_offset = 0x158;
 
-		readq_relaxed((void *)(mmr_base + mmr_war_offset));
+		readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
 	}
 }
 
@@ -92,8 +92,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
 
 	if (mmr_offset < 0x45000) {
 		if (mmr_offset == 0x100)
-			readq_relaxed((void *)(mmr_base + 0x38));
-		readq_relaxed((void *)(mmr_base + 0xb050));
+			readq_relaxed((void __iomem *)(mmr_base + 0x38));
+		readq_relaxed((void __iomem *)(mmr_base + 0xb050));
 	}
 }
 