Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/configs/sn2_defconfig   |  2
-rw-r--r--  arch/ia64/defconfig               |  2
-rw-r--r--  arch/ia64/hp/sim/simscsi.c        |  4
-rw-r--r--  arch/ia64/kernel/Makefile         |  3
-rw-r--r--  arch/ia64/kernel/efi.c            |  6
-rw-r--r--  arch/ia64/kernel/gate.lds.S       |  1
-rw-r--r--  arch/ia64/kernel/head.S           |  2
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c     |  2
-rw-r--r--  arch/ia64/kernel/kprobes.c        |  9
-rw-r--r--  arch/ia64/kernel/pal.S            | 18
-rw-r--r--  arch/ia64/kernel/palinfo.c        | 47
-rw-r--r--  arch/ia64/kernel/topology.c       |  2
-rw-r--r--  arch/ia64/kernel/traps.c          |  7
-rw-r--r--  arch/ia64/kernel/uncached.c       | 86
-rw-r--r--  arch/ia64/lib/Makefile            |  2
-rw-r--r--  arch/ia64/mm/contig.c             | 16
-rw-r--r--  arch/ia64/mm/discontig.c          | 68
-rw-r--r--  arch/ia64/mm/init.c               | 55
-rw-r--r--  arch/ia64/mm/ioremap.c            |  6
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c    |  2
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c |  6
21 files changed, 187 insertions(+), 159 deletions(-)
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 9ea35398e10d..0f14a82b856e 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -363,7 +363,7 @@ CONFIG_BLK_DEV_IDECD=y
 #
 CONFIG_IDE_GENERIC=y
 CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
+CONFIG_IDEPCI_SHARE_IRQ=y
 # CONFIG_BLK_DEV_OFFBOARD is not set
 # CONFIG_BLK_DEV_GENERIC is not set
 # CONFIG_BLK_DEV_OPTI621 is not set
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 6cba55da572a..9001b3fbaa32 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -366,7 +366,7 @@ CONFIG_BLK_DEV_IDESCSI=m
 # CONFIG_IDE_GENERIC is not set
 # CONFIG_BLK_DEV_IDEPNP is not set
 CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
+CONFIG_IDEPCI_SHARE_IRQ=y
 # CONFIG_BLK_DEV_OFFBOARD is not set
 CONFIG_BLK_DEV_GENERIC=y
 # CONFIG_BLK_DEV_OPTI621 is not set
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index a3fe97531134..8a4f0d0d17a3 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -151,7 +151,7 @@ static void
 simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
 {
         int list_len = sc->use_sg;
-        struct scatterlist *sl = (struct scatterlist *)sc->buffer;
+        struct scatterlist *sl = (struct scatterlist *)sc->request_buffer;
         struct disk_stat stat;
         struct disk_req req;
 
@@ -244,7 +244,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
 
         if (scatterlen == 0)
                 memcpy(sc->request_buffer, buf, len);
-        else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+        else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) {
                 unsigned thislen = min(len, slp->length);
 
                 memcpy(page_address(slp->page) + slp->offset, buf, thislen);
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 0e4553f320bf..ad8215a3c586 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -45,7 +45,8 @@ CPPFLAGS_gate.lds := -P -C -U$(ARCH)
 quiet_cmd_gate = GATE    $@
       cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
 
-GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1
+GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
+                     $(call ld-option, -Wl$(comma)--hash-style=sysv)
 $(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
 	$(call if_changed,gate)
 
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index e4bfa9dafbce..bb8770a177b5 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -632,7 +632,7 @@ kern_memory_descriptor (unsigned long phys_addr)
                 if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
                          return md;
         }
-        return 0;
+        return NULL;
 }
 
 static efi_memory_desc_t *
@@ -652,7 +652,7 @@ efi_memory_descriptor (unsigned long phys_addr)
                 if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
                          return md;
         }
-        return 0;
+        return NULL;
 }
 
 u32
@@ -923,7 +923,7 @@ find_memmap_space (void)
 void
 efi_memmap_init(unsigned long *s, unsigned long *e)
 {
-        struct kern_memdesc *k, *prev = 0;
+        struct kern_memdesc *k, *prev = NULL;
         u64     contig_low=0, contig_high=0;
         u64     as, ae, lim;
         void *efi_map_start, *efi_map_end, *p, *q;
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index cc35cddfd4cf..6d198339bf85 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -12,6 +12,7 @@ SECTIONS
   . = GATE_ADDR + SIZEOF_HEADERS;
 
   .hash                 : { *(.hash) }            :readable
+  .gnu.hash             : { *(.gnu.hash) }
   .dynsym               : { *(.dynsym) }
   .dynstr               : { *(.dynstr) }
   .gnu.version          : { *(.gnu.version) }
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 561b8f1d3bc7..29236f0c62b5 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -853,7 +853,6 @@ END(__ia64_init_fpu)
  */
 GLOBAL_ENTRY(ia64_switch_mode_phys)
  {
-	alloc r2=ar.pfs,0,0,0,0
	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
	mov r15=ip
  }
@@ -902,7 +901,6 @@ END(ia64_switch_mode_phys)
  */
 GLOBAL_ENTRY(ia64_switch_mode_virt)
  {
-	alloc r2=ar.pfs,0,0,0,0
	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
	mov r15=ip
  }
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b7cf651ceb14..3ead20fb6f4b 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(__udivdi3);
 EXPORT_SYMBOL(__moddi3);
 EXPORT_SYMBOL(__umoddi3);
 
-#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
 extern void xor_ia64_2(void);
 extern void xor_ia64_3(void);
 extern void xor_ia64_4(void);
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 00d9c83b8020..781960f80b6f 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -448,11 +448,20 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
         return 0;
 }
 
+void __kprobes flush_insn_slot(struct kprobe *p)
+{
+        unsigned long arm_addr;
+
+        arm_addr = ((unsigned long)&p->opcode.bundle) & ~0xFULL;
+        flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
         unsigned long addr = (unsigned long)p->addr;
         unsigned long arm_addr = addr & ~0xFULL;
 
+        flush_insn_slot(p);
         memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
         flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 5018c7f2e7a8..ebaf1e685f5e 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -217,12 +217,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
	.body
	;;
	ld8 loc2 = [loc2]		// loc2 <- entry point
-	mov out0 = in0		// first argument
-	mov out1 = in1		// copy arg2
-	mov out2 = in2		// copy arg3
-	mov out3 = in3		// copy arg3
-	;;
-	mov loc3 = psr		// save psr
+	mov loc3 = psr		// save psr
	;;
	mov loc4=ar.rsc			// save RSE configuration
	dep.z loc2=loc2,0,61		// convert pal entry point to physical
@@ -236,18 +231,23 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
	;;
	andcm r16=loc3,r16		// removes bits to clear from psr
	br.call.sptk.many rp=ia64_switch_mode_phys
-.ret6:
+
+	mov out0 = in0		// first argument
+	mov out1 = in1		// copy arg2
+	mov out2 = in2		// copy arg3
+	mov out3 = in3		// copy arg3
	mov loc5 = r19
	mov loc6 = r20
+
	br.call.sptk.many rp=b7		// now make the call
-.ret7:
+
	mov ar.rsc=0		// put RSE in enforced lazy, LE mode
	mov r16=loc3		// r16= original psr
	mov r19=loc5
	mov r20=loc6
	br.call.sptk.many rp=ia64_switch_mode_virt	// return to virtual mode
 
-.ret8:	mov psr.l = loc3		// restore init PSR
+	mov psr.l = loc3		// restore init PSR
	mov ar.pfs = loc1
	mov rp = loc0
	;;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 3f5bac59209a..0b546e2b36ac 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -566,29 +566,23 @@ version_info(char *page)
         pal_version_u_t min_ver, cur_ver;
         char *p = page;
 
-        /* The PAL_VERSION call is advertised as being able to support
-         * both physical and virtual mode calls. This seems to be a documentation
-         * bug rather than firmware bug. In fact, it does only support physical mode.
-         * So now the code reflects this fact and the pal_version() has been updated
-         * accordingly.
-         */
-        if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
+        if (ia64_pal_version(&min_ver, &cur_ver) != 0)
+                return 0;
 
         p += sprintf(p,
                      "PAL_vendor : 0x%02x (min=0x%02x)\n"
-                     "PAL_A      : %x.%x.%x (min=%x.%x.%x)\n"
-                     "PAL_B      : %x.%x.%x (min=%x.%x.%x)\n",
-                     cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,
-
-                     cur_ver.pal_version_s.pv_pal_a_model>>4,
-                     cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
-                     min_ver.pal_version_s.pv_pal_a_model>>4,
-                     min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,
-
-                     cur_ver.pal_version_s.pv_pal_b_model>>4,
-                     cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
-                     min_ver.pal_version_s.pv_pal_b_model>>4,
-                     min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
+                     "PAL_A      : %02x.%02x (min=%02x.%02x)\n"
+                     "PAL_B      : %02x.%02x (min=%02x.%02x)\n",
+                     cur_ver.pal_version_s.pv_pal_vendor,
+                     min_ver.pal_version_s.pv_pal_vendor,
+                     cur_ver.pal_version_s.pv_pal_a_model,
+                     cur_ver.pal_version_s.pv_pal_a_rev,
+                     min_ver.pal_version_s.pv_pal_a_model,
+                     min_ver.pal_version_s.pv_pal_a_rev,
+                     cur_ver.pal_version_s.pv_pal_b_model,
+                     cur_ver.pal_version_s.pv_pal_b_rev,
+                     min_ver.pal_version_s.pv_pal_b_model,
+                     min_ver.pal_version_s.pv_pal_b_rev);
         return p - page;
 }
 
@@ -958,9 +952,9 @@ remove_palinfo_proc_entries(unsigned int hcpu)
         }
 }
 
-static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
-                                          unsigned long action,
-                                          void *hcpu)
+#ifdef CONFIG_HOTPLUG_CPU
+static int palinfo_cpu_callback(struct notifier_block *nfb,
+                                unsigned long action, void *hcpu)
 {
         unsigned int hotcpu = (unsigned long)hcpu;
 
@@ -968,20 +962,19 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
         case CPU_ONLINE:
                 create_palinfo_proc_entries(hotcpu);
                 break;
-#ifdef CONFIG_HOTPLUG_CPU
         case CPU_DEAD:
                 remove_palinfo_proc_entries(hotcpu);
                 break;
-#endif
         }
         return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
+static struct notifier_block palinfo_cpu_notifier =
 {
         .notifier_call = palinfo_cpu_callback,
         .priority = 0,
 };
+#endif
 
 static int __init
 palinfo_init(void)
@@ -1020,7 +1013,7 @@ palinfo_exit(void)
         /*
          * Unregister from cpu notifier callbacks
          */
-        unregister_cpu_notifier(&palinfo_cpu_notifier);
+        unregister_hotcpu_notifier(&palinfo_cpu_notifier);
 }
 
 module_init(palinfo_init);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index b146f1cfad31..d24fa393b182 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -435,7 +435,7 @@ static int __cpuinit cache_sysfs_init(void)
                                 (void *)(long)i);
         }
 
-        register_cpu_notifier(&cache_cpu_notifier);
+        register_hotcpu_notifier(&cache_cpu_notifier);
 
         return 0;
 }
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e7bbb0f40aa2..5a0420464c6c 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -117,11 +117,8 @@ die (const char *str, struct pt_regs *regs, long err)
         die.lock_owner = -1;
         spin_unlock_irq(&die.lock);
 
-        if (panic_on_oops) {
-                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
-                ssleep(5);
-                panic("Fatal exception");
-        }
+        if (panic_on_oops)
+                panic("Fatal exception: panic_on_oops");
 
         do_exit(SIGSEGV);
 }
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 5f03b9e524dd..4c73a6763669 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -32,32 +32,38 @@
 
 extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
 
-#define MAX_UNCACHED_GRANULES 5
-static int allocated_granules;
+struct uncached_pool {
+        struct gen_pool *pool;
+        struct mutex add_chunk_mutex;   /* serialize adding a converted chunk */
+        int nchunks_added;              /* #of converted chunks added to pool */
+        atomic_t status;                /* smp called function's return status*/
+};
+
+#define MAX_CONVERTED_CHUNKS_PER_NODE   2
 
-struct gen_pool *uncached_pool[MAX_NUMNODES];
+struct uncached_pool uncached_pools[MAX_NUMNODES];
 
 
 static void uncached_ipi_visibility(void *data)
 {
         int status;
+        struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
         status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
         if ((status != PAL_VISIBILITY_OK) &&
             (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
-                printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
-                       "CPU %i\n", status, raw_smp_processor_id());
+                atomic_inc(&uc_pool->status);
 }
 
 
 static void uncached_ipi_mc_drain(void *data)
 {
         int status;
+        struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
         status = ia64_pal_mc_drain();
-        if (status)
-                printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
-                       "CPU %i\n", status, raw_smp_processor_id());
+        if (status != PAL_STATUS_SUCCESS)
+                atomic_inc(&uc_pool->status);
 }
 
 
@@ -70,21 +76,34 @@ static void uncached_ipi_mc_drain(void *data)
  * This is accomplished by first allocating a granule of cached memory pages
  * and then converting them to uncached memory pages.
  */
-static int uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 {
         struct page *page;
-        int status, i;
+        int status, i, nchunks_added = uc_pool->nchunks_added;
         unsigned long c_addr, uc_addr;
 
-        if (allocated_granules >= MAX_UNCACHED_GRANULES)
+        if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
+                return -1;      /* interrupted by a signal */
+
+        if (uc_pool->nchunks_added > nchunks_added) {
+                /* someone added a new chunk while we were waiting */
+                mutex_unlock(&uc_pool->add_chunk_mutex);
+                return 0;
+        }
+
+        if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
+                mutex_unlock(&uc_pool->add_chunk_mutex);
                 return -1;
+        }
 
         /* attempt to allocate a granule's worth of cached memory pages */
 
         page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
                                 IA64_GRANULE_SHIFT-PAGE_SHIFT);
-        if (!page)
+        if (!page) {
+                mutex_unlock(&uc_pool->add_chunk_mutex);
                 return -1;
+        }
 
         /* convert the memory pages from cached to uncached */
 
@@ -102,11 +121,14 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
         flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);
 
         status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-        if (!status) {
-                status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
-                if (status)
+        if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+                atomic_set(&uc_pool->status, 0);
+                status = smp_call_function(uncached_ipi_visibility, uc_pool,
+                                           0, 1);
+                if (status || atomic_read(&uc_pool->status))
                         goto failed;
-        }
+        } else if (status != PAL_VISIBILITY_OK)
+                goto failed;
 
         preempt_disable();
 
@@ -120,20 +142,24 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
 
         preempt_enable();
 
-        ia64_pal_mc_drain();
-        status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
-        if (status)
+        status = ia64_pal_mc_drain();
+        if (status != PAL_STATUS_SUCCESS)
+                goto failed;
+        atomic_set(&uc_pool->status, 0);
+        status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+        if (status || atomic_read(&uc_pool->status))
                 goto failed;
 
         /*
          * The chunk of memory pages has been converted to uncached so now we
          * can add it to the pool.
          */
-        status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+        status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
         if (status)
                 goto failed;
 
-        allocated_granules++;
+        uc_pool->nchunks_added++;
+        mutex_unlock(&uc_pool->add_chunk_mutex);
         return 0;
 
         /* failed to convert or add the chunk so give it back to the kernel */
@@ -142,6 +168,7 @@ failed:
                 ClearPageUncached(&page[i]);
 
         free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+        mutex_unlock(&uc_pool->add_chunk_mutex);
         return -1;
 }
 
@@ -158,7 +185,7 @@ failed:
 unsigned long uncached_alloc_page(int starting_nid)
 {
         unsigned long uc_addr;
-        struct gen_pool *pool;
+        struct uncached_pool *uc_pool;
         int nid;
 
         if (unlikely(starting_nid >= MAX_NUMNODES))
@@ -171,14 +198,14 @@ unsigned long uncached_alloc_page(int starting_nid)
         do {
                 if (!node_online(nid))
                         continue;
-                pool = uncached_pool[nid];
-                if (pool == NULL)
+                uc_pool = &uncached_pools[nid];
+                if (uc_pool->pool == NULL)
                         continue;
                 do {
-                        uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+                        uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
                         if (uc_addr != 0)
                                 return uc_addr;
-                } while (uncached_add_chunk(pool, nid) == 0);
+                } while (uncached_add_chunk(uc_pool, nid) == 0);
 
         } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
 
@@ -197,7 +224,7 @@ EXPORT_SYMBOL(uncached_alloc_page);
 void uncached_free_page(unsigned long uc_addr)
 {
         int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
-        struct gen_pool *pool = uncached_pool[nid];
+        struct gen_pool *pool = uncached_pools[nid].pool;
 
         if (unlikely(pool == NULL))
                 return;
@@ -224,7 +251,7 @@ static int __init uncached_build_memmap(unsigned long uc_start,
                                         unsigned long uc_end, void *arg)
 {
         int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
-        struct gen_pool *pool = uncached_pool[nid];
+        struct gen_pool *pool = uncached_pools[nid].pool;
         size_t size = uc_end - uc_start;
 
         touch_softlockup_watchdog();
@@ -242,7 +269,8 @@ static int __init uncached_init(void)
         int nid;
 
         for_each_online_node(nid) {
-                uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
+                uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
+                mutex_init(&uncached_pools[nid].add_chunk_mutex);
         }
 
         efi_memmap_walk_uc(uncached_build_memmap, NULL);
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index d8536a2c22a9..38fa6e49e791 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -14,7 +14,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
 lib-$(CONFIG_ITANIUM)	+= copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
-lib-$(CONFIG_MD_RAID5)	+= xor.o
+lib-$(CONFIG_MD_RAID456)	+= xor.o
 
 AFLAGS___divdi3.o	=
 AFLAGS___udivdi3.o	= -DUNSIGNED
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2a88cdd6d924..e004143ba86b 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -27,6 +27,7 @@
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 static unsigned long num_dma_physpages;
+static unsigned long max_gap;
 #endif
 
 /**
@@ -45,9 +46,15 @@ show_mem (void)
 
         printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
         i = max_mapnr;
-        while (i-- > 0) {
-                if (!pfn_valid(i))
+        for (i = 0; i < max_mapnr; i++) {
+                if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+                        if (max_gap < LARGE_GAP)
+                                continue;
+                        i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
                         continue;
+                }
                 total++;
                 if (PageReserved(mem_map+i))
                         reserved++;
@@ -234,7 +241,6 @@ paging_init (void)
         unsigned long zones_size[MAX_NR_ZONES];
 #ifdef CONFIG_VIRTUAL_MEM_MAP
         unsigned long zholes_size[MAX_NR_ZONES];
-        unsigned long max_gap;
 #endif
 
         /* initialize mem_map[] */
@@ -266,7 +272,6 @@
                 }
         }
 
-        max_gap = 0;
         efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
         if (max_gap < LARGE_GAP) {
                 vmem_map = (struct page *) 0;
@@ -277,7 +282,8 @@
 
                 /* allocate virtual_mem_map */
 
-                map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+                map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+                        sizeof(struct page));
                 vmalloc_end -= map_size;
                 vmem_map = (struct page *) vmalloc_end;
                 efi_memmap_walk(create_mem_map_page_table, NULL);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 99bd9e30db96..d260bffa01ab 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -534,68 +534,6 @@ void __cpuinit *per_cpu_init(void)
 }
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-        unsigned long end_address, hole_next_pfn;
-        unsigned long stop_address;
-
-        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
-        end_address = PAGE_ALIGN(end_address);
-
-        stop_address = (unsigned long) &vmem_map[
-                pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
-        do {
-                pgd_t *pgd;
-                pud_t *pud;
-                pmd_t *pmd;
-                pte_t *pte;
-
-                pgd = pgd_offset_k(end_address);
-                if (pgd_none(*pgd)) {
-                        end_address += PGDIR_SIZE;
-                        continue;
-                }
-
-                pud = pud_offset(pgd, end_address);
-                if (pud_none(*pud)) {
-                        end_address += PUD_SIZE;
-                        continue;
-                }
-
-                pmd = pmd_offset(pud, end_address);
-                if (pmd_none(*pmd)) {
-                        end_address += PMD_SIZE;
-                        continue;
-                }
-
-                pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
-                if (pte_none(*pte)) {
-                        end_address += PAGE_SIZE;
-                        pte++;
-                        if ((end_address < stop_address) &&
-                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
-                                goto retry_pte;
-                        continue;
-                }
-                /* Found next valid vmem_map page */
-                break;
-        } while (end_address < stop_address);
-
-        end_address = min(end_address, stop_address);
-        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
-        hole_next_pfn = end_address / sizeof(struct page);
-        return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-        return i + 1;
-}
-#endif
-
 /**
  * show_mem - give short summary of memory stats
  *
@@ -625,7 +563,8 @@ void show_mem(void)
                         if (pfn_valid(pgdat->node_start_pfn + i))
                                 page = pfn_to_page(pgdat->node_start_pfn + i);
                         else {
-                                i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+                                i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+                                         i) - 1;
                                 continue;
                         }
                         if (PageReserved(page))
@@ -751,7 +690,8 @@ void __init paging_init(void)
         efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-        vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+        vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+                sizeof(struct page));
         vmem_map = (struct page *) vmalloc_end;
         efi_memmap_walk(create_mem_map_page_table, NULL);
         printk("Virtual mem_map starts at 0x%p\n", vmem_map);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2f50c064513c..30617ccb4f7e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -415,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data)
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+        unsigned long end_address, hole_next_pfn;
+        unsigned long stop_address;
+        pg_data_t *pgdat = NODE_DATA(node);
+
+        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+        end_address = PAGE_ALIGN(end_address);
+
+        stop_address = (unsigned long) &vmem_map[
+                pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+        do {
+                pgd_t *pgd;
+                pud_t *pud;
+                pmd_t *pmd;
+                pte_t *pte;
+
+                pgd = pgd_offset_k(end_address);
+                if (pgd_none(*pgd)) {
+                        end_address += PGDIR_SIZE;
+                        continue;
+                }
+
+                pud = pud_offset(pgd, end_address);
+                if (pud_none(*pud)) {
+                        end_address += PUD_SIZE;
+                        continue;
+                }
+
+                pmd = pmd_offset(pud, end_address);
+                if (pmd_none(*pmd)) {
+                        end_address += PMD_SIZE;
+                        continue;
+                }
+
+                pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+                if (pte_none(*pte)) {
+                        end_address += PAGE_SIZE;
+                        pte++;
+                        if ((end_address < stop_address) &&
+                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+                                goto retry_pte;
+                        continue;
+                }
+                /* Found next valid vmem_map page */
+                break;
+        } while (end_address < stop_address);
+
+        end_address = min(end_address, stop_address);
+        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+        hole_next_pfn = end_address / sizeof(struct page);
+        return hole_next_pfn - pgdat->node_start_pfn;
+}
 
 int __init
 create_mem_map_page_table (u64 start, u64 end, void *arg)
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 07bd02b6c372..4280c074d64e 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -32,7 +32,7 @@ ioremap (unsigned long offset, unsigned long size)
          */
         attr = kern_mem_attribute(offset, size);
         if (attr & EFI_MEMORY_WB)
-                return phys_to_virt(offset);
+                return (void __iomem *) phys_to_virt(offset);
         else if (attr & EFI_MEMORY_UC)
                 return __ioremap(offset, size);
 
@@ -43,7 +43,7 @@ ioremap (unsigned long offset, unsigned long size)
         gran_base = GRANULEROUNDDOWN(offset);
         gran_size = GRANULEROUNDUP(offset + size) - gran_base;
         if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
-                return phys_to_virt(offset);
+                return (void __iomem *) phys_to_virt(offset);
 
         return __ioremap(offset, size);
 }
@@ -53,7 +53,7 @@ void __iomem *
 ioremap_nocache (unsigned long offset, unsigned long size)
 {
         if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
-                return 0;
+                return NULL;
 
         return __ioremap(offset, size);
 }
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 99b123a6421a..5e8e59efb347 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -480,7 +480,7 @@ xpc_activating(void *__partid)
         partid_t partid = (u64) __partid;
         struct xpc_partition *part = &xpc_partitions[partid];
         unsigned long irq_flags;
-        struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 };
+        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
         int ret;
 
 
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 17cd34284886..af7171adcd2c 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -74,7 +74,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr)
                 else
                         mmr_war_offset = 0x158;
 
-                readq_relaxed((void *)(mmr_base + mmr_war_offset));
+                readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
         }
 }
 
@@ -92,8 +92,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
 
         if (mmr_offset < 0x45000) {
                 if (mmr_offset == 0x100)
-                        readq_relaxed((void *)(mmr_base + 0x38));
-                readq_relaxed((void *)(mmr_base + 0xb050));
+                        readq_relaxed((void __iomem *)(mmr_base + 0x38));
+                readq_relaxed((void __iomem *)(mmr_base + 0xb050));
         }
 }
 