Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                      2
-rw-r--r--  arch/ia64/Makefile                     2
-rw-r--r--  arch/ia64/kernel/asm-offsets.c        16
-rw-r--r--  arch/ia64/kernel/efi.c               156
-rw-r--r--  arch/ia64/kernel/efi_stub.S            2
-rw-r--r--  arch/ia64/kernel/entry.h               1
-rw-r--r--  arch/ia64/kernel/mca_asm.S            28
-rw-r--r--  arch/ia64/kernel/sal.c                 6
-rw-r--r--  arch/ia64/kernel/setup.c               1
-rw-r--r--  arch/ia64/mm/ioremap.c                27
-rw-r--r--  arch/ia64/pci/pci.c                   17
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c   50
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c      4
13 files changed, 205 insertions, 107 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index fbb25b00629b..18318749884b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -449,6 +449,8 @@ config PCI_DOMAINS
 	bool
 	default PCI
 
+source "drivers/pci/pcie/Kconfig"
+
 source "drivers/pci/Kconfig"
 
 source "drivers/pci/hotplug/Kconfig"
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 80ea7506fa1a..21033ed83307 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -71,6 +71,8 @@ all: compressed unwcheck
 
 compressed: vmlinux.gz
 
+vmlinuz: vmlinux.gz
+
 vmlinux.gz: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $@
 
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 77225659e968..16e7b6600ae6 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -217,16 +217,24 @@ void foo(void)
 	DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
 	       offsetof (struct ia64_mca_cpu, init_stack));
 	BLANK();
-	DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET,
-	       offsetof (struct ia64_sal_os_state, sal_ra));
 	DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
 	       offsetof (struct ia64_sal_os_state, os_gp));
-	DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
-	       offsetof (struct ia64_sal_os_state, pal_min_state));
 	DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
 	       offsetof (struct ia64_sal_os_state, proc_state_param));
+	DEFINE(IA64_SAL_OS_STATE_SAL_RA_OFFSET,
+	       offsetof (struct ia64_sal_os_state, sal_ra));
+	DEFINE(IA64_SAL_OS_STATE_SAL_GP_OFFSET,
+	       offsetof (struct ia64_sal_os_state, sal_gp));
+	DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
+	       offsetof (struct ia64_sal_os_state, pal_min_state));
+	DEFINE(IA64_SAL_OS_STATE_OS_STATUS_OFFSET,
+	       offsetof (struct ia64_sal_os_state, os_status));
+	DEFINE(IA64_SAL_OS_STATE_CONTEXT_OFFSET,
+	       offsetof (struct ia64_sal_os_state, context));
 	DEFINE(IA64_SAL_OS_STATE_SIZE,
 	       sizeof (struct ia64_sal_os_state));
+	BLANK();
+
 	DEFINE(IA64_PMSA_GR_OFFSET,
 	       offsetof (struct pal_min_state_area_s, pmsa_gr));
 	DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
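
For background on how these constants reach the assembly further down: asm-offsets.c is never linked into the kernel; each DEFINE() is turned into a preprocessor constant in the generated asm-offsets header, and entry.h (changed later in this diff) wraps the SAL-OS-state names in a short SOS() macro. A rough sketch of the mechanism, with made-up example values only:

/* Illustrative only -- the real constants are generated at build time
 * from the DEFINE() entries above; the numeric values here are examples. */
#define IA64_SAL_OS_STATE_SAL_RA_OFFSET		8	/* offsetof(struct ia64_sal_os_state, sal_ra) */
#define IA64_SAL_OS_STATE_SAL_GP_OFFSET		16	/* offsetof(struct ia64_sal_os_state, sal_gp) */

/* arch/ia64/kernel/entry.h then shortens the spelling for assembly use: */
#define SOS(f)	(IA64_SAL_OS_STATE_##f##_OFFSET)

/* so mca_asm.S can write, e.g.:  add temp1=SOS(SAL_RA), regs */
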
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 12cfedce73b1..c33d0ba7e300 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -8,6 +8,8 @@
  * Copyright (C) 1999-2003 Hewlett-Packard Co.
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  *	Stephane Eranian <eranian@hpl.hp.com>
+ * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
  * All EFI Runtime Services are not implemented yet as EFI only
  * supports physical mode addressing on SoftSDV. This is to be fixed
@@ -622,28 +624,20 @@ efi_get_iobase (void)
 	return 0;
 }
 
-static efi_memory_desc_t *
-efi_memory_descriptor (unsigned long phys_addr)
+static struct kern_memdesc *
+kern_memory_descriptor (unsigned long phys_addr)
 {
-	void *efi_map_start, *efi_map_end, *p;
-	efi_memory_desc_t *md;
-	u64 efi_desc_size;
-
-	efi_map_start = __va(ia64_boot_param->efi_memmap);
-	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
-	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+	struct kern_memdesc *md;
 
-	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-		md = p;
-
-		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
+	for (md = kern_memmap; md->start != ~0UL; md++) {
+		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
 			return md;
 	}
 	return 0;
 }
 
-static int
-efi_memmap_has_mmio (void)
+static efi_memory_desc_t *
+efi_memory_descriptor (unsigned long phys_addr)
 {
 	void *efi_map_start, *efi_map_end, *p;
 	efi_memory_desc_t *md;
@@ -656,8 +650,8 @@ efi_memmap_has_mmio (void)
 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 		md = p;
 
-		if (md->type == EFI_MEMORY_MAPPED_IO)
-			return 1;
+		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
+			return md;
 	}
 	return 0;
 }
@@ -683,71 +677,125 @@ efi_mem_attributes (unsigned long phys_addr)
 }
 EXPORT_SYMBOL(efi_mem_attributes);
 
-/*
- * Determines whether the memory at phys_addr supports the desired
- * attribute (WB, UC, etc).  If this returns 1, the caller can safely
- * access size bytes at phys_addr with the specified attribute.
- */
-int
-efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, u64 attr)
+u64
+efi_mem_attribute (unsigned long phys_addr, unsigned long size)
 {
 	unsigned long end = phys_addr + size;
 	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+	u64 attr;
+
+	if (!md)
+		return 0;
+
+	/*
+	 * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
+	 * the kernel that firmware needs this region mapped.
+	 */
+	attr = md->attribute & ~EFI_MEMORY_RUNTIME;
+	do {
+		unsigned long md_end = efi_md_end(md);
+
+		if (end <= md_end)
+			return attr;
+
+		md = efi_memory_descriptor(md_end);
+		if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
+			return 0;
+	} while (md);
+	return 0;
+}
+
+u64
+kern_mem_attribute (unsigned long phys_addr, unsigned long size)
+{
+	unsigned long end = phys_addr + size;
+	struct kern_memdesc *md;
+	u64 attr;
 
 	/*
-	 * Some firmware doesn't report MMIO regions in the EFI memory
-	 * map.  The Intel BigSur (a.k.a. HP i2000) has this problem.
-	 * On those platforms, we have to assume UC is valid everywhere.
+	 * This is a hack for ioremap calls before we set up kern_memmap.
+	 * Maybe we should do efi_memmap_init() earlier instead.
 	 */
-	if (!md || (md->attribute & attr) != attr) {
-		if (attr == EFI_MEMORY_UC && !efi_memmap_has_mmio())
-			return 1;
+	if (!kern_memmap) {
+		attr = efi_mem_attribute(phys_addr, size);
+		if (attr & EFI_MEMORY_WB)
+			return EFI_MEMORY_WB;
 		return 0;
 	}
 
+	md = kern_memory_descriptor(phys_addr);
+	if (!md)
+		return 0;
+
+	attr = md->attribute;
 	do {
-		unsigned long md_end = efi_md_end(md);
+		unsigned long md_end = kmd_end(md);
 
 		if (end <= md_end)
-			return 1;
+			return attr;
 
-		md = efi_memory_descriptor(md_end);
-		if (!md || (md->attribute & attr) != attr)
+		md = kern_memory_descriptor(md_end);
+		if (!md || md->attribute != attr)
 			return 0;
 	} while (md);
 	return 0;
 }
+EXPORT_SYMBOL(kern_mem_attribute);
 
-/*
- * For /dev/mem, we only allow read & write system calls to access
- * write-back memory, because read & write don't allow the user to
- * control access size.
- */
 int
 valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
 {
-	return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB);
+	u64 attr;
+
+	/*
+	 * /dev/mem reads and writes use copy_to_user(), which implicitly
+	 * uses a granule-sized kernel identity mapping.  It's really
+	 * only safe to do this for regions in kern_memmap.  For more
+	 * details, see Documentation/ia64/aliasing.txt.
+	 */
+	attr = kern_mem_attribute(phys_addr, size);
+	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
+		return 1;
+	return 0;
 }
 
-/*
- * We allow mmap of anything in the EFI memory map that supports
- * either write-back or uncacheable access.  For uncacheable regions,
- * the supported access sizes are system-dependent, and the user is
- * responsible for using the correct size.
- *
- * Note that this doesn't currently allow access to hot-added memory,
- * because that doesn't appear in the boot-time EFI memory map.
- */
 int
 valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long size)
 {
-	if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB))
-		return 1;
-
-	if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC))
-		return 1;
-
-	return 0;
+	/*
+	 * MMIO regions are often missing from the EFI memory map.
+	 * We must allow mmap of them for programs like X, so we
+	 * currently can't do any useful validation.
+	 */
+	return 1;
+}
+
+pgprot_t
+phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
+		     pgprot_t vma_prot)
+{
+	unsigned long phys_addr = pfn << PAGE_SHIFT;
+	u64 attr;
+
+	/*
+	 * For /dev/mem mmap, we use user mappings, but if the region is
+	 * in kern_memmap (and hence may be covered by a kernel mapping),
+	 * we must use the same attribute as the kernel mapping.
+	 */
+	attr = kern_mem_attribute(phys_addr, size);
+	if (attr & EFI_MEMORY_WB)
+		return pgprot_cacheable(vma_prot);
+	else if (attr & EFI_MEMORY_UC)
+		return pgprot_noncached(vma_prot);
+
+	/*
+	 * Some chipsets don't support UC access to memory.  If
+	 * WB is supported, we prefer that.
+	 */
+	if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
+		return pgprot_cacheable(vma_prot);
+
+	return pgprot_noncached(vma_prot);
 }
 
 int __init
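
The main consumer of valid_phys_addr_range() above is the generic /dev/mem read/write path, which gates the access and then copies through the kernel identity mapping. A simplified, hypothetical sketch of such a caller (not the actual drivers/char/mem.c code; partial-copy and alignment handling omitted):

/* Hypothetical caller: refuse anything kern_mem_attribute() cannot vouch
 * for, then copy via the granule-sized kernel identity mapping. */
static ssize_t demo_read_phys(unsigned long phys_addr, char __user *buf,
			      size_t count)
{
	void *vaddr;

	if (!valid_phys_addr_range(phys_addr, count))
		return -EFAULT;

	vaddr = __va(phys_addr);
	if (copy_to_user(buf, vaddr, count))
		return -EFAULT;
	return count;
}
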
diff --git a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S
index 5a7fe70212a9..a56e161d7515 100644
--- a/arch/ia64/kernel/efi_stub.S
+++ b/arch/ia64/kernel/efi_stub.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(efi_call_phys)
 	or loc3=loc3,r17
 	mov b6=r2
 	;;
-	andcm r16=loc3,r16	// get psr with IT, DT, and RT bits cleared
+	andcm r16=loc3,r16	// get psr with IT, DT, and RT bits cleared
 	br.call.sptk.many rp=ia64_switch_mode_phys
 .ret0:	mov out4=in5
 	mov out0=in1
diff --git a/arch/ia64/kernel/entry.h b/arch/ia64/kernel/entry.h
index 78eeb0793419..ebc3dfb88826 100644
--- a/arch/ia64/kernel/entry.h
+++ b/arch/ia64/kernel/entry.h
@@ -23,6 +23,7 @@
 
 #define PT(f)		(IA64_PT_REGS_##f##_OFFSET)
 #define SW(f)		(IA64_SWITCH_STACK_##f##_OFFSET)
+#define SOS(f)		(IA64_SAL_OS_STATE_##f##_OFFSET)
 
 #define PT_REGS_SAVES(off)			\
 	.unwabi 3, 'i';				\
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 6dff024cd62b..c1bd1feffab0 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -159,7 +159,7 @@ ia64_os_mca_spin:
 	GET_IA64_MCA_DATA(r2)
 	// Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
 	;;
-	add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2
+	add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2
 	;;
 	ld8 r18=[r3]		// Get processor state parameter on existing PALE_CHECK.
 	;;
@@ -479,9 +479,11 @@ ia64_state_save:
 	st8 [temp2]=r11,16	// rv_rc
 	mov r11=cr.iipa
 	;;
-	st8 [temp1]=r18,16	// proc_state_param
-	st8 [temp2]=r19,16	// monarch
+	st8 [temp1]=r18		// proc_state_param
+	st8 [temp2]=r19		// monarch
 	mov r6=IA64_KR(CURRENT)
+	add temp1=SOS(SAL_RA), regs
+	add temp2=SOS(SAL_GP), regs
 	;;
 	st8 [temp1]=r12,16	// sal_ra
 	st8 [temp2]=r10,16	// sal_gp
@@ -503,12 +505,14 @@ ia64_state_save:
 	st8 [temp2]=r11,16	// cr.iipa
 	mov r12=cr.iim
 	;;
-	st8 [temp1]=r12,16	// cr.iim
+	st8 [temp1]=r12		// cr.iim
 (p1)	mov r12=IA64_MCA_COLD_BOOT
 (p2)	mov r12=IA64_INIT_WARM_BOOT
 	mov r6=cr.iha
+	add temp1=SOS(OS_STATUS), regs
 	;;
-	st8 [temp2]=r6,16	// cr.iha
+	st8 [temp2]=r6		// cr.iha
+	add temp2=SOS(CONTEXT), regs
 	st8 [temp1]=r12		// os_status, default is cold boot
 	mov r6=IA64_MCA_SAME_CONTEXT
 	;;
@@ -820,8 +824,8 @@ ia64_state_restore:
 	// Restore the SAL to OS state. The previous code left regs at pt_regs.
 	add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
 	;;
-	add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs
-	add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs
+	add temp1=SOS(SAL_RA), regs
+	add temp2=SOS(SAL_GP), regs
 	;;
 	ld8 r12=[temp1],16	// sal_ra
 	ld8 r9=[temp2],16	// sal_gp
@@ -842,8 +846,10 @@ ia64_state_restore:
 	;;
 	mov cr.itir=temp3
 	mov cr.iipa=temp4
-	ld8 temp3=[temp1],16	// cr.iim
-	ld8 temp4=[temp2],16	// cr.iha
+	ld8 temp3=[temp1]	// cr.iim
+	ld8 temp4=[temp2]	// cr.iha
+	add temp1=SOS(OS_STATUS), regs
+	add temp2=SOS(CONTEXT), regs
 	;;
 	mov cr.iim=temp3
 	mov cr.iha=temp4
@@ -916,7 +922,7 @@ ia64_state_restore:
 
 ia64_new_stack:
 	add regs=MCA_PT_REGS_OFFSET, r3
-	add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3
+	add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3
 	mov b0=r2		// save return address
 	GET_IA64_MCA_DATA(temp1)
 	invala
@@ -1020,7 +1026,7 @@ ia64_old_stack:
 
 ia64_set_kernel_registers:
 	add temp3=MCA_SP_OFFSET, r3
-	add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3
+	add temp4=MCA_SOS_OFFSET+SOS(OS_GP), r3
 	mov b0=r2		// save return address
 	GET_IA64_MCA_DATA(temp1)
 	;;
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 056f7a6eedc7..77fa65903d94 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -227,7 +227,7 @@ static int sal_cache_flush_drops_interrupts;
 static void __init
 check_sal_cache_flush (void)
 {
-	unsigned long flags, itv;
+	unsigned long flags;
 	int cpu;
 	u64 vector;
 
@@ -238,9 +238,6 @@ check_sal_cache_flush (void)
 	 * Schedule a timer interrupt, wait until it's reported, and see if
 	 * SAL_CACHE_FLUSH drops it.
 	 */
-	itv = ia64_get_itv();
-	BUG_ON((itv & (1 << 16)) == 0);
-
 	ia64_set_itv(IA64_TIMER_VECTOR);
 	ia64_set_itm(ia64_get_itc() + 1000);
 
@@ -260,7 +257,6 @@ check_sal_cache_flush (void)
 		ia64_eoi();
 	}
 
-	ia64_set_itv(itv);
 	local_irq_restore(flags);
 	put_cpu();
 }
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index e4dfda1eb7dd..6dba2d63f24d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -260,6 +260,7 @@ reserve_memory (void)
 	n++;
 
 	num_rsvd_regions = n;
+	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
 
 	sort_regions(rsvd_region, num_rsvd_regions);
 }
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 643ccc6960ce..07bd02b6c372 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/efi.h>
 #include <asm/io.h>
+#include <asm/meminit.h>
 
 static inline void __iomem *
 __ioremap (unsigned long offset, unsigned long size)
@@ -21,16 +22,29 @@ __ioremap (unsigned long offset, unsigned long size)
 void __iomem *
 ioremap (unsigned long offset, unsigned long size)
 {
-	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
-		return phys_to_virt(offset);
+	u64 attr;
+	unsigned long gran_base, gran_size;
 
-	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
+	/*
+	 * For things in kern_memmap, we must use the same attribute
+	 * as the rest of the kernel.  For more details, see
+	 * Documentation/ia64/aliasing.txt.
+	 */
+	attr = kern_mem_attribute(offset, size);
+	if (attr & EFI_MEMORY_WB)
+		return phys_to_virt(offset);
+	else if (attr & EFI_MEMORY_UC)
 		return __ioremap(offset, size);
 
 	/*
-	 * Someday this should check ACPI resources so we
-	 * can do the right thing for hot-plugged regions.
+	 * Some chipsets don't support UC access to memory.  If
+	 * WB is supported for the whole granule, we prefer that.
 	 */
+	gran_base = GRANULEROUNDDOWN(offset);
+	gran_size = GRANULEROUNDUP(offset + size) - gran_base;
+	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
+		return phys_to_virt(offset);
+
 	return __ioremap(offset, size);
 }
 EXPORT_SYMBOL(ioremap);
@@ -38,6 +52,9 @@ EXPORT_SYMBOL(ioremap);
 void __iomem *
 ioremap_nocache (unsigned long offset, unsigned long size)
 {
+	if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
+		return 0;
+
 	return __ioremap(offset, size);
 }
 EXPORT_SYMBOL(ioremap_nocache);
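
The granule rounding in ioremap() above widens the attribute query from the requested window to the whole kernel-mapping unit, since any cacheable identity mapping covers a full granule. Roughly, assuming the usual asm/meminit.h definitions and a 16MB granule (illustrative stand-in macros, not the real ones):

/* Illustrative stand-ins for GRANULEROUNDDOWN/GRANULEROUNDUP. */
#define DEMO_GRANULE_SIZE	(1UL << 24)	/* 16MB, a typical IA64_GRANULE_SIZE */
#define DEMO_ROUNDDOWN(a)	((a) & ~(DEMO_GRANULE_SIZE - 1))
#define DEMO_ROUNDUP(a)		(((a) + DEMO_GRANULE_SIZE - 1) & ~(DEMO_GRANULE_SIZE - 1))

/* Example: offset = 0x01008000, size = 0x2000
 *   gran_base = DEMO_ROUNDDOWN(0x01008000)           = 0x01000000
 *   gran_size = DEMO_ROUNDUP(0x0100a000) - gran_base = 0x01000000 (one granule)
 * so the EFI_MEMORY_WB check covers everything a cacheable identity
 * mapping of that granule could touch, not just the requested bytes. */
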
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index cf7751b99d1c..61dd8608da4f 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -645,18 +645,31 @@ char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
 int
 pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
 {
+	unsigned long size = vma->vm_end - vma->vm_start;
+	pgprot_t prot;
 	char *addr;
 
+	/*
+	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
+	 * for more details.
+	 */
+	if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size))
+		return -EINVAL;
+	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
+				    vma->vm_page_prot);
+	if (pgprot_val(prot) != pgprot_val(pgprot_noncached(vma->vm_page_prot)))
+		return -EINVAL;
+
 	addr = pci_get_legacy_mem(bus);
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
 
 	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_page_prot = prot;
 	vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
 
 	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
+			    size, vma->vm_page_prot))
 		return -EAGAIN;
 
 	return 0;
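
This pci.c path is reached when user space mmaps a bus's legacy memory file; with the checks above, the mmap fails unless an aliasing-safe uncacheable mapping is possible. A hedged user-space sketch of such a caller (the sysfs path and the VGA window offset are illustrative, not taken from this patch):

/* Hypothetical example: map 64KB of legacy VGA memory (0xA0000) through
 * a per-bus legacy_mem file.  With the change above, the mmap is refused
 * rather than silently creating an aliased cacheable mapping. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	void *fb;
	int fd = open("/sys/class/pci_bus/0000:00/legacy_mem", O_RDWR);

	if (fd < 0)
		return 1;
	fb = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, 0xA0000);
	if (fb == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	/* ... access the legacy frame buffer ... */
	munmap(fb, 0x10000);
	close(fd);
	return 0;
}
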
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 739c948dc504..9a8a29339d2d 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -51,6 +51,8 @@ static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
 static int sn_hwperf_init(void);
 static DECLARE_MUTEX(sn_hwperf_init_mutex);
 
+#define cnode_possible(n)	((n) < num_cnodes)
+
 static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
 {
 	int e;
@@ -127,14 +129,14 @@ static int sn_hwperf_geoid_to_cnode(char *location)
 		}
 	}
 
-	return node_possible(cnode) ? cnode : -1;
+	return cnode_possible(cnode) ? cnode : -1;
 }
 
 static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
 {
 	if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
 		BUG();
-	if (!obj->sn_hwp_this_part)
+	if (SN_HWPERF_FOREIGN(obj))
 		return -1;
 	return sn_hwperf_geoid_to_cnode(obj->location);
 }
@@ -199,12 +201,12 @@ static void print_pci_topology(struct seq_file *s)
 
 static inline int sn_hwperf_has_cpus(cnodeid_t node)
 {
-	return node_online(node) && nr_cpus_node(node);
+	return node < MAX_NUMNODES && node_online(node) && nr_cpus_node(node);
 }
 
 static inline int sn_hwperf_has_mem(cnodeid_t node)
 {
-	return node_online(node) && NODE_DATA(node)->node_present_pages;
+	return node < MAX_NUMNODES && node_online(node) && NODE_DATA(node)->node_present_pages;
 }
 
 static struct sn_hwperf_object_info *
@@ -237,7 +239,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
 	int found_mem = 0;
 	int found_cpu = 0;
 
-	if (!node_possible(node))
+	if (!cnode_possible(node))
 		return -EINVAL;
 
 	if (sn_hwperf_has_cpus(node)) {
@@ -442,7 +444,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
 	seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
 		obj->sn_hwp_this_part ? "local" : "shared", obj->name);
 
-	if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
+	if (ordinal < 0 || (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)))
 		seq_putc(s, '\n');
 	else {
 		cnodeid_t near_mem = -1;
@@ -468,22 +470,24 @@ static int sn_topology_show(struct seq_file *s, void *d)
 			/*
 			 * CPUs on this node, if any
 			 */
-			cpumask = node_to_cpumask(ordinal);
-			for_each_online_cpu(i) {
-				if (cpu_isset(i, cpumask)) {
-					slice = 'a' + cpuid_to_slice(i);
-					c = cpu_data(i);
-					seq_printf(s, "cpu %d %s%c local"
-						" freq %luMHz, arch ia64",
-						i, obj->location, slice,
-						c->proc_freq / 1000000);
-					for_each_online_cpu(j) {
-						seq_printf(s, j ? ":%d" : ", dist %d",
-							node_distance(
-								cpu_to_node(i),
-								cpu_to_node(j)));
+			if (!SN_HWPERF_IS_IONODE(obj)) {
+				cpumask = node_to_cpumask(ordinal);
+				for_each_online_cpu(i) {
+					if (cpu_isset(i, cpumask)) {
+						slice = 'a' + cpuid_to_slice(i);
+						c = cpu_data(i);
+						seq_printf(s, "cpu %d %s%c local"
+							" freq %luMHz, arch ia64",
+							i, obj->location, slice,
+							c->proc_freq / 1000000);
+						for_each_online_cpu(j) {
+							seq_printf(s, j ? ":%d" : ", dist %d",
+								node_distance(
+									cpu_to_node(i),
+									cpu_to_node(j)));
+						}
+						seq_putc(s, '\n');
 					}
-					seq_putc(s, '\n');
 				}
 			}
 		}
@@ -523,7 +527,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
 			if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
 				/* both ends local to this partition */
 				seq_puts(s, " local");
-			else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
+			else if (SN_HWPERF_FOREIGN(p))
 				/* both ends of the link in foreign partiton */
 				seq_puts(s, " foreign");
 			else
@@ -776,7 +780,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
 
 	case SN_HWPERF_GET_NODE_NASID:
 		if (a.sz != sizeof(u64) ||
-		   	(node = a.arg) < 0 || !node_possible(node)) {
+		   	(node = a.arg) < 0 || !cnode_possible(node)) {
 			r = -EINVAL;
 			goto error;
 		}
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 4cac7bdc7c7f..2d7948567ebc 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
  */
 
 #include <linux/types.h>
@@ -1023,7 +1023,7 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL);
 	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias,
 		       ~0ULL);
-	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, ~0ULL);
+	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, 0ULL);
 
 	if (request_irq(SGI_PCIASIC_ERROR,
 			tioce_error_intr_handler,