Diffstat (limited to 'arch/ia64'):
 arch/ia64/kernel/efi.c | 46
 arch/ia64/mm/ioremap.c | 78
 arch/ia64/pci/pci.c    |  2
 3 files changed, 107 insertions(+), 19 deletions(-)
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f45f91d38cab..78d29b79947d 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -660,6 +660,29 @@ efi_memory_descriptor (unsigned long phys_addr)
 	return NULL;
 }
 
+static int
+efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
+{
+	void *efi_map_start, *efi_map_end, *p;
+	efi_memory_desc_t *md;
+	u64 efi_desc_size;
+	unsigned long end;
+
+	efi_map_start = __va(ia64_boot_param->efi_memmap);
+	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+	end = phys_addr + size;
+
+	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+		md = p;
+
+		if (md->phys_addr < end && efi_md_end(md) > phys_addr)
+			return 1;
+	}
+	return 0;
+}
+
 u32
 efi_mem_type (unsigned long phys_addr)
 {
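
For context on the helper added above: efi_memmap_intersects() treats each EFI descriptor as the half-open range [md->phys_addr, efi_md_end(md)) and the request as [phys_addr, phys_addr + size); the two overlap exactly when each range starts before the other one ends. A standalone user-space sketch of that predicate, not part of the patch and using hypothetical addresses:

    /* Sketch of the half-open interval test used by efi_memmap_intersects().
     * All addresses below are hypothetical. */
    #include <stdio.h>

    static int ranges_intersect(unsigned long a_start, unsigned long a_end,
                                unsigned long b_start, unsigned long b_end)
    {
            /* [a_start, a_end) and [b_start, b_end) overlap iff each
             * range starts before the other one ends. */
            return a_start < b_end && b_start < a_end;
    }

    int main(void)
    {
            /* Hypothetical EFI descriptor: 16MB of WB memory at 0x80000000. */
            unsigned long md_start = 0x80000000UL, md_end = 0x81000000UL;
            /* Hypothetical mmap request straddling its upper edge. */
            unsigned long req_start = 0x80fff000UL, req_end = 0x81001000UL;

            printf("%d\n", ranges_intersect(md_start, md_end,
                                            req_start, req_end));
            /* Prints 1: the request overlaps the descriptor, so the
             * valid_mmap_phys_addr_range() change below would refuse it
             * unless the whole request carried WB or UC. */
            return 0;
    }
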
@@ -766,11 +789,28 @@ valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
 int
 valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
 {
+	unsigned long phys_addr = pfn << PAGE_SHIFT;
+	u64 attr;
+
+	attr = efi_mem_attribute(phys_addr, size);
+
 	/*
-	 * MMIO regions are often missing from the EFI memory map.
-	 * We must allow mmap of them for programs like X, so we
-	 * currently can't do any useful validation.
+	 * /dev/mem mmap uses normal user pages, so we don't need the entire
+	 * granule, but the entire region we're mapping must support the same
+	 * attribute.
 	 */
+	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
+		return 1;
+
+	/*
+	 * Intel firmware doesn't tell us about all the MMIO regions, so
+	 * in general we have to allow mmap requests.  But if EFI *does*
+	 * tell us about anything inside this region, we should deny it.
+	 * The user can always map a smaller region to avoid the overlap.
+	 */
+	if (efi_memmap_intersects(phys_addr, size))
+		return 0;
+
 	return 1;
 }
 
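
The practical effect of the valid_mmap_phys_addr_range() change: an mmap of /dev/mem now succeeds when the whole request is WB or UC per the EFI map, or when EFI says nothing about the region at all, and is refused only when it partially overlaps something EFI does describe (the caller can retry with a smaller region). A hedged user-space sketch, with a hypothetical physical address:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            /* Hypothetical MMIO page; real addresses are platform specific. */
            off_t phys = 0x80000000UL;
            size_t len = 4096;

            int fd = open("/dev/mem", O_RDWR | O_SYNC);
            if (fd < 0) {
                    perror("open /dev/mem");
                    return 1;
            }

            /* With this patch, the mapping is rejected only if
             * [phys, phys + len) partially overlaps a region described in
             * the EFI memory map without being wholly WB or UC. */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                           fd, phys);
            if (p == MAP_FAILED)
                    perror("mmap");
            else
                    munmap(p, len);

            close(fd);
            return 0;
    }
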
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 4280c074d64e..2a140627dfd6 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -1,5 +1,5 @@
 /*
- * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
+ * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
  *	Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -10,51 +10,101 @@
 #include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
 #include <asm/io.h>
 #include <asm/meminit.h>
 
 static inline void __iomem *
-__ioremap (unsigned long offset, unsigned long size)
+__ioremap (unsigned long phys_addr)
 {
-	return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
+	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
 }
 
 void __iomem *
-ioremap (unsigned long offset, unsigned long size)
+ioremap (unsigned long phys_addr, unsigned long size)
 {
+	void __iomem *addr;
+	struct vm_struct *area;
+	unsigned long offset;
+	pgprot_t prot;
 	u64 attr;
 	unsigned long gran_base, gran_size;
+	unsigned long page_base;
 
 	/*
 	 * For things in kern_memmap, we must use the same attribute
 	 * as the rest of the kernel.  For more details, see
 	 * Documentation/ia64/aliasing.txt.
 	 */
-	attr = kern_mem_attribute(offset, size);
+	attr = kern_mem_attribute(phys_addr, size);
 	if (attr & EFI_MEMORY_WB)
-		return (void __iomem *) phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(phys_addr);
 	else if (attr & EFI_MEMORY_UC)
-		return __ioremap(offset, size);
+		return __ioremap(phys_addr);
 
 	/*
 	 * Some chipsets don't support UC access to memory.  If
 	 * WB is supported for the whole granule, we prefer that.
 	 */
-	gran_base = GRANULEROUNDDOWN(offset);
-	gran_size = GRANULEROUNDUP(offset + size) - gran_base;
+	gran_base = GRANULEROUNDDOWN(phys_addr);
+	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
 	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
-		return (void __iomem *) phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(phys_addr);
 
-	return __ioremap(offset, size);
+	/*
+	 * WB is not supported for the whole granule, so we can't use
+	 * the region 7 identity mapping.  If we can safely cover the
+	 * area with kernel page table mappings, we can use those
+	 * instead.
+	 */
+	page_base = phys_addr & PAGE_MASK;
+	size = PAGE_ALIGN(phys_addr + size) - page_base;
+	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
+		prot = PAGE_KERNEL;
+
+		/*
+		 * Mappings have to be page-aligned
+		 */
+		offset = phys_addr & ~PAGE_MASK;
+		phys_addr &= PAGE_MASK;
+
+		/*
+		 * Ok, go for it..
+		 */
+		area = get_vm_area(size, VM_IOREMAP);
+		if (!area)
+			return NULL;
+
+		area->phys_addr = phys_addr;
+		addr = (void __iomem *) area->addr;
+		if (ioremap_page_range((unsigned long) addr,
+			    (unsigned long) addr + size, phys_addr, prot)) {
+			vunmap((void __force *) addr);
+			return NULL;
+		}
+
+		return (void __iomem *) (offset + (char __iomem *)addr);
+	}
+
+	return __ioremap(phys_addr);
 }
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *
-ioremap_nocache (unsigned long offset, unsigned long size)
+ioremap_nocache (unsigned long phys_addr, unsigned long size)
 {
-	if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
+	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
 		return NULL;
 
-	return __ioremap(offset, size);
+	return __ioremap(phys_addr);
 }
 EXPORT_SYMBOL(ioremap_nocache);
+
+void
+iounmap (volatile void __iomem *addr)
+{
+	if (REGION_NUMBER(addr) == RGN_GATE)
+		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
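
For the ioremap() rework above: when a target is not WB for its whole granule but is WB at page granularity, ioremap() now builds a kernel page-table mapping via get_vm_area()/ioremap_page_range() instead of falling back to the uncached identity mapping, and the new iounmap() vunmaps only those vmap-based mappings (the RGN_GATE check), leaving identity mappings alone. A hedged driver-style sketch of the call pattern, not part of the patch; the device address and register offset are hypothetical:

    #include <linux/init.h>
    #include <linux/io.h>
    #include <linux/kernel.h>
    #include <linux/module.h>

    #define EXAMPLE_PHYS_BASE	0x80000000UL	/* hypothetical registers */
    #define EXAMPLE_STATUS_REG	0x10		/* hypothetical offset */

    static void __iomem *example_regs;

    static int __init example_init(void)
    {
            u32 status;

            /* WB-capable targets may come back as a cacheable region-7 or
             * page-table mapping; anything else still gets the uncached
             * identity mapping, exactly as the diff above decides. */
            example_regs = ioremap(EXAMPLE_PHYS_BASE, PAGE_SIZE);
            if (!example_regs)
                    return -ENOMEM;

            status = readl(example_regs + EXAMPLE_STATUS_REG);
            printk(KERN_INFO "example: status %#x\n", status);
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* Only unmaps if ioremap() used get_vm_area(); identity
             * mappings are left untouched by the new iounmap(). */
            iounmap(example_regs);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
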
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 0e83f3b419b5..9f635896d252 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -659,8 +659,6 @@ pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
 		return -EINVAL;
 	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
 				    vma->vm_page_prot);
-	if (pgprot_val(prot) != pgprot_val(pgprot_noncached(vma->vm_page_prot)))
-		return -EINVAL;
 
 	addr = pci_get_legacy_mem(bus);
 	if (IS_ERR(addr))