author     Thomas Gleixner <tglx@linutronix.de>   2008-01-30 07:34:05 -0500
committer  Ingo Molnar <mingo@elte.hu>            2008-01-30 07:34:05 -0500
commit     e9332cacd74b25548a517b8dd4fff0216f7907db (patch)
tree       200a1678dcb177614fe5f862854d10995401646f /arch/x86/mm
parent     74ff2857f099951020a47420872f5c1460f32c27 (diff)
x86: switch to change_page_attr_addr in ioremap_32.c
Use change_page_attr_addr() instead of change_page_attr(), which
simplifies the code significantly and matches the 64-bit
implementation.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
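
For readers unfamiliar with the two interfaces: they differ only in how the
affected pages are named. The sketch below condenses the before/after call
pattern, using the signatures exactly as they appear in the hunks that
follow; guard logic and error handling are elided, so this is illustrative
only and not part of the commit.

        /*
         * Old interface: needs a struct page, which exists only for physical
         * addresses covered by the memmap; holes between nodes have none, so
         * callers had to range-check against high_memory first.
         */
        struct page *ppage = virt_to_page(__va(phys_addr));
        err = change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE);

        /*
         * New interface: takes a kernel virtual address directly, so a
         * caller only needs to verify that an identity mapping exists.
         */
        err = change_page_attr_addr((unsigned long) __va(phys_addr),
                                    npages, PAGE_KERNEL_NOCACHE);

        /* Either way, a successful change is followed by a TLB flush. */
        if (!err)
                global_flush_tlb();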
Diffstat (limited to 'arch/x86/mm')
 -rw-r--r--  arch/x86/mm/ioremap_32.c | 90
 1 file changed, 49 insertions(+), 41 deletions(-)
diff --git a/arch/x86/mm/ioremap_32.c b/arch/x86/mm/ioremap_32.c
index 213ace58a188..ae7e55c8c647 100644
--- a/arch/x86/mm/ioremap_32.c
+++ b/arch/x86/mm/ioremap_32.c
@@ -6,6 +6,7 @@
  * (C) Copyright 1995 1996 Linus Torvalds
  */
 
+#include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -19,6 +20,42 @@
 #include <asm/tlbflush.h>
 
 /*
+ * Fix up the linear direct mapping of the kernel to avoid cache attribute
+ * conflicts.
+ */
+static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+                               pgprot_t prot)
+{
+        unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
+        int err, level;
+
+        /* No change for pages after the last mapping */
+        if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
+                return 0;
+
+        npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        vaddr = (unsigned long) __va(phys_addr);
+
+        /*
+         * If there is no identity map for this address,
+         * change_page_attr_addr is unnecessary
+         */
+        if (!lookup_address(vaddr, &level))
+                return 0;
+
+        /*
+         * Must use an address here and not struct page because the
+         * phys addr can be in a hole between nodes and not have a
+         * memmap entry.
+         */
+        err = change_page_attr_addr(vaddr, npages, prot);
+        if (!err)
+                global_flush_tlb();
+
+        return err;
+}
+
+/*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
  * directly.
@@ -33,7 +70,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
         void __iomem *addr;
         struct vm_struct *area;
         unsigned long offset, last_addr;
-        pgprot_t prot;
+        pgprot_t pgprot;
 
         /* Don't allow wraparound or zero size */
         last_addr = phys_addr + size - 1;
@@ -62,7 +99,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
                 return NULL;
         }
 
-        prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
+        pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
 
         /*
          * Mappings have to be page-aligned
@@ -79,11 +116,17 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
                 return NULL;
         area->phys_addr = phys_addr;
         addr = (void __iomem *) area->addr;
-        if (ioremap_page_range((unsigned long) addr,
-                               (unsigned long) addr + size, phys_addr, prot)) {
+        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+                               phys_addr, pgprot)) {
                 vunmap((void __force *) addr);
                 return NULL;
         }
+
+        if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
+                vunmap(addr);
+                return NULL;
+        }
+
         return (void __iomem *) (offset + (char __iomem *)addr);
 }
 EXPORT_SYMBOL(__ioremap);
@@ -111,37 +154,7 @@ EXPORT_SYMBOL(__ioremap);
  */
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
-        unsigned long last_addr;
-        void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
-
-        if (!p)
-                return p;
-
-        /* Guaranteed to be > phys_addr, as per __ioremap() */
-        last_addr = phys_addr + size - 1;
-
-        if (last_addr < virt_to_phys(high_memory) - 1) {
-                struct page *ppage = virt_to_page(__va(phys_addr));
-                unsigned long npages;
-
-                phys_addr &= PAGE_MASK;
-
-                /* This might overflow and become zero.. */
-                last_addr = PAGE_ALIGN(last_addr);
-
-                /* .. but that's ok, because modulo-2**n arithmetic will make
-                 * the page-aligned "last - first" come out right.
-                 */
-                npages = (last_addr - phys_addr) >> PAGE_SHIFT;
-
-                if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
-                        iounmap(p);
-                        p = NULL;
-                }
-                global_flush_tlb();
-        }
-
-        return p;
+        return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
@@ -189,12 +202,7 @@ void iounmap(volatile void __iomem *addr)
         }
 
         /* Reset the direct mapping. Can block */
-        if (p->phys_addr < virt_to_phys(high_memory) - 1) {
-                change_page_attr(virt_to_page(__va(p->phys_addr)),
-                                 get_vm_area_size(p) >> PAGE_SHIFT,
-                                 PAGE_KERNEL);
-                global_flush_tlb();
-        }
+        ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);
 
         /* Finally remove it */
         o = remove_vm_area((void *)addr);
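
The net effect, pulled together from the hunks above: __ioremap() now fixes
up the kernel's direct (identity) mapping right after installing the vmalloc
mapping, and iounmap() resets it through the same helper, so the cache
attribute conflict handling lives in exactly one place. A condensed view of
the two call sites:

        /* In __ioremap(): make the direct mapping of the same physical
         * range agree with the attributes just requested. */
        if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
                vunmap(addr);
                return NULL;
        }

        /* In iounmap(): restore the direct mapping to its default state. */
        ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);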