author		<jgarzik@pretzel.yyz.us>	2005-05-27 22:07:40 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-05-27 22:07:40 -0400
commit		ff0e0ea2f5d36fa90fc2c57fd019102b0a0cfabf (patch)
tree		963cdd52a4032cd4827896c4e813cfbf6dd7b3e6 /arch/x86_64/mm
parent		43f66a6ce8da299344cf1bc2ac2311889cc88555 (diff)
parent		1f15d694522af9cd7492695f11dd2dc77b6cf098 (diff)
Automatic merge of /spare/repo/netdev-2.6 branch we18
Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--	arch/x86_64/mm/fault.c		11
-rw-r--r--	arch/x86_64/mm/ioremap.c	29
2 files changed, 17 insertions(+), 23 deletions(-)
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index e03309216764..5d6b2114cc9a 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -234,6 +234,8 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 
 /*
  * Handle a fault on the vmalloc or module mapping area
+ *
+ * This assumes no large pages in there.
  */
 static int vmalloc_fault(unsigned long address)
 {
@@ -272,7 +274,10 @@ static int vmalloc_fault(unsigned long address)
 	if (!pte_present(*pte_ref))
 		return -1;
 	pte = pte_offset_kernel(pmd, address);
-	if (!pte_present(*pte) || pte_page(*pte) != pte_page(*pte_ref))
+	/* Don't use pte_page here, because the mappings can point
+	   outside mem_map, and the NUMA hash lookup cannot handle
+	   that. */
+	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
 		BUG();
 	__flush_tlb_all();
 	return 0;
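Why the equality test changed: pte_page() translates a pfn into a struct page pointer, and on NUMA that translation goes through a pfn-to-node hash that only covers frames inside mem_map. A pte in the vmalloc/ioremap area can reference a frame outside mem_map, so the patch compares raw page frame numbers instead. Below is a minimal standalone model of that idea; the pte layout, PAGE_SHIFT value, and sample frame address are simplified assumptions for illustration, not the kernel's real definitions.

/* Standalone sketch (not kernel code): models why the fault path
 * compares pte_pfn() values instead of pte_page() pointers. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                            /* assumed 4K pages */
typedef struct { uint64_t val; } pte_t;          /* hypothetical pte */

/* pfn extraction is pure arithmetic, no lookup table, so it works
 * even for frames that have no struct page entry in mem_map. */
static uint64_t pte_pfn(pte_t pte) { return pte.val >> PAGE_SHIFT; }

int main(void)
{
        /* Two ptes mapping the same physical frame, e.g. an MMIO
         * frame above the end of mem_map (address is made up). */
        pte_t a = { .val = 0xfee00000 };
        pte_t b = { .val = 0xfee00000 };

        /* pte_page(a) would need a pfn-to-struct-page translation,
         * which has no valid answer for a frame outside mem_map.
         * Comparing pfns sidesteps the translation entirely. */
        assert(pte_pfn(a) == pte_pfn(b));
        printf("pfn %#llx matches\n", (unsigned long long)pte_pfn(a));
        return 0;
}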
@@ -346,7 +351,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 * protection error (error_code & 1) == 0.
 	 */
 	if (unlikely(address >= TASK_SIZE)) {
-		if (!(error_code & 5)) {
+		if (!(error_code & 5) &&
+		      ((address >= VMALLOC_START && address < VMALLOC_END) ||
+		       (address >= MODULES_VADDR && address < MODULES_END))) {
 			if (vmalloc_fault(address) < 0)
 				goto bad_area_nosemaphore;
 			return;
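The old test routed every kernel-mode not-present fault above TASK_SIZE into vmalloc_fault(); the new one first confirms the address actually falls inside the vmalloc or module mapping window. A standalone sketch of the routing predicate follows; the window boundary constants below are placeholders, not the real x86_64 memory map.

/* Standalone sketch of the tightened fault-routing test. */
#include <stdbool.h>
#include <stdio.h>

/* Placeholder window boundaries, for illustration only. */
#define VMALLOC_START 0xffffc20000000000ULL
#define VMALLOC_END   0xffffe1ffffffffffULL
#define MODULES_VADDR 0xffffffff88000000ULL
#define MODULES_END   0xfffffffffff00000ULL

static bool wants_vmalloc_fault(unsigned long long address,
                                unsigned long error_code)
{
        /* error_code bit 0 = protection fault, bit 2 = user mode;
         * a vmalloc fault must be a kernel-mode not-present fault,
         * hence !(error_code & 5), and the address must lie in one
         * of the two lazily-synced windows. */
        return !(error_code & 5) &&
               ((address >= VMALLOC_START && address < VMALLOC_END) ||
                (address >= MODULES_VADDR && address < MODULES_END));
}

int main(void)
{
        printf("%d\n", wants_vmalloc_fault(VMALLOC_START + 0x1000, 0)); /* 1 */
        printf("%d\n", wants_vmalloc_fault(0xffff810000000000ULL, 0));  /* 0: outside windows */
        printf("%d\n", wants_vmalloc_fault(VMALLOC_START + 0x1000, 4)); /* 0: user-mode fault */
        return 0;
}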
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index 74ec8554b195..58aac23760ef 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -133,7 +133,7 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 					unsigned long flags)
 {
 	int err = 0;
-	if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
+	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
 		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		unsigned long vaddr = (unsigned long) __va(phys_addr);
 
@@ -214,7 +214,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
 		return NULL;
 	}
-	if (ioremap_change_attr(phys_addr, size, flags) < 0) {
+	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
 		area->flags &= 0xffffff;
 		vunmap(addr);
 		return NULL;
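Taken together, the two ioremap.c hunks above move the flags test from ioremap_change_attr() to its __ioremap() call site, so the helper no longer bails out when flags is 0 and can therefore also be used to restore default attributes at teardown (see the iounmap() hunk below). A toy sketch of that refactor; every name here is an illustrative stand-in, not the kernel implementation.

/* Standalone sketch: the call site, not the helper, decides whether
 * attribute changes are wanted, letting the same helper restore
 * defaults with flags == 0. */
#include <stdio.h>

static int change_attr(unsigned long phys, unsigned long size,
                       unsigned long flags)
{
        /* Old shape: the helper itself tested flags and did nothing
         * for flags == 0, so it could never reset a range. */
        printf("attrs %#lx on [%#lx, %#lx)\n", flags, phys, phys + size);
        return 0;
}

static int map_range(unsigned long phys, unsigned long size,
                     unsigned long flags)
{
        /* New shape: test flags at the call site... */
        if (flags && change_attr(phys, size, flags) < 0)
                return -1;
        return 0;
}

static void unmap_range(unsigned long phys, unsigned long size,
                        unsigned long had_flags)
{
        /* ...so teardown can call the helper with 0 to restore. */
        if (had_flags)
                change_attr(phys, size, 0);
}

int main(void)
{
        if (map_range(0xfee00000UL, 0x1000, 0x2) == 0)  /* made-up flags */
                unmap_range(0xfee00000UL, 0x1000, 0x2);
        return 0;
}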
@@ -251,7 +251,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 
 void iounmap(volatile void __iomem *addr)
 {
-	struct vm_struct *p, **pprev;
+	struct vm_struct *p;
 
 	if (addr <= high_memory)
 		return;
@@ -260,24 +260,11 @@ void iounmap(volatile void __iomem *addr)
 		return;
 
 	write_lock(&vmlist_lock);
-	for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev)
-		if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr))
-			break;
-	if (!p) {
-		printk("__iounmap: bad address %p\n", addr);
-		goto out_unlock;
-	}
-	*pprev = p->next;
-	unmap_vm_area(p);
-	if ((p->flags >> 20) &&
-	    p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
-		/* p->size includes the guard page, but cpa doesn't like that */
-		change_page_attr(virt_to_page(__va(p->phys_addr)),
-				 p->size >> PAGE_SHIFT,
-				 PAGE_KERNEL);
-		global_flush_tlb();
-	}
-out_unlock:
+	p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
+	if (!p)
+		printk("iounmap: bad address %p\n", addr);
+	else if (p->flags >> 20)
+		ioremap_change_attr(p->phys_addr, p->size, 0);
 	write_unlock(&vmlist_lock);
 	kfree(p);
 }
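The rewritten iounmap() drops the hand-rolled vmlist walk and manual unlinking in favor of __remove_vm_area(), and reuses ioremap_change_attr(..., 0) to restore default page attributes when the mapping was created with special flags (kept above bit 20 of p->flags). A standalone sketch of the new control flow, with stub helpers standing in for the kernel's:

/* Standalone sketch of the simplified iounmap() shape: look up and
 * unlink the area in one step, warn on a bad address, restore
 * default attributes if special flags were used, then free. */
#include <stdio.h>
#include <stdlib.h>

struct vm_struct {
        void *addr;
        unsigned long phys_addr;
        unsigned long size;
        unsigned long flags;    /* ioremap flags live above bit 20 */
};

static struct vm_struct *remove_area(void *addr)
{
        /* Stub for __remove_vm_area(): the real one unlinks the entry
         * from vmlist and unmaps its pages; here we fabricate one. */
        struct vm_struct *p = malloc(sizeof(*p));
        if (p) {
                p->addr = addr;
                p->phys_addr = 0xfee00000UL;    /* made-up values */
                p->size = 0x2000;
                p->flags = 0x2UL << 20;
        }
        return p;
}

static void restore_attr(unsigned long phys, unsigned long size)
{
        printf("restore default attrs on [%#lx, %#lx)\n", phys, phys + size);
}

static void sketch_iounmap(void *addr)
{
        struct vm_struct *p = remove_area(addr);  /* lookup + unlink */

        if (!p)
                printf("iounmap: bad address %p\n", addr);
        else if (p->flags >> 20)                  /* special flags used? */
                restore_attr(p->phys_addr, p->size);
        free(p);                                  /* free(NULL) is a no-op, like kfree */
}

int main(void)
{
        sketch_iounmap((void *)0x1000);
        return 0;
}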