about summary refs log tree commit diff stats
path: root/arch/ia64/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-07 15:34:57 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-07 15:34:57 -0400
commita989705c4cf6e6c1a339c95f9daf658b4ba88ca8 (patch)
treed1925b831ec9fbae65db1b193dbad1869c43a9bc /arch/ia64/mm
parent2d56d3c43cc97ae48586745556f5a5b564d61582 (diff)
parentd29182534c5f39ff899763d1e0982d8f33791d6f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] update memory attribute aliasing documentation & test cases
  [IA64] fail mmaps that span areas with incompatible attributes
  [IA64] allow WB /sys/.../legacy_mem mmaps
  [IA64] make ioremap avoid unsupported attributes
  [IA64] rename ioremap variables to match i386
  [IA64] relax per-cpu TLB requirement to DTC
  [IA64] remove per-cpu ia64_phys_stacked_size_p8
  [IA64] Fix example error injection program
  [IA64] Itanium MC Error Injection Tool: pal_mc_error_inject() interface
  [IA64] Itanium MC Error Injection Tool: Makefile changes
  [IA64] Itanium MC Error Injection Tool: Driver sysfs interface
  [IA64] Itanium MC Error Injection Tool: Doc and sample application
  [IA64] Itanium MC Error Injection Tool: Kernel configuration
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--arch/ia64/mm/init.c11
-rw-r--r--arch/ia64/mm/ioremap.c78
2 files changed, 65 insertions, 24 deletions
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2da841110727..cffb1e8325e8 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -355,7 +355,7 @@ setup_gate (void)
355void __devinit 355void __devinit
356ia64_mmu_init (void *my_cpu_data) 356ia64_mmu_init (void *my_cpu_data)
357{ 357{
358 unsigned long psr, pta, impl_va_bits; 358 unsigned long pta, impl_va_bits;
359 extern void __devinit tlb_init (void); 359 extern void __devinit tlb_init (void);
360 360
361#ifdef CONFIG_DISABLE_VHPT 361#ifdef CONFIG_DISABLE_VHPT
@@ -364,15 +364,6 @@ ia64_mmu_init (void *my_cpu_data)
364# define VHPT_ENABLE_BIT 1 364# define VHPT_ENABLE_BIT 1
365#endif 365#endif
366 366
367 /* Pin mapping for percpu area into TLB */
368 psr = ia64_clear_ic();
369 ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
370 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
371 PERCPU_PAGE_SHIFT);
372
373 ia64_set_psr(psr);
374 ia64_srlz_i();
375
376 /* 367 /*
377 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped 368 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
378 * address space. The IA-64 architecture guarantees that at least 50 bits of 369 * address space. The IA-64 architecture guarantees that at least 50 bits of
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 4280c074d64e..2a140627dfd6 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P. 2 * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
3 * Bjorn Helgaas <bjorn.helgaas@hp.com> 3 * Bjorn Helgaas <bjorn.helgaas@hp.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -10,51 +10,101 @@
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/efi.h> 12#include <linux/efi.h>
13#include <linux/io.h>
14#include <linux/vmalloc.h>
13#include <asm/io.h> 15#include <asm/io.h>
14#include <asm/meminit.h> 16#include <asm/meminit.h>
15 17
16static inline void __iomem * 18static inline void __iomem *
17__ioremap (unsigned long offset, unsigned long size) 19__ioremap (unsigned long phys_addr)
18{ 20{
19 return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset); 21 return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
20} 22}
21 23
22void __iomem * 24void __iomem *
23ioremap (unsigned long offset, unsigned long size) 25ioremap (unsigned long phys_addr, unsigned long size)
24{ 26{
27 void __iomem *addr;
28 struct vm_struct *area;
29 unsigned long offset;
30 pgprot_t prot;
25 u64 attr; 31 u64 attr;
26 unsigned long gran_base, gran_size; 32 unsigned long gran_base, gran_size;
33 unsigned long page_base;
27 34
28 /* 35 /*
29 * For things in kern_memmap, we must use the same attribute 36 * For things in kern_memmap, we must use the same attribute
30 * as the rest of the kernel. For more details, see 37 * as the rest of the kernel. For more details, see
31 * Documentation/ia64/aliasing.txt. 38 * Documentation/ia64/aliasing.txt.
32 */ 39 */
33 attr = kern_mem_attribute(offset, size); 40 attr = kern_mem_attribute(phys_addr, size);
34 if (attr & EFI_MEMORY_WB) 41 if (attr & EFI_MEMORY_WB)
35 return (void __iomem *) phys_to_virt(offset); 42 return (void __iomem *) phys_to_virt(phys_addr);
36 else if (attr & EFI_MEMORY_UC) 43 else if (attr & EFI_MEMORY_UC)
37 return __ioremap(offset, size); 44 return __ioremap(phys_addr);
38 45
39 /* 46 /*
40 * Some chipsets don't support UC access to memory. If 47 * Some chipsets don't support UC access to memory. If
41 * WB is supported for the whole granule, we prefer that. 48 * WB is supported for the whole granule, we prefer that.
42 */ 49 */
43 gran_base = GRANULEROUNDDOWN(offset); 50 gran_base = GRANULEROUNDDOWN(phys_addr);
44 gran_size = GRANULEROUNDUP(offset + size) - gran_base; 51 gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
45 if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB) 52 if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
46 return (void __iomem *) phys_to_virt(offset); 53 return (void __iomem *) phys_to_virt(phys_addr);
47 54
48 return __ioremap(offset, size); 55 /*
56 * WB is not supported for the whole granule, so we can't use
57 * the region 7 identity mapping. If we can safely cover the
58 * area with kernel page table mappings, we can use those
59 * instead.
60 */
61 page_base = phys_addr & PAGE_MASK;
62 size = PAGE_ALIGN(phys_addr + size) - page_base;
63 if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
64 prot = PAGE_KERNEL;
65
66 /*
67 * Mappings have to be page-aligned
68 */
69 offset = phys_addr & ~PAGE_MASK;
70 phys_addr &= PAGE_MASK;
71
72 /*
73 * Ok, go for it..
74 */
75 area = get_vm_area(size, VM_IOREMAP);
76 if (!area)
77 return NULL;
78
79 area->phys_addr = phys_addr;
80 addr = (void __iomem *) area->addr;
81 if (ioremap_page_range((unsigned long) addr,
82 (unsigned long) addr + size, phys_addr, prot)) {
83 vunmap((void __force *) addr);
84 return NULL;
85 }
86
87 return (void __iomem *) (offset + (char __iomem *)addr);
88 }
89
90 return __ioremap(phys_addr);
49} 91}
50EXPORT_SYMBOL(ioremap); 92EXPORT_SYMBOL(ioremap);
51 93
52void __iomem * 94void __iomem *
53ioremap_nocache (unsigned long offset, unsigned long size) 95ioremap_nocache (unsigned long phys_addr, unsigned long size)
54{ 96{
55 if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB) 97 if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
56 return NULL; 98 return NULL;
57 99
58 return __ioremap(offset, size); 100 return __ioremap(phys_addr);
59} 101}
60EXPORT_SYMBOL(ioremap_nocache); 102EXPORT_SYMBOL(ioremap_nocache);
103
104void
105iounmap (volatile void __iomem *addr)
106{
107 if (REGION_NUMBER(addr) == RGN_GATE)
108 vunmap((void *) ((unsigned long) addr & PAGE_MASK));
109}
110EXPORT_SYMBOL(iounmap);