-rw-r--r--  arch/avr32/mm/ioremap.c  120
1 files changed, 6 insertions, 114 deletions
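The patch below drops the AVR32-private remap_area_pte()/remap_area_pmd()/remap_area_pud()/remap_area_pages() walkers and lets __ioremap() populate the page tables through the generic ioremap_page_range() helper instead (it also adds _PAGE_GLOBAL to the mapping protection bits). The helper is pulled in via the new <linux/io.h> include; the prototype below is a sketch from memory of the generic lib/ioremap.c code of that era, not text quoted from the patch:

	/* Assumed prototype of the generic helper: maps the kernel virtual
	 * range [addr, end) onto phys_addr with protection bits prot and
	 * returns 0 on success or a negative errno. */
	int ioremap_page_range(unsigned long addr, unsigned long end,
			       unsigned long phys_addr, pgprot_t prot);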
diff --git a/arch/avr32/mm/ioremap.c b/arch/avr32/mm/ioremap.c
index 536021877df6..8cfec65e37f7 100644
--- a/arch/avr32/mm/ioremap.c
+++ b/arch/avr32/mm/ioremap.c
@@ -7,119 +7,11 @@
  */
 #include <linux/vmalloc.h>
 #include <linux/module.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
 #include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 #include <asm/addrspace.h>
 
-static inline int remap_area_pte(pte_t *pte, unsigned long address,
-				 unsigned long end, unsigned long phys_addr,
-				 pgprot_t prot)
-{
-	unsigned long pfn;
-
-	pfn = phys_addr >> PAGE_SHIFT;
-	do {
-		WARN_ON(!pte_none(*pte));
-
-		set_pte(pte, pfn_pte(pfn, prot));
-		address += PAGE_SIZE;
-		pfn++;
-		pte++;
-	} while (address && (address < end));
-
-	return 0;
-}
-
-static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
-				 unsigned long end, unsigned long phys_addr,
-				 pgprot_t prot)
-{
-	unsigned long next;
-
-	phys_addr -= address;
-
-	do {
-		pte_t *pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-
-		next = (address + PMD_SIZE) & PMD_MASK;
-		if (remap_area_pte(pte, address, next,
-				   address + phys_addr, prot))
-			return -ENOMEM;
-
-		address = next;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
-}
-
-static int remap_area_pud(pud_t *pud, unsigned long address,
-			  unsigned long end, unsigned long phys_addr,
-			  pgprot_t prot)
-{
-	unsigned long next;
-
-	phys_addr -= address;
-
-	do {
-		pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
-		if (!pmd)
-			return -ENOMEM;
-		next = (address + PUD_SIZE) & PUD_MASK;
-		if (remap_area_pmd(pmd, address, next,
-				   phys_addr + address, prot))
-			return -ENOMEM;
-
-		address = next;
-		pud++;
-	} while (address && address < end);
-
-	return 0;
-}
-
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
-			    size_t size, pgprot_t prot)
-{
-	unsigned long end = address + size;
-	unsigned long next;
-	pgd_t *pgd;
-	int err = 0;
-
-	phys_addr -= address;
-
-	pgd = pgd_offset_k(address);
-	flush_cache_all();
-	BUG_ON(address >= end);
-
-	spin_lock(&init_mm.page_table_lock);
-	do {
-		pud_t *pud = pud_alloc(&init_mm, pgd, address);
-
-		err = -ENOMEM;
-		if (!pud)
-			break;
-
-		next = (address + PGDIR_SIZE) & PGDIR_MASK;
-		if (next < address || next > end)
-			next = end;
-		err = remap_area_pud(pud, address, next,
-				     phys_addr + address, prot);
-		if (err)
-			break;
-
-		address = next;
-		pgd++;
-	} while (address && (address < end));
-
-	spin_unlock(&init_mm.page_table_lock);
-	flush_tlb_all();
-	return err;
-}
-
 /*
  * Re-map an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access physical
@@ -128,7 +20,7 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 void __iomem *__ioremap(unsigned long phys_addr, size_t size,
 			unsigned long flags)
 {
-	void *addr;
+	unsigned long addr;
 	struct vm_struct *area;
 	unsigned long offset, last_addr;
 	pgprot_t prot;
@@ -159,7 +51,7 @@ void __iomem *__ioremap(unsigned long phys_addr, size_t size,
 	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
 
-	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
+	prot = __pgprot(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_RW | _PAGE_DIRTY
 			| _PAGE_ACCESSED | _PAGE_TYPE_SMALL | flags);
 
 	/*
@@ -169,9 +61,9 @@ void __iomem *__ioremap(unsigned long phys_addr, size_t size,
 	if (!area)
 		return NULL;
 	area->phys_addr = phys_addr;
-	addr = area->addr;
-	if (remap_area_pages((unsigned long)addr, phys_addr, size, prot)) {
-		vunmap(addr);
+	addr = (unsigned long )area->addr;
+	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
+		vunmap((void *)addr);
 		return NULL;
 	}
 
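Callers are unaffected by the conversion: __ioremap() still returns an __iomem cookie (or NULL on failure) that is accessed with the usual readl()/writel() accessors and released with iounmap(); only the way the page tables behind it are populated has changed. A minimal usage sketch through the standard ioremap() interface, with a made-up base address and register offset purely for illustration:

	#include <linux/errno.h>
	#include <linux/io.h>

	#define EXAMPLE_MMIO_BASE	0xffe00000UL	/* hypothetical device base */
	#define EXAMPLE_CTRL_REG	0x00		/* hypothetical register offset */

	static int example_map_and_poke(void)
	{
		void __iomem *regs;

		/* Map one page worth of device registers into kernel virtual space. */
		regs = ioremap(EXAMPLE_MMIO_BASE, 4096);
		if (!regs)
			return -ENOMEM;

		writel(0x1, regs + EXAMPLE_CTRL_REG);	/* poke a control register */

		iounmap(regs);
		return 0;
	}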