 arch/m32r/mm/ioremap.c | 93 +++----------------------------------------------
 1 file changed, 7 insertions(+), 86 deletions(-)
diff --git a/arch/m32r/mm/ioremap.c b/arch/m32r/mm/ioremap.c
index a151849a605e..5152c4e6ac80 100644
--- a/arch/m32r/mm/ioremap.c
+++ b/arch/m32r/mm/ioremap.c
@@ -20,92 +20,8 @@
 #include <asm/byteorder.h>
 
 #include <linux/vmalloc.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-static inline void
-remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
-        unsigned long phys_addr, unsigned long flags)
-{
-        unsigned long end;
-        unsigned long pfn;
-        pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
-                                   | _PAGE_WRITE | flags);
-
-        address &= ~PMD_MASK;
-        end = address + size;
-        if (end > PMD_SIZE)
-                end = PMD_SIZE;
-        if (address >= end)
-                BUG();
-        pfn = phys_addr >> PAGE_SHIFT;
-        do {
-                if (!pte_none(*pte)) {
-                        printk("remap_area_pte: page already exists\n");
-                        BUG();
-                }
-                set_pte(pte, pfn_pte(pfn, pgprot));
-                address += PAGE_SIZE;
-                pfn++;
-                pte++;
-        } while (address && (address < end));
-}
-
-static inline int
-remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
-        unsigned long phys_addr, unsigned long flags)
-{
-        unsigned long end;
-
-        address &= ~PGDIR_MASK;
-        end = address + size;
-        if (end > PGDIR_SIZE)
-                end = PGDIR_SIZE;
-        phys_addr -= address;
-        if (address >= end)
-                BUG();
-        do {
-                pte_t * pte = pte_alloc_kernel(pmd, address);
-                if (!pte)
-                        return -ENOMEM;
-                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-                address = (address + PMD_SIZE) & PMD_MASK;
-                pmd++;
-        } while (address && (address < end));
-        return 0;
-}
-
-static int
-remap_area_pages(unsigned long address, unsigned long phys_addr,
-        unsigned long size, unsigned long flags)
-{
-        int error;
-        pgd_t * dir;
-        unsigned long end = address + size;
-
-        phys_addr -= address;
-        dir = pgd_offset(&init_mm, address);
-        flush_cache_all();
-        if (address >= end)
-                BUG();
-        do {
-                pmd_t *pmd;
-                pmd = pmd_alloc(&init_mm, dir, address);
-                error = -ENOMEM;
-                if (!pmd)
-                        break;
-                if (remap_area_pmd(pmd, address, end - address,
-                                   phys_addr + address, flags))
-                        break;
-                error = 0;
-                address = (address + PGDIR_SIZE) & PGDIR_MASK;
-                dir++;
-        } while (address && (address < end));
-        flush_tlb_all();
-        return error;
-}
 
 /*
  * Generic mapping function (not visible outside):
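
Note: the three helpers deleted above (remap_area_pte, remap_area_pmd, remap_area_pages) open-coded a pgd -> pmd -> pte walk that the generic lib/ioremap.c now performs. The replacement entry point is declared in <linux/io.h>, which is why the include changes from <asm/io.h>. Its prototype in kernels of this era looked as follows; this is a sketch for reference, not part of the patch:

    /*
     * Declared in <linux/io.h>, implemented in lib/ioremap.c: walks
     * init_mm's page tables and maps the virtual range [addr, end)
     * onto the physical range starting at phys_addr with protection
     * bits prot.  Returns 0 on success, -ENOMEM if a page-table
     * allocation fails.
     */
    int ioremap_page_range(unsigned long addr, unsigned long end,
                           unsigned long phys_addr, pgprot_t prot);
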
@@ -129,6 +45,7 @@ __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
         void __iomem * addr;
         struct vm_struct * area;
         unsigned long offset, last_addr;
+        pgprot_t pgprot;
 
         /* Don't allow wraparound or zero size */
         last_addr = phys_addr + size - 1;
@@ -157,6 +74,9 @@ __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
                         return NULL;
         }
 
+        pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
+                          | _PAGE_WRITE | flags);
+
         /*
          * Mappings have to be page-aligned
          */
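
The protection bits that remap_area_pte() hard-coded are now assembled once here and handed down; the generic code applies the caller's pgprot verbatim, which is why __ioremap() must build it itself. For comparison, the pte-level loop in lib/ioremap.c of this era did essentially what the deleted remap_area_pte() did, minus the flag construction; a simplified sketch, with the upper table levels and cache flushing omitted:

    static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
                    unsigned long end, unsigned long phys_addr, pgprot_t prot)
    {
            pte_t *pte;
            unsigned long pfn = phys_addr >> PAGE_SHIFT;

            pte = pte_alloc_kernel(pmd, addr);
            if (!pte)
                    return -ENOMEM;
            do {
                    /* As in the old m32r code, a live pte here is a bug. */
                    BUG_ON(!pte_none(*pte));
                    set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                    pfn++;
            } while (pte++, addr += PAGE_SIZE, addr != end);
            return 0;
    }
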
@@ -172,7 +92,8 @@ __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
                 return NULL;
         area->phys_addr = phys_addr;
         addr = (void __iomem *) area->addr;
-        if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
+        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+                        phys_addr, pgprot)) {
                 vunmap((void __force *) addr);
                 return NULL;
         }
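
Taken together, the hunks reduce __ioremap() to the usual pattern: validate the request, build the pgprot, page-align, reserve a VM_IOREMAP area, and populate it with a single call. A reconstruction of the resulting function follows; lines outside the hunks shown above are assumptions based on the standard i386-style ioremap boilerplate this file followed, and checks not visible in the diff are left elided:

    void __iomem *
    __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
    {
            void __iomem *addr;
            struct vm_struct *area;
            unsigned long offset, last_addr;
            pgprot_t pgprot;

            /* Don't allow wraparound or zero size */
            last_addr = phys_addr + size - 1;
            if (!size || last_addr < phys_addr)
                    return NULL;

            /* ... validity checks not shown in the diff are elided ... */

            pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
                              | _PAGE_WRITE | flags);

            /* Mappings have to be page-aligned. */
            offset = phys_addr & ~PAGE_MASK;
            phys_addr &= PAGE_MASK;
            size = PAGE_ALIGN(last_addr + 1) - phys_addr;

            area = get_vm_area(size, VM_IOREMAP);
            if (!area)
                    return NULL;
            area->phys_addr = phys_addr;
            addr = (void __iomem *)area->addr;

            /* One call replaces the removed remap_area_pages() walk. */
            if (ioremap_page_range((unsigned long)addr,
                            (unsigned long)addr + size, phys_addr, pgprot)) {
                    vunmap((void __force *)addr);
                    return NULL;
            }

            return (void __iomem *)(offset + (char __iomem *)addr);
    }
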