author     Haavard Skinnemoen <hskinnemoen@atmel.com>  2006-12-08 05:38:08 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>    2006-12-08 11:28:52 -0500
commit     10731b88a2725e0f1b21394433c8e53fde6a3634
tree       b056f55477db6129c6e4030d1131ebc745655916
parent     5b3e1a85c2145813898ac50530c70e6d03a6aa19
[PATCH] Generic ioremap_page_range: sh64 conversion
Convert SH64 to use generic ioremap_page_range()
Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/sh64/mm/ioremap.c | 100
1 file changed, 8 insertions(+), 92 deletions(-)
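For reference, the generic ioremap_page_range() used here takes a kernel virtual range, a physical base address and a pgprot_t, and builds the page-table entries itself, so an arch's __ioremap() shrinks to roughly the shape sketched below. This is an illustration only, not code from this patch: the example_ioremap() name is made up, and the offset/page-alignment fix-up that a real __ioremap() performs is omitted.

/*
 * Minimal sketch of the calling pattern: reserve a VM area, then let the
 * generic helper populate the page tables.  Illustration only; name and
 * simplifications as noted in the text above.
 */
#include <linux/io.h>
#include <linux/vmalloc.h>

static void __iomem *example_ioremap(unsigned long phys_addr,
                                     unsigned long size, pgprot_t prot)
{
        struct vm_struct *area;
        unsigned long vaddr;

        area = get_vm_area(size, VM_IOREMAP);   /* reserve kernel virtual space */
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long)area->addr;

        /* map [vaddr, vaddr + size) to phys_addr with the given protection */
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *)vaddr;
}

The sh64 conversion below follows this shape: the arch keeps its flags-to-pgprot translation in __ioremap() and drops its private remap_area_pte/pmd/pages page-table walkers.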
diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c
index 80c56754f513..ff26c02511aa 100644
--- a/arch/sh64/mm/ioremap.c
+++ b/arch/sh64/mm/ioremap.c
@@ -18,7 +18,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
 #include <linux/string.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <linux/ioport.h>
@@ -28,96 +28,6 @@
 static void shmedia_mapioaddr(unsigned long, unsigned long);
 static unsigned long shmedia_ioremap(struct resource *, u32, int);
 
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
-        unsigned long phys_addr, unsigned long flags)
-{
-        unsigned long end;
-        unsigned long pfn;
-        pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_READ |
-                                   _PAGE_WRITE | _PAGE_DIRTY |
-                                   _PAGE_ACCESSED | _PAGE_SHARED | flags);
-
-        address &= ~PMD_MASK;
-        end = address + size;
-        if (end > PMD_SIZE)
-                end = PMD_SIZE;
-        if (address >= end)
-                BUG();
-
-        pfn = phys_addr >> PAGE_SHIFT;
-
-        pr_debug(" %s: pte %p address %lx size %lx phys_addr %lx\n",
-                 __FUNCTION__,pte,address,size,phys_addr);
-
-        do {
-                if (!pte_none(*pte)) {
-                        printk("remap_area_pte: page already exists\n");
-                        BUG();
-                }
-
-                set_pte(pte, pfn_pte(pfn, pgprot));
-                address += PAGE_SIZE;
-                pfn++;
-                pte++;
-        } while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
-        unsigned long phys_addr, unsigned long flags)
-{
-        unsigned long end;
-
-        address &= ~PGDIR_MASK;
-        end = address + size;
-
-        if (end > PGDIR_SIZE)
-                end = PGDIR_SIZE;
-
-        phys_addr -= address;
-
-        if (address >= end)
-                BUG();
-
-        do {
-                pte_t * pte = pte_alloc_kernel(pmd, address);
-                if (!pte)
-                        return -ENOMEM;
-                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-                address = (address + PMD_SIZE) & PMD_MASK;
-                pmd++;
-        } while (address && (address < end));
-        return 0;
-}
-
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
-                            unsigned long size, unsigned long flags)
-{
-        int error;
-        pgd_t * dir;
-        unsigned long end = address + size;
-
-        phys_addr -= address;
-        dir = pgd_offset_k(address);
-        flush_cache_all();
-        if (address >= end)
-                BUG();
-        do {
-                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
-                error = -ENOMEM;
-                if (!pmd)
-                        break;
-                if (remap_area_pmd(pmd, address, end - address,
-                                   phys_addr + address, flags)) {
-                        break;
-                }
-                error = 0;
-                address = (address + PGDIR_SIZE) & PGDIR_MASK;
-                dir++;
-        } while (address && (address < end));
-        flush_tlb_all();
-        return 0;
-}
-
 /*
  * Generic mapping function (not visible outside):
  */
@@ -136,12 +46,17 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
         void * addr;
         struct vm_struct * area;
         unsigned long offset, last_addr;
+        pgprot_t pgprot;
 
         /* Don't allow wraparound or zero size */
         last_addr = phys_addr + size - 1;
         if (!size || last_addr < phys_addr)
                 return NULL;
 
+        pgprot = __pgprot(_PAGE_PRESENT | _PAGE_READ |
+                          _PAGE_WRITE | _PAGE_DIRTY |
+                          _PAGE_ACCESSED | _PAGE_SHARED | flags);
+
         /*
          * Mappings have to be page-aligned
          */
@@ -158,7 +73,8 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
                 return NULL;
         area->phys_addr = phys_addr;
         addr = area->addr;
-        if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
+        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+                               phys_addr, pgprot)) {
                 vunmap(addr);
                 return NULL;
         }