Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig      |  31
-rw-r--r--  arch/sh/mm/cache-sh5.c  |   8
-rw-r--r--  arch/sh/mm/ioremap_32.c |  14
-rw-r--r--  arch/sh/mm/ioremap_64.c | 265
-rw-r--r--  arch/sh/mm/mmap.c       | 136
5 files changed, 260 insertions, 194 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index d4079cab2d58..2795618e4f07 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -21,6 +21,29 @@ config PAGE_OFFSET
         default "0x20000000" if MMU && SUPERH64
         default "0x00000000"
 
+config FORCE_MAX_ZONEORDER
+        int "Maximum zone order"
+        range 9 64 if PAGE_SIZE_16KB
+        default "9" if PAGE_SIZE_16KB
+        range 7 64 if PAGE_SIZE_64KB
+        default "7" if PAGE_SIZE_64KB
+        range 11 64
+        default "14" if !MMU
+        default "11"
+        help
+          The kernel memory allocator divides physically contiguous memory
+          blocks into "zones", where each zone is a power of two number of
+          pages.  This option selects the largest power of two that the kernel
+          keeps in the memory allocator.  If you need to allocate very large
+          blocks of physically contiguous memory, then you may need to
+          increase this value.
+
+          This config option is actually maximum order plus one. For example,
+          a value of 11 means that the largest free memory block is 2^10 pages.
+
+          The page size is not necessarily 4KB. Keep this in mind when
+          choosing a value for this option.
+
 config MEMORY_START
         hex "Physical memory start address"
         default "0x08000000"
@@ -201,14 +224,6 @@ config PAGE_SIZE_64KB
 
 endchoice
 
-config ENTRY_OFFSET
-        hex
-        default "0x00001000" if PAGE_SIZE_4KB
-        default "0x00002000" if PAGE_SIZE_8KB
-        default "0x00004000" if PAGE_SIZE_16KB
-        default "0x00010000" if PAGE_SIZE_64KB
-        default "0x00000000"
-
 choice
         prompt "HugeTLB page size"
         depends on HUGETLB_PAGE && (CPU_SH4 || CPU_SH5) && MMU
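[Note: standalone C sketch, not part of the patch. It only restates the arithmetic in the FORCE_MAX_ZONEORDER help text above ("maximum order plus one", so the largest free block is 2^(order-1) pages of the configured page size); the program and its names are illustrative only.]

/* Sketch: largest buddy block implied by each FORCE_MAX_ZONEORDER default above. */
#include <stdio.h>

static void show(const char *cfg, unsigned int order, unsigned long page_size)
{
        unsigned long pages = 1UL << (order - 1);       /* option is max order + 1 */

        printf("%-10s order %2u -> %4lu pages x %2lukB = %lu MiB max block\n",
               cfg, order, pages, page_size >> 10, (pages * page_size) >> 20);
}

int main(void)
{
        show("16KB", 9, 16 * 1024);     /* default "9"  */
        show("64KB", 7, 64 * 1024);     /* default "7"  */
        show("4KB", 11, 4 * 1024);      /* default "11" */
        return 0;
}

All three defaults work out to the same 4 MiB maximum block, which appears to be the intent behind the differing per-page-size ranges.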
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 9e277ec7d536..86762092508c 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -60,7 +60,7 @@ static inline void sh64_teardown_dtlb_cache_slot(void)
 static inline void sh64_icache_inv_all(void)
 {
         unsigned long long addr, flag, data;
-        unsigned int flags;
+        unsigned long flags;
 
         addr = ICCR0;
         flag = ICCR0_ICI;
@@ -172,7 +172,7 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
         unsigned long eaddr;
         unsigned long after_last_page_start;
         unsigned long mm_asid, current_asid;
-        unsigned long long flags = 0ULL;
+        unsigned long flags = 0;
 
         mm_asid = cpu_asid(smp_processor_id(), mm);
         current_asid = get_asid();
@@ -236,7 +236,7 @@ static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
         unsigned long long eaddr = start;
         unsigned long long eaddr_end = start + len;
         unsigned long current_asid, mm_asid;
-        unsigned long long flags;
+        unsigned long flags;
         unsigned long long epage_start;
 
         /*
@@ -342,7 +342,7 @@ static void inline sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
                  * alloco is a NOP if the cache is write-through.
                  */
                 if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
-                        ctrl_inb(eaddr);
+                        __raw_readb((unsigned long)eaddr);
         }
 }
 
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index 60cc486d2c2c..da2f4186f2cd 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -46,17 +46,15 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
                 return NULL;
 
         /*
-         * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
-         * mapped at the end of the address space (typically 0xfd000000)
-         * in a non-translatable area, so mapping through page tables for
-         * this area is not only pointless, but also fundamentally
-         * broken. Just return the physical address instead.
+         * If we're in the fixed PCI memory range, mapping through page
+         * tables is not only pointless, but also fundamentally broken.
+         * Just return the physical address instead.
          *
          * For boards that map a small PCI memory aperture somewhere in
          * P1/P2 space, ioremap() will already do the right thing,
          * and we'll never get this far.
          */
-        if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
+        if (is_pci_memory_fixed_range(phys_addr, size))
                 return (void __iomem *)phys_addr;
 
 #if !defined(CONFIG_PMB_FIXED)
@@ -121,7 +119,9 @@ void __iounmap(void __iomem *addr)
         unsigned long seg = PXSEG(vaddr);
         struct vm_struct *p;
 
-        if (seg < P3SEG || vaddr >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
+        if (seg < P3SEG || vaddr >= P3_ADDR_MAX)
+                return;
+        if (is_pci_memory_fixed_range(vaddr, 0))
                 return;
 
 #ifdef CONFIG_PMB
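[Aside: a hedged sketch of the shape of the new check only. is_pci_memory_fixed_range() itself is defined outside this diff, and the window bounds below are invented purely for illustration.]

#include <stdbool.h>

#define FIXED_PCI_MEM_START     0xfd000000UL    /* assumed window start */
#define FIXED_PCI_MEM_END       0xfe000000UL    /* assumed window end (exclusive) */

/* Hypothetical stand-in: does [start, start + size) fall entirely inside
 * the fixed, non-translatable PCI memory window?  A size of 0 tests a
 * single address, which is how the __iounmap() caller above uses it. */
static bool is_pci_memory_fixed_range(unsigned long start, unsigned long size)
{
        unsigned long end = start + size;

        return start >= FIXED_PCI_MEM_START && end <= FIXED_PCI_MEM_END;
}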
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
index 31e1bb5effbe..2331229f8126 100644
--- a/arch/sh/mm/ioremap_64.c
+++ b/arch/sh/mm/ioremap_64.c
@@ -27,88 +27,17 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
 
-static void shmedia_mapioaddr(unsigned long, unsigned long);
-static unsigned long shmedia_ioremap(struct resource *, u32, int);
-
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void *__ioremap(unsigned long phys_addr, unsigned long size,
-                unsigned long flags)
-{
-        void * addr;
-        struct vm_struct * area;
-        unsigned long offset, last_addr;
-        pgprot_t pgprot;
-
-        /* Don't allow wraparound or zero size */
-        last_addr = phys_addr + size - 1;
-        if (!size || last_addr < phys_addr)
-                return NULL;
-
-        pgprot = __pgprot(_PAGE_PRESENT | _PAGE_READ |
-                          _PAGE_WRITE | _PAGE_DIRTY |
-                          _PAGE_ACCESSED | _PAGE_SHARED | flags);
-
-        /*
-         * Mappings have to be page-aligned
-         */
-        offset = phys_addr & ~PAGE_MASK;
-        phys_addr &= PAGE_MASK;
-        size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
-        /*
-         * Ok, go for it..
-         */
-        area = get_vm_area(size, VM_IOREMAP);
-        if (!area)
-                return NULL;
-        pr_debug("Get vm_area returns %p addr %p\n", area, area->addr);
-        area->phys_addr = phys_addr;
-        addr = area->addr;
-        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-                               phys_addr, pgprot)) {
-                vunmap(addr);
-                return NULL;
-        }
-        return (void *) (offset + (char *)addr);
-}
-EXPORT_SYMBOL(__ioremap);
-
-void __iounmap(void *addr)
-{
-        struct vm_struct *area;
-
-        vfree((void *) (PAGE_MASK & (unsigned long) addr));
-        area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
-        if (!area) {
-                printk(KERN_ERR "iounmap: bad address %p\n", addr);
-                return;
-        }
-
-        kfree(area);
-}
-EXPORT_SYMBOL(__iounmap);
-
 static struct resource shmedia_iomap = {
         .name = "shmedia_iomap",
         .start = IOBASE_VADDR + PAGE_SIZE,
         .end = IOBASE_END - 1,
 };
 
-static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
+static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
+                              unsigned long flags);
 static void shmedia_unmapioaddr(unsigned long vaddr);
-static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);
+static void __iomem *shmedia_ioremap(struct resource *res, u32 pa,
+                                     int sz, unsigned long flags);
 
 /*
  * We have the same problem as the SPARC, so lets have the same comment:
@@ -130,18 +59,18 @@ static struct xresource xresv[XNRES];
 
 static struct xresource *xres_alloc(void)
 {
         struct xresource *xrp;
         int n;
 
         xrp = xresv;
         for (n = 0; n < XNRES; n++) {
                 if (xrp->xflag == 0) {
                         xrp->xflag = 1;
                         return xrp;
                 }
                 xrp++;
         }
         return NULL;
 }
 
 static void xres_free(struct xresource *xrp)
@@ -161,76 +90,71 @@ static struct resource *shmedia_find_resource(struct resource *root,
         return NULL;
 }
 
-static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
-                                      const char *name)
+static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size,
+                                      const char *name, unsigned long flags)
 {
-        static int printed_full = 0;
+        static int printed_full;
         struct xresource *xres;
         struct resource *res;
         char *tack;
         int tlen;
 
-        if (name == NULL) name = "???";
-
-        if ((xres = xres_alloc()) != 0) {
-                tack = xres->xname;
-                res = &xres->xres;
-        } else {
-                if (!printed_full) {
-                        printk("%s: done with statics, switching to kmalloc\n",
-                               __func__);
-                        printed_full = 1;
-                }
-                tlen = strlen(name);
-                tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
-                if (!tack)
-                        return -ENOMEM;
-                memset(tack, 0, sizeof(struct resource));
-                res = (struct resource *) tack;
-                tack += sizeof (struct resource);
-        }
-
-        strncpy(tack, name, XNMLN);
-        tack[XNMLN] = 0;
-        res->name = tack;
-
-        return shmedia_ioremap(res, phys, size);
+        if (name == NULL)
+                name = "???";
+
+        xres = xres_alloc();
+        if (xres != 0) {
+                tack = xres->xname;
+                res = &xres->xres;
+        } else {
+                if (!printed_full) {
+                        printk(KERN_NOTICE "%s: done with statics, "
+                               "switching to kmalloc\n", __func__);
+                        printed_full = 1;
+                }
+                tlen = strlen(name);
+                tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
+                if (!tack)
+                        return NULL;
+                memset(tack, 0, sizeof(struct resource));
+                res = (struct resource *) tack;
+                tack += sizeof(struct resource);
+        }
+
+        strncpy(tack, name, XNMLN);
+        tack[XNMLN] = 0;
+        res->name = tack;
+
+        return shmedia_ioremap(res, phys, size, flags);
 }
 
-static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
+static void __iomem *shmedia_ioremap(struct resource *res, u32 pa, int sz,
+                                     unsigned long flags)
 {
         unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
         unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
         unsigned long va;
         unsigned int psz;
 
         if (allocate_resource(&shmedia_iomap, res, round_sz,
                               shmedia_iomap.start, shmedia_iomap.end,
                               PAGE_SIZE, NULL, NULL) != 0) {
                 panic("alloc_io_res(%s): cannot occupy\n",
-                      (res->name != NULL)? res->name: "???");
+                      (res->name != NULL) ? res->name : "???");
         }
 
         va = res->start;
         pa &= PAGE_MASK;
 
         psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
 
-        /* log at boot time ... */
-        printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
-               ((res->name != NULL) ? res->name : "???"),
-               psz, psz == 1 ? " " : "s", va, pa);
-
-        for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
-                shmedia_mapioaddr(pa, va);
-                va += PAGE_SIZE;
-                pa += PAGE_SIZE;
-        }
-
-        res->start += offset;
-        res->end = res->start + sz - 1;         /* not strictly necessary.. */
+        for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
+                shmedia_mapioaddr(pa, va, flags);
+                va += PAGE_SIZE;
+                pa += PAGE_SIZE;
+        }
 
-        return res->start;
+        return (void __iomem *)(unsigned long)(res->start + offset);
 }
 
 static void shmedia_free_io(struct resource *res)
@@ -249,14 +173,12 @@ static void shmedia_free_io(struct resource *res)
 
 static __init_refok void *sh64_get_page(void)
 {
-        extern int after_bootmem;
         void *page;
 
-        if (after_bootmem) {
-                page = (void *)get_zeroed_page(GFP_ATOMIC);
-        } else {
+        if (after_bootmem)
+                page = (void *)get_zeroed_page(GFP_KERNEL);
+        else
                 page = alloc_bootmem_pages(PAGE_SIZE);
-        }
 
         if (!page || ((unsigned long)page & ~PAGE_MASK))
                 panic("sh64_get_page: Out of memory already?\n");
@@ -264,17 +186,20 @@ static __init_refok void *sh64_get_page(void)
         return page;
 }
 
-static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
+static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
+                              unsigned long flags)
 {
         pgd_t *pgdp;
         pud_t *pudp;
         pmd_t *pmdp;
         pte_t *ptep, pte;
         pgprot_t prot;
-        unsigned long flags = 1; /* 1 = CB0-1 device */
 
         pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);
 
+        if (!flags)
+                flags = 1; /* 1 = CB0-1 device */
+
         pgdp = pgd_offset_k(va);
         if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
                 pudp = (pud_t *)sh64_get_page();
@@ -288,7 +213,7 @@ static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
         }
 
         pmdp = pmd_offset(pudp, va);
-        if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) {
+        if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
                 ptep = (pte_t *)sh64_get_page();
                 set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
         }
@@ -336,17 +261,19 @@ static void shmedia_unmapioaddr(unsigned long vaddr)
         pte_clear(&init_mm, vaddr, ptep);
 }
 
-unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
+void __iomem *__ioremap(unsigned long offset, unsigned long size,
+                        unsigned long flags)
 {
-        if (size < PAGE_SIZE)
-                size = PAGE_SIZE;
+        char name[14];
 
-        return shmedia_alloc_io(phys, size, name);
+        sprintf(name, "phys_%08x", (u32)offset);
+        return shmedia_alloc_io(offset, size, name, flags);
 }
-EXPORT_SYMBOL(onchip_remap);
+EXPORT_SYMBOL(__ioremap);
 
-void onchip_unmap(unsigned long vaddr)
+void __iounmap(void __iomem *virtual)
 {
+        unsigned long vaddr = (unsigned long)virtual & PAGE_MASK;
         struct resource *res;
         unsigned int psz;
 
@@ -357,10 +284,7 @@ void onchip_unmap(unsigned long vaddr)
                 return;
         }
 
         psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
-
-        printk(KERN_DEBUG "unmapioaddr: %6s [%2d page%s] freed\n",
-                res->name, psz, psz == 1 ? " " : "s");
 
         shmedia_free_io(res);
 
@@ -371,9 +295,8 @@ void onchip_unmap(unsigned long vaddr)
                 kfree(res);
         }
 }
-EXPORT_SYMBOL(onchip_unmap);
+EXPORT_SYMBOL(__iounmap);
 
-#ifdef CONFIG_PROC_FS
 static int
 ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
                   void *data)
@@ -385,7 +308,10 @@ ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
         for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
                 if (p + 32 >= e)        /* Better than nothing */
                         break;
-                if ((nm = r->name) == 0) nm = "???";
+                nm = r->name;
+                if (nm == NULL)
+                        nm = "???";
+
                 p += sprintf(p, "%08lx-%08lx: %s\n",
                              (unsigned long)r->start,
                              (unsigned long)r->end, nm);
@@ -393,14 +319,11 @@ ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
 
         return p-buf;
 }
-#endif /* CONFIG_PROC_FS */
 
 static int __init register_proc_onchip(void)
 {
-#ifdef CONFIG_PROC_FS
-        create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap);
-#endif
+        create_proc_read_entry("io_map", 0, 0, ioremap_proc_info,
+                               &shmedia_iomap);
         return 0;
 }
-
-__initcall(register_proc_onchip);
+late_initcall(register_proc_onchip);
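[Aside: standalone sketch of the page-rounding that shmedia_ioremap() now performs for every sh64 __ioremap() call above; PAGE_SIZE and the resource base are made-up values for the demo, not kernel state.]

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long pa = 0x1f000123UL;        /* arbitrary device address */
        unsigned long sz = 0x40UL;

        /* same arithmetic as shmedia_ioremap(): keep the sub-page offset,
         * round the mapping out to whole pages, return base + offset */
        unsigned long offset   = pa & ~PAGE_MASK;
        unsigned long round_sz = (offset + sz + PAGE_SIZE - 1) & PAGE_MASK;
        unsigned long va_base  = 0xfa001000UL;  /* pretend allocate_resource() start */

        printf("offset 0x%lx, %lu page(s) mapped, caller sees 0x%lx\n",
               offset, round_sz / PAGE_SIZE, va_base + offset);
        return 0;
}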
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 931f4d003fa0..1b5fdfb4e0c2 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -1,7 +1,7 @@
 /*
  * arch/sh/mm/mmap.c
  *
- * Copyright (C) 2008 Paul Mundt
+ * Copyright (C) 2008 - 2009 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -21,9 +21,26 @@ EXPORT_SYMBOL(shm_align_mask);
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
-#define COLOUR_ALIGN(addr, pgoff) \
-        ((((addr) + shm_align_mask) & ~shm_align_mask) + \
-         (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+                                         unsigned long pgoff)
+{
+        unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
+        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+        return base + off;
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+                                              unsigned long pgoff)
+{
+        unsigned long base = addr & ~shm_align_mask;
+        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+        if (base + off <= addr)
+                return base + off;
+
+        return base - off;
+}
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
         unsigned long len, unsigned long pgoff, unsigned long flags)
@@ -103,6 +120,117 @@ full_search:
                 addr = COLOUR_ALIGN(addr, pgoff);
         }
 }
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                               const unsigned long len, const unsigned long pgoff,
+                               const unsigned long flags)
+{
+        struct vm_area_struct *vma;
+        struct mm_struct *mm = current->mm;
+        unsigned long addr = addr0;
+        int do_colour_align;
+
+        if (flags & MAP_FIXED) {
+                /* We do not accept a shared mapping if it would violate
+                 * cache aliasing constraints.
+                 */
+                if ((flags & MAP_SHARED) &&
+                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+                        return -EINVAL;
+                return addr;
+        }
+
+        if (unlikely(len > TASK_SIZE))
+                return -ENOMEM;
+
+        do_colour_align = 0;
+        if (filp || (flags & MAP_SHARED))
+                do_colour_align = 1;
+
+        /* requesting a specific address */
+        if (addr) {
+                if (do_colour_align)
+                        addr = COLOUR_ALIGN(addr, pgoff);
+                else
+                        addr = PAGE_ALIGN(addr);
+
+                vma = find_vma(mm, addr);
+                if (TASK_SIZE - len >= addr &&
+                    (!vma || addr + len <= vma->vm_start))
+                        return addr;
+        }
+
+        /* check if free_area_cache is useful for us */
+        if (len <= mm->cached_hole_size) {
+                mm->cached_hole_size = 0;
+                mm->free_area_cache = mm->mmap_base;
+        }
+
+        /* either no address requested or can't fit in requested address hole */
+        addr = mm->free_area_cache;
+        if (do_colour_align) {
+                unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+
+                addr = base + len;
+        }
+
+        /* make sure it can fit in the remaining address space */
+        if (likely(addr > len)) {
+                vma = find_vma(mm, addr-len);
+                if (!vma || addr <= vma->vm_start) {
+                        /* remember the address as a hint for next time */
+                        return (mm->free_area_cache = addr-len);
+                }
+        }
+
+        if (unlikely(mm->mmap_base < len))
+                goto bottomup;
+
+        addr = mm->mmap_base-len;
+        if (do_colour_align)
+                addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+        do {
+                /*
+                 * Lookup failure means no vma is above this address,
+                 * else if new region fits below vma->vm_start,
+                 * return with success:
+                 */
+                vma = find_vma(mm, addr);
+                if (likely(!vma || addr+len <= vma->vm_start)) {
+                        /* remember the address as a hint for next time */
+                        return (mm->free_area_cache = addr);
+                }
+
+                /* remember the largest hole we saw so far */
+                if (addr + mm->cached_hole_size < vma->vm_start)
+                        mm->cached_hole_size = vma->vm_start - addr;
+
+                /* try just below the current vma->vm_start */
+                addr = vma->vm_start-len;
+                if (do_colour_align)
+                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+        } while (likely(len < vma->vm_start));
+
+bottomup:
+        /*
+         * A failed mmap() very likely causes application failure,
+         * so fall back to the bottom-up function here. This scenario
+         * can happen with large stack limits and large mmap()
+         * allocations.
+         */
+        mm->cached_hole_size = ~0UL;
+        mm->free_area_cache = TASK_UNMAPPED_BASE;
+        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+        /*
+         * Restore the topdown base:
+         */
+        mm->free_area_cache = mm->mmap_base;
+        mm->cached_hole_size = ~0UL;
+
+        return addr;
+}
 #endif /* CONFIG_MMU */
 
 /*
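[Aside: the COLOUR_ALIGN()/COLOUR_ALIGN_DOWN() helpers added to mmap.c above are pure mask arithmetic, so they can be exercised outside the kernel. Minimal sketch, assuming a 16kB alias mask (shm_align_mask = 0x3fff) and 4kB pages purely for the demo.]

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT      12
static unsigned long shm_align_mask = 0x3fff;   /* assumed D-cache alias mask */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
        unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

        return base + off;      /* round up, then add the file offset's colour */
}

static unsigned long colour_align_down(unsigned long addr, unsigned long pgoff)
{
        unsigned long base = addr & ~shm_align_mask;
        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

        /* stay at or below the hint when possible */
        return (base + off <= addr) ? base + off : base - off;
}

int main(void)
{
        unsigned long hint = 0x40007234UL;
        unsigned long up   = colour_align(hint, 3);
        unsigned long down = colour_align_down(hint, 3);

        /* both candidates share the colour of pgoff << PAGE_SHIFT here */
        assert((up   & shm_align_mask) == ((3UL << PAGE_SHIFT) & shm_align_mask));
        assert((down & shm_align_mask) == ((3UL << PAGE_SHIFT) & shm_align_mask));
        assert(down <= hint);

        printf("up 0x%lx, down 0x%lx\n", up, down);
        return 0;
}

This is also why arch_get_unmapped_area_topdown() only colour-aligns when a file or MAP_SHARED mapping is involved: private anonymous mappings have no aliasing constraint to satisfy.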