Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--	arch/sh/mm/pmb.c	97
1 file changed, 51 insertions, 46 deletions
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 9429355c18ca..55d21902d707 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -271,59 +271,18 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 		     unsigned long size, pgprot_t prot)
 {
-	return 0;
-}
-
-void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
-			       pgprot_t prot, void *caller)
-{
 	struct pmb_entry *pmbp, *pmbe;
 	unsigned long pmb_flags;
 	int i, mapped;
-	unsigned long orig_addr, vaddr;
-	phys_addr_t offset, last_addr;
-	phys_addr_t align_mask;
-	unsigned long aligned;
-	struct vm_struct *area;
 
-	if (!pmb_iomapping_enabled)
-		return NULL;
-
-	/*
-	 * Small mappings need to go through the TLB.
-	 */
-	if (size < SZ_16M)
-		return ERR_PTR(-EINVAL);
-	if (!pmb_prot_valid(prot))
-		return ERR_PTR(-EINVAL);
+	if (!pmb_addr_valid(vaddr, size))
+		return -EFAULT;
 
-	pmbp = NULL;
 	pmb_flags = pgprot_to_pmb_flags(prot);
-	mapped = 0;
-
-	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
-		if (size >= pmb_sizes[i].size)
-			break;
-
-	last_addr = phys + size;
-	align_mask = ~(pmb_sizes[i].size - 1);
-	offset = phys & ~align_mask;
-	phys &= align_mask;
-	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
-
-	area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
-				    P3SEG, caller);
-	if (!area)
-		return NULL;
-
-	area->phys_addr = phys;
-	orig_addr = vaddr = (unsigned long)area->addr;
-
-	if (!pmb_addr_valid(vaddr, aligned))
-		return ERR_PTR(-EFAULT);
+	pmbp = NULL;
 
 again:
-	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+	for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
 		unsigned long flags;
 
 		if (size < pmb_sizes[i].size)
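With the entry-assembly loop hoisted out of pmb_remap_caller(), boot-time code can now bolt a fixed mapping for a virtual address it already owns and get a plain -errno back, instead of going through the ERR_PTR-returning remap path. A minimal caller sketch; the window placement, the PAGE_KERNEL_NOCACHE protection, and the choice of includes are illustrative assumptions, not part of this commit:

	#include <asm/mmu.h>		/* assumed home of the prototype */
	#include <asm/pgtable.h>
	#include <asm/sizes.h>

	static int __init bolt_example_window(void)
	{
		/* hypothetical fixed window: 64 MiB of RAM at a P2 address */
		unsigned long vaddr = 0xa4000000;	/* 64 MiB aligned */
		phys_addr_t phys    = 0x44000000;	/* 64 MiB aligned */

		/* 0 on success, -EFAULT if the range is not PMB-mappable,
		 * or the error pmb_alloc() reported */
		return pmb_bolt_mapping(vaddr, phys, SZ_64M,
					PAGE_KERNEL_NOCACHE);
	}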
@@ -333,7 +292,7 @@ again:
 			    PMB_NO_ENTRY);
 		if (IS_ERR(pmbe)) {
 			pmb_unmap_entry(pmbp, mapped);
-			return pmbe;
+			return PTR_ERR(pmbe);
 		}
 
 		spin_lock_irqsave(&pmbe->lock, flags);
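The one-line change above follows from the new return type: the loop used to live in a function returning void __iomem *, where handing the ERR_PTR-encoded pmbe straight back was fine, but pmb_bolt_mapping() returns int, so the pointer-encoded error must be decoded with PTR_ERR(). A round-trip illustration of the generic kernel idiom (not code from this file):

	#include <linux/err.h>

	static int unwrap_demo(void)
	{
		void *p = ERR_PTR(-ENOMEM);	/* encode -ENOMEM as a pointer */

		if (IS_ERR(p))
			return PTR_ERR(p);	/* decode back to -ENOMEM */
		return 0;
	}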
@@ -372,6 +331,52 @@ again:
 	if (size >= SZ_16M)
 		goto again;
 
+	return 0;
+}
+
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+			       pgprot_t prot, void *caller)
+{
+	unsigned long orig_addr, vaddr;
+	phys_addr_t offset, last_addr;
+	phys_addr_t align_mask;
+	unsigned long aligned;
+	struct vm_struct *area;
+	int i, ret;
+
+	if (!pmb_iomapping_enabled)
+		return NULL;
+
+	/*
+	 * Small mappings need to go through the TLB.
+	 */
+	if (size < SZ_16M)
+		return ERR_PTR(-EINVAL);
+	if (!pmb_prot_valid(prot))
+		return ERR_PTR(-EINVAL);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (size >= pmb_sizes[i].size)
+			break;
+
+	last_addr = phys + size;
+	align_mask = ~(pmb_sizes[i].size - 1);
+	offset = phys & ~align_mask;
+	phys &= align_mask;
+	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
+
+	area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
+				    P3SEG, caller);
+	if (!area)
+		return NULL;
+
+	area->phys_addr = phys;
+	orig_addr = vaddr = (unsigned long)area->addr;
+
+	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
+	if (ret != 0)
+		return ERR_PTR(ret);
+
 	return (void __iomem *)(offset + (char *)orig_addr);
 }
 
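The carve-out arithmetic that stays in pmb_remap_caller() rounds an arbitrary request out to whole PMB frames and keeps the intra-frame offset for the returned cookie. A standalone userspace sketch with made-up numbers; ALIGN_UP mirrors the kernel's ALIGN(), and the 16 MiB frame choice assumes pmb_sizes[] is ordered largest-first, as the selection loop requires:

	#include <stdio.h>
	#include <stdint.h>

	/* same rounding the kernel's ALIGN() performs */
	#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		/* hypothetical request: 20 MiB starting 1 MiB into a frame */
		uint64_t phys       = 0x40100000;
		uint64_t size       = 20 << 20;
		uint64_t pmb_size   = 16 << 20;	/* largest entry <= size */
		uint64_t align_mask = ~(pmb_size - 1);
		uint64_t last_addr  = phys + size;
		uint64_t offset     = phys & ~align_mask;	/* 0x100000   */
		uint64_t base       = phys & align_mask;	/* 0x40000000 */
		uint64_t aligned    = ALIGN_UP(last_addr, pmb_size) - base;

		/* prints base=0x40000000 offset=0x100000 aligned=0x2000000,
		 * i.e. the VM area spans two whole 16 MiB frames (32 MiB)
		 * and the cookie handed back is area->addr + offset */
		printf("base=%#llx offset=%#llx aligned=%#llx\n",
		       (unsigned long long)base, (unsigned long long)offset,
		       (unsigned long long)aligned);
		return 0;
	}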