Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/copypage-feroceon.c |  2
-rw-r--r--  arch/arm/mm/copypage-v3.c       |  2
-rw-r--r--  arch/arm/mm/copypage-v4mc.c     |  2
-rw-r--r--  arch/arm/mm/copypage-v4wb.c     |  2
-rw-r--r--  arch/arm/mm/copypage-v4wt.c     |  2
-rw-r--r--  arch/arm/mm/copypage-xsc3.c     |  2
-rw-r--r--  arch/arm/mm/copypage-xscale.c   |  2
-rw-r--r--  arch/arm/mm/dma-mapping.c       | 20
-rw-r--r--  arch/arm/mm/init.c              |  2
-rw-r--r--  arch/arm/mm/mmap.c              |  2
10 files changed, 21 insertions(+), 17 deletions(-)
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index c3ba6a94da0c..70997d5bee2d 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 
-static void __attribute__((naked))
+static void __naked
 feroceon_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 70ed96c8af8e..de9c06854ad7 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -15,7 +15,7 @@
  *
  * FIXME: do we need to handle cache stuff...
  */
-static void __attribute__((naked))
+static void __naked
 v3_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\n\
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 1601698b9800..7370a7142b04 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __attribute__((naked))
+static void __naked
 mc_copy_user_page(void *from, void *to)
 {
 	asm volatile(
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 3ec93dab7656..9ab098414227 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -22,7 +22,7 @@
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __attribute__((naked))
+static void __naked
 v4wb_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 0f1188efae45..300efafd6643 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -20,7 +20,7 @@
  * dirty data in the cache. However, we do have to ensure that
  * subsequent reads are up to date.
  */
-static void __attribute__((naked))
+static void __naked
 v4wt_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 39a994542cad..bc4525f5ab23 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -29,7 +29,7 @@
  * if we eventually end up using our copied page.
  *
  */
-static void __attribute__((naked))
+static void __naked
 xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index d18f2397ee2d..76824d3e966a 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  * Dcache aliasing issue. The writes will be forwarded to the write buffer,
  * and merged as appropriate.
  */
-static void __attribute__((naked))
+static void __naked
 mc_copy_user_page(void *from, void *to)
 {
 	/*
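All seven copypage-*.c hunks above make the same one-line substitution: the open-coded __attribute__((naked)) is replaced by the kernel's __naked shorthand macro. The sketch below is illustrative only — the macro definition shown is an assumption of roughly what the kernel's compiler header provides, and demo_return() is a made-up function, not kernel code — but it shows what the attribute means for these copy routines: the compiler emits no prologue or epilogue, so the body must be pure inline assembly and must return by hand.

/* Assumed, simplified stand-in for the kernel's definition (the real one
 * lives in the kernel's compiler-gcc header and may carry extra annotations). */
#define __naked __attribute__((naked))

/* demo_return() is hypothetical.  With the naked attribute the compiler
 * generates no stack frame, so the inline asm owns the registers and must
 * perform the return itself. */
void __naked demo_return(void)
{
	asm volatile("mov pc, lr");	/* ARMv4/v5-style return to the caller */
}

Funnelling the attribute through one macro presumably also gives the kernel a single place to adjust how naked functions are declared, instead of touching every copypage implementation.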
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 310e479309ef..f1ef5613ccd4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -490,26 +490,30 @@ core_initcall(consistent_init);
  */
 void dma_cache_maint(const void *start, size_t size, int direction)
 {
-	const void *end = start + size;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
 
-	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));
+	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		dmac_inv_range(start, end);
-		outer_inv_range(__pa(start), __pa(end));
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		dmac_clean_range(start, end);
-		outer_clean_range(__pa(start), __pa(end));
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		dmac_flush_range(start, end);
-		outer_flush_range(__pa(start), __pa(end));
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
 		break;
 	default:
 		BUG();
 	}
+
+	inner_op(start, start + size);
+	outer_op(__pa(start), __pa(start) + size);
 }
 EXPORT_SYMBOL(dma_cache_maint);
 
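The dma_cache_maint() change replaces three pairs of direct cache calls with function pointers: the switch now only selects an inner (CPU cache) and an outer (L2 cache) operation, and the range is handed to them from a single call site at the end. Below is a minimal user-space sketch of the same select-then-call shape; the names (cache_dir, do_range_maint, the *_range stubs) are invented for illustration and are not the kernel API.

#include <stdio.h>
#include <stddef.h>

enum cache_dir { FROM_DEVICE, TO_DEVICE, BIDIRECTIONAL };

/* Stand-ins for the real inner-cache maintenance primitives. */
static void inv_range(const void *s, const void *e)   { printf("invalidate %p..%p\n", s, e); }
static void clean_range(const void *s, const void *e) { printf("clean      %p..%p\n", s, e); }
static void flush_range(const void *s, const void *e) { printf("flush      %p..%p\n", s, e); }

static void do_range_maint(const void *start, size_t size, enum cache_dir dir)
{
	void (*op)(const void *, const void *);

	/* The switch only chooses the operation... */
	switch (dir) {
	case FROM_DEVICE:	op = inv_range;   break;
	case TO_DEVICE:		op = clean_range; break;
	case BIDIRECTIONAL:	op = flush_range; break;
	default:		return;
	}

	/* ...and the range arguments are computed in exactly one place,
	 * mirroring how the patch funnels the range through inner_op()/outer_op(). */
	op(start, (const char *)start + size);
}

int main(void)
{
	char buf[64];
	do_range_maint(buf, sizeof(buf), TO_DEVICE);
	return 0;
}

A visible side effect in the patch itself is that the outer-cache end is now derived as __pa(start) + size rather than __pa(end), which appears consistent with the __va()/__pa() boundary fixes in the init.c and mmap.c hunks below: the one-past-the-end virtual address is never translated.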
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 34df4d9d03a6..80fd3b69ae1f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -382,7 +382,7 @@ void __init bootmem_init(void)
 	for_each_node(node)
 		bootmem_free_node(node, mi);
 
-	high_memory = __va(memend_pfn << PAGE_SHIFT);
+	high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1;
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
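The new expression computes high_memory by translating the last byte of memory and then stepping one byte past it in virtual space, rather than translating the one-past-the-end physical address directly. One case this sidesteps, worked through with assumed values (32-bit unsigned long, PAGE_SHIFT of 12, RAM ending exactly at the 4 GiB physical boundary — none of these numbers come from the patch):

#include <stdio.h>

/* Assumed values for illustration only. */
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long memend_pfn = 0x100000;	/* first pfn past the end of RAM */

	/* Old argument to __va(): 0x100000 << 12 is 2^32, which truncates to 0
	 * in a 32-bit unsigned long -- a bogus physical address.              */
	unsigned long old_arg = memend_pfn << PAGE_SHIFT;

	/* New argument: the same truncated shift minus 1 wraps back to
	 * 0xffffffff, the physical address of the last valid byte; the patch
	 * translates that and then adds 1 to the resulting *virtual* pointer. */
	unsigned long new_arg = (memend_pfn << PAGE_SHIFT) - 1;

	printf("old __va() argument: %#lx\n", old_arg);	/* 0 on 32-bit */
	printf("new __va() argument: %#lx\n", new_arg);	/* 0xffffffff */
	return 0;
}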
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5358fcc7f61e..f7457fea6de8 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -124,7 +124,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size)
 {
 	if (addr < PHYS_OFFSET)
 		return 0;
-	if (addr + size > __pa(high_memory))
+	if (addr + size >= __pa(high_memory - 1))
 		return 0;
 
 	return 1;
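In the same spirit, valid_phys_addr_range() no longer feeds high_memory itself to __pa(): with the init.c change above, high_memory points one byte past the last direct-mapped byte, and translating that exact address is what these hunks avoid; the rewritten test translates high_memory - 1, the last byte that is certainly mapped. A worked comparison with an assumed layout (PHYS_OFFSET of 0, lowmem covering physical 0x00000000..0x0fffffff; the helpers and constants below are made up for illustration, not kernel code):

#include <stdio.h>
#include <stddef.h>

/* Assumed layout: __pa(high_memory) would be 0x10000000 (one past the last
 * mapped byte) and __pa(high_memory - 1) is 0x0fffffff (the last mapped byte). */
#define PA_HIGH_MEMORY		0x10000000UL
#define PA_HIGH_MEMORY_MINUS1	0x0fffffffUL

static int old_check(unsigned long addr, size_t size)
{
	return !(addr + size > PA_HIGH_MEMORY);		/* needs __pa(high_memory) */
}

static int new_check(unsigned long addr, size_t size)
{
	return !(addr + size >= PA_HIGH_MEMORY_MINUS1);	/* never translates high_memory */
}

int main(void)
{
	/* A 4 KiB window ending exactly at the top of lowmem: accepted by the
	 * old test, rejected by the slightly more conservative new one.      */
	unsigned long addr = 0x0ffff000UL;
	printf("old: %d  new: %d\n", old_check(addr, 0x1000), new_check(addr, 0x1000));
	return 0;
}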