Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/copypage-feroceon.c  2
-rw-r--r--  arch/arm/mm/copypage-v3.c        2
-rw-r--r--  arch/arm/mm/copypage-v4mc.c      2
-rw-r--r--  arch/arm/mm/copypage-v4wb.c      2
-rw-r--r--  arch/arm/mm/copypage-v4wt.c      2
-rw-r--r--  arch/arm/mm/copypage-xsc3.c      2
-rw-r--r--  arch/arm/mm/copypage-xscale.c    2
7 files changed, 7 insertions(+), 7 deletions(-)
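All seven hunks below make the same one-line change: the open-coded GCC attribute is replaced by the kernel's __naked shorthand. As a rough sketch (not part of this patch; the real definition lives in the kernel's compiler headers such as include/linux/compiler-gcc.h and may carry extra annotations in some versions, and the function name below is invented for illustration), the macro and a naked function look like this:

/* Sketch: roughly what __naked expands to in the kernel's compiler headers;
 * some kernel versions attach further annotations (e.g. notrace). */
#define __naked __attribute__((naked))

/* With the naked attribute the compiler emits no prologue or epilogue, so
 * the body must be self-contained asm, including the return. Per the ARM
 * calling convention the arguments arrive in r0 (kto) and r1 (kfrom). */
static void __naked example_copy_user_page(void *kto, const void *kfrom)
{
	asm volatile("mov pc, lr");	/* do nothing and return manually */
}

The copypage implementations changed below follow this pattern so their hand-written load/store-multiple loops keep full control of the registers and stack frame.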
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index c3ba6a94da0c..70997d5bee2d 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 
-static void __attribute__((naked))
+static void __naked
 feroceon_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 70ed96c8af8e..de9c06854ad7 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -15,7 +15,7 @@
  *
  * FIXME: do we need to handle cache stuff...
  */
-static void __attribute__((naked))
+static void __naked
 v3_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\n\
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 1601698b9800..7370a7142b04 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __attribute__((naked))
+static void __naked
 mc_copy_user_page(void *from, void *to)
 {
 	asm volatile(
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 3ec93dab7656..9ab098414227 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -22,7 +22,7 @@
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __attribute__((naked))
+static void __naked
 v4wb_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 0f1188efae45..300efafd6643 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -20,7 +20,7 @@
  * dirty data in the cache. However, we do have to ensure that
  * subsequent reads are up to date.
  */
-static void __attribute__((naked))
+static void __naked
 v4wt_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 39a994542cad..bc4525f5ab23 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -29,7 +29,7 @@
  * if we eventually end up using our copied page.
  *
  */
-static void __attribute__((naked))
+static void __naked
 xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index d18f2397ee2d..76824d3e966a 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  * Dcache aliasing issue. The writes will be forwarded to the write buffer,
  * and merged as appropriate.
  */
-static void __attribute__((naked))
+static void __naked
 mc_copy_user_page(void *from, void *to)
 {
 	/*