Diffstat (limited to 'arch')
-rw-r--r--  arch/hexagon/include/asm/uaccess.h   | 116
-rw-r--r--  arch/hexagon/mm/copy_from_user.S     | 114
-rw-r--r--  arch/hexagon/mm/copy_to_user.S       |  92
-rw-r--r--  arch/hexagon/mm/copy_user_template.S | 185
-rw-r--r--  arch/hexagon/mm/strnlen_user.S       | 139
-rw-r--r--  arch/hexagon/mm/uaccess.c            |  59
6 files changed, 705 insertions(+), 0 deletions(-)
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
new file mode 100644
index 000000000000..7e706eadbf0a
--- /dev/null
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -0,0 +1,116 @@
+/*
+ * User memory access support for Hexagon
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_UACCESS_H
+#define _ASM_UACCESS_H
+/*
+ * User space memory access functions
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/segment.h>
+#include <asm/sections.h>
+
+/*
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
+ *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ *        to write to a block, it is always safe to read from it.
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block *may* be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * User address space in Hexagon, like x86, goes to 0xbfffffff, so the
+ * simple MSB-based tests used by MIPS won't work.  Some further
+ * optimization is probably possible here, but for now, keep it
+ * reasonably simple and not *too* slow.  After all, we've got the
+ * MMU for backup.
+ */
+#define VERIFY_READ	0
+#define VERIFY_WRITE	1
+
+#define __access_ok(addr, size) \
+	((get_fs().seg == KERNEL_DS.seg) || \
+	(((unsigned long)addr < get_fs().seg) && \
+	  (unsigned long)size < (get_fs().seg - (unsigned long)addr)))
+
+/*
+ * When a kernel-mode page fault is taken, the faulting instruction
+ * address is checked against a table of exception_table_entries.
+ * Each entry is a tuple of the address of an instruction that may
+ * be authorized to fault, and the address at which execution should
+ * be resumed instead of the faulting instruction, so as to effect
+ * a workaround.
+ */
+
+/*  Assembly somewhat optimized copy routines  */
+unsigned long __copy_from_user_hexagon(void *to, const void __user *from,
+				       unsigned long n);
+unsigned long __copy_to_user_hexagon(void __user *to, const void *from,
+				     unsigned long n);
+
+#define __copy_from_user(to, from, n) __copy_from_user_hexagon(to, from, n)
+#define __copy_to_user(to, from, n) __copy_to_user_hexagon(to, from, n)
+
+/*
+ * XXX todo: some additional performance gain is possible by
+ * implementing __copy_to/from_user_inatomic, which is much
+ * like __copy_to/from_user, but performs slightly less checking.
+ */
+
+__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
+#define __clear_user(a, s) __clear_user_hexagon((a), (s))
+
+#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n)
+
+/*  get around the ifndef in asm-generic/uaccess.h  */
+#define __strnlen_user __strnlen_user
+
+extern long __strnlen_user(const char __user *src, long n);
+
+static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
+					     long n);
+
+#include <asm-generic/uaccess.h>
+
+/*  Todo:  an actual accelerated version of this.  */
+static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
+					     long n)
+{
+	long res = __strnlen_user(src, n);
+
+	/* return from strnlen can't be zero -- that would be rubbish. */
+
+	if (res > n) {
+		copy_from_user(dst, src, n);
+		return n;
+	} else {
+		copy_from_user(dst, src, res);
+		return res-1;
+	}
+}
+
+#endif
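
The __access_ok() test above is written to avoid integer overflow: instead of
comparing addr + size against the segment limit (a sum that can wrap), it first
requires addr < seg and then size < seg - addr, a subtraction that cannot
underflow. A minimal C sketch of the same check, outside the patch, with
illustrative names ('seg' standing in for get_fs().seg, 'kernel_seg' for
KERNEL_DS.seg):

    #include <stdbool.h>

    /* Overflow-safe user range check, modeled on __access_ok() above. */
    static bool range_ok(unsigned long addr, unsigned long size,
                         unsigned long seg, unsigned long kernel_seg)
    {
            if (seg == kernel_seg)          /* KERNEL_DS: no restriction */
                    return true;
            /* addr < seg makes seg - addr safe to compute; size < seg - addr
             * then keeps addr + size below seg without ever forming the sum,
             * which could wrap around zero for hostile inputs. */
            return addr < seg && size < seg - addr;
    }
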
diff --git a/arch/hexagon/mm/copy_from_user.S b/arch/hexagon/mm/copy_from_user.S
new file mode 100644
index 000000000000..8eb1d4d61a3d
--- /dev/null
+++ b/arch/hexagon/mm/copy_from_user.S
@@ -0,0 +1,114 @@
+/*
+ * User memory copy functions for kernel
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The right way to do this involves valignb.
+ * The easy way to do this only speeds up the case where src and dest
+ * have similar alignment.
+ */
+
+/*
+ * Copy to/from user are the same, except that for packets with a load and
+ * a store, I don't know how to tell which kind of exception we got.
+ * Therefore, we duplicate the function, and handle faulting addresses
+ * differently for each function.
+ */
+
+/*
+ * copy from user: loads can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME __copy_from_user_hexagon
+#include "copy_user_template.S"
+
+	/* LOAD FAULTS from COPY_FROM_USER */
+
+	/* Alignment loop.  r2 has been updated.  Return it. */
+	.falign
+1009:
+2009:
+4009:
+	{
+		r0 = r2
+		jumpr r31
+	}
+	/* Normal copy loops.  Do epilog.  Use src-src_sav to compute distance */
+	/* X - (A - B) == X + B - A */
+	.falign
+8089:
+	{
+		memd(dst) = d_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+	.falign
+4089:
+	{
+		memw(dst) = w_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+	.falign
+2089:
+	{
+		memh(dst) = w_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+	.falign
+1089:
+	{
+		memb(dst) = w_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+
+	/* COPY FROM USER: only loads can fail */
+
+	.section __ex_table,"a"
+	.long 1000b,1009b
+	.long 2000b,2009b
+	.long 4000b,4009b
+	.long 8080b,8089b
+	.long 4080b,4089b
+	.long 2080b,2089b
+	.long 1080b,1089b
+	.previous
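
Each numbered fault label pairs with an __ex_table entry: if a load at, say,
label 8080 (the 8-byte loop in the template) faults, execution resumes at 8089,
which stores the last good double-word and computes how much was left to copy.
At that point r2 still holds the byte count from loop entry while src has
advanced, so the remainder is bytes - (src - src_sav), computed as
bytes + src_sav - src per the "X - (A - B) == X + B - A" comment. A C model of
that epilog arithmetic (names are illustrative, not part of the patch):

    /* Model of the copy_from_user fixup epilog: given the count at loop
     * entry, the saved start pointer, and the advanced source pointer at
     * the fault, return the number of bytes left uncopied. */
    static unsigned long bytes_uncopied(unsigned long bytes,
                                        const char *src_sav, const char *src)
    {
            /* bytes - (src - src_sav), i.e. bytes + src_sav - src */
            return bytes - (unsigned long)(src - src_sav);
    }
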
diff --git a/arch/hexagon/mm/copy_to_user.S b/arch/hexagon/mm/copy_to_user.S
new file mode 100644
index 000000000000..cb9740ed9e7d
--- /dev/null
+++ b/arch/hexagon/mm/copy_to_user.S
@@ -0,0 +1,92 @@
+/*
+ * User memory copying routines for the Hexagon Kernel
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/* The right way to do this involves valignb.
+ * The easy way to do this only speeds up the case where src and dest
+ * have similar alignment.
+ */
+
+/*
+ * Copy to/from user are the same, except that for packets with a load and
+ * a store, I don't know how to tell which kind of exception we got.
+ * Therefore, we duplicate the function, and handle faulting addresses
+ * differently for each function.
+ */
+
+/*
+ * copy to user: stores can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME __copy_to_user_hexagon
+#include "copy_user_template.S"
+
+	/* STORE FAULTS from COPY_TO_USER */
+	.falign
+1109:
+2109:
+4109:
+	/* Alignment loop.  r2 has been updated.  Return it. */
+	{
+		r0 = r2
+		jumpr r31
+	}
+	/* Normal copy loops.  Use dst-dst_sav to compute distance */
+	/* dst holds best write, no need to unwind any loops */
+	/* X - (A - B) == X + B - A */
+	.falign
+8189:
+8199:
+4189:
+4199:
+2189:
+2199:
+1189:
+1199:
+	{
+		r2 += sub(dst_sav,dst)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+
+	/* COPY TO USER: only stores can fail */
+	.section __ex_table,"a"
+	.long 1100b,1109b
+	.long 2100b,2109b
+	.long 4100b,4109b
+	.long 8180b,8189b
+	.long 8190b,8199b
+	.long 4180b,4189b
+	.long 4190b,4199b
+	.long 2180b,2189b
+	.long 2190b,2199b
+	.long 1180b,1189b
+	.long 1190b,1199b
+	.previous
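
On the store side nothing needs to be drained: a faulting store means dst still
points at the first byte not successfully written ("dst holds best write"), so
all the fault labels share a single epilog returning bytes + dst_sav - dst.
Callers of either routine see the usual kernel convention: the return value is
the number of bytes that could not be copied, zero on success. A sketch of a
typical caller under that convention (names are illustrative):

    /* Hand a kernel buffer to user space; returns 0 or -EFAULT. */
    static long give_to_user(void __user *uptr, const void *kbuf, size_t len)
    {
            if (copy_to_user(uptr, kbuf, len))
                    return -EFAULT; /* some tail of the buffer was not written */
            return 0;
    }
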
diff --git a/arch/hexagon/mm/copy_user_template.S b/arch/hexagon/mm/copy_user_template.S
new file mode 100644
index 000000000000..08d7d7b23daa
--- /dev/null
+++ b/arch/hexagon/mm/copy_user_template.S
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/* Numerology:
+ * WXYZ
+ * W: width in bytes
+ * X: Load=0, Store=1
+ * Y: Location 0=preamble,8=loop,9=epilog
+ * Z: Location=0,handler=9
+ */
+	.text
+	.global FUNCNAME
+	.type FUNCNAME, @function
+	.p2align 5
+FUNCNAME:
+	{
+		p0 = cmp.gtu(bytes,#0)
+		if (!p0.new) jump:nt .Ldone
+		r3 = or(dst,src)
+		r4 = xor(dst,src)
+	}
+	{
+		p1 = cmp.gtu(bytes,#15)
+		p0 = bitsclr(r3,#7)
+		if (!p0.new) jump:nt .Loop_not_aligned_8
+		src_dst_sav = combine(src,dst)
+	}
+
+	{
+		loopcount = lsr(bytes,#3)
+		if (!p1) jump .Lsmall
+	}
+	p3=sp1loop0(.Loop8,loopcount)
+.Loop8:
+8080:
+8180:
+	{
+		if (p3) memd(dst++#8) = d_dbuf
+		d_dbuf = memd(src++#8)
+	}:endloop0
+8190:
+	{
+		memd(dst++#8) = d_dbuf
+		bytes -= asl(loopcount,#3)
+		jump .Lsmall
+	}
+
+.Loop_not_aligned_8:
+	{
+		p0 = bitsclr(r4,#7)
+		if (p0.new) jump:nt .Lalign
+	}
+	{
+		p0 = bitsclr(r3,#3)
+		if (!p0.new) jump:nt .Loop_not_aligned_4
+		p1 = cmp.gtu(bytes,#7)
+	}
+
+	{
+		if (!p1) jump .Lsmall
+		loopcount = lsr(bytes,#2)
+	}
+	p3=sp1loop0(.Loop4,loopcount)
+.Loop4:
+4080:
+4180:
+	{
+		if (p3) memw(dst++#4) = w_dbuf
+		w_dbuf = memw(src++#4)
+	}:endloop0
+4190:
+	{
+		memw(dst++#4) = w_dbuf
+		bytes -= asl(loopcount,#2)
+		jump .Lsmall
+	}
+
+.Loop_not_aligned_4:
+	{
+		p0 = bitsclr(r3,#1)
+		if (!p0.new) jump:nt .Loop_not_aligned
+		p1 = cmp.gtu(bytes,#3)
+	}
+
+	{
+		if (!p1) jump .Lsmall
+		loopcount = lsr(bytes,#1)
+	}
+	p3=sp1loop0(.Loop2,loopcount)
+.Loop2:
+2080:
+2180:
+	{
+		if (p3) memh(dst++#2) = w_dbuf
+		w_dbuf = memuh(src++#2)
+	}:endloop0
+2190:
+	{
+		memh(dst++#2) = w_dbuf
+		bytes -= asl(loopcount,#1)
+		jump .Lsmall
+	}
+
+.Loop_not_aligned: /* Works for as small as one byte */
+	p3=sp1loop0(.Loop1,bytes)
+.Loop1:
+1080:
+1180:
+	{
+		if (p3) memb(dst++#1) = w_dbuf
+		w_dbuf = memub(src++#1)
+	}:endloop0
+	/* Done */
+1190:
+	{
+		memb(dst) = w_dbuf
+		jumpr r31
+		r0 = #0
+	}
+
+.Lsmall:
+	{
+		p0 = cmp.gtu(bytes,#0)
+		if (p0.new) jump:nt .Loop_not_aligned
+	}
+.Ldone:
+	{
+		r0 = #0
+		jumpr r31
+	}
+	.falign
+.Lalign:
+1000:
+	{
+		if (p0.new) w_dbuf = memub(src)
+		p0 = tstbit(src,#0)
+		if (!p1) jump .Lsmall
+	}
+1100:
+	{
+		if (p0) memb(dst++#1) = w_dbuf
+		if (p0) bytes = add(bytes,#-1)
+		if (p0) src = add(src,#1)
+	}
+2000:
+	{
+		if (p0.new) w_dbuf = memuh(src)
+		p0 = tstbit(src,#1)
+		if (!p1) jump .Lsmall
+	}
+2100:
+	{
+		if (p0) memh(dst++#2) = w_dbuf
+		if (p0) bytes = add(bytes,#-2)
+		if (p0) src = add(src,#2)
+	}
+4000:
+	{
+		if (p0.new) w_dbuf = memw(src)
+		p0 = tstbit(src,#2)
+		if (!p1) jump .Lsmall
+	}
+4100:
+	{
+		if (p0) memw(dst++#4) = w_dbuf
+		if (p0) bytes = add(bytes,#-4)
+		if (p0) src = add(src,#4)
+		jump FUNCNAME
+	}
+	.size FUNCNAME,.-FUNCNAME
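
The main loops use the sp1loop0 software-pipelining idiom: the hardware loop is
set up so that predicate p3 is false only on the first iteration, so the first
pass performs just the load; every later iteration stores the datum loaded on
the previous pass while fetching the next, and the post-loop packet (label
8190, 4190, 2190) drains the final value. A C model of the 8-byte variant,
assuming loopcount >= 1 as the assembly guarantees (the loop is entered only
when bytes > 15):

    /* C model of the sp1loop0-pipelined double-word copy (.Loop8). */
    static void copy8_model(unsigned long long *dst,
                            const unsigned long long *src,
                            unsigned long loopcount)
    {
            unsigned long long d_dbuf = 0;
            int p3 = 0;                       /* false on the first pass */

            while (loopcount--) {
                    if (p3)
                            *dst++ = d_dbuf;  /* store the previous load */
                    d_dbuf = *src++;          /* load for the next pass */
                    p3 = 1;
            }
            *dst = d_dbuf;                    /* epilog store (label 8190) */
    }
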
diff --git a/arch/hexagon/mm/strnlen_user.S b/arch/hexagon/mm/strnlen_user.S
new file mode 100644
index 000000000000..5c6a16c7c72a
--- /dev/null
+++ b/arch/hexagon/mm/strnlen_user.S
@@ -0,0 +1,139 @@
+/*
+ * User string length functions for kernel
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#define isrc r0
+#define max r1		/* Do not change! */
+
+#define end r2
+#define tmp1 r3
+
+#define obo r6		/* off-by-one */
+#define start r7
+#define mod8 r8
+#define dbuf r15:14
+#define dcmp r13:12
+
+/*
+ * The vector mask version of this turned out *really* badly.
+ * The hardware loop version also turned out *really* badly.
+ * Seems straight pointer arithmetic basically wins here.
+ */
+
+#define fname __strnlen_user
+
+	.text
+	.global fname
+	.type fname, @function
+	.p2align 5	/* why? */
+fname:
+	{
+		mod8 = and(isrc,#7);
+		end = add(isrc,max);
+		start = isrc;
+	}
+	{
+		P0 = cmp.eq(mod8,#0);
+		mod8 = and(end,#7);
+		dcmp = #0;
+		if (P0.new) jump:t dw_loop;	/* fire up the oven */
+	}
+
+alignment_loop:
+fail_1:	{
+		tmp1 = memb(start++#1);
+	}
+	{
+		P0 = cmp.eq(tmp1,#0);
+		if (P0.new) jump:nt exit_found;
+		P1 = cmp.gtu(end,start);
+		mod8 = and(start,#7);
+	}
+	{
+		if (!P1) jump exit_error;	/* hit the end */
+		P0 = cmp.eq(mod8,#0);
+	}
+	{
+		if (!P0) jump alignment_loop;
+	}
+
+
+
+dw_loop:
+fail_2:	{
+		dbuf = memd(start);
+		obo = add(start,#1);
+	}
+	{
+		P0 = vcmpb.eq(dbuf,dcmp);
+	}
+	{
+		tmp1 = P0;
+		P0 = cmp.gtu(end,start);
+	}
+	{
+		tmp1 = ct0(tmp1);
+		mod8 = and(end,#7);
+		if (!P0) jump end_check;
+	}
+	{
+		P0 = cmp.eq(tmp1,#32);
+		if (!P0.new) jump:nt exit_found;
+		if (!P0.new) start = add(obo,tmp1);
+	}
+	{
+		start = add(start,#8);
+		jump dw_loop;
+	}	/* might be nice to combine these jumps... */
+
+
+end_check:
+	{
+		P0 = cmp.gt(tmp1,mod8);
+		if (P0.new) jump:nt exit_error;	/* neverfound! */
+		start = add(obo,tmp1);
+	}
+
+exit_found:
+	{
+		R0 = sub(start,isrc);
+		jumpr R31;
+	}
+
+exit_error:
+	{
+		R0 = add(max,#1);
+		jumpr R31;
+	}
+
+	/* Uh, what does the "fixup" return here? */
+	.falign
+fix_1:
+	{
+		R0 = #0;
+		jumpr R31;
+	}
+
+	.size fname,.-fname
+
+
+.section __ex_table,"a"
+.long fail_1,fix_1
+.long fail_2,fix_1
+.previous
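
dw_loop scans eight bytes per pass: vcmpb.eq compares each byte of the
double-word against zero and sets one predicate bit per matching byte;
transferring the predicate to a register and applying ct0 (count trailing
zeros) then yields the index of the first NUL, or 32 if no byte matched. A
portable C sketch of that scan, little-endian and illustrative only:

    /* Find the first zero byte in an 8-byte chunk, or return 8 if none.
     * Models the vcmpb.eq + ct0 sequence in dw_loop. */
    static int first_nul(const unsigned char p[8])
    {
            unsigned mask = 0;      /* bit i set iff p[i] == 0 */
            int i;

            for (i = 0; i < 8; i++)
                    if (p[i] == 0)
                            mask |= 1u << i;
            /* ct0: trailing-zero count gives the lowest set bit's index */
            return mask ? __builtin_ctz(mask) : 8;
    }
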
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
new file mode 100644
index 000000000000..e748108b47a7
--- /dev/null
+++ b/arch/hexagon/mm/uaccess.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * Support for user memory access from kernel.  This will
+ * probably be inlined for performance at some point, but
+ * for ease of debug, and to a lesser degree for code size,
+ * we implement them here as subroutines.
+ */
+#include <linux/types.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+/*
+ * For clear_user(), exploit the previously defined copy_to_user function
+ * and the fact that we've got a handy zero page defined in kernel/head.S
+ *
+ * dczero here would be even faster.
+ */
+__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
+{
+	long uncleared;
+
+	while (count > PAGE_SIZE) {
+		uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
+						PAGE_SIZE);
+		if (uncleared)
+			return count - (PAGE_SIZE - uncleared);
+		count -= PAGE_SIZE;
+		dest += PAGE_SIZE;
+	}
+	if (count)
+		count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
+
+	return count;
+}
+
+unsigned long clear_user_hexagon(void __user *dest, unsigned long count)
+{
+	if (!access_ok(VERIFY_WRITE, dest, count))
+		return count;
+	else
+		return __clear_user_hexagon(dest, count);
+}
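
__clear_user_hexagon() zeroes user memory a page at a time by copying from
empty_zero_page and follows the same convention as the copy routines: the
return value is the number of bytes not cleared. On a partial failure inside a
chunk, PAGE_SIZE - uncleared bytes of that chunk did get cleared, hence the
count - (PAGE_SIZE - uncleared) adjustment. A sketch of a caller relying on
that convention (names are illustrative):

    /* Zero a user buffer; returns 0 on success, -EFAULT otherwise. */
    static long wipe_user_buffer(void __user *ubuf, unsigned long len)
    {
            unsigned long left = clear_user_hexagon(ubuf, len);

            return left ? -EFAULT : 0;  /* 'left' bytes were not cleared */
    }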