about summary refs log tree commit diff stats
path: root/arch/sh64/mm/extable.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sh64/mm/extable.c
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/sh64/mm/extable.c')
-rw-r--r--arch/sh64/mm/extable.c81
1 files changed, 81 insertions, 0 deletions
diff --git a/arch/sh64/mm/extable.c b/arch/sh64/mm/extable.c
new file mode 100644
index 000000000000..9da50e28b3fa
--- /dev/null
+++ b/arch/sh64/mm/extable.c
@@ -0,0 +1,81 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/mm/extable.c
7 *
8 * Copyright (C) 2003 Richard Curnow
9 * Copyright (C) 2003, 2004 Paul Mundt
10 *
11 * Cloned from the 2.5 SH version..
12 */
13#include <linux/config.h>
14#include <linux/rwsem.h>
15#include <linux/module.h>
16#include <asm/uaccess.h>
17
18extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
19extern void __copy_user_fixup(void);
20
/*
 * Catch-all exception-table entry for the fast copy_user_memcpy routine:
 * any fault taken inside that routine is redirected to __copy_user_fixup,
 * which falls back to a slow byte-at-a-time copy (see the comment above
 * check_exception_ranges below).  The .insn field is deliberately left
 * zero; this entry is never matched by address, only returned explicitly.
 */
static const struct exception_table_entry __copy_user_fixup_ex = {
	.fixup = (unsigned long)&__copy_user_fixup,
};
24
25/* Some functions that may trap due to a bad user-mode address have too many loads
26 and stores in them to make it at all practical to label each one and put them all in
27 the main exception table.
28
29 In particular, the fast memcpy routine is like this. It's fix-up is just to fall back
30 to a slow byte-at-a-time copy, which is handled the conventional way. So it's functionally
31 OK to just handle any trap occurring in the fast memcpy with that fixup. */
32static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
33{
34 if ((addr >= (unsigned long)&copy_user_memcpy) &&
35 (addr <= (unsigned long)&copy_user_memcpy_end))
36 return &__copy_user_fixup_ex;
37
38 return NULL;
39}
40
41/* Simple binary search */
42const struct exception_table_entry *
43search_extable(const struct exception_table_entry *first,
44 const struct exception_table_entry *last,
45 unsigned long value)
46{
47 const struct exception_table_entry *mid;
48
49 mid = check_exception_ranges(value);
50 if (mid)
51 return mid;
52
53 while (first <= last) {
54 long diff;
55
56 mid = (last - first) / 2 + first;
57 diff = mid->insn - value;
58 if (diff == 0)
59 return mid;
60 else if (diff < 0)
61 first = mid+1;
62 else
63 last = mid-1;
64 }
65
66 return NULL;
67}
68
69int fixup_exception(struct pt_regs *regs)
70{
71 const struct exception_table_entry *fixup;
72
73 fixup = search_exception_tables(regs->pc);
74 if (fixup) {
75 regs->pc = fixup->fixup;
76 return 1;
77 }
78
79 return 0;
80}
81