diff options
Diffstat (limited to 'arch/powerpc/lib/vmx-helper.c')
-rw-r--r-- | arch/powerpc/lib/vmx-helper.c | 74 |
1 files changed, 74 insertions, 0 deletions
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c new file mode 100644 index 000000000000..3cf529ceec5b --- /dev/null +++ b/arch/powerpc/lib/vmx-helper.c | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) IBM Corporation, 2011 | ||
17 | * | ||
18 | * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com> | ||
19 | * Anton Blanchard <anton@au.ibm.com> | ||
20 | */ | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <linux/hardirq.h> | ||
23 | #include <asm/switch_to.h> | ||
24 | |||
/*
 * Claim the VMX unit for a user-space copy routine.
 *
 * Returns 1 when VMX may be used, 0 when the caller must fall back to
 * the normal non-VMX copy (we were entered from interrupt context).
 */
int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	/*
	 * pagefault_disable() also acts as preempt_disable(), which is
	 * what enable_kernel_altivec() below needs.  Page faults must be
	 * off because a fault could schedule() and lose the VMX context;
	 * with them disabled, a faulting access simply fails and the
	 * caller falls back to the normal non-VMX copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}
42 | |||
/*
 * Release the VMX unit after a user-space copy.
 *
 * Must return 0: __copy_tofrom_user_power7 tail-call optimises its calls
 * here, so this return value becomes the copy routine's success result.
 */
int exit_vmx_usercopy(void)
{
	/* Undo enter_vmx_usercopy(): re-enable page faults (and preemption). */
	pagefault_enable();
	return 0;
}
52 | |||
/*
 * Claim the VMX unit for an in-kernel memcpy.
 *
 * Returns 1 when VMX may be used, 0 when the caller must fall back to
 * the non-VMX copy (we were entered from interrupt context).
 */
int enter_vmx_copy(void)
{
	if (in_interrupt())
		return 0;

	/*
	 * Unlike the usercopy path, only preemption needs to be disabled
	 * here to keep the VMX context ours while we use it.
	 */
	preempt_disable();

	enable_kernel_altivec();

	return 1;
}
64 | |||
/*
 * Release the VMX unit after an in-kernel memcpy.
 *
 * All calls to this function are optimised into tail calls: we are
 * handed the destination pointer and hand it back, as a memcpy
 * implementation is required to do.
 */
void *exit_vmx_copy(void *dest)
{
	/* Undo enter_vmx_copy(). */
	preempt_enable();
	return dest;
}