about summary refs log tree commit diff stats
path: root/arch/powerpc/mm/copro_fault.c
diff options
context:
space:
mode:
author	Ian Munsie <imunsie@au1.ibm.com>	2014-10-08 04:54:51 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2014-10-08 05:14:55 -0400
commit	73d16a6e0e51990cbe13f8d8f43bd5329bbab30a (patch)
tree	d151865457d0653072ee8dd2fbc23fb84f05fe23 /arch/powerpc/mm/copro_fault.c
parent	e83d01697583d8610d1d62279758c2a881e3396f (diff)
powerpc/cell: Move data segment faulting code out of cell platform
__spu_trap_data_seg() currently contains code to determine the VSID and ESID required for a particular EA and mm struct. This code is generically useful for other co-processors. This moves the code out of the cell platform so it can be used by other powerpc code. It also adds 1TB segment handling which Cell didn't support. The new function is called copro_calculate_slb(). This also moves the internal struct spu_slb to a generic struct copro_slb which is now used in the Cell and copro code. We use this new struct instead of passing around esid and vsid parameters. Signed-off-by: Ian Munsie <imunsie@au1.ibm.com> Signed-off-by: Michael Neuling <mikey@neuling.org> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/mm/copro_fault.c')
-rw-r--r--arch/powerpc/mm/copro_fault.c46
1 files changed, 46 insertions, 0 deletions
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index ba7df14c6b82..a15a23efc0e2 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -24,6 +24,7 @@
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/export.h> 25#include <linux/export.h>
26#include <asm/reg.h> 26#include <asm/reg.h>
27#include <asm/copro.h>
27 28
28/* 29/*
29 * This ought to be kept in sync with the powerpc specific do_page_fault 30 * This ought to be kept in sync with the powerpc specific do_page_fault
@@ -90,3 +91,48 @@ out_unlock:
90 return ret; 91 return ret;
91} 92}
92EXPORT_SYMBOL_GPL(copro_handle_mm_fault); 93EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
94
95int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
96{
97 u64 vsid;
98 int psize, ssize;
99
100 slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
101
102 switch (REGION_ID(ea)) {
103 case USER_REGION_ID:
104 pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
105 psize = get_slice_psize(mm, ea);
106 ssize = user_segment_size(ea);
107 vsid = get_vsid(mm->context.id, ea, ssize);
108 break;
109 case VMALLOC_REGION_ID:
110 pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
111 if (ea < VMALLOC_END)
112 psize = mmu_vmalloc_psize;
113 else
114 psize = mmu_io_psize;
115 ssize = mmu_kernel_ssize;
116 vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
117 break;
118 case KERNEL_REGION_ID:
119 pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
120 psize = mmu_linear_psize;
121 ssize = mmu_kernel_ssize;
122 vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
123 break;
124 default:
125 pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
126 return 1;
127 }
128
129 vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER;
130
131 vsid |= mmu_psize_defs[psize].sllp |
132 ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
133
134 slb->vsid = vsid;
135
136 return 0;
137}
138EXPORT_SYMBOL_GPL(copro_calculate_slb);