author		Ian Munsie <imunsie@au1.ibm.com>	2014-10-08 04:54:50 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2014-10-08 05:14:54 -0400
commit		e83d01697583d8610d1d62279758c2a881e3396f (patch)
tree		167a30304a5b48debcb66955efdf5f68808c1998 /arch
parent		60666de2dac99777631d0df64257d7fd6a5118fe (diff)
powerpc/cell: Move spu_handle_mm_fault() out of cell platform
Currently spu_handle_mm_fault() is in the cell platform.

This code is generically useful for other non-cell co-processors on powerpc.

This patch moves this function out of the cell platform into arch/powerpc/mm
so that others may use it.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
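For orientation only, a minimal sketch of how a non-cell co-processor driver
might call the relocated helper. struct copro_dev, its mm field and
copro_dev_resolve_fault() are hypothetical names invented for illustration;
only copro_handle_mm_fault() and its signature come from this patch.

#include <linux/mm_types.h>
#include <asm/copro.h>

/* Hypothetical per-device state for an illustrative co-processor. */
struct copro_dev {
	struct mm_struct *mm;	/* mm of the task that attached the device */
};

/* Resolve a translation fault reported by the (hypothetical) hardware. */
static int copro_dev_resolve_fault(struct copro_dev *dev, unsigned long ea,
				   unsigned long dsisr)
{
	unsigned flt = 0;
	int ret;

	/* Fault the page in on behalf of the co-processor, much as
	 * spufs_handle_class1() does after this patch. */
	ret = copro_handle_mm_fault(dev->mm, ea, dsisr, &flt);
	if (ret)
		return ret;	/* address not mapped or access not permitted */

	/* On success, flt carries the VM_FAULT_* result for the driver's
	 * fault statistics; the driver would then restart the access. */
	return 0;
}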
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/Kconfig						|  4
-rw-r--r--	arch/powerpc/include/asm/copro.h				| 16
-rw-r--r--	arch/powerpc/include/asm/spu.h					|  5
-rw-r--r--	arch/powerpc/mm/Makefile					|  1
-rw-r--r--	arch/powerpc/mm/copro_fault.c (renamed from arch/powerpc/platforms/cell/spu_fault.c)	| 14
-rw-r--r--	arch/powerpc/platforms/cell/Kconfig				|  1
-rw-r--r--	arch/powerpc/platforms/cell/Makefile				|  2
-rw-r--r--	arch/powerpc/platforms/cell/spufs/fault.c			|  4
8 files changed, 33 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 98ae8b714d31..88eace4e28c3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -608,6 +608,10 @@ config PPC_SUBPAGE_PROT
 	  to set access permissions (read/write, readonly, or no access)
 	  on the 4k subpages of each 64k page.
 
+config PPC_COPRO_BASE
+	bool
+	default n
+
 config SCHED_SMT
 	bool "SMT (Hyperthreading) scheduler support"
 	depends on PPC64 && SMP
diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
new file mode 100644
index 000000000000..51cae85a50b4
--- /dev/null
+++ b/arch/powerpc/include/asm/copro.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_COPRO_H
+#define _ASM_POWERPC_COPRO_H
+
+int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+			  unsigned long dsisr, unsigned *flt);
+
+#endif /* _ASM_POWERPC_COPRO_H */
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 37b7ca39ec9f..a6e6e2bf9d15 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -27,6 +27,8 @@
 #include <linux/workqueue.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
+#include <asm/reg.h>
+#include <asm/copro.h>
 
 #define LS_SIZE (256 * 1024)
 #define LS_ADDR_MASK (LS_SIZE - 1)
@@ -277,9 +279,6 @@ void spu_remove_dev_attr(struct device_attribute *attr);
 int spu_add_dev_attr_group(struct attribute_group *attrs);
 void spu_remove_dev_attr_group(struct attribute_group *attrs);
 
-int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
-		unsigned long dsisr, unsigned *flt);
-
 /*
  * Notifier blocks:
  *
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index d0130fff20e5..325e861616a1 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o
 obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage-prot.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
+obj-$(CONFIG_PPC_COPRO_BASE)	+= copro_fault.o
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/mm/copro_fault.c
index 641e7273d75a..ba7df14c6b82 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -1,5 +1,5 @@
 /*
- * SPU mm fault handler
+ * CoProcessor (SPU/AFU) mm fault handler
  *
  * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
  *
@@ -23,16 +23,14 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/export.h>
-
-#include <asm/spu.h>
-#include <asm/spu_csa.h>
+#include <asm/reg.h>
 
 /*
  * This ought to be kept in sync with the powerpc specific do_page_fault
  * function. Currently, there are a few corner cases that we haven't had
  * to handle fortunately.
  */
-int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 		unsigned long dsisr, unsigned *flt)
 {
 	struct vm_area_struct *vma;
@@ -58,12 +56,12 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 		goto out_unlock;
 	}
 
-	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+	is_write = dsisr & DSISR_ISSTORE;
 	if (is_write) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto out_unlock;
 	} else {
-		if (dsisr & MFC_DSISR_ACCESS_DENIED)
+		if (dsisr & DSISR_PROTFAULT)
 			goto out_unlock;
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto out_unlock;
@@ -91,4 +89,4 @@ out_unlock:
 	up_read(&mm->mmap_sem);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(spu_handle_mm_fault);
+EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 9978f594cac0..870b6dbd4d18 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -86,6 +86,7 @@ config SPU_FS_64K_LS
 config SPU_BASE
 	bool
 	default n
+	select PPC_COPRO_BASE
 
 config CBE_RAS
 	bool "RAS features for bare metal Cell BE"
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index fe053e7c73ee..2d16884f67b9 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -20,7 +20,7 @@ spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
 
 obj-$(CONFIG_SPU_BASE)			+= spu_callbacks.o spu_base.o \
 					   spu_notify.o \
-					   spu_syscalls.o spu_fault.o \
+					   spu_syscalls.o \
 					   $(spu-priv1-y) \
 					   $(spu-manage-y) \
 					   spufs/
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index 8cb6260cc80f..e45894a08118 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -138,7 +138,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	if (ctx->state == SPU_STATE_RUNNABLE)
 		ctx->spu->stats.hash_flt++;
 
-	/* we must not hold the lock when entering spu_handle_mm_fault */
+	/* we must not hold the lock when entering copro_handle_mm_fault */
 	spu_release(ctx);
 
 	access = (_PAGE_PRESENT | _PAGE_USER);
@@ -149,7 +149,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 
 	/* hashing failed, so try the actual fault handler */
 	if (ret)
-		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
+		ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);
 
 	/*
 	 * This is nasty: we need the state_mutex for all the bookkeeping even