author	Jeremy Kerr <jk@ozlabs.org>	2009-02-16 19:44:14 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-02-22 18:48:58 -0500
commit	60ee031940c1b09c137b617a8829e2f081961fe0 (patch)
tree	eb61ad956dc3b6e2e4e5f7434451fddc97cda160 /arch
parent	13870b657578bcce167978ee93dc02bf54e3beb0 (diff)
powerpc/spufs: Use correct return value for spu_handle_mm_fault
Currently, spu_handle_mm_fault disregards the 'ret' variable and always
returns -EFAULT on error.

This change refactors spu_handle_mm_fault a little, to return the ret
variable as appropriate. This allows us to combine the error and success
paths.

Also, remove the #if-0-ed IS_VALID_EA() check; it has never been used.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
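The combined paths follow the common single-exit idiom: once the lock is
taken, initialise ret to the failure value, overwrite it on success, and
funnel every exit through one unlock label. A minimal standalone sketch of
that idiom, using a hypothetical table type and a pthreads rwlock in place
of the kernel's mmap_sem (illustration only, not spufs code):

    #include <errno.h>
    #include <pthread.h>

    /* Hypothetical lookup structure, for illustration only. */
    struct table {
            pthread_rwlock_t lock;
            int valid;
            int value;
    };

    static int lookup_locked(struct table *t, int *out)
    {
            int ret;

            pthread_rwlock_rdlock(&t->lock);
            ret = -EFAULT;          /* assume failure until proven otherwise */

            if (!t->valid)
                    goto out_unlock;        /* error path: ret is already set */

            *out = t->value;
            ret = 0;                /* success simply overwrites the default */

    out_unlock:
            pthread_rwlock_unlock(&t->lock);        /* one unlock serves both paths */
            return ret;
    }

Every early exit jumps to the same label, so the unlock cannot be missed
and the function reports whichever error was most recently recorded.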
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/platforms/cell/spu_fault.c	| 48
1 file changed, 22 insertions(+), 26 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
index c8b1cd42905..95d8dadf2d8 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -39,60 +39,56 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	unsigned long is_write;
 	int ret;
 
-#if 0
-	if (!IS_VALID_EA(ea)) {
+	if (mm == NULL)
 		return -EFAULT;
-	}
-#endif /* XXX */
-	if (mm == NULL) {
-		return -EFAULT;
-	}
-	if (mm->pgd == NULL) {
+
+	if (mm->pgd == NULL)
 		return -EFAULT;
-	}
 
 	down_read(&mm->mmap_sem);
+	ret = -EFAULT;
 	vma = find_vma(mm, ea);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= ea)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, ea))
-		goto bad_area;
-good_area:
+		goto out_unlock;
+
+	if (ea < vma->vm_start) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			goto out_unlock;
+		if (expand_stack(vma, ea))
+			goto out_unlock;
+	}
+
 	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
 	if (is_write) {
 		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
+			goto out_unlock;
 	} else {
 		if (dsisr & MFC_DSISR_ACCESS_DENIED)
-			goto bad_area;
+			goto out_unlock;
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-			goto bad_area;
+			goto out_unlock;
 	}
+
 	ret = 0;
 	*flt = handle_mm_fault(mm, vma, ea, is_write);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
-			goto bad_area;
+			goto out_unlock;
 		} else if (*flt & VM_FAULT_SIGBUS) {
 			ret = -EFAULT;
-			goto bad_area;
+			goto out_unlock;
 		}
 		BUG();
 	}
+
 	if (*flt & VM_FAULT_MAJOR)
 		current->maj_flt++;
 	else
 		current->min_flt++;
-	up_read(&mm->mmap_sem);
-	return ret;
 
-bad_area:
+out_unlock:
 	up_read(&mm->mmap_sem);
-	return -EFAULT;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(spu_handle_mm_fault);
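Why propagating 'ret' matters to callers: before this patch every failure
came back as -EFAULT, so a caller could not tell memory pressure from a
genuinely bad access. A hedged sketch of a caller making that distinction
(hypothetical function, not from this commit; the last two parameters of
spu_handle_mm_fault are assumed from the 2009-era prototype):

    /* Hypothetical caller sketch, not part of this commit. */
    static int service_mfc_fault(struct mm_struct *mm, unsigned long ea,
                                 unsigned long dsisr)
    {
            unsigned flt;
            int ret;

            ret = spu_handle_mm_fault(mm, ea, dsisr, &flt);
            if (ret == -ENOMEM)
                    return ret;     /* memory pressure: potentially transient */
            if (ret)
                    return ret;     /* -EFAULT: genuinely invalid access */

            /* fault resolved; the MFC transfer can be restarted */
            return 0;
    }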