author	Christoph Hellwig <hch@lst.de>	2008-02-07 23:50:41 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-02-08 03:52:35 -0500
commit	eebead5b8ff89340dc18ceec996157d0eb7d0287 (patch)
tree	65b88470bb8b2b14ea839234eba4c2ca3b60e587 /arch/powerpc/platforms/cell
parent	592a607bbc053bc6f614a0e619326009f4b3829e (diff)
[POWERPC] spufs: Fix state_mutex leaks

Fix various state_mutex leaks. The worst one was introduced by the
interruptible state_mutex conversion, but there have been a few before
too. Notably, spufs_wait now returns without the state_mutex held when
returning an error, which actually cleans up some code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luke Browning <lukebrowning@us.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
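The convention the patch settles on is easiest to see from a caller's point of view. Below is an illustrative sketch, not part of the patch: spufs_example_read and its udata parameter are made up, but spu_acquire, spufs_wait, spu_ibox_read and spu_release are the real spufs primitives, used the way the patched file.c uses them.

/*
 * Illustrative sketch only -- not from this patch.  It shows the error
 * handling convention the patch establishes for spufs_wait() callers:
 * on success state_mutex is held and must be dropped via spu_release();
 * on -ERESTARTSYS the mutex has already been dropped, so the caller
 * must skip spu_release().
 */
static ssize_t spufs_example_read(struct spu_context *ctx,
				  u32 __user *udata)
{
	u32 data;
	ssize_t ret;

	ret = spu_acquire(ctx);		/* interruptible; may fail */
	if (ret)
		goto out;		/* mutex was never taken */

	ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &data));
	if (ret)
		goto out;		/* spufs_wait already dropped the mutex */

	ret = __put_user(data, udata);	/* work done with state_mutex held */
	if (ret)
		goto out_unlock;
	ret = 4;

out_unlock:
	spu_release(ctx);		/* only for paths still holding the mutex */
out:
	return ret;
}

The two labels are the heart of the cleanup: out_unlock serves failures that still hold state_mutex, while out serves paths where the mutex was never taken or was already dropped.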
Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/fault.c	12
-rw-r--r--	arch/powerpc/platforms/cell/spufs/file.c	49
-rw-r--r--	arch/powerpc/platforms/cell/spufs/run.c	9
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h	5
4 files changed, 45 insertions, 30 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index eff4d291ba85..e46d300e21a5 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -108,7 +108,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	u64 ea, dsisr, access;
 	unsigned long flags;
 	unsigned flt = 0;
-	int ret, ret2;
+	int ret;
 
 	/*
 	 * dar and dsisr get passed from the registers
@@ -148,13 +148,10 @@ int spufs_handle_class1(struct spu_context *ctx)
 	ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
 
 	/*
-	 * If spu_acquire fails due to a pending signal we just want to return
-	 * EINTR to userspace even if that means missing the dma restart or
-	 * updating the page fault statistics.
+	 * This is nasty: we need the state_mutex for all the bookkeeping even
+	 * if the syscall was interrupted by a signal. ewww.
 	 */
-	ret2 = spu_acquire(ctx);
-	if (ret2)
-		goto out;
+	mutex_lock(&ctx->state_mutex);
 
 	/*
 	 * Clear dsisr under ctxt lock after handling the fault, so that
@@ -185,7 +182,6 @@ int spufs_handle_class1(struct spu_context *ctx)
 	} else
 		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
 
-out:
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 	return ret;
 }
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 1018acd1746b..e4ab9d5a86f0 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -358,6 +358,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 {
 	struct spu_context *ctx = vma->vm_file->private_data;
 	unsigned long area, offset = address - vma->vm_start;
+	int ret = 0;
 
 	spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);
 
@@ -379,7 +380,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	if (ctx->state == SPU_STATE_SAVED) {
 		up_read(&current->mm->mmap_sem);
 		spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
-		spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
 		spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
 		down_read(&current->mm->mmap_sem);
 	} else {
@@ -388,7 +389,8 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 		spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
 	}
 
-	spu_release(ctx);
+	if (!ret)
+		spu_release(ctx);
 	return NOPFN_REFAULT;
 }
 
@@ -755,23 +757,25 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 
 	count = spu_acquire(ctx);
 	if (count)
-		return count;
+		goto out;
 
 	/* wait only for the first element */
 	count = 0;
 	if (file->f_flags & O_NONBLOCK) {
-		if (!spu_ibox_read(ctx, &ibox_data))
+		if (!spu_ibox_read(ctx, &ibox_data)) {
 			count = -EAGAIN;
+			goto out_unlock;
+		}
 	} else {
 		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
+		if (count)
+			goto out;
 	}
-	if (count)
-		goto out;
 
 	/* if we can't write at all, return -EFAULT */
 	count = __put_user(ibox_data, udata);
 	if (count)
-		goto out;
+		goto out_unlock;
 
 	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
 		int ret;
@@ -788,9 +792,9 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 		break;
 	}
 
-out:
+out_unlock:
 	spu_release(ctx);
-
+out:
 	return count;
 }
 
@@ -905,7 +909,7 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 
 	count = spu_acquire(ctx);
 	if (count)
-		return count;
+		goto out;
 
 	/*
 	 * make sure we can at least write one element, by waiting
@@ -913,14 +917,16 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 	 */
 	count = 0;
 	if (file->f_flags & O_NONBLOCK) {
-		if (!spu_wbox_write(ctx, wbox_data))
+		if (!spu_wbox_write(ctx, wbox_data)) {
 			count = -EAGAIN;
+			goto out_unlock;
+		}
 	} else {
 		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
+		if (count)
+			goto out;
 	}
 
-	if (count)
-		goto out;
 
 	/* write as much as possible */
 	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
@@ -934,8 +940,9 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 		break;
 	}
 
-out:
+out_unlock:
 	spu_release(ctx);
+out:
 	return count;
 }
 
@@ -1598,12 +1605,11 @@ static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
 	} else {
 		ret = spufs_wait(ctx->mfc_wq,
 			   spufs_read_mfc_tagstatus(ctx, &status));
+		if (ret)
+			goto out;
 	}
 	spu_release(ctx);
 
-	if (ret)
-		goto out;
-
 	ret = 4;
 	if (copy_to_user(buffer, &status, 4))
 		ret = -EFAULT;
@@ -1732,6 +1738,8 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
 		int status;
 		ret = spufs_wait(ctx->mfc_wq,
 				 spu_send_mfc_command(ctx, cmd, &status));
+		if (ret)
+			goto out;
 		if (status)
 			ret = status;
 	}
@@ -1785,7 +1793,7 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id)
 
 	ret = spu_acquire(ctx);
 	if (ret)
-		return ret;
+		goto out;
 #if 0
 /* this currently hangs */
 	ret = spufs_wait(ctx->mfc_wq,
@@ -1794,12 +1802,13 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id)
 		goto out;
 	ret = spufs_wait(ctx->mfc_wq,
 		   ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
-out:
+	if (ret)
+		goto out;
#else
 	ret = 0;
#endif
 	spu_release(ctx);
-
+out:
 	return ret;
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index b4814c740d8a..e9c61a1a8f98 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -354,8 +354,15 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 
 	do {
 		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			/*
+			 * This is nasty: we need the state_mutex for all the
+			 * bookkeeping even if the syscall was interrupted by
+			 * a signal. ewww.
+			 */
+			mutex_lock(&ctx->state_mutex);
 			break;
+		}
 		spu = ctx->spu;
 		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
 						&ctx->sched_flags))) {
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 795a1b52538b..2c2fe3c07d72 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -268,6 +268,9 @@ extern char *isolated_loader;
  * Same as wait_event_interruptible(), except that here
  * we need to call spu_release(ctx) before sleeping, and
  * then spu_acquire(ctx) when awoken.
+ *
+ * Returns with state_mutex re-acquired when successful or
+ * with -ERESTARTSYS and the state_mutex dropped when interrupted.
  */
 
 #define spufs_wait(wq, condition)					\
@@ -278,11 +281,11 @@ extern char *isolated_loader;
 		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE);	\
 		if (condition)						\
 			break;						\
+		spu_release(ctx);					\
 		if (signal_pending(current)) {				\
 			__ret = -ERESTARTSYS;				\
 			break;						\
 		}							\
-		spu_release(ctx);					\
 		schedule();						\
 		__ret = spu_acquire(ctx);				\
 		if (__ret)						\
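With the patch applied, the core of the macro reads as below. This is reassembled from the hunk above; the ({ ... }) wrapper, the __ret and __wait declarations, and the finish_wait() call are outside this diff and are filled in from the surrounding macro body, so treat them as assumed context. The point of the reordering is that spu_release() now runs before the signal check: the -ERESTARTSYS exit leaves state_mutex dropped, while a satisfied condition still breaks out with the mutex held, exactly as the new comment documents.

#define spufs_wait(wq, condition)					\
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	for (;;) {							\
		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)		/* exit with mutex held */	\
			break;						\
		spu_release(ctx);	/* drop before exit or sleep */	\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;	/* mutex dropped */	\
			break;						\
		}							\
		schedule();						\
		__ret = spu_acquire(ctx);				\
		if (__ret)		/* re-acquire failed: dropped */\
			break;						\
	}								\
	finish_wait(&(wq), &__wait);					\
	__ret;								\
})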