-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 154
 1 file changed, 125 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 514a1d508035..b4d38cb65f17 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -354,27 +354,54 @@ static int spufs_pipe_open(struct inode *inode, struct file *file)
 	return nonseekable_open(inode, file);
 }
 
+/*
+ * Read as many bytes from the mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ */
 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	u32 mbox_data;
-	int ret;
+	u32 mbox_data, __user *udata;
+	ssize_t count;
 
 	if (len < 4)
 		return -EINVAL;
 
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	udata = (void __user *)buf;
+
 	spu_acquire(ctx);
-	ret = ctx->ops->mbox_read(ctx, &mbox_data);
+	for (count = 0; (count + 4) <= len; count += 4, udata++) {
+		int ret;
+		ret = ctx->ops->mbox_read(ctx, &mbox_data);
+		if (ret == 0)
+			break;
+
+		/*
+		 * at the end of the mapped area, we can fault
+		 * but still need to return the data we have
+		 * read successfully so far.
+		 */
+		ret = __put_user(mbox_data, udata);
+		if (ret) {
+			if (!count)
+				count = -EFAULT;
+			break;
+		}
+	}
 	spu_release(ctx);
 
-	if (!ret)
-		return -EAGAIN;
-
-	if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
-		return -EFAULT;
+	if (!count)
+		count = -EAGAIN;
 
-	return 4;
+	return count;
 }
 
 static struct file_operations spufs_mbox_fops = {
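
The rewritten spufs_mbox_read() above checks the whole user buffer once with access_ok() and then copies one 32-bit word per __put_user(), so a fault at the end of a mapped area still returns the words read so far, and an empty mailbox yields -EAGAIN. Below is a minimal userspace sketch of a consumer (not part of the patch; the /spu/myctx mount point and context name are assumptions):

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t data[16];
	ssize_t n;
	int fd;

	/* hypothetical spufs context directory */
	fd = open("/spu/myctx/mbox", O_RDONLY);
	if (fd < 0)
		return 1;

	/* the kernel returns a multiple of 4 bytes, or -1/EAGAIN if empty */
	n = read(fd, data, sizeof(data));
	if (n < 0 && errno == EAGAIN)
		printf("mailbox empty\n");
	else
		for (ssize_t i = 0; i * 4 < n; i++)
			printf("mbox word %zd: 0x%08x\n", i, (unsigned int)data[i]);

	close(fd);
	return 0;
}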
@@ -430,36 +457,70 @@ void spufs_ibox_callback(struct spu *spu)
 	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
 }
 
+/*
+ * Read as many bytes from the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * any data is available, but return when we have been able to
+ * read something.
+ */
 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	u32 ibox_data;
-	ssize_t ret;
+	u32 ibox_data, __user *udata;
+	ssize_t count;
 
 	if (len < 4)
 		return -EINVAL;
 
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	udata = (void __user *)buf;
+
 	spu_acquire(ctx);
 
-	ret = 0;
+	/* wait only for the first element */
+	count = 0;
 	if (file->f_flags & O_NONBLOCK) {
 		if (!spu_ibox_read(ctx, &ibox_data))
-			ret = -EAGAIN;
+			count = -EAGAIN;
 	} else {
-		ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
+		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
 	}
+	if (count)
+		goto out;
 
-	spu_release(ctx);
+	/* if we can't write at all, return -EFAULT */
+	count = __put_user(ibox_data, udata);
+	if (count)
+		goto out;
 
-	if (ret)
-		return ret;
+	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+		int ret;
+		ret = ctx->ops->ibox_read(ctx, &ibox_data);
+		if (ret == 0)
+			break;
+		/*
+		 * at the end of the mapped area, we can fault
+		 * but still need to return the data we have
+		 * read successfully so far.
+		 */
+		ret = __put_user(ibox_data, udata);
+		if (ret)
+			break;
+	}
 
-	ret = 4;
-	if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
-		ret = -EFAULT;
+out:
+	spu_release(ctx);
 
-	return ret;
+	return count;
 }
 
 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
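
spufs_ibox_read() now sleeps (via spufs_wait(), unless O_NONBLOCK is set) only until the first word arrives, then drains whatever else is already pending without blocking again. A hedged userspace sketch of that usage, pairing poll() with a non-blocking read; the /spu/myctx path is again an assumption, not something defined by this patch:

#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t word[8];
	struct pollfd pfd;
	int fd;

	fd = open("/spu/myctx/ibox", O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* wait for the first interrupt mailbox entry ... */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		/* ... then read as many words as are queued right now */
		ssize_t n = read(fd, word, sizeof(word));

		for (ssize_t i = 0; i * 4 < n; i++)
			printf("ibox word %zd: 0x%08x\n", i, (unsigned int)word[i]);
	}

	close(fd);
	return 0;
}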
@@ -532,32 +593,67 @@ void spufs_wbox_callback(struct spu *spu)
 	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
 }
 
+/*
+ * Write as many bytes to the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - the mailbox is full
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * space is available, but return when we have been able to
+ * write something.
+ */
 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	u32 wbox_data;
-	int ret;
+	u32 wbox_data, __user *udata;
+	ssize_t count;
 
 	if (len < 4)
 		return -EINVAL;
 
-	if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
+	udata = (void __user *)buf;
+	if (!access_ok(VERIFY_READ, buf, len))
+		return -EFAULT;
+
+	if (__get_user(wbox_data, udata))
 		return -EFAULT;
 
 	spu_acquire(ctx);
 
-	ret = 0;
+	/*
+	 * make sure we can at least write one element, by waiting
+	 * in case of !O_NONBLOCK
+	 */
+	count = 0;
 	if (file->f_flags & O_NONBLOCK) {
 		if (!spu_wbox_write(ctx, wbox_data))
-			ret = -EAGAIN;
+			count = -EAGAIN;
 	} else {
-		ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
+		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
 	}
 
-	spu_release(ctx);
+	if (count)
+		goto out;
 
-	return ret ? ret : sizeof wbox_data;
+	/* write as much as possible */
+	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+		int ret;
+		ret = __get_user(wbox_data, udata);
+		if (ret)
+			break;
+
+		ret = spu_wbox_write(ctx, wbox_data);
+		if (ret == 0)
+			break;
+	}
+
+out:
+	spu_release(ctx);
+	return count;
 }
 
 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
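
spufs_wbox_write() mirrors the read side: it waits (or returns -EAGAIN) until the first word fits, then feeds further words through __get_user() and spu_wbox_write() until the mailbox fills or the buffer ends, returning the number of bytes actually queued. A speculative userspace sketch that copes with the resulting short writes; the path and context name are assumptions:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint32_t msg[4] = { 1, 2, 3, 4 };
	size_t done = 0;
	int fd;

	fd = open("/spu/myctx/wbox", O_WRONLY);
	if (fd < 0)
		return 1;

	/* a blocking write waits until at least one word fits, but may
	 * queue fewer bytes than requested, so retry until all are sent */
	while (done < sizeof(msg)) {
		ssize_t n = write(fd, (const char *)msg + done, sizeof(msg) - done);
		if (n <= 0)
			break;
		done += n;
	}

	close(fd);
	return done == sizeof(msg) ? 0 : 1;
}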