Diffstat (limited to 'drivers/char/mem.c')
-rw-r--r--  drivers/char/mem.c  125
1 file changed, 17 insertions, 108 deletions
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index bbee97ff355f..64551ab6be03 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -625,65 +625,10 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
 	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
 }
 
-#ifdef CONFIG_MMU
-/*
- * For fun, we are using the MMU for this.
- */
-static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
-{
-	struct mm_struct *mm;
-	struct vm_area_struct * vma;
-	unsigned long addr=(unsigned long)buf;
-
-	mm = current->mm;
-	/* Oops, this was forgotten before. -ben */
-	down_read(&mm->mmap_sem);
-
-	/* For private mappings, just map in zero pages. */
-	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
-		unsigned long count;
-
-		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
-			goto out_up;
-		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
-			break;
-		count = vma->vm_end - addr;
-		if (count > size)
-			count = size;
-
-		zap_page_range(vma, addr, count, NULL);
-		if (zeromap_page_range(vma, addr, count, PAGE_COPY))
-			break;
-
-		size -= count;
-		buf += count;
-		addr += count;
-		if (size == 0)
-			goto out_up;
-	}
-
-	up_read(&mm->mmap_sem);
-
-	/* The shared case is hard. Let's do the conventional zeroing. */
-	do {
-		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
-		if (unwritten)
-			return size + unwritten - PAGE_SIZE;
-		cond_resched();
-		buf += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	} while (size);
-
-	return size;
-out_up:
-	up_read(&mm->mmap_sem);
-	return size;
-}
-
 static ssize_t read_zero(struct file * file, char __user * buf,
 			 size_t count, loff_t *ppos)
 {
-	unsigned long left, unwritten, written = 0;
+	size_t written;
 
 	if (!count)
 		return 0;
@@ -691,69 +636,33 @@ static ssize_t read_zero(struct file * file, char __user * buf,
 	if (!access_ok(VERIFY_WRITE, buf, count))
 		return -EFAULT;
 
-	left = count;
-
-	/* do we want to be clever? Arbitrary cut-off */
-	if (count >= PAGE_SIZE*4) {
-		unsigned long partial;
-
-		/* How much left of the page? */
-		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
-		unwritten = clear_user(buf, partial);
-		written = partial - unwritten;
-		if (unwritten)
-			goto out;
-		left -= partial;
-		buf += partial;
-		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
-		written += (left & PAGE_MASK) - unwritten;
-		if (unwritten)
-			goto out;
-		buf += left & PAGE_MASK;
-		left &= ~PAGE_MASK;
-	}
-	unwritten = clear_user(buf, left);
-	written += left - unwritten;
-out:
-	return written ? written : -EFAULT;
-}
-
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
-{
-	int err;
-
-	if (vma->vm_flags & VM_SHARED)
-		return shmem_zero_setup(vma);
-	err = zeromap_page_range(vma, vma->vm_start,
-			vma->vm_end - vma->vm_start, vma->vm_page_prot);
-	BUG_ON(err == -EEXIST);
-	return err;
-}
-#else /* CONFIG_MMU */
-static ssize_t read_zero(struct file * file, char * buf,
-			 size_t count, loff_t *ppos)
-{
-	size_t todo = count;
-
-	while (todo) {
-		size_t chunk = todo;
-
-		if (chunk > 4096)
-			chunk = 4096;	/* Just for latency reasons */
-		if (clear_user(buf, chunk))
-			return -EFAULT;
+	written = 0;
+	while (count) {
+		unsigned long unwritten;
+		size_t chunk = count;
+
+		if (chunk > PAGE_SIZE)
+			chunk = PAGE_SIZE;	/* Just for latency reasons */
+		unwritten = clear_user(buf, chunk);
+		written += chunk - unwritten;
+		if (unwritten)
+			break;
 		buf += chunk;
-		todo -= chunk;
+		count -= chunk;
 		cond_resched();
 	}
-	return count;
+	return written ? written : -EFAULT;
 }
 
 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
 {
+#ifndef CONFIG_MMU
 	return -ENOSYS;
+#endif
+	if (vma->vm_flags & VM_SHARED)
+		return shmem_zero_setup(vma);
+	return 0;
 }
-#endif /* CONFIG_MMU */
 
 static ssize_t write_full(struct file * file, const char __user * buf,
 			 size_t count, loff_t *ppos)
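
For context: the hunks above drop the MMU-only read_zero_pagealigned() fast path (which zapped and re-mapped zero pages via zeromap_page_range()) in favour of one clear_user() loop shared by MMU and no-MMU builds, and merge the two mmap_zero() variants into a single function, with MAP_PRIVATE mappings now left to ordinary zero-fill handling. Below is a minimal userspace sketch, not part of the patch, that exercises both touched paths on /dev/zero; the buffer size (assuming 4K pages) and the assert()-based checking are illustrative choices only.

/* Illustrative userspace test, not part of the patch. */
#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char buf[2 * 4096];	/* two pages, assuming 4K pages */
	char *p;
	ssize_t n;
	int fd = open("/dev/zero", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* read_zero(): the kernel zeroes the user buffer with clear_user()
	 * in PAGE_SIZE chunks, rescheduling between chunks; the caller
	 * simply sees a normal read() returning the full count. */
	memset(buf, 0xff, sizeof(buf));
	n = read(fd, buf, sizeof(buf));
	assert(n == (ssize_t)sizeof(buf));
	assert(buf[0] == 0 && buf[sizeof(buf) - 1] == 0);

	/* mmap_zero(): with the patch, a MAP_PRIVATE mapping returns 0
	 * from the driver and is handled as ordinary zero-fill memory;
	 * MAP_SHARED still goes through shmem_zero_setup(). */
	p = mmap(NULL, sizeof(buf), PROT_READ | PROT_WRITE,
		 MAP_PRIVATE, fd, 0);
	assert(p != MAP_FAILED);
	assert(p[0] == 0 && p[sizeof(buf) - 1] == 0);
	p[0] = 1;	/* copy-on-write; other mappings still see zeroes */

	munmap(p, sizeof(buf));
	close(fd);
	puts("ok");
	return 0;
}

Build with, e.g., cc -o zerotest zerotest.c and run on a CONFIG_MMU kernel; on no-MMU kernels mmap() of /dev/zero fails with ENOSYS, matching the #ifndef CONFIG_MMU branch in the merged mmap_zero() above.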