author      Nick Piggin <npiggin@suse.de>                               2007-10-16 04:24:40 -0400
committer   Linus Torvalds <torvalds@woody.linux-foundation.org>       2007-10-16 12:42:53 -0400
commit      557ed1fa2620dc119adb86b34c614e152a629a80 (patch)
tree        d00b31a7f197583c2bd8fffa1fd135fbbb5d6abc /drivers
parent      aadb4bc4a1f9108c1d0fbd121827c936c2ed4217 (diff)
remove ZERO_PAGE
The commit b5810039a54e5babf428e9a1e89fc1940fabff11 contains the note

    A last caveat: the ZERO_PAGE is now refcounted and managed with rmap (and thus mapcounted and count towards shared rss). These writes to the struct page could cause excessive cacheline bouncing on big systems. There are a number of ways this could be addressed if it is an issue.

And indeed this cacheline bouncing has shown up on large SGI systems. There was a situation where an Altix system was essentially livelocked tearing down ZERO_PAGE pagetables when an HPC app aborted during startup. This situation can be avoided in userspace, but it does highlight the potential scalability problem with refcounting ZERO_PAGE, and corner cases where it can really hurt (we don't want the system to livelock!).

There are several broad ways to fix this problem:

1. add back some special casing to avoid refcounting ZERO_PAGE
2. per-node or per-cpu ZERO_PAGES
3. remove the ZERO_PAGE completely

I will argue for 3. The others should also fix the problem, but they result in more complex code than does 3, with little or no real benefit that I can see.

Why? Inserting a ZERO_PAGE for anonymous read faults appears to be a false optimisation: if an application is performance critical, it would not be doing many read faults of new memory, or at least it could be expected to write to that memory soon afterwards. If cache or memory use is critical, it should not be working with a significant number of ZERO_PAGEs anyway (a more compact representation of zeroes should be used).

As a sanity check: measuring on my desktop system, there are never many mappings to the ZERO_PAGE (e.g. 2 or 3), thus memory usage here should not increase much without it.

When running a make -j4 kernel compile on my dual core system, there are about 1,000 mappings to the ZERO_PAGE created per second, but about 1,000 ZERO_PAGE COW faults per second (less than 1 ZERO_PAGE mapping per second is torn down without being COWed). So removing ZERO_PAGE will save 1,000 page faults per second when running kbuild, while keeping it only saves less than 1 page clearing operation per second. 1 page clear is cheaper than a thousand faults, presumably, so there isn't an obvious loss.

Neither the logical argument nor these basic tests give a guarantee of no regressions. However, this is a reasonable opportunity to try to remove the ZERO_PAGE from the pagefault path. If it is found to cause regressions, we can reintroduce it and just avoid refcounting it.

The /dev/zero ZERO_PAGE usage and TLB tricks also get nuked. I don't see much use to them except on benchmarks. All other users of ZERO_PAGE are converted just to use ZERO_PAGE(0) for simplicity. We can look at replacing them all and maybe ripping out ZERO_PAGE completely when we are more satisfied with this solution.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus "snif" Torvalds <torvalds@linux-foundation.org>
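The read-then-write pattern behind the kbuild numbers above can be observed from userspace. Below is a minimal sketch, not part of this patch, using only standard POSIX calls (mmap, getrusage, sysconf): it maps anonymous memory, reads every page, then writes every page, and reports minor-fault counts. With a refcounted ZERO_PAGE the read pass maps the zero page and the write pass takes an extra COW fault per page; with it removed, the read pass already installs a writable zeroed page and the write pass faults no further. Exact counts depend on the kernel version and are only illustrative.

/*
 * Hypothetical test program, not part of the patch: counts the minor
 * faults taken by a read pass and then a write pass over fresh
 * anonymous memory, via getrusage().  Results vary by kernel version.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>

static long minor_faults(void)
{
        struct rusage ru;

        getrusage(RUSAGE_SELF, &ru);
        return ru.ru_minflt;
}

int main(void)
{
        const size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
        const size_t npages = 1024;
        volatile char *p;
        long before, after_read, after_write;
        size_t i;

        p = mmap(NULL, npages * pagesz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        before = minor_faults();
        for (i = 0; i < npages; i++)    /* read faults on fresh anonymous memory */
                (void)p[i * pagesz];
        after_read = minor_faults();
        for (i = 0; i < npages; i++)    /* COW faults only if ZERO_PAGE was mapped */
                p[i * pagesz] = 1;
        after_write = minor_faults();

        printf("read faults: %ld, write faults: %ld\n",
               after_read - before, after_write - after_read);
        return 0;
}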
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/char/mem.c    125
1 file changed, 17 insertions(+), 108 deletions(-)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index bbee97ff355f..64551ab6be03 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -625,65 +625,10 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
         return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
 }
 
-#ifdef CONFIG_MMU
-/*
- * For fun, we are using the MMU for this.
- */
-static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
-{
-        struct mm_struct *mm;
-        struct vm_area_struct * vma;
-        unsigned long addr=(unsigned long)buf;
-
-        mm = current->mm;
-        /* Oops, this was forgotten before. -ben */
-        down_read(&mm->mmap_sem);
-
-        /* For private mappings, just map in zero pages. */
-        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
-                unsigned long count;
-
-                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
-                        goto out_up;
-                if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
-                        break;
-                count = vma->vm_end - addr;
-                if (count > size)
-                        count = size;
-
-                zap_page_range(vma, addr, count, NULL);
-                if (zeromap_page_range(vma, addr, count, PAGE_COPY))
-                        break;
-
-                size -= count;
-                buf += count;
-                addr += count;
-                if (size == 0)
-                        goto out_up;
-        }
-
-        up_read(&mm->mmap_sem);
-
-        /* The shared case is hard. Let's do the conventional zeroing. */
-        do {
-                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
-                if (unwritten)
-                        return size + unwritten - PAGE_SIZE;
-                cond_resched();
-                buf += PAGE_SIZE;
-                size -= PAGE_SIZE;
-        } while (size);
-
-        return size;
-out_up:
-        up_read(&mm->mmap_sem);
-        return size;
-}
-
 static ssize_t read_zero(struct file * file, char __user * buf,
                          size_t count, loff_t *ppos)
 {
-        unsigned long left, unwritten, written = 0;
+        size_t written;
 
         if (!count)
                 return 0;
@@ -691,69 +636,33 @@ static ssize_t read_zero(struct file * file, char __user * buf,
         if (!access_ok(VERIFY_WRITE, buf, count))
                 return -EFAULT;
 
-        left = count;
-
-        /* do we want to be clever? Arbitrary cut-off */
-        if (count >= PAGE_SIZE*4) {
-                unsigned long partial;
+        written = 0;
+        while (count) {
+                unsigned long unwritten;
+                size_t chunk = count;
 
-                /* How much left of the page? */
-                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
-                unwritten = clear_user(buf, partial);
-                written = partial - unwritten;
-                if (unwritten)
-                        goto out;
-                left -= partial;
-                buf += partial;
-                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
-                written += (left & PAGE_MASK) - unwritten;
+                if (chunk > PAGE_SIZE)
+                        chunk = PAGE_SIZE;      /* Just for latency reasons */
+                unwritten = clear_user(buf, chunk);
+                written += chunk - unwritten;
                 if (unwritten)
-                        goto out;
-                buf += left & PAGE_MASK;
-                left &= ~PAGE_MASK;
-        }
-        unwritten = clear_user(buf, left);
-        written += left - unwritten;
-out:
-        return written ? written : -EFAULT;
-}
-
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
-{
-        int err;
-
-        if (vma->vm_flags & VM_SHARED)
-                return shmem_zero_setup(vma);
-        err = zeromap_page_range(vma, vma->vm_start,
-                        vma->vm_end - vma->vm_start, vma->vm_page_prot);
-        BUG_ON(err == -EEXIST);
-        return err;
-}
-#else /* CONFIG_MMU */
-static ssize_t read_zero(struct file * file, char * buf,
-                         size_t count, loff_t *ppos)
-{
-        size_t todo = count;
-
-        while (todo) {
-                size_t chunk = todo;
-
-                if (chunk > 4096)
-                        chunk = 4096;   /* Just for latency reasons */
-                if (clear_user(buf, chunk))
-                        return -EFAULT;
+                        break;
                 buf += chunk;
-                todo -= chunk;
+                count -= chunk;
                 cond_resched();
-        }
-        return count;
+        }
+        return written ? written : -EFAULT;
 }
 
 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
 {
+#ifndef CONFIG_MMU
         return -ENOSYS;
+#endif
         if (vma->vm_flags & VM_SHARED)
                 return shmem_zero_setup(vma);
+        return 0;
 }
-#endif /* CONFIG_MMU */
 
 static ssize_t write_full(struct file * file, const char __user * buf,
                           size_t count, loff_t *ppos)
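For reference, a small userspace sketch (not part of the patch) that exercises the two /dev/zero paths touched above: read(2), now served by the plain clear_user() loop in read_zero(), and a MAP_SHARED mapping, which is still backed by shmem via shmem_zero_setup(). A MAP_PRIVATE mapping of /dev/zero is simply ordinary anonymous memory faulted in on demand. All calls used are standard POSIX; the program itself is hypothetical.

/*
 * Hypothetical userspace exercise of /dev/zero: the read path returns
 * zeroed bytes, and a MAP_SHARED mapping is writable, backed by a fresh
 * shmem object created by shmem_zero_setup().
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        char buf[8192];
        char *shared;
        int fd = open("/dev/zero", O_RDWR);

        if (fd < 0)
                return 1;

        /* read path: every byte comes back as zero */
        memset(buf, 0xff, sizeof(buf));
        if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
                return 1;
        printf("read: first byte %d, last byte %d\n",
               buf[0], buf[sizeof(buf) - 1]);

        /* mmap path: a shared mapping of /dev/zero is shmem-backed */
        shared = mmap(NULL, sizeof(buf), PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, 0);
        if (shared == MAP_FAILED)
                return 1;
        shared[0] = 42;         /* writable; visible through this mapping only */
        printf("mmap: shared[0] = %d\n", shared[0]);

        munmap(shared, sizeof(buf));
        close(fd);
        return 0;
}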