author     Linus Torvalds <torvalds@linux-foundation.org>   2009-01-09 17:00:58 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-01-09 17:00:58 -0500
commit     c40f6f8bbc4cbd2902671aacd587400ddca62627 (patch)
tree       a991e5521e10943f4457fb7f494e00aec75cc7df /fs
parent     1a7d0f0bec4be078ce2cfb11538c0f4ffbbed8e5 (diff)
parent     cb6ff208076b5f434db1b8c983429269d719cef5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-nommu
* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-nommu:
NOMMU: Support XIP on initramfs
NOMMU: Teach kobjsize() about VMA regions.
FLAT: Don't attempt to expand the userspace stack to fill the space allocated
FDPIC: Don't attempt to expand the userspace stack to fill the space allocated
NOMMU: Improve procfs output using per-MM VMAs
NOMMU: Make mmap allocation page trimming behaviour configurable.
NOMMU: Make VMAs per MM as for MMU-mode linux
NOMMU: Delete askedalloc and realalloc variables
NOMMU: Rename ARM's struct vm_region
NOMMU: Fix cleanup handling in ramfs_nommu_get_unmapped_area()
Diffstat (limited to 'fs')
-rw-r--r--  fs/binfmt_elf_fdpic.c  |  35
-rw-r--r--  fs/binfmt_flat.c       |  34
-rw-r--r--  fs/proc/internal.h     |   2
-rw-r--r--  fs/proc/meminfo.c      |   6
-rw-r--r--  fs/proc/nommu.c        |  71
-rw-r--r--  fs/proc/task_nommu.c   | 120
-rw-r--r--  fs/ramfs/file-nommu.c  |  21
7 files changed, 149 insertions, 140 deletions
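
The common thread in the diffs below is that NOMMU kernels now keep a struct vm_area_struct per mapping on each mm_struct's mm_rb tree, backed by shareable struct vm_region objects, instead of chaining vm_list_struct entries off mm->context.vmlist. A minimal sketch of the iteration pattern the fs/proc code switches to, condensed from the task_vsize() hunk further down (kernel-style C using the 2.6.29-era names in this diff; nommu_total_mapped() is an invented name, not part of the patch):

    #include <linux/mm.h>
    #include <linux/mm_types.h>
    #include <linux/rbtree.h>

    /* Walk every mapping of an mm the way the reworked task_vsize() does:
     * an ordinary rb-tree traversal of mm->mm_rb under mmap_sem. */
    static unsigned long nommu_total_mapped(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;
            struct rb_node *p;
            unsigned long total = 0;

            down_read(&mm->mmap_sem);
            for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                    vma = rb_entry(p, struct vm_area_struct, vm_rb);
                    total += vma->vm_end - vma->vm_start;
            }
            up_read(&mm->mmap_sem);
            return total;
    }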
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index aa5b43205e37..f3e72c5c19f5 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -168,9 +168,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
         struct elf_fdpic_params exec_params, interp_params;
         struct elf_phdr *phdr;
         unsigned long stack_size, entryaddr;
-#ifndef CONFIG_MMU
-        unsigned long fullsize;
-#endif
 #ifdef ELF_FDPIC_PLAT_INIT
         unsigned long dynaddr;
 #endif
@@ -390,11 +387,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
                 goto error_kill;
         }
 
-        /* expand the stack mapping to use up the entire allocation granule */
-        fullsize = kobjsize((char *) current->mm->start_brk);
-        if (!IS_ERR_VALUE(do_mremap(current->mm->start_brk, stack_size,
-                                    fullsize, 0, 0)))
-                stack_size = fullsize;
         up_write(&current->mm->mmap_sem);
 
         current->mm->brk = current->mm->start_brk;
@@ -1567,11 +1559,9 @@ end_coredump:
 static int elf_fdpic_dump_segments(struct file *file, size_t *size,
                            unsigned long *limit, unsigned long mm_flags)
 {
-        struct vm_list_struct *vml;
-
-        for (vml = current->mm->context.vmlist; vml; vml = vml->next) {
-                struct vm_area_struct *vma = vml->vma;
+        struct vm_area_struct *vma;
 
+        for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
                 if (!maydump(vma, mm_flags))
                         continue;
 
@@ -1617,9 +1607,6 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
         elf_fpxregset_t *xfpu = NULL;
 #endif
         int thread_status_size = 0;
-#ifndef CONFIG_MMU
-        struct vm_list_struct *vml;
-#endif
         elf_addr_t *auxv;
         unsigned long mm_flags;
 
@@ -1685,13 +1672,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
         fill_prstatus(prstatus, current, signr);
         elf_core_copy_regs(&prstatus->pr_reg, regs);
 
-#ifdef CONFIG_MMU
         segs = current->mm->map_count;
-#else
-        segs = 0;
-        for (vml = current->mm->context.vmlist; vml; vml = vml->next)
-                segs++;
-#endif
 #ifdef ELF_CORE_EXTRA_PHDRS
         segs += ELF_CORE_EXTRA_PHDRS;
 #endif
@@ -1766,20 +1747,10 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
         mm_flags = current->mm->flags;
 
         /* write program headers for segments dump */
-        for (
-#ifdef CONFIG_MMU
-                vma = current->mm->mmap; vma; vma = vma->vm_next
-#else
-                vml = current->mm->context.vmlist; vml; vml = vml->next
-#endif
-             ) {
+        for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
                 struct elf_phdr phdr;
                 size_t sz;
 
-#ifndef CONFIG_MMU
-                vma = vml->vma;
-#endif
-
                 sz = vma->vm_end - vma->vm_start;
 
                 phdr.p_type = PT_LOAD;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 7bbd5c6b3725..5cebf0b37798 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -417,8 +417,8 @@ static int load_flat_file(struct linux_binprm * bprm,
         unsigned long textpos = 0, datapos = 0, result;
         unsigned long realdatastart = 0;
         unsigned long text_len, data_len, bss_len, stack_len, flags;
-        unsigned long len, reallen, memp = 0;
-        unsigned long extra, rlim;
+        unsigned long len, memp = 0;
+        unsigned long memp_size, extra, rlim;
         unsigned long *reloc = 0, *rp;
         struct inode *inode;
         int i, rev, relocs = 0;
@@ -543,17 +543,10 @@ static int load_flat_file(struct linux_binprm * bprm,
         }
 
         len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
+        len = PAGE_ALIGN(len);
         down_write(&current->mm->mmap_sem);
         realdatastart = do_mmap(0, 0, len,
                         PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
-        /* Remap to use all availabe slack region space */
-        if (realdatastart && (realdatastart < (unsigned long)-4096)) {
-                reallen = kobjsize((void *)realdatastart);
-                if (reallen > len) {
-                        realdatastart = do_mremap(realdatastart, len,
-                                reallen, MREMAP_FIXED, realdatastart);
-                }
-        }
         up_write(&current->mm->mmap_sem);
 
         if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) {
@@ -591,21 +584,14 @@ static int load_flat_file(struct linux_binprm * bprm,
 
         reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len));
         memp = realdatastart;
-
+        memp_size = len;
         } else {
 
                 len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
+                len = PAGE_ALIGN(len);
                 down_write(&current->mm->mmap_sem);
                 textpos = do_mmap(0, 0, len,
                         PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
-                /* Remap to use all availabe slack region space */
-                if (textpos && (textpos < (unsigned long) -4096)) {
-                        reallen = kobjsize((void *)textpos);
-                        if (reallen > len) {
-                                textpos = do_mremap(textpos, len, reallen,
-                                        MREMAP_FIXED, textpos);
-                        }
-                }
                 up_write(&current->mm->mmap_sem);
 
                 if (!textpos || textpos >= (unsigned long) -4096) {
@@ -622,7 +608,7 @@ static int load_flat_file(struct linux_binprm * bprm,
         reloc = (unsigned long *) (textpos + ntohl(hdr->reloc_start) +
                         MAX_SHARED_LIBS * sizeof(unsigned long));
         memp = textpos;
-
+        memp_size = len;
 #ifdef CONFIG_BINFMT_ZFLAT
         /*
          * load it all in and treat it like a RAM load from now on
@@ -680,10 +666,12 @@ static int load_flat_file(struct linux_binprm * bprm,
          * set up the brk stuff, uses any slack left in data/bss/stack
          * allocation.  We put the brk after the bss (between the bss
          * and stack) like other platforms.
+         * Userspace code relies on the stack pointer starting out at
+         * an address right at the end of a page.
          */
         current->mm->start_brk = datapos + data_len + bss_len;
         current->mm->brk = (current->mm->start_brk + 3) & ~3;
-        current->mm->context.end_brk = memp + kobjsize((void *) memp) - stack_len;
+        current->mm->context.end_brk = memp + memp_size - stack_len;
         }
 
         if (flags & FLAT_FLAG_KTRACE)
@@ -790,8 +778,8 @@ static int load_flat_file(struct linux_binprm * bprm,
 
         /* zero the BSS,  BRK and stack areas */
         memset((void*)(datapos + data_len), 0, bss_len +
-                        (memp + kobjsize((void *) memp) - stack_len -  /* end brk */
+                        (memp + memp_size - stack_len -                /* end brk */
                         libinfo->lib_list[id].start_brk) +             /* start brk */
                         stack_len);
 
         return 0;
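
With the remap-to-slack logic gone, load_flat_file() page-aligns the length it asks do_mmap() for and carries that figure in memp_size instead of querying kobjsize() afterwards; memp_size then determines end_brk and how much of the BSS/brk/stack area is zeroed. A worked example of the end_brk arithmetic with invented numbers (plain userspace C, just to show the calculation):

    #include <stdio.h>

    int main(void)
    {
            /* Invented figures: the page-aligned allocation came to nine
             * 4 KiB pages, the last of which is reserved for the stack. */
            unsigned long memp      = 0x200000;      /* start of the allocation */
            unsigned long memp_size = 9 * 4096;      /* page-aligned mmap length */
            unsigned long stack_len = 4096;          /* requested stack size */

            /* mirrors: current->mm->context.end_brk = memp + memp_size - stack_len; */
            unsigned long end_brk = memp + memp_size - stack_len;

            printf("brk may grow up to %#lx\n", end_brk);
            return 0;
    }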
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 3e8aeb8b61ce..cd53ff838498 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -41,8 +41,6 @@ do { \
         (vmi)->used = 0; \
         (vmi)->largest_chunk = 0; \
 } while(0)
-
-extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);
 #endif
 
 extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index b1675c4e66da..43d23948384a 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -74,6 +74,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                 "LowTotal: %8lu kB\n"
                 "LowFree: %8lu kB\n"
 #endif
+#ifndef CONFIG_MMU
+                "MmapCopy: %8lu kB\n"
+#endif
                 "SwapTotal: %8lu kB\n"
                 "SwapFree: %8lu kB\n"
                 "Dirty: %8lu kB\n"
@@ -116,6 +119,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                 K(i.totalram-i.totalhigh),
                 K(i.freeram-i.freehigh),
 #endif
+#ifndef CONFIG_MMU
+                K((unsigned long) atomic_read(&mmap_pages_allocated)),
+#endif
                 K(i.totalswap),
                 K(i.freeswap),
                 K(global_page_state(NR_FILE_DIRTY)),
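
With the two hunks above, /proc/meminfo on a !CONFIG_MMU kernel gains a MmapCopy line reporting mmap_pages_allocated (pages privately copied to satisfy NOMMU mappings) in kilobytes, placed just before SwapTotal. Illustrative excerpt only; the values are invented:

    ...
    MmapCopy:          784 kB
    SwapTotal:           0 kB
    SwapFree:            0 kB
    Dirty:               0 kB
    ...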
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 3f87d2632947..b446d7ad0b0d 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -33,33 +33,33 @@
 #include "internal.h"
 
 /*
- * display a single VMA to a sequenced file
+ * display a single region to a sequenced file
  */
-int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
+static int nommu_region_show(struct seq_file *m, struct vm_region *region)
 {
         unsigned long ino = 0;
         struct file *file;
         dev_t dev = 0;
         int flags, len;
 
-        flags = vma->vm_flags;
-        file = vma->vm_file;
+        flags = region->vm_flags;
+        file = region->vm_file;
 
         if (file) {
-                struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+                struct inode *inode = region->vm_file->f_path.dentry->d_inode;
                 dev = inode->i_sb->s_dev;
                 ino = inode->i_ino;
         }
 
         seq_printf(m,
                    "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
-                   vma->vm_start,
-                   vma->vm_end,
+                   region->vm_start,
+                   region->vm_end,
                    flags & VM_READ ? 'r' : '-',
                    flags & VM_WRITE ? 'w' : '-',
                    flags & VM_EXEC ? 'x' : '-',
                    flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
-                   ((loff_t)vma->vm_pgoff) << PAGE_SHIFT,
+                   ((loff_t)region->vm_pgoff) << PAGE_SHIFT,
                    MAJOR(dev), MINOR(dev), ino, &len);
 
         if (file) {
@@ -75,61 +75,54 @@ int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
 }
 
 /*
- * display a list of all the VMAs the kernel knows about
+ * display a list of all the REGIONs the kernel knows about
  * - nommu kernals have a single flat list
  */
-static int nommu_vma_list_show(struct seq_file *m, void *v)
+static int nommu_region_list_show(struct seq_file *m, void *_p)
 {
-        struct vm_area_struct *vma;
+        struct rb_node *p = _p;
 
-        vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
-        return nommu_vma_show(m, vma);
+        return nommu_region_show(m, rb_entry(p, struct vm_region, vm_rb));
 }
 
-static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
+static void *nommu_region_list_start(struct seq_file *m, loff_t *_pos)
 {
-        struct rb_node *_rb;
+        struct rb_node *p;
         loff_t pos = *_pos;
-        void *next = NULL;
 
-        down_read(&nommu_vma_sem);
+        down_read(&nommu_region_sem);
 
-        for (_rb = rb_first(&nommu_vma_tree); _rb; _rb = rb_next(_rb)) {
-                if (pos == 0) {
-                        next = _rb;
-                        break;
-                }
-                pos--;
-        }
-
-        return next;
+        for (p = rb_first(&nommu_region_tree); p; p = rb_next(p))
+                if (pos-- == 0)
+                        return p;
+        return NULL;
 }
 
-static void nommu_vma_list_stop(struct seq_file *m, void *v)
+static void nommu_region_list_stop(struct seq_file *m, void *v)
 {
-        up_read(&nommu_vma_sem);
+        up_read(&nommu_region_sem);
 }
 
-static void *nommu_vma_list_next(struct seq_file *m, void *v, loff_t *pos)
+static void *nommu_region_list_next(struct seq_file *m, void *v, loff_t *pos)
 {
         (*pos)++;
         return rb_next((struct rb_node *) v);
 }
 
-static const struct seq_operations proc_nommu_vma_list_seqop = {
-        .start = nommu_vma_list_start,
-        .next = nommu_vma_list_next,
-        .stop = nommu_vma_list_stop,
-        .show = nommu_vma_list_show
+static struct seq_operations proc_nommu_region_list_seqop = {
+        .start = nommu_region_list_start,
+        .next = nommu_region_list_next,
+        .stop = nommu_region_list_stop,
+        .show = nommu_region_list_show
 };
 
-static int proc_nommu_vma_list_open(struct inode *inode, struct file *file)
+static int proc_nommu_region_list_open(struct inode *inode, struct file *file)
 {
-        return seq_open(file, &proc_nommu_vma_list_seqop);
+        return seq_open(file, &proc_nommu_region_list_seqop);
 }
 
-static const struct file_operations proc_nommu_vma_list_operations = {
-        .open = proc_nommu_vma_list_open,
+static const struct file_operations proc_nommu_region_list_operations = {
+        .open = proc_nommu_region_list_open,
         .read = seq_read,
         .llseek = seq_lseek,
         .release = seq_release,
@@ -137,7 +130,7 @@ static const struct file_operations proc_nommu_vma_list_operations = {
 
 static int __init proc_nommu_init(void)
 {
-        proc_create("maps", S_IRUGO, NULL, &proc_nommu_vma_list_operations);
+        proc_create("maps", S_IRUGO, NULL, &proc_nommu_region_list_operations);
         return 0;
 }
 
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index d4a8be32b902..343ea1216bc8 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -15,25 +15,32 @@
  */
 void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
-        struct vm_list_struct *vml;
-        unsigned long bytes = 0, sbytes = 0, slack = 0;
+        struct vm_area_struct *vma;
+        struct vm_region *region;
+        struct rb_node *p;
+        unsigned long bytes = 0, sbytes = 0, slack = 0, size;
 
         down_read(&mm->mmap_sem);
-        for (vml = mm->context.vmlist; vml; vml = vml->next) {
-                if (!vml->vma)
-                        continue;
+        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+                vma = rb_entry(p, struct vm_area_struct, vm_rb);
+
+                bytes += kobjsize(vma);
+
+                region = vma->vm_region;
+                if (region) {
+                        size = kobjsize(region);
+                        size += region->vm_end - region->vm_start;
+                } else {
+                        size = vma->vm_end - vma->vm_start;
+                }
 
-                bytes += kobjsize(vml);
                 if (atomic_read(&mm->mm_count) > 1 ||
-                    atomic_read(&vml->vma->vm_usage) > 1
-                    ) {
-                        sbytes += kobjsize((void *) vml->vma->vm_start);
-                        sbytes += kobjsize(vml->vma);
+                    vma->vm_flags & VM_MAYSHARE) {
+                        sbytes += size;
                 } else {
-                        bytes += kobjsize((void *) vml->vma->vm_start);
-                        bytes += kobjsize(vml->vma);
-                        slack += kobjsize((void *) vml->vma->vm_start) -
-                                (vml->vma->vm_end - vml->vma->vm_start);
+                        bytes += size;
+                        if (region)
+                                slack = region->vm_end - vma->vm_end;
                 }
         }
 
@@ -70,13 +77,14 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 
 unsigned long task_vsize(struct mm_struct *mm)
 {
-        struct vm_list_struct *tbp;
+        struct vm_area_struct *vma;
+        struct rb_node *p;
         unsigned long vsize = 0;
 
         down_read(&mm->mmap_sem);
-        for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
-                if (tbp->vma)
-                        vsize += kobjsize((void *) tbp->vma->vm_start);
+        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+                vma = rb_entry(p, struct vm_area_struct, vm_rb);
+                vsize += vma->vm_end - vma->vm_start;
         }
         up_read(&mm->mmap_sem);
         return vsize;
@@ -85,15 +93,19 @@ unsigned long task_vsize(struct mm_struct *mm)
 int task_statm(struct mm_struct *mm, int *shared, int *text,
                int *data, int *resident)
 {
-        struct vm_list_struct *tbp;
+        struct vm_area_struct *vma;
+        struct vm_region *region;
+        struct rb_node *p;
         int size = kobjsize(mm);
 
         down_read(&mm->mmap_sem);
-        for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
-                size += kobjsize(tbp);
-                if (tbp->vma) {
-                        size += kobjsize(tbp->vma);
-                        size += kobjsize((void *) tbp->vma->vm_start);
+        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+                vma = rb_entry(p, struct vm_area_struct, vm_rb);
+                size += kobjsize(vma);
+                region = vma->vm_region;
+                if (region) {
+                        size += kobjsize(region);
+                        size += region->vm_end - region->vm_start;
                 }
         }
 
@@ -105,20 +117,62 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
 }
 
 /*
+ * display a single VMA to a sequenced file
+ */
+static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
+{
+        unsigned long ino = 0;
+        struct file *file;
+        dev_t dev = 0;
+        int flags, len;
+
+        flags = vma->vm_flags;
+        file = vma->vm_file;
+
+        if (file) {
+                struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+                dev = inode->i_sb->s_dev;
+                ino = inode->i_ino;
+        }
+
+        seq_printf(m,
+                   "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+                   vma->vm_start,
+                   vma->vm_end,
+                   flags & VM_READ ? 'r' : '-',
+                   flags & VM_WRITE ? 'w' : '-',
+                   flags & VM_EXEC ? 'x' : '-',
+                   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
+                   vma->vm_pgoff << PAGE_SHIFT,
+                   MAJOR(dev), MINOR(dev), ino, &len);
+
+        if (file) {
+                len = 25 + sizeof(void *) * 6 - len;
+                if (len < 1)
+                        len = 1;
+                seq_printf(m, "%*c", len, ' ');
+                seq_path(m, &file->f_path, "");
+        }
+
+        seq_putc(m, '\n');
+        return 0;
+}
+
+/*
  * display mapping lines for a particular process's /proc/pid/maps
  */
-static int show_map(struct seq_file *m, void *_vml)
+static int show_map(struct seq_file *m, void *_p)
 {
-        struct vm_list_struct *vml = _vml;
+        struct rb_node *p = _p;
 
-        return nommu_vma_show(m, vml->vma);
+        return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
 }
 
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
         struct proc_maps_private *priv = m->private;
-        struct vm_list_struct *vml;
         struct mm_struct *mm;
+        struct rb_node *p;
         loff_t n = *pos;
 
         /* pin the task and mm whilst we play with them */
@@ -134,9 +188,9 @@ static void *m_start(struct seq_file *m, loff_t *pos)
         }
 
         /* start from the Nth VMA */
-        for (vml = mm->context.vmlist; vml; vml = vml->next)
+        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
                 if (n-- == 0)
-                        return vml;
+                        return p;
         return NULL;
 }
 
@@ -152,12 +206,12 @@ static void m_stop(struct seq_file *m, void *_vml)
         }
 }
 
-static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
+static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
 {
-        struct vm_list_struct *vml = _vml;
+        struct rb_node *p = _p;
 
         (*pos)++;
-        return vml ? vml->next : NULL;
+        return p ? rb_next(p) : NULL;
 }
 
 static const struct seq_operations proc_pid_maps_ops = {
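
In the reworked task_mem() above, a mapping is accounted as the size of its backing vm_region (allocated in whole pages, plus the kobjsize() of the kernel objects themselves), and "slack" becomes the unused tail of that region beyond the VMA, region->vm_end - vma->vm_end. A toy calculation with invented numbers, ignoring the metadata objects:

    #include <stdio.h>

    int main(void)
    {
            /* Invented example: a private 5 KiB NOMMU mapping whose backing
             * region was rounded up to two 4 KiB pages. */
            unsigned long vma_end      = 0x11400;    /* 5 KiB mapped at 0x10000 */
            unsigned long region_start = 0x10000;
            unsigned long region_end   = 0x12000;    /* 8 KiB actually allocated */

            unsigned long size  = region_end - region_start;   /* charged: 8192 */
            unsigned long slack = region_end - vma_end;         /* unused: 3072 */

            printf("size %lu bytes, slack %lu bytes\n", size, slack);
            return 0;
    }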
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 76acdbc34611..b9b567a28376 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -262,11 +262,11 @@ unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
         ret = -ENOMEM;
         pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
         if (!pages)
-                goto out;
+                goto out_free;
 
         nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
         if (nr != lpages)
-                goto out; /* leave if some pages were missing */
+                goto out_free_pages; /* leave if some pages were missing */
 
         /* check the pages for physical adjacency */
         ptr = pages;
@@ -274,19 +274,18 @@ unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
         page++;
         for (loop = lpages; loop > 1; loop--)
                 if (*ptr++ != page++)
-                        goto out;
+                        goto out_free_pages;
 
         /* okay - all conditions fulfilled */
         ret = (unsigned long) page_address(pages[0]);
 
-out:
-        if (pages) {
-                ptr = pages;
-                for (loop = lpages; loop > 0; loop--)
-                        put_page(*ptr++);
-                kfree(pages);
-        }
-
+out_free_pages:
+        ptr = pages;
+        for (loop = nr; loop > 0; loop--)
+                put_page(*ptr++);
+out_free:
+        kfree(pages);
+out:
         return ret;
 }
 
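
The final two hunks fix the error handling in ramfs_nommu_get_unmapped_area(): the old single out: label dropped references on all lpages slots even when find_get_pages() had filled only nr of them, and needed an if (pages) guard because it was also reached before the array was allocated. The rewrite uses one label per acquired resource, so each failure path releases exactly what it obtained; the success path deliberately falls through, since only the start address is needed once physical adjacency has been confirmed. A generic userspace sketch of the same layered-cleanup idiom (not the kernel code itself; the resources are stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    /* Every exit path frees exactly what was acquired before the failure,
     * mirroring the out_free_pages / out_free / out labels above. */
    static int layered_cleanup_example(void)
    {
            int ret = -1;
            char *table;
            FILE *f;

            table = malloc(64);
            if (!table)
                    goto out;

            f = fopen("/etc/hostname", "r");        /* stand-in resource */
            if (!f)
                    goto out_free;

            ret = 0;        /* success: temporaries are still released */
            fclose(f);
    out_free:
            free(table);
    out:
            return ret;
    }

    int main(void)
    {
            return layered_cleanup_example() ? 1 : 0;
    }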