author    Bob Liu <lliubbo@gmail.com>    2011-05-24 20:12:56 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-05-25 11:39:38 -0400
commit    f67d9b1576c1c6e02100f8b27f4e9d66bbeb4d49 (patch)
tree      c70649a911f85418c10075901c489caa7d8a0762 /mm
parent    eb709b0d062efd653a61183af8e27b2711c3cf5c (diff)
nommu: add page alignment to mmap
Currently, on nommu architectures, mmap(), mremap() and munmap() don't do page alignment, which is inconsistent with MMU architectures and causes some issues.

First, some drivers' mmap() functions depend on vma->vm_end - vma->vm_start being page aligned, which is true on MMU architectures but not on nommu; the uvc camera driver is one example.

Second, munmap() may return an -EINVAL [split file] error when the end passed in from userspace is not page aligned, but vma->vm_end is aligned due to a split or the driver's mmap() op.

Add page alignment to fix those issues.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Greg Ungerer <gerg@snapgear.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
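To make the failure mode concrete, here is a minimal user-space-style sketch of the alignment arithmetic (the 4 KiB page size and all addresses are assumptions for illustration, not taken from the patch):

/* Hedged sketch, not kernel code: shows why an unaligned length from
 * userspace can never match a page-aligned vma->vm_end. PAGE_ALIGN
 * mirrors the kernel definition; the 4096-byte page size is assumed. */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long start = 0x10000;  /* hypothetical mapping base */
        unsigned long len   = 100;      /* unaligned length from userspace */

        printf("unaligned end: %#lx\n", start + len);             /* 0x10064 */
        printf("aligned end:   %#lx\n", start + PAGE_ALIGN(len)); /* 0x11000 */
        return 0;
}

With an unaligned len, start + len can never equal a vma->vm_end that mmap() rounded up to a page boundary, which is exactly the munmap() -EINVAL case described above.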
Diffstat (limited to 'mm')
-rw-r--r--    mm/nommu.c    23
1 file changed, 14 insertions, 9 deletions
diff --git a/mm/nommu.c b/mm/nommu.c
index 92e1a47d1e52..1fd0c51b10a6 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1124,7 +1124,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
                            unsigned long capabilities)
 {
         struct page *pages;
-        unsigned long total, point, n, rlen;
+        unsigned long total, point, n;
         void *base;
         int ret, order;
 
@@ -1148,13 +1148,12 @@ static int do_mmap_private(struct vm_area_struct *vma,
          * make a private copy of the data and map that instead */
         }
 
-        rlen = PAGE_ALIGN(len);
 
         /* allocate some memory to hold the mapping
          * - note that this may not return a page-aligned address if the object
          *   we're allocating is smaller than a page
          */
-        order = get_order(rlen);
+        order = get_order(len);
         kdebug("alloc order %d for %lx", order, len);
 
         pages = alloc_pages(GFP_KERNEL, order);
@@ -1164,7 +1163,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
         total = 1 << order;
         atomic_long_add(total, &mmap_pages_allocated);
 
-        point = rlen >> PAGE_SHIFT;
+        point = len >> PAGE_SHIFT;
 
         /* we allocated a power-of-2 sized page set, so we may want to trim off
          * the excess */
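A side note on the hunk above: this arithmetic only yields an exact page count because len is now page-aligned by do_mmap_pgoff() before do_mmap_private() runs. A hedged sketch of the trim logic, with a simplified stand-in for get_order() and illustrative values:

/* Hedged sketch, not kernel code: the power-of-two trim arithmetic,
 * assuming len is already page-aligned (as do_mmap_pgoff() now
 * guarantees) and 4 KiB pages. get_order() is a simplified stand-in. */
#include <stdio.h>

#define PAGE_SHIFT 12

static int get_order(unsigned long size)  /* smallest order covering size */
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned long len   = 5UL << PAGE_SHIFT; /* 5 pages, aligned  */
        int order           = get_order(len);    /* 3 -> 8-page block */
        unsigned long total = 1UL << order;      /* pages allocated   */
        unsigned long point = len >> PAGE_SHIFT; /* pages really used */

        /* pages [point, total) are the excess trimmed off and freed */
        printf("order %d: keep %lu page(s), trim %lu\n",
               order, point, total - point);
        return 0;
}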
@@ -1186,7 +1185,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
         base = page_address(pages);
         region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
         region->vm_start = (unsigned long) base;
-        region->vm_end   = region->vm_start + rlen;
+        region->vm_end   = region->vm_start + len;
         region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
 
         vma->vm_start = region->vm_start;
@@ -1202,15 +1201,15 @@ static int do_mmap_private(struct vm_area_struct *vma,
 
                 old_fs = get_fs();
                 set_fs(KERNEL_DS);
-                ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
+                ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
                 set_fs(old_fs);
 
                 if (ret < 0)
                         goto error_free;
 
                 /* clear the last little bit */
-                if (ret < rlen)
-                        memset(base + ret, 0, rlen - ret);
+                if (ret < len)
+                        memset(base + ret, 0, len - ret);
 
         }
 
@@ -1259,6 +1258,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 
         /* we ignore the address hint */
         addr = 0;
+        len = PAGE_ALIGN(len);
 
         /* we've determined that we can make the mapping, now translate what we
          * now know into VMA flags */
@@ -1635,14 +1635,17 @@ static int shrink_vma(struct mm_struct *mm,
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 {
         struct vm_area_struct *vma;
-        unsigned long end = start + len;
+        unsigned long end;
         int ret;
 
         kenter(",%lx,%zx", start, len);
 
+        len = PAGE_ALIGN(len);
         if (len == 0)
                 return -EINVAL;
 
+        end = start + len;
+
         /* find the first potentially overlapping VMA */
         vma = find_vma(mm, start);
         if (!vma) {
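The reordering above (align len, then compute end) is what fixes the -EINVAL [split file] case from the changelog. A hedged, simplified sketch with hypothetical vma bounds:

/* Hedged, simplified sketch of the old failure: userspace passes an
 * unaligned len, but the vma ends on a page boundary, so the request
 * looks like a partial unmap and is rejected. Values hypothetical. */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define EINVAL        22

/* hypothetical vma covering two pages, vm_end aligned by mmap() */
static const unsigned long vm_start = 0x10000, vm_end = 0x12000;

static int try_munmap(unsigned long start, unsigned long len, int align)
{
        unsigned long end;

        if (align)
                len = PAGE_ALIGN(len);  /* the patch's new first step */
        end = start + len;

        /* stand-in for the "would need to split the vma" check */
        if (start != vm_start || end != vm_end)
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("without align: %d\n", try_munmap(0x10000, 0x1f00, 0)); /* -22 */
        printf("with align:    %d\n", try_munmap(0x10000, 0x1f00, 1)); /*   0 */
        return 0;
}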
@@ -1762,6 +1765,8 @@ unsigned long do_mremap(unsigned long addr,
         struct vm_area_struct *vma;
 
         /* insanity checks first */
+        old_len = PAGE_ALIGN(old_len);
+        new_len = PAGE_ALIGN(new_len);
         if (old_len == 0 || new_len == 0)
                 return (unsigned long) -EINVAL;
 
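Similarly for do_mremap(): the vma is looked up by exact bounds further down, so both lengths must be aligned before the checks. A minimal sketch (hypothetical values; 4 KiB pages assumed):

/* Hedged sketch, not kernel code: with vm_end page-aligned by mmap(),
 * an unaligned old_len can never satisfy addr + old_len == vm_end
 * unless it is rounded up first. Values are hypothetical. */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* hypothetical vma spanning one page: [0x10000, 0x11000) */
static const unsigned long vm_start = 0x10000, vm_end = 0x11000;

static int bounds_match(unsigned long addr, unsigned long old_len, int align)
{
        if (align)
                old_len = PAGE_ALIGN(old_len);  /* the patch's new first step */
        return addr == vm_start && addr + old_len == vm_end;
}

int main(void)
{
        printf("without align: %d\n", bounds_match(0x10000, 0xf00, 0)); /* 0 */
        printf("with align:    %d\n", bounds_match(0x10000, 0xf00, 1)); /* 1 */
        return 0;
}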