author          Joonsoo Kim <iamjoonsoo.kim@lge.com>            2014-12-12 19:55:55 -0500
committer       Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 15:42:48 -0500
commit          dbc8358c72373daa4f37b7e233fecbc47105fe54
tree            a20cfe776a52acaf336e520ea324a28bb7a7eea3 /mm
parent          031bc5743f158b2d5498294f489e534a31251626
mm/nommu: use alloc_pages_exact() rather than its own implementation
do_mmap_private() in nommu.c tries to allocate physically contiguous pages
of arbitrary size in some cases, and we now have a good abstraction that
does exactly the same thing, alloc_pages_exact(). So, change it to use
that. There is no functional change. This is a preparation step for
supporting the page owner feature accurately.
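
For context, alloc_pages_exact() wraps the same allocate-then-trim pattern
that the open-coded loop removed below implemented: allocate a power-of-2
block, split it into individually refcounted pages, and hand the tail pages
beyond the requested size back to the allocator. The following is a minimal
sketch of that idea, simplified from the real mm/page_alloc.c
implementation; the name alloc_pages_exact_sketch() is hypothetical.

	/*
	 * Sketch of the allocate-then-trim idea behind alloc_pages_exact().
	 * Simplified; the real implementation lives in mm/page_alloc.c.
	 */
	static void *alloc_pages_exact_sketch(size_t size, gfp_t gfp_mask)
	{
		unsigned int order = get_order(size);
		unsigned long addr = __get_free_pages(gfp_mask, order);
		unsigned long alloc_end, used;

		if (!addr)
			return NULL;

		alloc_end = addr + (PAGE_SIZE << order);
		used = addr + PAGE_ALIGN(size);

		/* give every sub-page its own refcount so each can be freed alone */
		split_page(virt_to_page((void *)addr), order);

		/* hand the unused tail pages straight back to the page allocator */
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
		return (void *)addr;
	}

Because the sub-pages come back individually refcounted, the caller no
longer needs the per-page set_page_refcounted() pass that the old
do_mmap_private() code performed by hand.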
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Jungsoo Son <jungsoo.son@lge.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--      mm/nommu.c      33
1 file changed, 11 insertions(+), 22 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index cd519e1cd8a7..b51eadf6d952 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1149,8 +1149,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
                           unsigned long len,
                           unsigned long capabilities)
 {
-       struct page *pages;
-       unsigned long total, point, n;
+       unsigned long total, point;
        void *base;
        int ret, order;
 
@@ -1182,33 +1181,23 @@ static int do_mmap_private(struct vm_area_struct *vma,
        order = get_order(len);
        kdebug("alloc order %d for %lx", order, len);
 
-       pages = alloc_pages(GFP_KERNEL, order);
-       if (!pages)
-               goto enomem;
-
        total = 1 << order;
-       atomic_long_add(total, &mmap_pages_allocated);
-
        point = len >> PAGE_SHIFT;
 
-       /* we allocated a power-of-2 sized page set, so we may want to trim off
-        * the excess */
+       /* we don't want to allocate a power-of-2 sized page set */
        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
-               while (total > point) {
-                       order = ilog2(total - point);
-                       n = 1 << order;
-                       kdebug("shave %lu/%lu @%lu", n, total - point, total);
-                       atomic_long_sub(n, &mmap_pages_allocated);
-                       total -= n;
-                       set_page_refcounted(pages + total);
-                       __free_pages(pages + total, order);
-               }
+               total = point;
+               kdebug("try to alloc exact %lu pages", total);
+               base = alloc_pages_exact(len, GFP_KERNEL);
+       } else {
+               base = (void *)__get_free_pages(GFP_KERNEL, order);
        }
 
-       for (point = 1; point < total; point++)
-               set_page_refcounted(&pages[point]);
+       if (!base)
+               goto enomem;
+
+       atomic_long_add(total, &mmap_pages_allocated);
 
-       base = page_address(pages);
        region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
        region->vm_start = (unsigned long) base;
        region->vm_end = region->vm_start + len;
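
To make the resulting control flow concrete, here is a hedged walk-through
of the new code with hypothetical numbers (a 5-page mapping, and
sysctl_nr_trim_pages at its usual default of 1):

	/*
	 * Hypothetical walk-through of the new path for len = 5 * PAGE_SIZE:
	 *
	 *   order = get_order(len)    = 3;  // smallest power-of-2 cover
	 *   total = 1 << order        = 8;  // pages a buddy allocation would pin
	 *   point = len >> PAGE_SHIFT = 5;  // pages actually needed
	 *
	 * total - point = 3 >= sysctl_nr_trim_pages, so the exact path runs:
	 *
	 *   total = point;                               // account 5 pages, not 8
	 *   base  = alloc_pages_exact(len, GFP_KERNEL);  // trims the 3 tail pages
	 *
	 * Otherwise the plain power-of-2 path is taken:
	 *
	 *   base = (void *)__get_free_pages(GFP_KERNEL, order);
	 *
	 * Either way, mmap_pages_allocated grows by 'total' only after the
	 * allocation is known to have succeeded; the old code added 8 and then
	 * subtracted 3 while shaving, so the net accounting is unchanged.
	 */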