author    Rafael J. Wysocki <rjw@sisk.pl>    2009-09-09 18:02:02 -0400
committer Rafael J. Wysocki <rjw@sisk.pl>    2009-09-09 18:02:02 -0400
commit    bf992fa2bc1ad1bb2aeb0bdfadb43f236b9297fd (patch)
tree      d67f525c76b66956ba7ca0d40bc0fcda0e414700 /mm
parent    9d7302299ee96ca954fe4ab8ca640333b6e19ad0 (diff)
parent    7135a71b19be1faf48b7148d77844d03bc0717d6 (diff)
Merge branch 'master' into for-linus
Diffstat (limited to 'mm')
-rw-r--r-- mm/nommu.c      |  3 +--
-rw-r--r-- mm/page_alloc.c |  6 ++++--
-rw-r--r-- mm/percpu.c     | 15 ++++++++++++++-
-rw-r--r-- mm/rmap.c       |  1 +
-rw-r--r-- mm/slub.c       |  4 ++--
-rw-r--r-- mm/vmscan.c     |  9 +++++++--
6 files changed, 29 insertions(+), 9 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 4bde489ec431..66e81e7e9fe9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1352,6 +1352,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	}
 
 	vma->vm_region = region;
+	add_nommu_region(region);
 
 	/* set up the mapping */
 	if (file && vma->vm_flags & VM_SHARED)
@@ -1361,8 +1362,6 @@ unsigned long do_mmap_pgoff(struct file *file,
 	if (ret < 0)
 		goto error_put_region;
 
-	add_nommu_region(region);
-
 	/* okay... we have a mapping; now we have to register it */
 	result = vma->vm_start;
 
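Note on the nommu.c hunk: add_nommu_region() now runs as soon as vma->vm_region is set, before the fallible mapping setup, so the region is already in the shared tree when any later error path tries to unwind and unlink it. Below is a minimal userspace sketch of that "register before the fallible step" ordering; every name in it (region_tree, region_add, region_del, map_setup, do_mmap_like) is a hypothetical stand-in, not a kernel API.

#include <stdio.h>
#include <stdlib.h>

struct region { int id; struct region *next; };
static struct region *region_tree;   /* stand-in for the nommu region tree */

static void region_add(struct region *r)
{
	r->next = region_tree;
	region_tree = r;
}

static void region_del(struct region *r)
{
	struct region **p = &region_tree;
	while (*p && *p != r)
		p = &(*p)->next;
	if (*p)                        /* always found: it was added up front */
		*p = (*p)->next;
}

static int map_setup(int fail)     /* the fallible setup step */
{
	return fail ? -1 : 0;
}

static int do_mmap_like(int id, int fail)
{
	struct region *r = malloc(sizeof *r);
	if (!r)
		return -1;
	r->id = id;
	region_add(r);                 /* register BEFORE the fallible step... */
	if (map_setup(fail) < 0) {
		region_del(r);         /* ...so the error path can always unlink */
		free(r);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("ok=%d\n", do_mmap_like(1, 0));
	printf("fail=%d\n", do_mmap_like(2, 1));
	return 0;
}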
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5cc986eb9f6f..a0de15f46987 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -817,13 +817,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 		 * agressive about taking ownership of free pages
 		 */
 		if (unlikely(current_order >= (pageblock_order >> 1)) ||
-				start_migratetype == MIGRATE_RECLAIMABLE) {
+				start_migratetype == MIGRATE_RECLAIMABLE ||
+				page_group_by_mobility_disabled) {
 			unsigned long pages;
 			pages = move_freepages_block(zone, page,
 							start_migratetype);
 
 			/* Claim the whole block if over half of it is free */
-			if (pages >= (1 << (pageblock_order-1)))
+			if (pages >= (1 << (pageblock_order-1)) ||
+					page_group_by_mobility_disabled)
 				set_pageblock_migratetype(page,
 							start_migratetype);
 
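Note on the page_alloc.c hunk: page_group_by_mobility_disabled is added to both the steal condition and the whole-block claim, so when mobility grouping is switched off the fallback path always takes over the entire pageblock instead of trickling pages between migratetypes. A standalone model of the adjusted predicates follows; it mirrors the kernel names but fixes pageblock_order at 9 purely for illustration.

#include <stdbool.h>
#include <stdio.h>

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

#define PAGEBLOCK_ORDER 9   /* illustrative; the real value is per-arch */

/* Steal from a foreign block if the order is large, the allocation is
 * reclaimable, or (new in this patch) mobility grouping is disabled. */
static bool should_steal(unsigned current_order, enum migratetype start,
			 bool mobility_disabled)
{
	return current_order >= (PAGEBLOCK_ORDER >> 1) ||
	       start == MIGRATE_RECLAIMABLE ||
	       mobility_disabled;
}

/* Relabel the whole block if over half is free, or grouping is disabled. */
static bool claim_whole_block(unsigned long pages_moved, bool mobility_disabled)
{
	return pages_moved >= (1UL << (PAGEBLOCK_ORDER - 1)) ||
	       mobility_disabled;
}

int main(void)
{
	printf("%d\n", should_steal(2, MIGRATE_MOVABLE, true));  /* 1: disabled */
	printf("%d\n", claim_whole_block(100, true));            /* 1: disabled */
	return 0;
}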
diff --git a/mm/percpu.c b/mm/percpu.c
index 5fe37842e0ea..3311c8919f37 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -197,7 +197,12 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
 				     int page_idx)
 {
-	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
+	/*
+	 * Any possible cpu id can be used here, so there's no need to
+	 * worry about preemption or cpu hotplug.
+	 */
+	return *pcpu_chunk_pagep(chunk, raw_smp_processor_id(),
+				 page_idx) != NULL;
 }
 
 /* set the pointer to a chunk in a page struct */
@@ -297,6 +302,14 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 		return pcpu_first_chunk;
 	}
 
+	/*
+	 * The address is relative to unit0 which might be unused and
+	 * thus unmapped.  Offset the address to the unit space of the
+	 * current processor before looking it up in the vmalloc
+	 * space.  Note that any possible cpu id can be used here, so
+	 * there's no need to worry about preemption or cpu hotplug.
+	 */
+	addr += raw_smp_processor_id() * pcpu_unit_size;
 	return pcpu_get_page_chunk(vmalloc_to_page(addr));
 }
 
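Note on the percpu.c hunks: unit 0 of the first chunk may be unused and unmapped, so lookups are redirected through the current CPU's unit. Because every populated unit maps the same chunk pages at the same offsets, any CPU id gives the same answer, which is why raw_smp_processor_id() is safe even under preemption. A small userspace sketch of the unit-offset arithmetic; base, unit_size, and the 3-CPU layout are illustrative assumptions, not kernel values.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
	const uintptr_t base = 0x100000;      /* hypothetical vmalloc base */
	const size_t unit_size = 64 * 1024;   /* hypothetical pcpu_unit_size */
	const size_t page_size = 4096;
	uintptr_t addr = base + 0x2340;       /* an address relative to unit 0 */

	for (int cpu = 0; cpu < 3; cpu++) {
		/* offset the address into this cpu's unit window */
		uintptr_t unit_addr = addr + (uintptr_t)cpu * unit_size;
		/* page index within the unit, as the chunk lookup sees it */
		size_t page_idx =
			(unit_addr - (base + cpu * unit_size)) / page_size;
		printf("cpu%d: addr=%#lx page_idx=%zu\n",
		       cpu, (unsigned long)unit_addr, page_idx);
	}
	/* page_idx is identical for every cpu, so any cpu id works */
	return 0;
}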
diff --git a/mm/rmap.c b/mm/rmap.c
index 836c6c63e1f2..0895b5c7cbff 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -358,6 +358,7 @@ static int page_referenced_one(struct page *page,
 	 */
 	if (vma->vm_flags & VM_LOCKED) {
 		*mapcount = 1;	/* break early from loop */
+		*vm_flags |= VM_LOCKED;
 		goto out_unmap;
 	}
 
diff --git a/mm/slub.c b/mm/slub.c
index b9f1491a58a1..b6276753626e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2594,8 +2594,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	if (s->flags & SLAB_DESTROY_BY_RCU)
-		rcu_barrier();
 	down_write(&slub_lock);
 	s->refcount--;
 	if (!s->refcount) {
@@ -2606,6 +2604,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 				"still has objects.\n", s->name, __func__);
 			dump_stack();
 		}
+		if (s->flags & SLAB_DESTROY_BY_RCU)
+			rcu_barrier();
 		sysfs_slab_remove(s);
 	} else
 		up_write(&slub_lock);
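Note on the slub.c hunks: rcu_barrier() moves inside the !s->refcount branch, so the potentially slow wait for in-flight call_rcu() callbacks only happens when the cache is actually being torn down, yet still runs before the slab's backing storage is released. A userspace model of that ordering follows, with a simple pending-callback counter standing in for RCU; all names here are hypothetical.

#include <stdio.h>
#include <stdbool.h>

static int pending;                       /* outstanding deferred frees */

static void defer_free(void)   { pending++; }   /* models call_rcu()    */
static void barrier_drain(void) { pending = 0; } /* models rcu_barrier() */

struct cache { int refcount; bool destroy_by_rcu; };

static void cache_destroy(struct cache *s)
{
	if (--s->refcount)
		return;               /* still shared: skip the expensive wait */
	if (s->destroy_by_rcu)
		barrier_drain();      /* wait only when really destroying */
	printf("freed with %d callbacks pending\n", pending);  /* must be 0 */
}

int main(void)
{
	struct cache s = { .refcount = 2, .destroy_by_rcu = true };
	defer_free();
	cache_destroy(&s);   /* refcount 2->1: no barrier, no free */
	cache_destroy(&s);   /* refcount 1->0: barrier, then free  */
	return 0;
}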
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dea7abd31098..94e86dd6954c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -630,9 +630,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 		referenced = page_referenced(page, 1,
 						sc->mem_cgroup, &vm_flags);
-		/* In active use or really unfreeable? Activate it. */
+		/*
+		 * In active use or really unfreeable? Activate it.
+		 * If a page with PG_mlocked lost the isolation race,
+		 * try_to_unmap() moves it to the unevictable list.
+		 */
 		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-			referenced && page_mapping_inuse(page))
+			referenced && page_mapping_inuse(page)
+			&& !(vm_flags & VM_LOCKED))
 			goto activate_locked;
 
 		/*
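Note on the vmscan.c hunk: this is the consumer half of the rmap.c change above. With VM_LOCKED now reported back through vm_flags, reclaim no longer activates mlocked pages; they fall through to try_to_unmap(), which can park them on the unevictable list instead. A standalone sketch of the adjusted activation test; the VM_LOCKED value is illustrative, while PAGE_ALLOC_COSTLY_ORDER matches the kernel constant.

#include <stdbool.h>
#include <stdio.h>

#define VM_LOCKED 0x2000u           /* illustrative value */
#define PAGE_ALLOC_COSTLY_ORDER 3   /* matches the kernel constant */

/* A referenced, in-use page is activated only if it is NOT mlocked. */
static bool should_activate(int order, bool referenced, bool mapping_inuse,
			    unsigned vm_flags)
{
	return order <= PAGE_ALLOC_COSTLY_ORDER &&
	       referenced && mapping_inuse &&
	       !(vm_flags & VM_LOCKED);
}

int main(void)
{
	printf("%d\n", should_activate(0, true, true, 0));          /* 1 */
	printf("%d\n", should_activate(0, true, true, VM_LOCKED));  /* 0 */
	return 0;
}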