author    Jeff Garzik <jgarzik@pobox.com>    2005-10-06 05:51:07 -0400
committer Jeff Garzik <jgarzik@pobox.com>    2005-10-06 05:51:07 -0400
commit    012e060c95e547eceea4a12c6f58592473bf4011 (patch)
tree      b57d3eafb50ce517577d2cf366c9ef0b4b286589 /mm
parent    923f122573851d18a3832ca808269fa2d5046fb1 (diff)
parent    ed39f731ab2e77e58122232f6e27333331d7793d (diff)
Merge branch 'master'
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig       4
-rw-r--r--  mm/bootmem.c    14
-rw-r--r--  mm/mempolicy.c   7
-rw-r--r--  mm/mmap.c        5
-rw-r--r--  mm/mprotect.c    3
-rw-r--r--  mm/mremap.c      6
-rw-r--r--  mm/page_alloc.c  3
-rw-r--r--  mm/slab.c       72
-rw-r--r--  mm/swapfile.c    1
-rw-r--r--  mm/vmscan.c      4
10 files changed, 67 insertions(+), 52 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 4e9937ac3529..391ffc54d136 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -29,7 +29,7 @@ config FLATMEM_MANUAL
 	  If unsure, choose this option (Flat Memory) over any other.
 
 config DISCONTIGMEM_MANUAL
-	bool "Discontigious Memory"
+	bool "Discontiguous Memory"
 	depends on ARCH_DISCONTIGMEM_ENABLE
 	help
 	  This option provides enhanced support for discontiguous
@@ -52,7 +52,7 @@ config SPARSEMEM_MANUAL
 	  memory hotplug systems.  This is normal.
 
 	  For many other systems, this will be an alternative to
-	  "Discontigious Memory".  This option provides some potential
+	  "Discontiguous Memory".  This option provides some potential
 	  performance benefits, along with decreased code complexity,
 	  but it is newer, and more experimental.
 
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 8ec4e4c2a179..c1330cc19783 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -61,17 +61,9 @@ static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
 {
 	bootmem_data_t *bdata = pgdat->bdata;
 	unsigned long mapsize = ((end - start)+7)/8;
-	static struct pglist_data *pgdat_last;
-
-	pgdat->pgdat_next = NULL;
-	/* Add new nodes last so that bootmem always starts
-	   searching in the first nodes, not the last ones */
-	if (pgdat_last)
-		pgdat_last->pgdat_next = pgdat;
-	else {
-		pgdat_list = pgdat;
-		pgdat_last = pgdat;
-	}
+
+	pgdat->pgdat_next = pgdat_list;
+	pgdat_list = pgdat;
 
 	mapsize = ALIGN(mapsize, sizeof(long));
 	bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index afa06e184d88..9033f0859aa8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -333,8 +333,13 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		if (prev && prev->vm_end < vma->vm_start)
 			return ERR_PTR(-EFAULT);
 		if ((flags & MPOL_MF_STRICT) && !is_vm_hugetlb_page(vma)) {
+			unsigned long endvma = vma->vm_end;
+			if (endvma > end)
+				endvma = end;
+			if (vma->vm_start > start)
+				start = vma->vm_start;
 			err = check_pgd_range(vma->vm_mm,
-					vma->vm_start, vma->vm_end, nodes);
+					start, endvma, nodes);
 			if (err) {
 				first = ERR_PTR(err);
 				break;
diff --git a/mm/mmap.c b/mm/mmap.c
index 12334aecf8ad..fa11d91242e8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1640,7 +1640,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 /*
  * Get rid of page table information in the indicated region.
  *
- * Called with the page table lock held.
+ * Called with the mm semaphore held.
  */
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -1993,6 +1993,9 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
 	if (__vma && __vma->vm_start < vma->vm_end)
 		return -ENOMEM;
+	if ((vma->vm_flags & VM_ACCOUNT) &&
+	     security_vm_enough_memory(vma_pages(vma)))
+		return -ENOMEM;
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	return 0;
 }
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e9fbd013ad9a..57577f63b305 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -248,7 +248,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 
 	newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
-	if ((newflags & ~(newflags >> 4)) & 0xf) {
+	/* newflags >> 4 shift VM_MAY% in place of VM_% */
+	if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
 		error = -EACCES;
 		goto out;
 	}
diff --git a/mm/mremap.c b/mm/mremap.c
index a32fed454bd7..f343fc73a8bd 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -141,10 +141,10 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
 	if (dst) {
 		pte_t pte;
 		pte = ptep_clear_flush(vma, old_addr, src);
+
 		/* ZERO_PAGE can be dependant on virtual addr */
-		if (pfn_valid(pte_pfn(pte)) &&
-			pte_page(pte) == ZERO_PAGE(old_addr))
-			pte = pte_wrprotect(mk_pte(ZERO_PAGE(new_addr), new_vma->vm_page_prot));
+		pte = move_pte(pte, new_vma->vm_page_prot,
+				old_addr, new_addr);
 		set_pte_at(mm, new_addr, dst, pte);
 	} else
 		error = -ENOMEM;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c5823c395f71..ae2903339e71 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -22,6 +22,7 @@
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -117,7 +118,7 @@ static void bad_page(const char *function, struct page *page)
 	set_page_count(page, 0);
 	reset_page_mapcount(page);
 	page->mapping = NULL;
-	tainted |= TAINT_BAD_PAGE;
+	add_taint(TAINT_BAD_PAGE);
 }
 
 #ifndef CONFIG_HUGETLB_PAGE
diff --git a/mm/slab.c b/mm/slab.c
index 9e876d6dfad9..5cbbdfa6dd0e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -308,12 +308,12 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define SIZE_L3 (1 + MAX_NUMNODES)
 
 /*
- * This function may be completely optimized away if
+ * This function must be completely optimized away if
  * a constant is passed to it. Mostly the same as
  * what is in linux/slab.h except it returns an
  * index.
  */
-static inline int index_of(const size_t size)
+static __always_inline int index_of(const size_t size)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -329,7 +329,8 @@ static inline int index_of(const size_t size)
 			extern void __bad_size(void);
 			__bad_size();
 		}
-	}
+	} else
+		BUG();
 	return 0;
 }
 
@@ -639,7 +640,7 @@ static enum {
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(kmem_cache_t* cachep, void** objpp, int len);
+static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
 static void enable_cpucache (kmem_cache_t *cachep);
 static void cache_reap (void *unused);
 static int __node_shrink(kmem_cache_t *cachep, int node);
@@ -659,7 +660,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
 	 * kmem_cache_create(), or __kmalloc(), before
 	 * the generic caches are initialized.
 	 */
-	BUG_ON(csizep->cs_cachep == NULL);
+	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
 	while (size > csizep->cs_size)
 		csizep++;
@@ -804,7 +805,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
 
 	if (ac->avail) {
 		spin_lock(&rl3->list_lock);
-		free_block(cachep, ac->entry, ac->avail);
+		free_block(cachep, ac->entry, ac->avail, node);
 		ac->avail = 0;
 		spin_unlock(&rl3->list_lock);
 	}
@@ -925,7 +926,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 			/* Free limit for this kmem_list3 */
 			l3->free_limit -= cachep->batchcount;
 			if (nc)
-				free_block(cachep, nc->entry, nc->avail);
+				free_block(cachep, nc->entry, nc->avail, node);
 
 			if (!cpus_empty(mask)) {
 				spin_unlock(&l3->list_lock);
@@ -934,7 +935,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 
 			if (l3->shared) {
 				free_block(cachep, l3->shared->entry,
-						l3->shared->avail);
+						l3->shared->avail, node);
 				kfree(l3->shared);
 				l3->shared = NULL;
 			}
@@ -1882,12 +1883,13 @@ static void do_drain(void *arg)
 {
 	kmem_cache_t *cachep = (kmem_cache_t*)arg;
 	struct array_cache *ac;
+	int node = numa_node_id();
 
 	check_irq_off();
 	ac = ac_data(cachep);
-	spin_lock(&cachep->nodelists[numa_node_id()]->list_lock);
-	free_block(cachep, ac->entry, ac->avail);
-	spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock);
+	spin_lock(&cachep->nodelists[node]->list_lock);
+	free_block(cachep, ac->entry, ac->avail, node);
+	spin_unlock(&cachep->nodelists[node]->list_lock);
 	ac->avail = 0;
 }
 
@@ -2508,16 +2510,12 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-
-static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
 {
-	unsigned long save_flags;
 	void* objp;
 	struct array_cache *ac;
 
-	cache_alloc_debugcheck_before(cachep, flags);
-
-	local_irq_save(save_flags);
+	check_irq_off();
 	ac = ac_data(cachep);
 	if (likely(ac->avail)) {
 		STATS_INC_ALLOCHIT(cachep);
@@ -2527,6 +2525,18 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast fl
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
 	}
+	return objp;
+}
+
+static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+{
+	unsigned long save_flags;
+	void* objp;
+
+	cache_alloc_debugcheck_before(cachep, flags);
+
+	local_irq_save(save_flags);
+	objp = ____cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
 					__builtin_return_address(0));
@@ -2608,7 +2618,7 @@ done:
 /*
  * Caller needs to acquire correct kmem_list's list_lock
  */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
 {
 	int i;
 	struct kmem_list3 *l3;
@@ -2617,14 +2627,12 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
 		void *objp = objpp[i];
 		struct slab *slabp;
 		unsigned int objnr;
-		int nodeid = 0;
 
 		slabp = GET_PAGE_SLAB(virt_to_page(objp));
-		nodeid = slabp->nodeid;
-		l3 = cachep->nodelists[nodeid];
+		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
 		objnr = (objp - slabp->s_mem) / cachep->objsize;
-		check_spinlock_acquired_node(cachep, nodeid);
+		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
 
 
@@ -2664,13 +2672,14 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
+	int node = numa_node_id();
 
 	batchcount = ac->batchcount;
 #if DEBUG
 	BUG_ON(!batchcount || batchcount > ac->avail);
 #endif
 	check_irq_off();
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 	spin_lock(&l3->list_lock);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
@@ -2686,7 +2695,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 		}
 	}
 
-	free_block(cachep, ac->entry, batchcount);
+	free_block(cachep, ac->entry, batchcount, node);
 free_done:
 #if STATS
 	{
@@ -2751,7 +2760,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 	} else {
 		spin_lock(&(cachep->nodelists[nodeid])->
 				list_lock);
-		free_block(cachep, &objp, 1);
+		free_block(cachep, &objp, 1, nodeid);
 		spin_unlock(&(cachep->nodelists[nodeid])->
 				list_lock);
 	}
@@ -2844,7 +2853,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 	unsigned long save_flags;
 	void *ptr;
 
-	if (nodeid == numa_node_id() || nodeid == -1)
+	if (nodeid == -1)
 		return __cache_alloc(cachep, flags);
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
@@ -2855,7 +2864,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
-	ptr = __cache_alloc_node(cachep, flags, nodeid);
+	if (nodeid == numa_node_id())
+		ptr = ____cache_alloc(cachep, flags);
+	else
+		ptr = __cache_alloc_node(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0));
 
@@ -3079,7 +3091,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
 
 			if ((nc = cachep->nodelists[node]->shared))
 				free_block(cachep, nc->entry,
-							nc->avail);
+							nc->avail, node);
 
 			l3->shared = new;
 			if (!cachep->nodelists[node]->alien) {
@@ -3160,7 +3172,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
 		if (!ccold)
 			continue;
 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
-		free_block(cachep, ccold->entry, ccold->avail);
+		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
 		kfree(ccold);
 	}
@@ -3240,7 +3252,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
 		if (tofree > ac->avail) {
 			tofree = (ac->avail+1)/2;
 		}
-		free_block(cachep, ac->entry, tofree);
+		free_block(cachep, ac->entry, tofree, node);
 		ac->avail -= tofree;
 		memmove(ac->entry, &(ac->entry[tofree]),
 				sizeof(void*)*ac->avail);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0184f510aace..1dcaeda039f4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1381,6 +1381,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 	error = bd_claim(bdev, sys_swapon);
 	if (error < 0) {
 		bdev = NULL;
+		error = -EINVAL;
 		goto bad_swap;
 	}
 	p->old_block_size = block_size(bdev);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a740778f688d..0ea71e887bb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1258,9 +1258,9 @@ void wakeup_kswapd(struct zone *zone, int order)
 	pgdat->kswapd_max_order = order;
 	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 		return;
-	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
-	wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
+	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
 #ifdef CONFIG_PM