commit     e5baa396af7560382d2cf3f0871d616b61fc284c
tree       6afc166894b8c8b3b2cf6add72a726be14ae2443
parent     d6a4c847e43c851cc0ddf73087a730227223f989
parent     ef6bd6eb90ad72ee8ee7ba8b271f27102e9a90c1
author     Paul Mackerras <paulus@samba.org>  2005-09-25 08:51:50 -0400
committer  Paul Mackerras <paulus@samba.org>  2005-09-25 08:51:50 -0400

    Merge from Linus' tree.
Diffstat (limited to 'mm')

 -rw-r--r--  mm/mmap.c     |  2
 -rw-r--r--  mm/mprotect.c |  3
 -rw-r--r--  mm/slab.c     | 45
 -rw-r--r--  mm/swapfile.c |  1

 4 files changed, 27 insertions(+), 24 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1640,7 +1640,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 /*
  * Get rid of page table information in the indicated region.
  *
- * Called with the page table lock held.
+ * Called with the mm semaphore held.
  */
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
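The corrected comment matches the actual caller contract of this era: unmap_region() runs under the mm semaphore, which the munmap path takes before tearing anything down. A hedged kernel-style sketch of that calling context (era API; error handling trimmed, not a quote of the source):

	/* mmap_sem, not the page table lock, is what the munmap path
	 * holds across unmap_region() (illustrative sketch) */
	down_write(&current->mm->mmap_sem);
	ret = do_munmap(current->mm, start, len);	/* -> unmap_region() */
	up_write(&current->mm->mmap_sem);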
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e9fbd013ad9a..57577f63b305 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -248,7 +248,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 
 		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
-		if ((newflags & ~(newflags >> 4)) & 0xf) {
+		/* newflags >> 4 shift VM_MAY% in place of VM_% */
+		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
 			error = -EACCES;
 			goto out;
 		}
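The replaced magic constant 0xf relied on the r/w/x bits living in the low nibble; spelling the mask out documents that only VM_READ, VM_WRITE and VM_EXEC are being checked. The trick itself works because each VM_MAY* flag sits exactly four bits above its VM_* twin, so newflags >> 4 lines the "may" bits up under the requested bits. A standalone userspace sketch (flag values copied from the era's <linux/mm.h>; the forbidden() helper is illustrative, not kernel code):

#include <stdio.h>

#define VM_READ     0x00000001UL
#define VM_WRITE    0x00000002UL
#define VM_EXEC     0x00000004UL
#define VM_MAYREAD  0x00000010UL
#define VM_MAYWRITE 0x00000020UL
#define VM_MAYEXEC  0x00000040UL

/* Nonzero iff a requested permission has no matching VM_MAY* bit. */
static unsigned long forbidden(unsigned long newflags)
{
	/* newflags >> 4 shifts VM_MAY* into the VM_* bit positions */
	return (newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC);
}

int main(void)
{
	/* Write requested, but the VMA may only ever be read/exec'd. */
	unsigned long newflags = VM_WRITE | VM_MAYREAD | VM_MAYEXEC;

	printf("%s\n", forbidden(newflags) ? "-EACCES" : "ok");
	return 0;
}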
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -308,12 +308,12 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define	SIZE_L3 (1 + MAX_NUMNODES)
 
 /*
- * This function may be completely optimized away if
+ * This function must be completely optimized away if
  * a constant is passed to it. Mostly the same as
  * what is in linux/slab.h except it returns an
  * index.
  */
-static inline int index_of(const size_t size)
+static __always_inline int index_of(const size_t size)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -329,7 +329,8 @@ static inline int index_of(const size_t size)
 			extern void __bad_size(void);
 			__bad_size();
 		}
-	}
+	} else
+		BUG();
 	return 0;
 }
 
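Two things change here: __always_inline forces the constant folding the comment now insists on ("must", not "may" — if the call survives to link time, the reference to the deliberately undefined __bad_size() breaks the build), and a non-constant size now hits BUG() instead of silently returning index 0. A userspace sketch of the same dispatch pattern (sizes and names are illustrative stand-ins, not the real kmalloc_sizes.h table; build with optimization, e.g. gcc -O2, or __builtin_constant_p sees nothing):

#include <assert.h>
#include <stdio.h>

static __attribute__((always_inline)) inline int index_of(const unsigned long size)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
		/* With a constant argument the compiler folds this whole
		 * chain down to a single "return N". */
		if (size <= 32)  return i; i++;
		if (size <= 64)  return i; i++;
		if (size <= 128) return i; i++;
		if (size <= 256) return i; i++;
		/* In the kernel, a too-large constant calls the undefined
		 * __bad_size() here, turning the bug into a link error. */
	}
	/* Non-constant size is a caller bug, hence BUG() in the kernel. */
	assert(0 && "index_of() needs a compile-time constant");
	return 0;
}

int main(void)
{
	printf("index_of(100) = %d\n", index_of(100));	/* prints 2 */
	return 0;
}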
@@ -639,7 +640,7 @@ static enum {
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(kmem_cache_t* cachep, void** objpp, int len);
+static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
 static void enable_cpucache (kmem_cache_t *cachep);
 static void cache_reap (void *unused);
 static int __node_shrink(kmem_cache_t *cachep, int node);
@@ -804,7 +805,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
 
 	if (ac->avail) {
 		spin_lock(&rl3->list_lock);
-		free_block(cachep, ac->entry, ac->avail);
+		free_block(cachep, ac->entry, ac->avail, node);
 		ac->avail = 0;
 		spin_unlock(&rl3->list_lock);
 	}
@@ -925,7 +926,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 			/* Free limit for this kmem_list3 */
 			l3->free_limit -= cachep->batchcount;
 			if (nc)
-				free_block(cachep, nc->entry, nc->avail);
+				free_block(cachep, nc->entry, nc->avail, node);
 
 			if (!cpus_empty(mask)) {
 				spin_unlock(&l3->list_lock);
@@ -934,7 +935,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 
 			if (l3->shared) {
 				free_block(cachep, l3->shared->entry,
-					   l3->shared->avail);
+					   l3->shared->avail, node);
 				kfree(l3->shared);
 				l3->shared = NULL;
 			}
@@ -1882,12 +1883,13 @@ static void do_drain(void *arg)
 {
 	kmem_cache_t *cachep = (kmem_cache_t*)arg;
 	struct array_cache *ac;
+	int node = numa_node_id();
 
 	check_irq_off();
 	ac = ac_data(cachep);
-	spin_lock(&cachep->nodelists[numa_node_id()]->list_lock);
-	free_block(cachep, ac->entry, ac->avail);
-	spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock);
+	spin_lock(&cachep->nodelists[node]->list_lock);
+	free_block(cachep, ac->entry, ac->avail, node);
+	spin_unlock(&cachep->nodelists[node]->list_lock);
 	ac->avail = 0;
 }
 
@@ -2608,7 +2610,7 @@ done:
 /*
  * Caller needs to acquire correct kmem_list's list_lock
  */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
 {
 	int i;
 	struct kmem_list3 *l3;
@@ -2617,14 +2619,12 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
 		void *objp = objpp[i];
 		struct slab *slabp;
 		unsigned int objnr;
-		int nodeid = 0;
 
 		slabp = GET_PAGE_SLAB(virt_to_page(objp));
-		nodeid = slabp->nodeid;
-		l3 = cachep->nodelists[nodeid];
+		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
 		objnr = (objp - slabp->s_mem) / cachep->objsize;
-		check_spinlock_acquired_node(cachep, nodeid);
+		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
 
 
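This is the core of the slab.c change: free_block() used to re-derive each object's home node from its slab, while the caller had already locked one specific node's list_lock. Passing the node explicitly makes the lock and the lists it protects agree by construction, which is what check_spinlock_acquired_node() now verifies against the same value. A userspace model of the new contract (all types and helpers below are stand-ins, not the real slab internals):

#include <pthread.h>
#include <stdio.h>

#define MAX_NODES 2

struct node_list {
	pthread_mutex_t list_lock;
	int nr_free;		/* stand-in for the per-node slab lists */
};

struct cache {
	struct node_list *nodelists[MAX_NODES];
};

/* Caller must hold cachep->nodelists[node]->list_lock. */
static void free_block(struct cache *cachep, void **objpp, int nr, int node)
{
	(void)objpp;		/* a real slab would walk the objects */
	cachep->nodelists[node]->nr_free += nr;
}

static void drain(struct cache *cachep, void **entry, int avail, int node)
{
	pthread_mutex_lock(&cachep->nodelists[node]->list_lock);
	free_block(cachep, entry, avail, node);	/* node matches the lock held */
	pthread_mutex_unlock(&cachep->nodelists[node]->list_lock);
}

int main(void)
{
	struct node_list n0 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct cache c = { { &n0, &n0 } };
	void *objs[4] = { 0 };

	drain(&c, objs, 4, 0);
	printf("node 0 free objects: %d\n", n0.nr_free);
	return 0;
}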
@@ -2664,13 +2664,14 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
+	int node = numa_node_id();
 
 	batchcount = ac->batchcount;
 #if DEBUG
 	BUG_ON(!batchcount || batchcount > ac->avail);
 #endif
 	check_irq_off();
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 	spin_lock(&l3->list_lock);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
@@ -2686,7 +2687,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 		}
 	}
 
-	free_block(cachep, ac->entry, batchcount);
+	free_block(cachep, ac->entry, batchcount, node);
 free_done:
 #if STATS
 	{
@@ -2751,7 +2752,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 		} else {
 			spin_lock(&(cachep->nodelists[nodeid])->
 				  list_lock);
-			free_block(cachep, &objp, 1);
+			free_block(cachep, &objp, 1, nodeid);
 			spin_unlock(&(cachep->nodelists[nodeid])->
 				    list_lock);
 		}
@@ -2844,7 +2845,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
 	unsigned long save_flags;
 	void *ptr;
 
-	if (nodeid == numa_node_id() || nodeid == -1)
+	if (nodeid == -1)
 		return __cache_alloc(cachep, flags);
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
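Behavioral note: before this hunk, a request for the local node short-circuited into the generic fast path; now only nodeid == -1 ("no preference") does, and an explicit node request — local or not — is served from that node's lists. A small userspace model of the dispatch rule (function names are stand-ins, not the kernel API):

#include <stdio.h>

static int numa_node_id_stub(void) { return 0; }	/* pretend we run on node 0 */

static const char *alloc_path(int nodeid)
{
	if (nodeid == -1)
		return "__cache_alloc (generic fast path)";
	/* before this patch, nodeid == numa_node_id() also took the
	 * fast path above; now it falls through like any other node */
	return "per-node path via cachep->nodelists[nodeid]";
}

int main(void)
{
	printf("nodeid -1: %s\n", alloc_path(-1));
	printf("nodeid %d (local): %s\n", numa_node_id_stub(),
	       alloc_path(numa_node_id_stub()));
	return 0;
}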
@@ -3079,7 +3080,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
 
 			if ((nc = cachep->nodelists[node]->shared))
 				free_block(cachep, nc->entry,
-					   nc->avail);
+					   nc->avail, node);
 
 			l3->shared = new;
 			if (!cachep->nodelists[node]->alien) {
@@ -3160,7 +3161,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
 		if (!ccold)
 			continue;
 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
-		free_block(cachep, ccold->entry, ccold->avail);
+		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
 		kfree(ccold);
 	}
@@ -3240,7 +3241,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
 		if (tofree > ac->avail) {
 			tofree = (ac->avail+1)/2;
 		}
-		free_block(cachep, ac->entry, tofree);
+		free_block(cachep, ac->entry, tofree, node);
 		ac->avail -= tofree;
 		memmove(ac->entry, &(ac->entry[tofree]),
 			sizeof(void*)*ac->avail);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0184f510aace..1dcaeda039f4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1381,6 +1381,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		error = bd_claim(bdev, sys_swapon);
 		if (error < 0) {
 			bdev = NULL;
+			error = -EINVAL;
 			goto bad_swap;
 		}
 		p->old_block_size = block_size(bdev);
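The added assignment overrides whatever bd_claim() returned (typically -EBUSY for an already-claimed device), so a device that cannot be claimed makes swapon() fail with -EINVAL before jumping to the shared cleanup label. A userspace sketch of that error-path shape (bd_claim_stub and its values are illustrative stand-ins, not the kernel functions):

#include <errno.h>
#include <stdio.h>

static int bd_claim_stub(int already_claimed)
{
	return already_claimed ? -EBUSY : 0;	/* bd_claim-like result */
}

static long swapon_sketch(int already_claimed)
{
	long error = bd_claim_stub(already_claimed);
	if (error < 0) {
		error = -EINVAL;	/* report EINVAL, not the raw -EBUSY */
		goto bad_swap;
	}
	return 0;

bad_swap:
	/* shared unwind: release whatever was set up before the failure */
	return error;
}

int main(void)
{
	printf("swapon on a claimed device -> %ld (-EINVAL)\n", swapon_sketch(1));
	return 0;
}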