Diffstat (limited to 'mm')

 mm/hugetlb.c    |  10
 mm/mempolicy.c  |   2
 mm/page_alloc.c |  10
 mm/slab.c       | 176
 mm/slob.c       |   2
 mm/swap.c       |  32
 6 files changed, 148 insertions(+), 84 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b21d78c941b5..67f29516662a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -107,7 +107,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 	set_page_count(page, 1);
 	page[1].mapping = (void *)free_huge_page;
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
-		clear_highpage(&page[i]);
+		clear_user_highpage(&page[i], addr);
 	return page;
 }
 
@@ -391,12 +391,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (!new_page) {
 		page_cache_release(old_page);
-
-		/* Logically this is OOM, not a SIGBUS, but an OOM
-		 * could cause the kernel to go killing other
-		 * processes which won't help the hugepage situation
-		 * at all (?) */
-		return VM_FAULT_SIGBUS;
+		return VM_FAULT_OOM;
 	}
 
 	spin_unlock(&mm->page_table_lock);
@@ -444,6 +439,7 @@ retry:
 		page = alloc_huge_page(vma, address);
 		if (!page) {
 			hugetlb_put_quota(mapping);
+			ret = VM_FAULT_OOM;
 			goto out;
 		}
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 27da6d5c77ba..3bd7fb7e4b75 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1159,6 +1159,7 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
 	return interleave_nodes(pol);
 }
 
+#ifdef CONFIG_HUGETLBFS
 /* Return a zonelist suitable for a huge page allocation. */
 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
 {
@@ -1172,6 +1173,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
 	}
 	return zonelist_policy(GFP_HIGHUSER, pol);
 }
+#endif
 
 /* Allocate a page in interleaved policy.
    Own path because it needs to do special accounting. */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44b4eb4202d9..dde04ff4be31 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1213,18 +1213,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
 	int cpu = 0;
 
-	memset(ret, 0, sizeof(*ret));
+	memset(ret, 0, nr * sizeof(unsigned long));
 	cpus_and(*cpumask, *cpumask, cpu_online_map);
 
 	cpu = first_cpu(*cpumask);
 	while (cpu < NR_CPUS) {
 		unsigned long *in, *out, off;
 
+		if (!cpu_isset(cpu, *cpumask))
+			continue;
+
 		in = (unsigned long *)&per_cpu(page_states, cpu);
 
 		cpu = next_cpu(cpu, *cpumask);
 
-		if (cpu < NR_CPUS)
+		if (likely(cpu < NR_CPUS))
 			prefetch(&per_cpu(page_states, cpu));
 
 		out = (unsigned long *)ret;
@@ -1886,8 +1889,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
  * not check if the processor is online before following the pageset pointer.
  * Other parts of the kernel may not check if the zone is available.
  */
-static struct per_cpu_pageset
-	boot_pageset[NR_CPUS];
+static struct per_cpu_pageset boot_pageset[NR_CPUS];
 
 /*
  * Dynamically allocate memory for the
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -294,6 +294,7 @@ struct kmem_list3 {
 	unsigned long next_reap;
 	int free_touched;
 	unsigned int free_limit;
+	unsigned int colour_next;	/* Per-node cache coloring */
 	spinlock_t list_lock;
 	struct array_cache *shared;	/* shared per node */
 	struct array_cache **alien;	/* on other nodes */
@@ -344,6 +345,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	INIT_LIST_HEAD(&parent->slabs_free);
 	parent->shared = NULL;
 	parent->alien = NULL;
+	parent->colour_next = 0;
 	spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
@@ -390,7 +392,6 @@ struct kmem_cache {
 
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
-	unsigned int colour_next;	/* cache colouring */
 	struct kmem_cache *slabp_cache;
 	unsigned int slab_size;
 	unsigned int dflags;		/* dynamic flags */
@@ -883,14 +884,14 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
+static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
 {
 	int i = 0;
 	struct array_cache *ac;
 	unsigned long flags;
 
 	for_each_online_node(i) {
-		ac = l3->alien[i];
+		ac = alien[i];
 		if (ac) {
 			spin_lock_irqsave(&ac->lock, flags);
 			__drain_alien_cache(cachep, ac, i);
@@ -899,9 +900,18 @@ static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
 	}
 }
 #else
-#define alloc_alien_cache(node, limit) do { } while (0)
-#define free_alien_cache(ac_ptr) do { } while (0)
-#define drain_alien_cache(cachep, l3) do { } while (0)
+
+#define drain_alien_cache(cachep, alien) do { } while (0)
+
+static inline struct array_cache **alloc_alien_cache(int node, int limit)
+{
+	return (struct array_cache **) 0x01020304ul;
+}
+
+static inline void free_alien_cache(struct array_cache **ac_ptr)
+{
+}
+
 #endif
 
 static int __devinit cpuup_callback(struct notifier_block *nfb,
@@ -935,6 +945,11 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
 				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
+				/*
+				 * The l3s don't come and go as CPUs come and
+				 * go. cache_chain_mutex is sufficient
+				 * protection here.
+				 */
 				cachep->nodelists[node] = l3;
 			}
 
@@ -949,26 +964,46 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 		   & array cache's */
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
+			struct array_cache *shared;
+			struct array_cache **alien;
 
 			nc = alloc_arraycache(node, cachep->limit,
 						cachep->batchcount);
 			if (!nc)
 				goto bad;
+			shared = alloc_arraycache(node,
+					cachep->shared * cachep->batchcount,
+					0xbaadf00d);
+			if (!shared)
+				goto bad;
+
+			alien = alloc_alien_cache(node, cachep->limit);
+			if (!alien)
+				goto bad;
 			cachep->array[cpu] = nc;
 
 			l3 = cachep->nodelists[node];
 			BUG_ON(!l3);
-			if (!l3->shared) {
-				if (!(nc = alloc_arraycache(node,
-					cachep->shared *
-					cachep->batchcount,
-					0xbaadf00d)))
-					goto bad;
 
-				/* we are serialised from CPU_DEAD or
-				   CPU_UP_CANCELLED by the cpucontrol lock */
-				l3->shared = nc;
+			spin_lock_irq(&l3->list_lock);
+			if (!l3->shared) {
+				/*
+				 * We are serialised from CPU_DEAD or
+				 * CPU_UP_CANCELLED by the cpucontrol lock
+				 */
+				l3->shared = shared;
+				shared = NULL;
+			}
+#ifdef CONFIG_NUMA
+			if (!l3->alien) {
+				l3->alien = alien;
+				alien = NULL;
 			}
+#endif
+			spin_unlock_irq(&l3->list_lock);
+
+			kfree(shared);
+			free_alien_cache(alien);
 		}
 		mutex_unlock(&cache_chain_mutex);
 		break;
@@ -977,25 +1012,34 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
+		/*
+		 * Even if all the cpus of a node are down, we don't free the
+		 * kmem_list3 of any cache. This to avoid a race between
+		 * cpu_down, and a kmalloc allocation from another cpu for
+		 * memory from the node of the cpu going down. The list3
+		 * structure is usually allocated from kmem_cache_create() and
+		 * gets destroyed at kmem_cache_destroy().
+		 */
 		/* fall thru */
 	case CPU_UP_CANCELED:
 		mutex_lock(&cache_chain_mutex);
 
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
+			struct array_cache *shared;
+			struct array_cache **alien;
 			cpumask_t mask;
 
 			mask = node_to_cpumask(node);
-			spin_lock_irq(&cachep->spinlock);
 			/* cpu is dead; no one can alloc from it. */
 			nc = cachep->array[cpu];
 			cachep->array[cpu] = NULL;
 			l3 = cachep->nodelists[node];
 
 			if (!l3)
-				goto unlock_cache;
+				goto free_array_cache;
 
-			spin_lock(&l3->list_lock);
+			spin_lock_irq(&l3->list_lock);
 
 			/* Free limit for this kmem_list3 */
 			l3->free_limit -= cachep->batchcount;
@@ -1003,34 +1047,44 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 				free_block(cachep, nc->entry, nc->avail, node);
 
 			if (!cpus_empty(mask)) {
-				spin_unlock(&l3->list_lock);
-				goto unlock_cache;
+				spin_unlock_irq(&l3->list_lock);
+				goto free_array_cache;
 			}
 
-			if (l3->shared) {
+			shared = l3->shared;
+			if (shared) {
 				free_block(cachep, l3->shared->entry,
 					l3->shared->avail, node);
-				kfree(l3->shared);
 				l3->shared = NULL;
 			}
-			if (l3->alien) {
-				drain_alien_cache(cachep, l3);
-				free_alien_cache(l3->alien);
-				l3->alien = NULL;
-			}
 
-			/* free slabs belonging to this node */
-			if (__node_shrink(cachep, node)) {
-				cachep->nodelists[node] = NULL;
-				spin_unlock(&l3->list_lock);
-				kfree(l3);
-			} else {
-				spin_unlock(&l3->list_lock);
+			alien = l3->alien;
+			l3->alien = NULL;
+
+			spin_unlock_irq(&l3->list_lock);
+
+			kfree(shared);
+			if (alien) {
+				drain_alien_cache(cachep, alien);
+				free_alien_cache(alien);
 			}
-unlock_cache:
-			spin_unlock_irq(&cachep->spinlock);
+free_array_cache:
 			kfree(nc);
 		}
+		/*
+		 * In the previous loop, all the objects were freed to
+		 * the respective cache's slabs, now we can go ahead and
+		 * shrink each nodelist to its limit.
+		 */
+		list_for_each_entry(cachep, &cache_chain, next) {
+			l3 = cachep->nodelists[node];
+			if (!l3)
+				continue;
+			spin_lock_irq(&l3->list_lock);
+			/* free slabs belonging to this node */
+			__node_shrink(cachep, node);
+			spin_unlock_irq(&l3->list_lock);
+		}
 		mutex_unlock(&cache_chain_mutex);
 		break;
 #endif
@@ -1119,7 +1173,6 @@ void __init kmem_cache_init(void)
 		BUG();
 
 	cache_cache.colour = left_over / cache_cache.colour_off;
-	cache_cache.colour_next = 0;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
 				      sizeof(struct slab), cache_line_size());
 
@@ -2011,18 +2064,16 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 
 	smp_call_function_all_cpus(do_drain, cachep);
 	check_irq_on();
-	spin_lock_irq(&cachep->spinlock);
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
 		if (l3) {
-			spin_lock(&l3->list_lock);
+			spin_lock_irq(&l3->list_lock);
 			drain_array_locked(cachep, l3->shared, 1, node);
-			spin_unlock(&l3->list_lock);
+			spin_unlock_irq(&l3->list_lock);
 			if (l3->alien)
-				drain_alien_cache(cachep, l3);
+				drain_alien_cache(cachep, l3->alien);
 		}
 	}
-	spin_unlock_irq(&cachep->spinlock);
 }
 
 static int __node_shrink(struct kmem_cache *cachep, int node)
@@ -2324,20 +2375,20 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 */
 	ctor_flags |= SLAB_CTOR_ATOMIC;
 
-	/* About to mess with non-constant members - lock. */
+	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
-	spin_lock(&cachep->spinlock);
+	l3 = cachep->nodelists[nodeid];
+	spin_lock(&l3->list_lock);
 
 	/* Get colour for the slab, and cal the next value. */
-	offset = cachep->colour_next;
-	cachep->colour_next++;
-	if (cachep->colour_next >= cachep->colour)
-		cachep->colour_next = 0;
-	offset *= cachep->colour_off;
+	offset = l3->colour_next;
+	l3->colour_next++;
+	if (l3->colour_next >= cachep->colour)
+		l3->colour_next = 0;
+	spin_unlock(&l3->list_lock);
 
-	spin_unlock(&cachep->spinlock);
+	offset *= cachep->colour_off;
 
-	check_irq_off();
 	if (local_flags & __GFP_WAIT)
 		local_irq_enable();
 
@@ -2367,7 +2418,6 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
 	check_irq_off();
-	l3 = cachep->nodelists[nodeid];
 	spin_lock(&l3->list_lock);
 
 	/* Make slab active. */
@@ -2725,6 +2775,7 @@ static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int node
 	BUG_ON(!l3);
 
 retry:
+	check_irq_off();
 	spin_lock(&l3->list_lock);
 	entry = l3->slabs_partial.next;
 	if (entry == &l3->slabs_partial) {
@@ -3304,11 +3355,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount
 	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 
 	check_irq_on();
-	spin_lock_irq(&cachep->spinlock);
+	spin_lock(&cachep->spinlock);
 	cachep->batchcount = batchcount;
 	cachep->limit = limit;
 	cachep->shared = shared;
-	spin_unlock_irq(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 
 	for_each_online_cpu(i) {
 		struct array_cache *ccold = new.new[i];
@@ -3440,7 +3491,7 @@ static void cache_reap(void *unused)
 
 		l3 = searchp->nodelists[numa_node_id()];
 		if (l3->alien)
-			drain_alien_cache(searchp, l3);
+			drain_alien_cache(searchp, l3->alien);
 		spin_lock_irq(&l3->list_lock);
 
 		drain_array_locked(searchp, cpu_cache_get(searchp), 0,
@@ -3564,8 +3615,7 @@ static int s_show(struct seq_file *m, void *p)
 	int node;
 	struct kmem_list3 *l3;
 
-	check_irq_on();
-	spin_lock_irq(&cachep->spinlock);
+	spin_lock(&cachep->spinlock);
 	active_objs = 0;
 	num_slabs = 0;
 	for_each_online_node(node) {
@@ -3573,7 +3623,8 @@ static int s_show(struct seq_file *m, void *p)
 		if (!l3)
 			continue;
 
-		spin_lock(&l3->list_lock);
+		check_irq_on();
+		spin_lock_irq(&l3->list_lock);
 
 		list_for_each(q, &l3->slabs_full) {
 			slabp = list_entry(q, struct slab, list);
@@ -3598,9 +3649,10 @@ static int s_show(struct seq_file *m, void *p)
 			num_slabs++;
 		}
 		free_objects += l3->free_objects;
-		shared_avail += l3->shared->avail;
+		if (l3->shared)
+			shared_avail += l3->shared->avail;
 
-		spin_unlock(&l3->list_lock);
+		spin_unlock_irq(&l3->list_lock);
 	}
 	num_slabs += active_slabs;
 	num_objs = num_slabs * cachep->num;
@@ -3644,7 +3696,7 @@ static int s_show(struct seq_file *m, void *p)
 	}
 #endif
 	seq_putc(m, '\n');
-	spin_unlock_irq(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 	return 0;
 }
 
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(slab_reclaim_pages);
 
 #ifdef CONFIG_SMP
 
-void *__alloc_percpu(size_t size, size_t align)
+void *__alloc_percpu(size_t size)
 {
 	int i;
 	struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,19 +34,22 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-void put_page(struct page *page)
+static void put_compound_page(struct page *page)
 {
-	if (unlikely(PageCompound(page))) {
-		page = (struct page *)page_private(page);
-		if (put_page_testzero(page)) {
-			void (*dtor)(struct page *page);
+	page = (struct page *)page_private(page);
+	if (put_page_testzero(page)) {
+		void (*dtor)(struct page *page);
 
-			dtor = (void (*)(struct page *))page[1].mapping;
-			(*dtor)(page);
-		}
-		return;
+		dtor = (void (*)(struct page *))page[1].mapping;
+		(*dtor)(page);
 	}
-	if (put_page_testzero(page))
+}
+
+void put_page(struct page *page)
+{
+	if (unlikely(PageCompound(page)))
+		put_compound_page(page);
+	else if (put_page_testzero(page))
 		__page_cache_release(page);
 }
 EXPORT_SYMBOL(put_page);
@@ -244,6 +247,15 @@ void release_pages(struct page **pages, int nr, int cold)
 		struct page *page = pages[i];
 		struct zone *pagezone;
 
+		if (unlikely(PageCompound(page))) {
+			if (zone) {
+				spin_unlock_irq(&zone->lru_lock);
+				zone = NULL;
+			}
+			put_compound_page(page);
+			continue;
+		}
+
 		if (!put_page_testzero(page))
 			continue;
 