author	Ingo Molnar <mingo@elte.hu>	2008-12-31 02:19:48 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-31 02:19:48 -0500
commit	818fa7f3908c7bd6c0045e9d94dc23a899ef6144 (patch)
tree	ad3435c3f57c8222ad61709b716168932f13be6c /mm
parent	3fd4bc015ef879a7d2b955ce97fb125e3a51ba7e (diff)
parent	5fdf7e5975a0b0f6a0370655612c5dca3fd6311b (diff)
Merge branch 'tracing/kmemtrace' into tracing/kmemtrace2
Diffstat (limited to 'mm')
-rw-r--r--	mm/Makefile	1
-rw-r--r--	mm/bounce.c	9
-rw-r--r--	mm/failslab.c	59
-rw-r--r--	mm/memory.c	15
-rw-r--r--	mm/slab.c	81
-rw-r--r--	mm/slub.c	50
6 files changed, 124 insertions, 91 deletions
diff --git a/mm/Makefile b/mm/Makefile
index 3782eb66d4b3..c92e8af13206 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
+obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
diff --git a/mm/bounce.c b/mm/bounce.c
index bf0cf7c8387b..e590272fe7a8 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -198,8 +198,13 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		/*
 		 * irk, bounce it
 		 */
-		if (!bio)
-			bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
+		if (!bio) {
+			unsigned int cnt = (*bio_orig)->bi_vcnt;
+
+			bio = bio_alloc(GFP_NOIO, cnt);
+			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
+		}
+
 
 		to = bio->bi_io_vec + i;
 
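Note on the bounce.c hunk above (a hedged reading; the rationale is not stated in the diff itself): bi_io_vec entries coming out of bio_alloc() are recycled through a mempool and may hold stale data, while __blk_queue_bounce() fills only the slots it actually bounces and later tells filled and untouched slots apart by inspecting the vec entry. A minimal sketch of that pattern, with needs_bounce() as a hypothetical predicate:

	/* Pass 1: fill only the slots that need bouncing. */
	for (i = 0; i < cnt; i++) {
		struct bio_vec *to = bio->bi_io_vec + i;

		if (needs_bounce(&from[i]))	/* hypothetical */
			to->bv_page = mempool_alloc(pool, GFP_NOIO);
	}
	/* Pass 2: copy the rest verbatim -- only safe if untouched
	 * slots read as zero, hence the memset after bio_alloc(). */
	for (i = 0; i < cnt; i++) {
		struct bio_vec *to = bio->bi_io_vec + i;

		if (!to->bv_page)
			*to = (*bio_orig)->bi_io_vec[i];
	}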
diff --git a/mm/failslab.c b/mm/failslab.c
new file mode 100644
index 000000000000..7c6ea6493f80
--- /dev/null
+++ b/mm/failslab.c
@@ -0,0 +1,59 @@
+#include <linux/fault-inject.h>
+
+static struct {
+	struct fault_attr attr;
+	u32 ignore_gfp_wait;
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+	struct dentry *ignore_gfp_wait_file;
+#endif
+} failslab = {
+	.attr = FAULT_ATTR_INITIALIZER,
+	.ignore_gfp_wait = 1,
+};
+
+bool should_failslab(size_t size, gfp_t gfpflags)
+{
+	if (gfpflags & __GFP_NOFAIL)
+		return false;
+
+	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
+		return false;
+
+	return should_fail(&failslab.attr, size);
+}
+
+static int __init setup_failslab(char *str)
+{
+	return setup_fault_attr(&failslab.attr, str);
+}
+__setup("failslab=", setup_failslab);
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init failslab_debugfs_init(void)
+{
+	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	struct dentry *dir;
+	int err;
+
+	err = init_fault_attr_dentries(&failslab.attr, "failslab");
+	if (err)
+		return err;
+	dir = failslab.attr.dentries.dir;
+
+	failslab.ignore_gfp_wait_file =
+		debugfs_create_bool("ignore-gfp-wait", mode, dir,
+				    &failslab.ignore_gfp_wait);
+
+	if (!failslab.ignore_gfp_wait_file) {
+		err = -ENOMEM;
+		debugfs_remove(failslab.ignore_gfp_wait_file);
+		cleanup_fault_attr_dentries(&failslab.attr);
+	}
+
+	return err;
+}
+
+late_initcall(failslab_debugfs_init);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
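The new mm/failslab.c makes slab fault injection a standalone fault_attr consumer shared by both allocators. As with other fault_attr users, the boot string parsed by setup_fault_attr() has the form "<interval>,<probability>,<space>,<times>" (see Documentation/fault-injection/fault-injection.txt), and the debugfs knobs appear under /sys/kernel/debug/failslab/ when CONFIG_FAULT_INJECTION_DEBUG_FS is set. A sketch of the consumer contract the allocators follow, with do_real_alloc() as a hypothetical stand-in:

	void *my_alloc(size_t size, gfp_t gfpflags)
	{
		/* Ask the shared fault-injection core first; it returns
		 * true when this allocation should fail on purpose. */
		if (should_failslab(size, gfpflags))
			return NULL;

		return do_real_alloc(size, gfpflags);	/* hypothetical */
	}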
diff --git a/mm/memory.c b/mm/memory.c
index f01b7eed6e16..0a2010a9518c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3075,3 +3075,18 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	}
 	up_read(&current->mm->mmap_sem);
 }
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+	might_sleep();
+	/*
+	 * it would be nicer only to annotate paths which are not under
+	 * pagefault_disable, however that requires a larger audit and
+	 * providing helpers like get_user_atomic.
+	 */
+	if (!in_atomic() && current->mm)
+		might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
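The point of the new might_fault() (hedged inference from the code above): user-copy paths can fault and then take mmap_sem for reading, but on most runs no fault occurs, so lockdep would rarely observe the lock dependency. Calling might_fault() asserts the potential mmap_sem acquisition deterministically. A sketch of how a uaccess helper would use it; my_copy_from_user() is illustrative, not part of this diff:

	static inline long my_copy_from_user(void *dst,
			const void __user *src, unsigned long n)
	{
		might_fault();	/* may fault -> may take mmap_sem */
		return __copy_from_user(dst, src, n);
	}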
diff --git a/mm/slab.c b/mm/slab.c
index 7f72bb386a09..83075f36df7b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2132,6 +2132,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
+ * Note that kmem_cache_name() is not guaranteed to return the same pointer,
+ * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2618,7 +2620,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-					      local_flags & ~GFP_THISNODE, nodeid);
+					      local_flags, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -3006,7 +3008,7 @@ retry:
 		 * there must be at least one object available for
 		 * allocation.
 		 */
-		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
+		BUG_ON(slabp->inuse >= cachep->num);
 
 		while (slabp->inuse < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
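Why the dropped half of the BUG_ON could go (hedged inference: slabp->inuse is an unsigned count, so the inuse < 0 arm could never fire and was dead code):

	unsigned int inuse = 0;

	if (inuse < 0)	/* always false for an unsigned type;
			 * many compilers warn about this comparison */
		BUG();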
@@ -3115,79 +3117,14 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-#ifdef CONFIG_FAILSLAB
-
-static struct failslab_attr {
-
-	struct fault_attr attr;
-
-	u32 ignore_gfp_wait;
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-	struct dentry *ignore_gfp_wait_file;
-#endif
-
-} failslab = {
-	.attr = FAULT_ATTR_INITIALIZER,
-	.ignore_gfp_wait = 1,
-};
-
-static int __init setup_failslab(char *str)
-{
-	return setup_fault_attr(&failslab.attr, str);
-}
-__setup("failslab=", setup_failslab);
-
-static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
 	if (cachep == &cache_cache)
-		return 0;
-	if (flags & __GFP_NOFAIL)
-		return 0;
-	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
-		return 0;
+		return false;
 
-	return should_fail(&failslab.attr, obj_size(cachep));
+	return should_failslab(obj_size(cachep), flags);
 }
 
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
-static int __init failslab_debugfs(void)
-{
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
-	struct dentry *dir;
-	int err;
-
-	err = init_fault_attr_dentries(&failslab.attr, "failslab");
-	if (err)
-		return err;
-	dir = failslab.attr.dentries.dir;
-
-	failslab.ignore_gfp_wait_file =
-		debugfs_create_bool("ignore-gfp-wait", mode, dir,
-				      &failslab.ignore_gfp_wait);
-
-	if (!failslab.ignore_gfp_wait_file) {
-		err = -ENOMEM;
-		debugfs_remove(failslab.ignore_gfp_wait_file);
-		cleanup_fault_attr_dentries(&failslab.attr);
-	}
-
-	return err;
-}
-
-late_initcall(failslab_debugfs);
-
-#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
-
-#else /* CONFIG_FAILSLAB */
-
-static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
-{
-	return 0;
-}
-
-#endif /* CONFIG_FAILSLAB */
-
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
@@ -3390,7 +3327,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
-	if (should_failslab(cachep, flags))
+	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
@@ -3466,7 +3403,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
-	if (should_failslab(cachep, flags))
+	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
diff --git a/mm/slub.c b/mm/slub.c
index 7bf8cf8ec082..4fac7bbb029a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -25,6 +25,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/fault-inject.h>
 
 /*
  * Lock order:
@@ -154,6 +155,10 @@
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+#define OO_SHIFT	16
+#define OO_MASK		((1 << OO_SHIFT) - 1)
+#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
+
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
@@ -291,7 +296,7 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 						unsigned long size)
 {
 	struct kmem_cache_order_objects x = {
-		(order << 16) + (PAGE_SIZE << order) / size
+		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
 	};
 
 	return x;
@@ -299,12 +304,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 
 static inline int oo_order(struct kmem_cache_order_objects x)
 {
-	return x.x >> 16;
+	return x.x >> OO_SHIFT;
 }
 
 static inline int oo_objects(struct kmem_cache_order_objects x)
 {
-	return x.x & ((1 << 16) - 1);
+	return x.x & OO_MASK;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
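What the new OO_SHIFT/OO_MASK constants name: kmem_cache_order_objects packs the page allocation order into the bits above OO_SHIFT and the objects-per-slab count into the low 16 bits of a single word, as oo_make()/oo_order()/oo_objects() show above. A standalone userspace illustration of the same packing:

	#include <assert.h>

	#define OO_SHIFT 16
	#define OO_MASK  ((1UL << OO_SHIFT) - 1)

	int main(void)
	{
		/* order-3 slab holding 512 objects, packed as in oo_make() */
		unsigned long x = (3UL << OO_SHIFT) + 512;

		assert((x >> OO_SHIFT) == 3);	/* oo_order()   */
		assert((x & OO_MASK) == 512);	/* oo_objects() */
		return 0;
	}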
@@ -693,7 +698,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 		object_err(s, page, p, "Freepointer corrupt");
 		/*
-		 * No choice but to zap it and thus loose the remainder
+		 * No choice but to zap it and thus lose the remainder
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
@@ -765,8 +770,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	}
 
 	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
-	if (max_objects > 65535)
-		max_objects = 65535;
+	if (max_objects > MAX_OBJS_PER_PAGE)
+		max_objects = MAX_OBJS_PER_PAGE;
 
 	if (page->objects != max_objects) {
 		slab_err(s, page, "Wrong number of objects. Found %d but "
@@ -1592,6 +1597,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	might_sleep_if(gfpflags & __GFP_WAIT);
+
+	if (should_failslab(s->objsize, gfpflags))
+		return NULL;
+
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
 	objsize = c->objsize;
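Two effects of hoisting these checks to the top of slab_alloc() (hedged reading): might_sleep_if() makes "allocation that may wait, called from atomic context" warn on every run with sleep-in-atomic debugging enabled, not just on the rare slow path that really sleeps, and should_failslab() gives SLUB the fault-injection hook SLAB already had. Illustrative caller behavior:

	spin_lock_irqsave(&lock, flags);
	p = kmem_cache_alloc(s, GFP_KERNEL);	/* bug: __GFP_WAIT is set,
						 * so this now warns
						 * deterministically */
	spin_unlock_irqrestore(&lock, flags);

	p = kmem_cache_alloc(s, GFP_ATOMIC);	/* fine: no __GFP_WAIT */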
@@ -1766,7 +1776,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab object the object resides */
+/* Figure out on which slab page the object resides */
 static struct page *get_object_page(const void *x)
 {
 	struct page *page = virt_to_head_page(x);
@@ -1838,8 +1848,8 @@ static inline int slab_order(int size, int min_objects,
 	int rem;
 	int min_order = slub_min_order;
 
-	if ((PAGE_SIZE << min_order) / size > 65535)
-		return get_order(size * 65535) - 1;
+	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
 	for (order = max(min_order,
 			fls(min_objects * size - 1) - PAGE_SHIFT);
@@ -2104,8 +2114,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
-static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
-							   int node)
+static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
@@ -2143,7 +2152,6 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	local_irq_save(flags);
 	add_partial(n, page, 0);
 	local_irq_restore(flags);
-	return n;
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
2149static void free_kmem_cache_nodes(struct kmem_cache *s) 2157static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2175,8 +2183,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2175 n = &s->local_node; 2183 n = &s->local_node;
2176 else { 2184 else {
2177 if (slab_state == DOWN) { 2185 if (slab_state == DOWN) {
2178 n = early_kmem_cache_node_alloc(gfpflags, 2186 early_kmem_cache_node_alloc(gfpflags, node);
2179 node);
2180 continue; 2187 continue;
2181 } 2188 }
2182 n = kmem_cache_alloc_node(kmalloc_caches, 2189 n = kmem_cache_alloc_node(kmalloc_caches,
@@ -3176,8 +3183,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 
-		if (sysfs_slab_alias(s, name))
+		if (sysfs_slab_alias(s, name)) {
+			down_write(&slub_lock);
+			s->refcount--;
+			up_write(&slub_lock);
 			goto err;
+		}
 		return s;
 	}
 
3183 3194
@@ -3187,8 +3198,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3187 size, align, flags, ctor)) { 3198 size, align, flags, ctor)) {
3188 list_add(&s->list, &slab_caches); 3199 list_add(&s->list, &slab_caches);
3189 up_write(&slub_lock); 3200 up_write(&slub_lock);
3190 if (sysfs_slab_add(s)) 3201 if (sysfs_slab_add(s)) {
3202 down_write(&slub_lock);
3203 list_del(&s->list);
3204 up_write(&slub_lock);
3205 kfree(s);
3191 goto err; 3206 goto err;
3207 }
3192 return s; 3208 return s;
3193 } 3209 }
3194 kfree(s); 3210 kfree(s);
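Both kmem_cache_create() hunks fix the same class of leak (hedged reading): the cache was already published under slub_lock (refcount bumped for an alias, or linked into slab_caches) before sysfs registration could still fail, and the old code jumped to err without undoing that. State published under a lock has to be unpublished under the same lock before freeing; the pattern, reduced to its shape:

	down_write(&slub_lock);
	list_add(&s->list, &slab_caches);	/* publish under the lock */
	up_write(&slub_lock);

	if (sysfs_slab_add(s)) {		/* late failure */
		down_write(&slub_lock);
		list_del(&s->list);		/* unpublish under the lock */
		up_write(&slub_lock);
		kfree(s);
		goto err;
	}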
@@ -4412,7 +4428,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
- * available lest we loose that information.
+ * available lest we lose that information.
  */
 struct saved_alias {
 	struct kmem_cache *s;