diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 13:31:36 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 13:31:36 -0500 |
commit | 8dcd175bc3d50b78413c56d5b17d4bddd77412ef (patch) | |
tree | 2c2fb25759b43f2e73830f07ef3b444d76825280 /mm/slub.c | |
parent | afe6fe7036c6efdcb46cabc64bec9b6e4a005210 (diff) | |
parent | fff04900ea79915939ef6a3aad78fca6511a3034 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
- a few misc things
- ocfs2 updates
- most of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (159 commits)
tools/testing/selftests/proc/proc-self-syscall.c: remove duplicate include
proc: more robust bulk read test
proc: test /proc/*/maps, smaps, smaps_rollup, statm
proc: use seq_puts() everywhere
proc: read kernel cpu stat pointer once
proc: remove unused argument in proc_pid_lookup()
fs/proc/thread_self.c: code cleanup for proc_setup_thread_self()
fs/proc/self.c: code cleanup for proc_setup_self()
proc: return exit code 4 for skipped tests
mm,mremap: bail out earlier in mremap_to under map pressure
mm/sparse: fix a bad comparison
mm/memory.c: do_fault: avoid usage of stale vm_area_struct
writeback: fix inode cgroup switching comment
mm/huge_memory.c: fix "orig_pud" set but not used
mm/hotplug: fix an imbalance with DEBUG_PAGEALLOC
mm/memcontrol.c: fix bad line in comment
mm/cma.c: cma_declare_contiguous: correct err handling
mm/page_ext.c: fix an imbalance with kmemleak
mm/compaction: pass pgdat to too_many_isolated() instead of zone
mm: remove zone_lru_lock() function, access ->lru_lock directly
...
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 16 |
1 file changed, 7 insertions, 9 deletions
@@ -1093,8 +1093,7 @@ static void setup_page_debug(struct kmem_cache *s, void *addr, int order) | |||
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | static inline int alloc_consistency_checks(struct kmem_cache *s, | 1095 | static inline int alloc_consistency_checks(struct kmem_cache *s, |
1096 | struct page *page, | 1096 | struct page *page, void *object) |
1097 | void *object, unsigned long addr) | ||
1098 | { | 1097 | { |
1099 | if (!check_slab(s, page)) | 1098 | if (!check_slab(s, page)) |
1100 | return 0; | 1099 | return 0; |
@@ -1115,7 +1114,7 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, | |||
1115 | void *object, unsigned long addr) | 1114 | void *object, unsigned long addr) |
1116 | { | 1115 | { |
1117 | if (s->flags & SLAB_CONSISTENCY_CHECKS) { | 1116 | if (s->flags & SLAB_CONSISTENCY_CHECKS) { |
1118 | if (!alloc_consistency_checks(s, page, object, addr)) | 1117 | if (!alloc_consistency_checks(s, page, object)) |
1119 | goto bad; | 1118 | goto bad; |
1120 | } | 1119 | } |
1121 | 1120 | ||
@@ -2130,7 +2129,7 @@ redo: | |||
2130 | if (!lock) { | 2129 | if (!lock) { |
2131 | lock = 1; | 2130 | lock = 1; |
2132 | /* | 2131 | /* |
2133 | * Taking the spinlock removes the possiblity | 2132 | * Taking the spinlock removes the possibility |
2134 | * that acquire_slab() will see a slab page that | 2133 | * that acquire_slab() will see a slab page that |
2135 | * is frozen | 2134 | * is frozen |
2136 | */ | 2135 | */ |
@@ -2254,8 +2253,8 @@ static void unfreeze_partials(struct kmem_cache *s, | |||
2254 | } | 2253 | } |
2255 | 2254 | ||
2256 | /* | 2255 | /* |
2257 | * Put a page that was just frozen (in __slab_free) into a partial page | 2256 | * Put a page that was just frozen (in __slab_free|get_partial_node) into a |
2258 | * slot if available. | 2257 | * partial page slot if available. |
2259 | * | 2258 | * |
2260 | * If we did not find a slot then simply move all the partials to the | 2259 | * If we did not find a slot then simply move all the partials to the |
2261 | * per node partial list. | 2260 | * per node partial list. |
@@ -2482,8 +2481,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, | |||
2482 | stat(s, ALLOC_SLAB); | 2481 | stat(s, ALLOC_SLAB); |
2483 | c->page = page; | 2482 | c->page = page; |
2484 | *pc = c; | 2483 | *pc = c; |
2485 | } else | 2484 | } |
2486 | freelist = NULL; | ||
2487 | 2485 | ||
2488 | return freelist; | 2486 | return freelist; |
2489 | } | 2487 | } |
@@ -4264,7 +4262,7 @@ void __init kmem_cache_init(void) | |||
4264 | cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, | 4262 | cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, |
4265 | slub_cpu_dead); | 4263 | slub_cpu_dead); |
4266 | 4264 | ||
4267 | pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n", | 4265 | pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", |
4268 | cache_line_size(), | 4266 | cache_line_size(), |
4269 | slub_min_order, slub_max_order, slub_min_objects, | 4267 | slub_min_order, slub_max_order, slub_min_objects, |
4270 | nr_cpu_ids, nr_node_ids); | 4268 | nr_cpu_ids, nr_node_ids); |