author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-27 00:32:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-27 00:32:40 -0400
commit     e12fab28df1d7ae9369839a3af260a41447a5e79 (patch)
tree       2f638674612075002fb6cc5ad1201c701fe4c741
parent     478a1469a7d27fe6b2f85fc801ecdeb8afc836e6 (diff)
parent     bbccb9c7bbeb6c78eb467c191c97360160f13704 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
"10 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
drivers/pinctrl/intel/pinctrl-baytrail.c: fix build with gcc-4.4
update "mm/zsmalloc: don't fail if can't create debugfs info"
dma-debug: avoid spinlock recursion when disabling dma-debug
mm: oom_reaper: remove some bloat
memcg: fix mem_cgroup_out_of_memory() return value.
ocfs2: fix improper handling of return errno
mm: slub: remove unused virt_to_obj()
mm: kasan: remove unused 'reserved' field from struct kasan_alloc_meta
mm: make CONFIG_DEFERRED_STRUCT_PAGE_INIT depends on !FLATMEM explicitly
seqlock: fix raw_read_seqcount_latch()
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 10
-rw-r--r--  fs/ocfs2/inode.c | 7
-rw-r--r--  include/linux/mm_types.h | 2
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/seqlock.h | 4
-rw-r--r--  include/linux/slub_def.h | 16
-rw-r--r--  kernel/fork.c | 2
-rw-r--r--  lib/dma-debug.c | 2
-rw-r--r--  mm/Kconfig | 1
-rw-r--r--  mm/kasan/kasan.h | 1
-rw-r--r--  mm/memcontrol.c | 2
-rw-r--r--  mm/zsmalloc.c | 35

12 files changed, 37 insertions, 49 deletions
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 55182fc58c6a..677a811b3a6f 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -153,8 +153,10 @@ struct byt_community {
         .name = (n), \
         .pins = (p), \
         .npins = ARRAY_SIZE((p)), \
         .has_simple_funcs = 1, \
-        .simple_funcs = (f), \
+        { \
+                .simple_funcs = (f), \
+        }, \
         .nfuncs = ARRAY_SIZE((f)), \
 }
 #define PIN_GROUP_MIXED(n, p, f) \
@@ -163,7 +165,9 @@ struct byt_community {
         .pins = (p), \
         .npins = ARRAY_SIZE((p)), \
         .has_simple_funcs = 0, \
-        .mixed_funcs = (f), \
+        { \
+                .mixed_funcs = (f), \
+        }, \
         .nfuncs = ARRAY_SIZE((f)), \
 }
 
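The build failure behind this pair of hunks: gcc-4.4 rejects a designated initializer that names a member of an anonymous union directly (newer compilers accept it), whereas giving the union its own braced initializer works everywhere. A standalone sketch of the double-brace form the fix switches to; the struct, field and variable names below are invented for the demo, not the driver's real types:

#include <stdio.h>

struct group {
        const char *name;
        union {                         /* anonymous union, as in the driver */
                const int *simple_funcs;
                const int *mixed_funcs;
        };
        int nfuncs;
};

static const int funcs[] = { 1, 2, 3 };

/* The braced form used by the fix: the anonymous union is initialized as
 * its own sub-object, with the designator inside the inner braces. */
static const struct group demo_group = {
        .name = "demo",
        {
                .simple_funcs = funcs,
        },
        .nfuncs = 3,
};

int main(void)
{
        printf("%s: %d funcs, first = %d\n",
               demo_group.name, demo_group.nfuncs, demo_group.simple_funcs[0]);
        return 0;
}
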
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0748777f2e2a..c56a7679df93 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -176,12 +176,7 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
         }
         if (is_bad_inode(inode)) {
                 iput(inode);
-                if ((flags & OCFS2_FI_FLAG_FILECHECK_CHK) ||
-                    (flags & OCFS2_FI_FLAG_FILECHECK_FIX))
-                        /* Return OCFS2_FILECHECK_ERR_XXX related errno */
-                        inode = ERR_PTR(rc);
-                else
-                        inode = ERR_PTR(-ESTALE);
+                inode = ERR_PTR(rc);
                 goto bail;
         }
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d553855503e6..ca3e517980a0 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -514,7 +514,9 @@ struct mm_struct {
 #ifdef CONFIG_HUGETLB_PAGE
         atomic_long_t hugetlb_usage;
 #endif
+#ifdef CONFIG_MMU
         struct work_struct async_put_work;
+#endif
 };
 
 static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 23e075dcdfe4..6e42ada26345 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2745,10 +2745,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
-/* same as above but performs the slow path from the async kontext. Can
+#ifdef CONFIG_MMU
+/* same as above but performs the slow path from the async context. Can
  * be called from the atomic context as well
  */
 extern void mmput_async(struct mm_struct *);
+#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e0582106ef4f..7973a821ac58 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -277,7 +277,7 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
 
 static inline int raw_read_seqcount_latch(seqcount_t *s)
 {
-        return lockless_dereference(s->sequence);
+        return lockless_dereference(s)->sequence;
 }
 
 /**
@@ -331,7 +331,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
  *        unsigned seq, idx;
  *
  *        do {
- *                seq = lockless_dereference(latch->seq);
+ *                seq = lockless_dereference(latch)->seq;
  *
  *                idx = seq & 0x01;
  *                entry = data_query(latch->data[idx], ...);
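The change is about what receives the dependency-ordered load: lockless_dereference() is meant to be handed a pointer, and the field is then read through the result; s->sequence is a plain integer, so "dereferencing" it is not what the macro is for. A userspace sketch of the fixed shape; the my_* macros below are simplified stand-ins (the kernel's lockless_dereference() additionally issues smp_read_barrier_depends()), not the real definitions:

#include <stdio.h>

/* Simplified stand-ins for READ_ONCE()/lockless_dereference(). */
#define my_READ_ONCE(x)                 (*(const volatile __typeof__(x) *)&(x))
#define my_lockless_dereference(p)      my_READ_ONCE(p)

struct my_seqcount {
        unsigned sequence;
};

static int my_raw_read_seqcount_latch(struct my_seqcount *s)
{
        /* Fixed form: load the pointer, then read the member through it. */
        return (int)my_lockless_dereference(s)->sequence;
}

int main(void)
{
        struct my_seqcount sc = { .sequence = 4 };

        printf("latch sequence: %d\n", my_raw_read_seqcount_latch(&sc));
        return 0;
}
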
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 665cd0cd18b8..d1faa019c02a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -111,22 +111,6 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
 }
 #endif
 
-
-/**
- * virt_to_obj - returns address of the beginning of object.
- * @s: object's kmem_cache
- * @slab_page: address of slab page
- * @x: address within object memory range
- *
- * Returns address of the beginning of object
- */
-static inline void *virt_to_obj(struct kmem_cache *s,
-                                const void *slab_page,
-                                const void *x)
-{
-        return (void *)x - ((x - slab_page) % s->size);
-}
-
 void object_err(struct kmem_cache *s, struct page *page,
                         u8 *object, char *reason);
 
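For reference, the helper removed above rounded an address inside a slab page down to the start of the object containing it: obj = x - ((x - slab_page) % size). A tiny userspace rendering of that arithmetic, with made-up page base and object size:

#include <stdint.h>
#include <stdio.h>

/* Same formula as the deleted virt_to_obj(), on plain integers. */
static uintptr_t obj_base(uintptr_t slab_page, size_t size, uintptr_t x)
{
        return x - ((x - slab_page) % size);
}

int main(void)
{
        uintptr_t slab_page = 0x1000;            /* pretend page base */
        size_t size = 64;                        /* pretend object size */
        uintptr_t x = slab_page + 2 * size + 37; /* interior of object #2 */

        /* Prints 0x1080, the start of object #2. */
        printf("object base: %#lx\n", (unsigned long)obj_base(slab_page, size, x));
        return 0;
}
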
diff --git a/kernel/fork.c b/kernel/fork.c
index 47887bba944f..5c2c355aa97f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -736,6 +736,7 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+#ifdef CONFIG_MMU
 static void mmput_async_fn(struct work_struct *work)
 {
         struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
@@ -749,6 +750,7 @@ void mmput_async(struct mm_struct *mm)
                 schedule_work(&mm->async_put_work);
         }
 }
+#endif
 
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
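Together with the mm_types.h and sched.h hunks above, the pattern is that the field, the declaration and the definition of the async-put path all sit behind the same CONFIG_MMU symbol, so !MMU kernels carry neither the work_struct nor the code. A toy userspace sketch of that arrangement; HAVE_MMU and every identifier here are invented stand-ins, not the kernel symbols:

#include <stdio.h>

#define HAVE_MMU 1                      /* stand-in for CONFIG_MMU */

struct toy_mm {
        int mm_users;
#if HAVE_MMU
        int async_put_work;             /* stand-in for struct work_struct */
#endif
};

#if HAVE_MMU
/* Compiled only when the field above exists, mirroring mmput_async(). */
static void toy_mmput_async(struct toy_mm *mm)
{
        mm->async_put_work = 1;
        printf("async put queued\n");
}
#endif

int main(void)
{
        struct toy_mm mm = { .mm_users = 1 };

#if HAVE_MMU
        toy_mmput_async(&mm);
#else
        (void)mm;                       /* nothing to do on !MMU builds */
#endif
        return 0;
}
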
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 4a1515f4b452..51a76af25c66 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -657,9 +657,9 @@ static struct dma_debug_entry *dma_entry_alloc(void)
         spin_lock_irqsave(&free_entries_lock, flags);
 
         if (list_empty(&free_entries)) {
-                pr_err("DMA-API: debugging out of memory - disabling\n");
                 global_disable = true;
                 spin_unlock_irqrestore(&free_entries_lock, flags);
+                pr_err("DMA-API: debugging out of memory - disabling\n");
                 return NULL;
         }
 
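The reordering matters because pr_err() itself can end up back in the DMA-debug allocator (a netconsole transmit on the printk path maps a DMA buffer, for example) and would then try to take free_entries_lock while it is still held. A userspace sketch of the same discipline, with a pthread mutex standing in for the spinlock; every name below is illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_entries;
static bool disabled;

/* Stand-in for pr_err(): the console path may itself need pool_lock,
 * the way a netconsole transmit can re-enter the DMA-debug code. */
static void console_write(const char *msg)
{
        pthread_mutex_lock(&pool_lock);
        fprintf(stderr, "DMA-API: %s\n", msg);
        pthread_mutex_unlock(&pool_lock);
}

static void *entry_alloc(void)
{
        pthread_mutex_lock(&pool_lock);
        if (free_entries == 0) {
                disabled = true;
                pthread_mutex_unlock(&pool_lock);
                /* Fixed ordering: log only after dropping the lock, so
                 * console_write() cannot deadlock on pool_lock. */
                console_write("debugging out of memory - disabling");
                return NULL;
        }
        free_entries--;
        pthread_mutex_unlock(&pool_lock);
        return &free_entries;           /* placeholder for a real entry */
}

int main(void)
{
        free_entries = 1;
        entry_alloc();                  /* consumes the last entry */
        if (!entry_alloc() && disabled)
                puts("allocator disabled without self-deadlock");
        return 0;
}
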
diff --git a/mm/Kconfig b/mm/Kconfig
index 2664c118b5d2..22fa8189e4fc 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -649,6 +649,7 @@ config DEFERRED_STRUCT_PAGE_INIT
         default n
         depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
         depends on MEMORY_HOTPLUG
+        depends on !FLATMEM
         help
           Ordinarily all struct pages are initialised during early boot in a
           single thread. On very large machines this can take a considerable
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 7f7ac51d7faf..fb87923552ef 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -77,7 +77,6 @@ struct kasan_alloc_meta {
         struct kasan_track track;
         u32 state : 2;  /* enum kasan_state */
         u32 alloc_size : 30;
-        u32 reserved;
 };
 
 struct qlist_node {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cf428d7b9a03..f6477a9dbe7a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1302,6 +1302,8 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                         mem_cgroup_iter_break(memcg, iter);
                         if (chosen)
                                 put_task_struct(chosen);
+                        /* Set a dummy value to return "true". */
+                        chosen = (void *) 1;
                         goto unlock;
                 case OOM_SCAN_OK:
                         break;
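The dummy assignment works because, once the abort path is taken, chosen is only ever used as the function's boolean return value and is never dereferenced again. A stripped-down sketch of that control flow; the function and variable names are invented, not the memcg code:

#include <stdbool.h>
#include <stdio.h>

/* Returns true when the OOM situation is already being handled. */
static bool toy_out_of_memory(bool already_handled)
{
        void *chosen = NULL;

        if (already_handled) {
                /* Set a dummy value to return "true"; it is only ever
                 * tested below, never dereferenced. */
                chosen = (void *)1;
                goto unlock;
        }
        /* ... victim selection would set chosen to a real task here ... */
unlock:
        return chosen;                  /* non-NULL converts to true */
}

int main(void)
{
        printf("abort path returns %s\n",
               toy_out_of_memory(true) ? "true" : "false");
        return 0;
}
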
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 72698db958e7..b6d4f258cb53 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -45,6 +45,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -483,16 +485,16 @@ static inline unsigned long zs_stat_get(struct size_class *class,
 
 #ifdef CONFIG_ZSMALLOC_STAT
 
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
 {
-        if (!debugfs_initialized())
-                return -ENODEV;
+        if (!debugfs_initialized()) {
+                pr_warn("debugfs not available, stat dir not created\n");
+                return;
+        }
 
         zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
         if (!zs_stat_root)
-                return -ENOMEM;
-
-        return 0;
+                pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
 }
 
 static void __exit zs_stat_exit(void)
@@ -577,8 +579,10 @@ static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
 {
         struct dentry *entry;
 
-        if (!zs_stat_root)
+        if (!zs_stat_root) {
+                pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
                 return;
+        }
 
         entry = debugfs_create_dir(name, zs_stat_root);
         if (!entry) {
@@ -592,7 +596,8 @@ static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
         if (!entry) {
                 pr_warn("%s: debugfs file entry <%s> creation failed\n",
                         name, "classes");
-                return;
+                debugfs_remove_recursive(pool->stat_dentry);
+                pool->stat_dentry = NULL;
         }
 }
 
@@ -602,9 +607,8 @@ static void zs_pool_stat_destroy(struct zs_pool *pool)
 }
 
 #else /* CONFIG_ZSMALLOC_STAT */
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
 {
-        return 0;
 }
 
 static void __exit zs_stat_exit(void)
@@ -2011,17 +2015,10 @@ static int __init zs_init(void)
         zpool_register_driver(&zs_zpool_driver);
 #endif
 
-        ret = zs_stat_init();
-        if (ret) {
-                pr_err("zs stat initialization failed\n");
-                goto stat_fail;
-        }
+        zs_stat_init();
+
         return 0;
 
-stat_fail:
-#ifdef CONFIG_ZPOOL
-        zpool_unregister_driver(&zs_zpool_driver);
-#endif
 notifier_fail:
         zs_unregister_cpu_notifier();
 
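Net effect of the zsmalloc hunks: the debugfs statistics become best-effort. zs_stat_init() warns and returns instead of reporting an error, and zs_init() no longer unwinds the zpool driver registration when the stats directory cannot be created. A userspace sketch of that policy, with mkdir() standing in for debugfs_create_dir() and every name invented:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

static int have_stats;

/* Returns nothing: a missing stats directory is a warning, not an error. */
static void stats_init(const char *dir)
{
        if (mkdir(dir, 0755) != 0 && errno != EEXIST) {
                fprintf(stderr, "zsmalloc: stat dir creation failed: %s\n",
                        strerror(errno));
                return;                 /* carry on without statistics */
        }
        have_stats = 1;
}

static int pool_init(void)
{
        /* ... the real pool setup would happen here and can still fail ... */
        stats_init("/nonexistent-root/zsmalloc");   /* failure is tolerated */
        return 0;                       /* stats failure no longer fails init */
}

int main(void)
{
        if (pool_init() == 0)
                printf("pool ready, stats %s\n",
                       have_stats ? "enabled" : "disabled");
        return 0;
}
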