diff options
| -rw-r--r-- | MAINTAINERS | 9 | ||||
| -rw-r--r-- | arch/x86/kernel/e820.c | 15 | ||||
| -rw-r--r-- | include/linux/dax.h | 2 | ||||
| -rw-r--r-- | include/linux/slub_def.h | 4 | ||||
| -rw-r--r-- | lib/Kconfig.kasan | 1 | ||||
| -rw-r--r-- | lib/percpu_ida.c | 2 | ||||
| -rw-r--r-- | mm/slab_common.c | 4 | ||||
| -rw-r--r-- | mm/slub.c | 7 | ||||
| -rw-r--r-- | mm/vmstat.c | 2 |
9 files changed, 38 insertions, 8 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index bfb8a18d4793..07d1576fc766 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -11481,6 +11481,15 @@ W: http://wireless.kernel.org/en/users/Drivers/p54 | |||
| 11481 | S: Obsolete | 11481 | S: Obsolete |
| 11482 | F: drivers/net/wireless/intersil/prism54/ | 11482 | F: drivers/net/wireless/intersil/prism54/ |
| 11483 | 11483 | ||
| 11484 | PROC FILESYSTEM | ||
| 11485 | R: Alexey Dobriyan <adobriyan@gmail.com> | ||
| 11486 | L: linux-kernel@vger.kernel.org | ||
| 11487 | L: linux-fsdevel@vger.kernel.org | ||
| 11488 | S: Maintained | ||
| 11489 | F: fs/proc/ | ||
| 11490 | F: include/linux/proc_fs.h | ||
| 11491 | F: tools/testing/selftests/proc/ | ||
| 11492 | |||
| 11484 | PROC SYSCTL | 11493 | PROC SYSCTL |
| 11485 | M: "Luis R. Rodriguez" <mcgrof@kernel.org> | 11494 | M: "Luis R. Rodriguez" <mcgrof@kernel.org> |
| 11486 | M: Kees Cook <keescook@chromium.org> | 11495 | M: Kees Cook <keescook@chromium.org> |
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index d1f25c831447..c88c23c658c1 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
| @@ -1248,6 +1248,7 @@ void __init e820__memblock_setup(void) | |||
| 1248 | { | 1248 | { |
| 1249 | int i; | 1249 | int i; |
| 1250 | u64 end; | 1250 | u64 end; |
| 1251 | u64 addr = 0; | ||
| 1251 | 1252 | ||
| 1252 | /* | 1253 | /* |
| 1253 | * The bootstrap memblock region count maximum is 128 entries | 1254 | * The bootstrap memblock region count maximum is 128 entries |
| @@ -1264,13 +1265,21 @@ void __init e820__memblock_setup(void) | |||
| 1264 | struct e820_entry *entry = &e820_table->entries[i]; | 1265 | struct e820_entry *entry = &e820_table->entries[i]; |
| 1265 | 1266 | ||
| 1266 | end = entry->addr + entry->size; | 1267 | end = entry->addr + entry->size; |
| 1268 | if (addr < entry->addr) | ||
| 1269 | memblock_reserve(addr, entry->addr - addr); | ||
| 1270 | addr = end; | ||
| 1267 | if (end != (resource_size_t)end) | 1271 | if (end != (resource_size_t)end) |
| 1268 | continue; | 1272 | continue; |
| 1269 | 1273 | ||
| 1274 | /* | ||
| 1275 | * all !E820_TYPE_RAM ranges (including gap ranges) are put | ||
| 1276 | * into memblock.reserved to make sure that struct pages in | ||
| 1277 | * such regions are not left uninitialized after bootup. | ||
| 1278 | */ | ||
| 1270 | if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) | 1279 | if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) |
| 1271 | continue; | 1280 | memblock_reserve(entry->addr, entry->size); |
| 1272 | 1281 | else | |
| 1273 | memblock_add(entry->addr, entry->size); | 1282 | memblock_add(entry->addr, entry->size); |
| 1274 | } | 1283 | } |
| 1275 | 1284 | ||
| 1276 | /* Throw away partial pages: */ | 1285 | /* Throw away partial pages: */ |
diff --git a/include/linux/dax.h b/include/linux/dax.h index 3855e3800f48..deb0f663252f 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h | |||
| @@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); | |||
| 135 | 135 | ||
| 136 | ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, | 136 | ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, |
| 137 | const struct iomap_ops *ops); | 137 | const struct iomap_ops *ops); |
| 138 | int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, | 138 | vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, |
| 139 | pfn_t *pfnp, int *errp, const struct iomap_ops *ops); | 139 | pfn_t *pfnp, int *errp, const struct iomap_ops *ops); |
| 140 | vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, | 140 | vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, |
| 141 | enum page_entry_size pe_size, pfn_t pfn); | 141 | enum page_entry_size pe_size, pfn_t pfn); |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 09fa2c6f0e68..3a1a1dbc6f49 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
| @@ -155,8 +155,12 @@ struct kmem_cache { | |||
| 155 | 155 | ||
| 156 | #ifdef CONFIG_SYSFS | 156 | #ifdef CONFIG_SYSFS |
| 157 | #define SLAB_SUPPORTS_SYSFS | 157 | #define SLAB_SUPPORTS_SYSFS |
| 158 | void sysfs_slab_unlink(struct kmem_cache *); | ||
| 158 | void sysfs_slab_release(struct kmem_cache *); | 159 | void sysfs_slab_release(struct kmem_cache *); |
| 159 | #else | 160 | #else |
| 161 | static inline void sysfs_slab_unlink(struct kmem_cache *s) | ||
| 162 | { | ||
| 163 | } | ||
| 160 | static inline void sysfs_slab_release(struct kmem_cache *s) | 164 | static inline void sysfs_slab_release(struct kmem_cache *s) |
| 161 | { | 165 | { |
| 162 | } | 166 | } |
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 3d35d062970d..c253c1b46c6b 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan | |||
| @@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN | |||
| 6 | config KASAN | 6 | config KASAN |
| 7 | bool "KASan: runtime memory debugger" | 7 | bool "KASan: runtime memory debugger" |
| 8 | depends on SLUB || (SLAB && !DEBUG_SLAB) | 8 | depends on SLUB || (SLAB && !DEBUG_SLAB) |
| 9 | select SLUB_DEBUG if SLUB | ||
| 9 | select CONSTRUCTORS | 10 | select CONSTRUCTORS |
| 10 | select STACKDEPOT | 11 | select STACKDEPOT |
| 11 | help | 12 | help |
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 9bbd9c5d375a..beb14839b41a 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c | |||
| @@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) | |||
| 141 | spin_lock_irqsave(&tags->lock, flags); | 141 | spin_lock_irqsave(&tags->lock, flags); |
| 142 | 142 | ||
| 143 | /* Fastpath */ | 143 | /* Fastpath */ |
| 144 | if (likely(tags->nr_free >= 0)) { | 144 | if (likely(tags->nr_free)) { |
| 145 | tag = tags->freelist[--tags->nr_free]; | 145 | tag = tags->freelist[--tags->nr_free]; |
| 146 | spin_unlock_irqrestore(&tags->lock, flags); | 146 | spin_unlock_irqrestore(&tags->lock, flags); |
| 147 | return tag; | 147 | return tag; |
diff --git a/mm/slab_common.c b/mm/slab_common.c index 890b1f04a03a..2296caf87bfb 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
| @@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_cache *s) | |||
| 567 | list_del(&s->list); | 567 | list_del(&s->list); |
| 568 | 568 | ||
| 569 | if (s->flags & SLAB_TYPESAFE_BY_RCU) { | 569 | if (s->flags & SLAB_TYPESAFE_BY_RCU) { |
| 570 | #ifdef SLAB_SUPPORTS_SYSFS | ||
| 571 | sysfs_slab_unlink(s); | ||
| 572 | #endif | ||
| 570 | list_add_tail(&s->list, &slab_caches_to_rcu_destroy); | 573 | list_add_tail(&s->list, &slab_caches_to_rcu_destroy); |
| 571 | schedule_work(&slab_caches_to_rcu_destroy_work); | 574 | schedule_work(&slab_caches_to_rcu_destroy_work); |
| 572 | } else { | 575 | } else { |
| 573 | #ifdef SLAB_SUPPORTS_SYSFS | 576 | #ifdef SLAB_SUPPORTS_SYSFS |
| 577 | sysfs_slab_unlink(s); | ||
| 574 | sysfs_slab_release(s); | 578 | sysfs_slab_release(s); |
| 575 | #else | 579 | #else |
| 576 | slab_kmem_cache_release(s); | 580 | slab_kmem_cache_release(s); |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
| @@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work) | |||
| 5667 | kset_unregister(s->memcg_kset); | 5667 | kset_unregister(s->memcg_kset); |
| 5668 | #endif | 5668 | #endif |
| 5669 | kobject_uevent(&s->kobj, KOBJ_REMOVE); | 5669 | kobject_uevent(&s->kobj, KOBJ_REMOVE); |
| 5670 | kobject_del(&s->kobj); | ||
| 5671 | out: | 5670 | out: |
| 5672 | kobject_put(&s->kobj); | 5671 | kobject_put(&s->kobj); |
| 5673 | } | 5672 | } |
| @@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kmem_cache *s) | |||
| 5752 | schedule_work(&s->kobj_remove_work); | 5751 | schedule_work(&s->kobj_remove_work); |
| 5753 | } | 5752 | } |
| 5754 | 5753 | ||
| 5754 | void sysfs_slab_unlink(struct kmem_cache *s) | ||
| 5755 | { | ||
| 5756 | if (slab_state >= FULL) | ||
| 5757 | kobject_del(&s->kobj); | ||
| 5758 | } | ||
| 5759 | |||
| 5755 | void sysfs_slab_release(struct kmem_cache *s) | 5760 | void sysfs_slab_release(struct kmem_cache *s) |
| 5756 | { | 5761 | { |
| 5757 | if (slab_state >= FULL) | 5762 | if (slab_state >= FULL) |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 75eda9c2b260..8ba0870ecddd 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
| @@ -1796,11 +1796,9 @@ static void vmstat_update(struct work_struct *w) | |||
| 1796 | * to occur in the future. Keep on running the | 1796 | * to occur in the future. Keep on running the |
| 1797 | * update worker thread. | 1797 | * update worker thread. |
| 1798 | */ | 1798 | */ |
| 1799 | preempt_disable(); | ||
| 1800 | queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, | 1799 | queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, |
| 1801 | this_cpu_ptr(&vmstat_work), | 1800 | this_cpu_ptr(&vmstat_work), |
| 1802 | round_jiffies_relative(sysctl_stat_interval)); | 1801 | round_jiffies_relative(sysctl_stat_interval)); |
| 1803 | preempt_enable(); | ||
| 1804 | } | 1802 | } |
| 1805 | } | 1803 | } |
| 1806 | 1804 | ||
