author	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-21 11:15:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-21 11:15:16 -0400
commit	7a752478efa617cd5a7d646daa7fc3f4615924f6 (patch)
tree	2b64325d8044535b31926702cf276e7308c2228c
parent	83beed7b2b26f232d782127792dd0cd4362fdc41 (diff)
parent	abc1be13fd113ddef5e2d807a466286b864caed3 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:

 - "fork: unconditionally clear stack on fork" is a non-bugfix which got
   lost during the merge window - performance concerns appear to have
   been adequately addressed.

 - and a bunch of fixes

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/filemap.c: fix NULL pointer in page_cache_tree_insert()
  mm: memcg: add __GFP_NOWARN in __memcg_schedule_kmem_cache_create()
  fs, elf: don't complain MAP_FIXED_NOREPLACE unless -EEXIST error
  kexec_file: do not add extra alignment to efi memmap
  proc: fix /proc/loadavg regression
  proc: revalidate kernel thread inodes to root:root
  autofs: mount point create should honour passed in mode
  MAINTAINERS: add personal addresses for Sascha and Uwe
  kasan: add no_sanitize attribute for clang builds
  rapidio: fix rio_dma_transfer error handling
  mm: enable thp migration for shmem thp
  writeback: safer lock nesting
  mm, pagemap: fix swap offset value for PMD migration entry
  mm: fix do_pages_move status handling
  fork: unconditionally clear stack on fork
 MAINTAINERS                               | 15
 arch/powerpc/platforms/cell/spufs/sched.c |  2
 arch/x86/kernel/kexec-bzimage64.c         |  5
 drivers/rapidio/devices/rio_mport_cdev.c  | 19
 fs/autofs4/root.c                         |  2
 fs/binfmt_elf.c                           |  8
 fs/fs-writeback.c                         |  7
 fs/proc/base.c                            |  6
 fs/proc/loadavg.c                         |  2
 fs/proc/task_mmu.c                        |  6
 include/linux/backing-dev-defs.h          |  5
 include/linux/backing-dev.h               | 30
 include/linux/compiler-clang.h            |  3
 include/linux/thread_info.h               |  6
 kernel/fork.c                             |  3
 mm/filemap.c                              |  9
 mm/huge_memory.c                          |  5
 mm/memcontrol.c                           |  2
 mm/migrate.c                              | 22
 mm/page-writeback.c                       | 18
 mm/rmap.c                                 |  3
 21 files changed, 106 insertions(+), 72 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 5ae51d05c09b..b7ff5654b8b5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1373,7 +1373,8 @@ F: arch/arm/mach-ebsa110/
 F: drivers/net/ethernet/amd/am79c961a.*
 
 ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT
-M: Uwe Kleine-König <kernel@pengutronix.de>
+M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 N: efm32
@@ -1401,7 +1402,8 @@ F: arch/arm/mach-footbridge/
 
 ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 M: Shawn Guo <shawnguo@kernel.org>
-M: Sascha Hauer <kernel@pengutronix.de>
+M: Sascha Hauer <s.hauer@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
 R: Fabio Estevam <fabio.estevam@nxp.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -1416,7 +1418,8 @@ F: include/soc/imx/
 
 ARM/FREESCALE VYBRID ARM ARCHITECTURE
 M: Shawn Guo <shawnguo@kernel.org>
-M: Sascha Hauer <kernel@pengutronix.de>
+M: Sascha Hauer <s.hauer@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
 R: Stefan Agner <stefan@agner.ch>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -5652,7 +5655,8 @@ F: drivers/net/ethernet/freescale/fec.h
 F: Documentation/devicetree/bindings/net/fsl-fec.txt
 
 FREESCALE IMX / MXC FRAMEBUFFER DRIVER
-M: Sascha Hauer <kernel@pengutronix.de>
+M: Sascha Hauer <s.hauer@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
 L: linux-fbdev@vger.kernel.org
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -12825,7 +12829,8 @@ F: include/linux/siphash.h
 
 SIOX
 M: Gavin Schenk <g.schenk@eckelmann.de>
-M: Uwe Kleine-König <kernel@pengutronix.de>
+M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
 S: Supported
 F: drivers/siox/*
 F: include/trace/events/siox.h
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 9033c8194eda..ccc421503363 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
 		LOAD_INT(c), LOAD_FRAC(c),
 		count_active_contexts(),
 		atomic_read(&nr_spu_contexts),
-		idr_get_cursor(&task_active_pid_ns(current)->idr));
+		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
 	return 0;
 }
 
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 3182908b7e6c..7326078eaa7a 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	 * little bit simple
 	 */
 	efi_map_sz = efi_get_runtime_map_size();
-	efi_map_sz = ALIGN(efi_map_sz, 16);
 	params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
 				MAX_ELFCOREHDR_STR_LEN;
 	params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
-	kbuf.bufsz = params_cmdline_sz + efi_map_sz +
+	kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
 			sizeof(struct setup_data) +
 			sizeof(struct efi_setup_data);
 
@@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	if (!params)
 		return ERR_PTR(-ENOMEM);
 	efi_map_offset = params_cmdline_sz;
-	efi_setup_data_offset = efi_map_offset + efi_map_sz;
+	efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16);
 
 	/* Copy setup header onto bootparams. Documentation/x86/boot.txt */
 	setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;
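
The hunk above stops pre-aligning efi_map_sz and instead applies ALIGN(efi_map_sz, 16) only where the padded size is actually consumed (the buffer size and the setup_data offset), so the EFI memmap handed to the next kernel keeps its real, unpadded size. A minimal userspace sketch of the same layout arithmetic, with purely hypothetical sizes and an ALIGN_UP macro standing in for the kernel's ALIGN():

```c
#include <stdio.h>

/* Stand-in for the kernel's ALIGN() macro (power-of-two alignment). */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* Hypothetical sizes, only to illustrate the layout arithmetic. */
	unsigned long params_cmdline_sz = ALIGN_UP(4096 + 137, 16);
	unsigned long efi_map_sz = 1204;	/* no longer pre-aligned */

	unsigned long efi_map_offset = params_cmdline_sz;
	unsigned long efi_setup_data_offset =
		efi_map_offset + ALIGN_UP(efi_map_sz, 16);

	printf("efi memmap at offset %lu (size %lu), setup_data at %lu\n",
	       efi_map_offset, efi_map_sz, efi_setup_data_offset);
	return 0;
}
```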
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9d27016c899e..0434ab7b6497 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req,
 	tx->callback = dma_xfer_callback;
 	tx->callback_param = req;
 
-	req->dmach = chan;
-	req->sync = sync;
 	req->status = DMA_IN_PROGRESS;
-	init_completion(&req->req_comp);
 	kref_get(&req->refcount);
 
 	cookie = dmaengine_submit(tx);
@@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 	if (!req)
 		return -ENOMEM;
 
-	kref_init(&req->refcount);
-
 	ret = get_dma_channel(priv);
 	if (ret) {
 		kfree(req);
 		return ret;
 	}
+	chan = priv->dmach;
+
+	kref_init(&req->refcount);
+	init_completion(&req->req_comp);
+	req->dir = dir;
+	req->filp = filp;
+	req->priv = priv;
+	req->dmach = chan;
+	req->sync = sync;
 
 	/*
 	 * If parameter loc_addr != NULL, we are transferring data from/to
@@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 				xfer->offset, xfer->length);
 	}
 
-	req->dir = dir;
-	req->filp = filp;
-	req->priv = priv;
-	chan = priv->dmach;
-
 	nents = dma_map_sg(chan->device->dev,
 			req->sgt.sgl, req->sgt.nents, dir);
 	if (nents == 0) {
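
The rio_dma_transfer() change groups kref_init(), init_completion() and the request-field assignments right after the DMA channel is acquired, so every later error path sees a fully initialized request before anything can drop a reference. A hypothetical kernel-style sketch of the same init-before-error-paths pattern (demo_req, demo_req_release and demo_submit are illustrative names, not part of the driver):

```c
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical request object, only to illustrate the pattern. */
struct demo_req {
	struct kref refcount;
	struct completion done;
};

static void demo_req_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_req, refcount));
}

static int demo_submit(void)
{
	struct demo_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
	int ret;

	if (!req)
		return -ENOMEM;

	/*
	 * Initialize the refcount and completion before any step that can
	 * fail, so each error path below can simply drop its reference.
	 */
	kref_init(&req->refcount);
	init_completion(&req->done);

	ret = 0;	/* ... acquire channel, map buffers, submit ... */
	if (ret) {
		kref_put(&req->refcount, demo_req_release);
		return ret;
	}

	kref_put(&req->refcount, demo_req_release);	/* drop our reference */
	return 0;
}
```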
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 82e8f6edfb48..b12e37f27530 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir,
 
 	autofs4_del_active(dentry);
 
-	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
+	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
 	if (!inode)
 		return -ENOMEM;
 	d_add(dentry, inode);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 41e04183e4ce..4ad6f669fe34 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -377,10 +377,10 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
 	} else
 		map_addr = vm_mmap(filep, addr, size, prot, type, off);
 
-	if ((type & MAP_FIXED_NOREPLACE) && BAD_ADDR(map_addr))
-		pr_info("%d (%s): Uhuuh, elf segment at %p requested but the memory is mapped already\n",
-			task_pid_nr(current), current->comm,
-			(void *)addr);
+	if ((type & MAP_FIXED_NOREPLACE) &&
+	    PTR_ERR((void *)map_addr) == -EEXIST)
+		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
+			task_pid_nr(current), current->comm, (void *)addr);
 
 	return(map_addr);
 }
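
With this change the ELF loader only prints its warning when the mapping actually collided (vm_mmap() returned -EEXIST), not on every failure. The same flag is visible from userspace: on kernels and libcs that define MAP_FIXED_NOREPLACE, mmap() refuses to clobber an existing mapping and fails with EEXIST. A small sketch, assuming those headers are available:

```c
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;

	/* First mapping at a kernel-chosen address. */
	void *first = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (first == MAP_FAILED)
		return 1;

	/* Requesting the same range with MAP_FIXED_NOREPLACE must fail
	 * with EEXIST instead of silently replacing the mapping. */
	void *again = mmap(first, len, PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
			   -1, 0);
	if (again == MAP_FAILED)
		printf("mmap: %s (EEXIST expected)\n", strerror(errno));

	munmap(first, len);
	return 0;
}
```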
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 4b12ba70a895..47d7c151fcba 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits)
 	 */
 	if (inode && inode_to_wb_is_valid(inode)) {
 		struct bdi_writeback *wb;
-		bool locked, congested;
+		struct wb_lock_cookie lock_cookie = {};
+		bool congested;
 
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
 		congested = wb_congested(wb, cong_bits);
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &lock_cookie);
 		return congested;
 	}
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index eafa39a3a88c..1b2ede6abcdf 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1693,6 +1693,12 @@ void task_dump_owner(struct task_struct *task, umode_t mode,
 	kuid_t uid;
 	kgid_t gid;
 
+	if (unlikely(task->flags & PF_KTHREAD)) {
+		*ruid = GLOBAL_ROOT_UID;
+		*rgid = GLOBAL_ROOT_GID;
+		return;
+	}
+
 	/* Default to the tasks effective ownership */
 	rcu_read_lock();
 	cred = __task_cred(task);
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index a000d7547479..b572cc865b92 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -24,7 +24,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v)
 		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
 		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
 		nr_running(), nr_threads,
-		idr_get_cursor(&task_active_pid_ns(current)->idr));
+		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
 	return 0;
 }
 
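
The /proc/loadavg regression fixed here is in the last field of the file, documented as the PID most recently assigned on the system; idr_get_cursor() returns the *next* cursor position, so the value must be printed minus one. A small reader, just to show which field the fix affects:

```c
#include <stdio.h>

int main(void)
{
	double a1, a5, a15;
	int running, total, last_pid;
	FILE *f = fopen("/proc/loadavg", "r");

	if (!f)
		return 1;
	/* Format: "avg1 avg5 avg15 running/total last_pid" */
	if (fscanf(f, "%lf %lf %lf %d/%d %d",
		   &a1, &a5, &a15, &running, &total, &last_pid) == 6)
		printf("most recently allocated PID: %d\n", last_pid);
	fclose(f);
	return 0;
}
```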
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 65ae54659833..c486ad4b43f0 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1310,9 +1310,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	else if (is_swap_pmd(pmd)) {
 		swp_entry_t entry = pmd_to_swp_entry(pmd);
+		unsigned long offset = swp_offset(entry);
 
+		offset += (addr & ~PMD_MASK) >> PAGE_SHIFT;
 		frame = swp_type(entry) |
-			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+			(offset << MAX_SWAPFILES_SHIFT);
 		flags |= PM_SWAP;
 		if (pmd_swp_soft_dirty(pmd))
 			flags |= PM_SOFT_DIRTY;
@@ -1332,6 +1334,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 				break;
 			if (pm->show_pfn && (flags & PM_PRESENT))
 				frame++;
+			else if (flags & PM_SWAP)
+				frame += (1 << MAX_SWAPFILES_SHIFT);
 		}
 		spin_unlock(ptl);
 		return err;
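
For context, this pagemap fix makes the reported swap offset advance per page while walking a swapped-out PMD, matching the documented /proc/<pid>/pagemap encoding: swap type in the low 5 bits, swap offset in bits 5-54, bit 62 set for swapped entries (Documentation/admin-guide/mm/pagemap.rst). A small decoder for a single 64-bit entry, with a made-up sample value:

```c
#include <stdint.h>
#include <stdio.h>

/* Decode one 64-bit /proc/<pid>/pagemap entry per the documented layout. */
static void decode_pagemap_entry(uint64_t e)
{
	if (e & (1ULL << 62)) {			/* page is swapped */
		unsigned int type = e & 0x1f;			/* bits 0-4  */
		uint64_t offset = (e >> 5) & ((1ULL << 50) - 1); /* bits 5-54 */

		printf("swapped: type=%u offset=%llu\n",
		       type, (unsigned long long)offset);
	} else if (e & (1ULL << 63)) {		/* page is present */
		printf("present: pfn=%llu\n",
		       (unsigned long long)(e & ((1ULL << 55) - 1)));
	}
}

int main(void)
{
	/* Hypothetical entry: swapped, type 1, offset 42. */
	decode_pagemap_entry((1ULL << 62) | (42ULL << 5) | 1);
	return 0;
}
```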
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index bfe86b54f6c1..0bd432a4d7bd 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 	set_wb_congested(bdi->wb.congested, sync);
 }
 
+struct wb_lock_cookie {
+	bool locked;
+	unsigned long flags;
+};
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 /**
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index f6be4b0b6c18..72ca0f3d39f3 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -347,7 +347,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
 /**
  * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
  * @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
  *
  * The caller wants to access the wb associated with @inode but isn't
  * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
@@ -355,12 +355,12 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
  * association doesn't change until the transaction is finished with
  * unlocked_inode_to_wb_end().
  *
- * The caller must call unlocked_inode_to_wb_end() with *@lockedp
- * afterwards and can't sleep during transaction.  IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction.  IRQs may or may not be disabled on
+ * return.
  */
 static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
 {
 	rcu_read_lock();
 
@@ -368,10 +368,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
 	 * Paired with store_release in inode_switch_wb_work_fn() and
 	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
 	 */
-	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
 
-	if (unlikely(*lockedp))
-		xa_lock_irq(&inode->i_mapping->i_pages);
+	if (unlikely(cookie->locked))
+		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
 
 	/*
 	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
@@ -383,12 +383,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
 /**
  * unlocked_inode_to_wb_end - end inode wb access transaction
  * @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
  */
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+					    struct wb_lock_cookie *cookie)
 {
-	if (unlikely(locked))
-		xa_unlock_irq(&inode->i_mapping->i_pages);
+	if (unlikely(cookie->locked))
+		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);
 
 	rcu_read_unlock();
 }
@@ -435,12 +436,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
 }
 
 static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
 {
 	return inode_to_wb(inode);
 }
 
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+					    struct wb_lock_cookie *cookie)
 {
 }
 
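
The wb_lock_cookie carries the IRQ flags saved by xa_lock_irqsave(), so unlocked_inode_to_wb_end() restores the previous IRQ state rather than unconditionally re-enabling interrupts, which is what made the old bool-based helpers unsafe to nest. The caller pattern, mirroring the fs-writeback.c and page-writeback.c hunks in this merge (the wrapper name below is illustrative):

```c
#include <linux/backing-dev.h>

/* Sketch of the new caller pattern; demo_wb_congested() is hypothetical. */
static bool demo_wb_congested(struct inode *inode, int cong_bits)
{
	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;
	bool congested;

	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	congested = wb_congested(wb, cong_bits);	/* no sleeping here */
	unlocked_inode_to_wb_end(inode, &cookie);

	return congested;
}
```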
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index ceb96ecab96e..7d98e263e048 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -25,6 +25,9 @@
 #define __SANITIZE_ADDRESS__
 #endif
 
+#undef __no_sanitize_address
+#define __no_sanitize_address __attribute__((no_sanitize("address")))
+
 /* Clang doesn't have a way to turn it off per-function, yet. */
 #ifdef __noretpoline
 #undef __noretpoline
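
With the define added above, KASAN-enabled clang builds get a working per-function opt-out: __no_sanitize_address expands to clang's no_sanitize("address") attribute instead of being left undefined. A hypothetical use looks like this (the helper name and body are illustrative only):

```c
#include <linux/compiler.h>
#include <linux/types.h>

/* Hypothetical helper that deliberately escapes KASAN instrumentation. */
static __no_sanitize_address void demo_read_redzone(const char *p, size_t n)
{
	volatile char sink;
	size_t i;

	for (i = 0; i < n; i++)
		sink = p[i];	/* accesses KASAN must not instrument */
	(void)sink;
}
```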
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 34f053a150a9..cf2862bd134a 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -43,11 +43,7 @@ enum {
 #define THREAD_ALIGN	THREAD_SIZE
 #endif
 
-#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
-# define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)
-#else
-# define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT)
-#endif
+#define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)
 
 /*
  * flag set/clear/test wrappers
diff --git a/kernel/fork.c b/kernel/fork.c
index 242c8c93d285..a5d21c42acfc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -216,10 +216,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
 		if (!s)
 			continue;
 
-#ifdef CONFIG_DEBUG_KMEMLEAK
 		/* Clear stale pointers from reused stack. */
 		memset(s->addr, 0, THREAD_SIZE);
-#endif
+
 		tsk->stack_vm_area = s;
 		return s->addr;
 	}
diff --git a/mm/filemap.c b/mm/filemap.c
index 9276bdb2343c..0604cb02e6f3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -786,7 +786,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	VM_BUG_ON_PAGE(!PageLocked(new), new);
 	VM_BUG_ON_PAGE(new->mapping, new);
 
-	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
 	if (!error) {
 		struct address_space *mapping = old->mapping;
 		void (*freepage)(struct page *);
@@ -842,7 +842,7 @@ static int __add_to_page_cache_locked(struct page *page,
 		return error;
 	}
 
-	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
+	error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
 	if (error) {
 		if (!huge)
 			mem_cgroup_cancel_charge(page, memcg, false);
@@ -1585,8 +1585,7 @@ no_page:
 		if (fgp_flags & FGP_ACCESSED)
 			__SetPageReferenced(page);
 
-		err = add_to_page_cache_lru(page, mapping, offset,
-				gfp_mask & GFP_RECLAIM_MASK);
+		err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
 		if (unlikely(err)) {
 			put_page(page);
 			page = NULL;
@@ -2387,7 +2386,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
 	if (!page)
 		return -ENOMEM;
 
-	ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
+	ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
 	if (ret == 0)
 		ret = mapping->a_ops->readpage(file, page);
 	else if (ret == -EEXIST)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 14ed6ee5e02f..a3a1815f8e11 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2925,7 +2925,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 		pmde = maybe_pmd_mkwrite(pmde, vma);
 
 	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
-	page_add_anon_rmap(new, vma, mmun_start, true);
+	if (PageAnon(new))
+		page_add_anon_rmap(new, vma, mmun_start, true);
+	else
+		page_add_file_rmap(new, true);
 	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
 	if (vma->vm_flags & VM_LOCKED)
 		mlock_vma_page(new);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e074f7c637aa..2bd3df3d101a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2192,7 +2192,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
 {
 	struct memcg_kmem_cache_create_work *cw;
 
-	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
+	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
 	if (!cw)
 		return;
 
diff --git a/mm/migrate.c b/mm/migrate.c
index f65dd69e1fd1..568433023831 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -472,7 +472,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	pslot = radix_tree_lookup_slot(&mapping->i_pages,
 					page_index(page));
 
-	expected_count += 1 + page_has_private(page);
+	expected_count += hpage_nr_pages(page) + page_has_private(page);
 	if (page_count(page) != expected_count ||
 		radix_tree_deref_slot_protected(pslot,
 					&mapping->i_pages.xa_lock) != page) {
@@ -505,7 +505,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	get_page(newpage);	/* add cache reference */
+	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
@@ -524,13 +524,26 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	}
 
 	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+	if (PageTransHuge(page)) {
+		int i;
+		int index = page_index(page);
+
+		for (i = 0; i < HPAGE_PMD_NR; i++) {
+			pslot = radix_tree_lookup_slot(&mapping->i_pages,
+						       index + i);
+			radix_tree_replace_slot(&mapping->i_pages, pslot,
+						newpage + i);
+		}
+	} else {
+		radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+	}
 
 	/*
 	 * Drop cache reference from old page by unfreezing
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - 1);
+	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
 
 	xa_unlock(&mapping->i_pages);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -1622,6 +1635,9 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 		current_node = NUMA_NO_NODE;
 	}
 out_flush:
+	if (list_empty(&pagelist))
+		return err;
+
 	/* Make sure we do not overwrite the existing error */
 	err1 = do_move_pages_to_node(mm, &pagelist, current_node);
 	if (!err1)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5c1a3279e63f..337c6afb3345 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2502,13 +2502,13 @@ void account_page_redirty(struct page *page)
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		struct inode *inode = mapping->host;
 		struct bdi_writeback *wb;
-		bool locked;
+		struct wb_lock_cookie cookie = {};
 
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &cookie);
 		current->nr_dirtied--;
 		dec_node_page_state(page, NR_DIRTIED);
 		dec_wb_stat(wb, WB_DIRTIED);
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &cookie);
 	}
 }
 EXPORT_SYMBOL(account_page_redirty);
@@ -2614,15 +2614,15 @@ void __cancel_dirty_page(struct page *page)
 	if (mapping_cap_account_dirty(mapping)) {
 		struct inode *inode = mapping->host;
 		struct bdi_writeback *wb;
-		bool locked;
+		struct wb_lock_cookie cookie = {};
 
 		lock_page_memcg(page);
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &cookie);
 
 		if (TestClearPageDirty(page))
 			account_page_cleaned(page, mapping, wb);
 
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &cookie);
 		unlock_page_memcg(page);
 	} else {
 		ClearPageDirty(page);
@@ -2654,7 +2654,7 @@ int clear_page_dirty_for_io(struct page *page)
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		struct inode *inode = mapping->host;
 		struct bdi_writeback *wb;
-		bool locked;
+		struct wb_lock_cookie cookie = {};
 
 		/*
 		 * Yes, Virginia, this is indeed insane.
@@ -2691,14 +2691,14 @@ int clear_page_dirty_for_io(struct page *page)
 		 * always locked coming in here, so we get the desired
 		 * exclusion.
 		 */
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &cookie);
 		if (TestClearPageDirty(page)) {
 			dec_lruvec_page_state(page, NR_FILE_DIRTY);
 			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 			dec_wb_stat(wb, WB_RECLAIMABLE);
 			ret = 1;
 		}
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &cookie);
 		return ret;
 	}
 	return TestClearPageDirty(page);
diff --git a/mm/rmap.c b/mm/rmap.c
index f0dd4e4565bc..8d5337fed37b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1374,9 +1374,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		if (!pvmw.pte && (flags & TTU_MIGRATION)) {
 			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
 
-			if (!PageAnon(page))
-				continue;
-
 			set_pmd_migration_entry(&pvmw, page);
 			continue;
 		}