author	Linus Torvalds <torvalds@linux-foundation.org>	2015-07-01 20:47:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-07-01 20:47:51 -0400
commit	2d01eedf1d14432f4db5388a49dc5596a8c5bd02 (patch)
tree	646525acc0475b2899827c1bfbd25f05ec1b8092
parent	6ac15baacb6ecd87c66209627753b96ded3b4515 (diff)
parent	abdd4a7025282fbe3737e1bcb5f51afc8d8ea1b8 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge third patchbomb from Andrew Morton:
 - the rest of MM
 - scripts/gdb updates
 - ipc/ updates
 - lib/ updates
 - MAINTAINERS updates
 - various other misc things

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (67 commits)
  genalloc: rename of_get_named_gen_pool() to of_gen_pool_get()
  genalloc: rename dev_get_gen_pool() to gen_pool_get()
  x86: opt into HAVE_COPY_THREAD_TLS, for both 32-bit and 64-bit
  MAINTAINERS: add zpool
  MAINTAINERS: BCACHE: Kent Overstreet has changed email address
  MAINTAINERS: move Jens Osterkamp to CREDITS
  MAINTAINERS: remove unused nbd.h pattern
  MAINTAINERS: update brcm gpio filename pattern
  MAINTAINERS: update brcm dts pattern
  MAINTAINERS: update sound soc intel patterns
  MAINTAINERS: remove website for paride
  MAINTAINERS: update Emulex ocrdma email addresses
  bcache: use kvfree() in various places
  libcxgbi: use kvfree() in cxgbi_free_big_mem()
  target: use kvfree() in session alloc and free
  IB/ehca: use kvfree() in ipz_queue_{cd}tor()
  drm/nouveau/gem: use kvfree() in u_free()
  drm: use kvfree() in drm_free_large()
  cxgb4: use kvfree() in t4_free_mem()
  cxgb3: use kvfree() in cxgb_free_mem()
  ...
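Editor's note: many of the commits merged here are one cleanup repeated across drivers, replacing an open-coded is_vmalloc_addr() branch with kvfree(), which performs the same check internally. A minimal before/after sketch of that pattern (the function names here are illustrative, not from the tree):

#include <linux/mm.h>      /* kvfree(), is_vmalloc_addr() */
#include <linux/slab.h>    /* kfree() */
#include <linux/vmalloc.h> /* vfree() */

/* Before: each caller had to know whether the buffer came from
 * kmalloc() or vmalloc(). */
static void example_free_old(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/* After: kvfree() makes the same is_vmalloc_addr() decision itself,
 * so the branch collapses to one call. */
static void example_free_new(void *addr)
{
	kvfree(addr);
}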
-rw-r--r--  CREDITS  4
-rw-r--r--  MAINTAINERS  33
-rw-r--r--  arch/arc/include/asm/dma-mapping.h  12
-rw-r--r--  arch/arm/mach-at91/pm.c  2
-rw-r--r--  arch/arm/mach-imx/pm-imx5.c  2
-rw-r--r--  arch/arm/mach-imx/pm-imx6.c  2
-rw-r--r--  arch/arm/mach-socfpga/pm.c  2
-rw-r--r--  arch/ia64/mm/numa.c  19
-rw-r--r--  arch/unicore32/kernel/fpu-ucf64.c  4
-rw-r--r--  arch/x86/Kconfig  2
-rw-r--r--  arch/x86/kernel/kexec-bzimage64.c  11
-rw-r--r--  arch/x86/kernel/process_32.c  6
-rw-r--r--  arch/x86/kernel/process_64.c  8
-rw-r--r--  drivers/base/node.c  6
-rw-r--r--  drivers/crypto/marvell/cesa.c  5
-rw-r--r--  drivers/dma/mmp_tdma.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  5
-rw-r--r--  drivers/infiniband/hw/ehca/ipz_pt_fn.c  10
-rw-r--r--  drivers/md/bcache/journal.c  2
-rw-r--r--  drivers/md/bcache/super.c  10
-rw-r--r--  drivers/md/bcache/util.h  10
-rw-r--r--  drivers/media/platform/coda/coda-common.c  4
-rw-r--r--  drivers/memstick/host/jmb38x_ms.c  12
-rw-r--r--  drivers/memstick/host/r592.c  10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c  5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c  5
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h  5
-rw-r--r--  drivers/scsi/scsi_debug.c  12
-rw-r--r--  drivers/target/target_core_transport.c  10
-rw-r--r--  fs/adfs/super.c  2
-rw-r--r--  fs/affs/amigaffs.c  2
-rw-r--r--  fs/affs/inode.c  2
-rw-r--r--  fs/affs/symlink.c  4
-rw-r--r--  fs/devpts/inode.c  31
-rw-r--r--  fs/mount.h  3
-rw-r--r--  fs/namespace.c  6
-rw-r--r--  fs/proc_namespace.c  34
-rw-r--r--  fs/seq_file.c  19
-rw-r--r--  include/drm/drm_mem_util.h  5
-rw-r--r--  include/linux/genalloc.h  6
-rw-r--r--  include/linux/gfp.h  8
-rw-r--r--  include/linux/kernel.h  3
-rw-r--r--  include/linux/memblock.h  18
-rw-r--r--  include/linux/mm.h  8
-rw-r--r--  include/linux/mmzone.h  23
-rw-r--r--  include/linux/scatterlist.h  7
-rw-r--r--  init/main.c  2
-rw-r--r--  ipc/msg.c  50
-rw-r--r--  ipc/sem.c  4
-rw-r--r--  ipc/shm.c  12
-rw-r--r--  ipc/util.c  28
-rw-r--r--  ipc/util.h  2
-rw-r--r--  kernel/gcov/base.c  6
-rw-r--r--  kernel/gcov/gcc_4_7.c  4
-rw-r--r--  kernel/kexec.c  11
-rw-r--r--  kernel/panic.c  5
-rw-r--r--  kernel/printk/printk.c  8
-rw-r--r--  kernel/relay.c  5
-rw-r--r--  lib/genalloc.c  14
-rw-r--r--  lib/scatterlist.c  18
-rw-r--r--  mm/Kconfig  18
-rw-r--r--  mm/bootmem.c  13
-rw-r--r--  mm/internal.h  11
-rw-r--r--  mm/memblock.c  34
-rw-r--r--  mm/mm_init.c  9
-rw-r--r--  mm/nobootmem.c  7
-rw-r--r--  mm/page_alloc.c  442
-rw-r--r--  scripts/gdb/linux/dmesg.py  1
-rw-r--r--  scripts/gdb/linux/lists.py  92
-rw-r--r--  scripts/gdb/linux/symbols.py  9
-rw-r--r--  scripts/gdb/linux/tasks.py  20
-rw-r--r--  scripts/gdb/linux/utils.py  4
-rw-r--r--  scripts/gdb/vmlinux-gdb.py  1
-rw-r--r--  sound/core/memalloc.c  2
74 files changed, 894 insertions, 339 deletions
diff --git a/CREDITS b/CREDITS
index 4df764ebe217..1d616640bbf6 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2740,6 +2740,10 @@ S: C/ Mieses 20, 9-B
 S: Valladolid 47009
 S: Spain

+N: Jens Osterkamp
+E: jens@de.ibm.com
+D: Maintainer of Spidernet network driver for Cell
+
 N: Gadi Oxman
 E: gadio@netvision.net.il
 D: Original author and maintainer of IDE/ATAPI floppy/tape drivers
diff --git a/MAINTAINERS b/MAINTAINERS
index ab6fb58b3873..058b0fbc52ff 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2026,10 +2026,10 @@ S: Maintained
 F:	drivers/net/hamradio/baycom*

 BCACHE (BLOCK LAYER CACHE)
-M:	Kent Overstreet <kmo@daterainc.com>
+M:	Kent Overstreet <kent.overstreet@gmail.com>
 L:	linux-bcache@vger.kernel.org
 W:	http://bcache.evilpiepirate.org
-S:	Maintained:
+S:	Maintained
 F:	drivers/md/bcache/

 BDISP ST MEDIA DRIVER
@@ -2280,7 +2280,7 @@ S: Maintained
 F:	arch/mips/bmips/*
 F:	arch/mips/include/asm/mach-bmips/*
 F:	arch/mips/kernel/*bmips*
-F:	arch/mips/boot/dts/bcm*.dts*
+F:	arch/mips/boot/dts/brcm/bcm*.dts*
 F:	drivers/irqchip/irq-bcm7*
 F:	drivers/irqchip/irq-brcmstb*

@@ -2339,7 +2339,7 @@ M: Ray Jui <rjui@broadcom.com>
 L:	bcm-kernel-feedback-list@broadcom.com
 S:	Supported
 F:	drivers/gpio/gpio-bcm-kona.c
-F:	Documentation/devicetree/bindings/gpio/gpio-bcm-kona.txt
+F:	Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt

 BROADCOM NVRAM DRIVER
 M:	Rafał Miłecki <zajec5@gmail.com>
@@ -5285,11 +5285,10 @@ INTEL ASoC BDW/HSW DRIVERS
 M:	Jie Yang <yang.jie@linux.intel.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
-F:	sound/soc/intel/sst-haswell*
-F:	sound/soc/intel/sst-dsp*
-F:	sound/soc/intel/sst-firmware.c
-F:	sound/soc/intel/broadwell.c
-F:	sound/soc/intel/haswell.c
+F:	sound/soc/intel/common/sst-dsp*
+F:	sound/soc/intel/common/sst-firmware.c
+F:	sound/soc/intel/boards/broadwell.c
+F:	sound/soc/intel/haswell/

 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M:	Intel SCU Linux support <intel-linux-scu@intel.com>
@@ -7019,7 +7018,6 @@ L: nbd-general@lists.sourceforge.net
 T:	git git://git.pengutronix.de/git/mpa/linux-nbd.git
 F:	Documentation/blockdev/nbd.txt
 F:	drivers/block/nbd.c
-F:	include/linux/nbd.h
 F:	include/uapi/linux/nbd.h

 NETWORK DROP MONITOR
@@ -7647,7 +7645,6 @@ F: arch/*/include/asm/paravirt.h
 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
 M:	Tim Waugh <tim@cyberelk.net>
 L:	linux-parport@lists.infradead.org (subscribers-only)
-W:	http://www.torque.net/linux-pp.html
 S:	Maintained
 F:	Documentation/blockdev/paride.txt
 F:	drivers/block/paride/
@@ -9091,9 +9088,9 @@ S: Supported
 F:	drivers/net/ethernet/emulex/benet/

 EMULEX ONECONNECT ROCE DRIVER
-M:	Selvin Xavier <selvin.xavier@emulex.com>
-M:	Devesh Sharma <devesh.sharma@emulex.com>
-M:	Mitesh Ahuja <mitesh.ahuja@emulex.com>
+M:	Selvin Xavier <selvin.xavier@avagotech.com>
+M:	Devesh Sharma <devesh.sharma@avagotech.com>
+M:	Mitesh Ahuja <mitesh.ahuja@avagotech.com>
 L:	linux-rdma@vger.kernel.org
 W:	http://www.emulex.com
 S:	Supported
@@ -9593,7 +9590,6 @@ F: include/uapi/linux/spi/

 SPIDERNET NETWORK DRIVER for CELL
 M:	Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
-M:	Jens Osterkamp <jens@de.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	Documentation/networking/spider_net.txt
@@ -11354,6 +11350,13 @@ L: zd1211-devs@lists.sourceforge.net (subscribers-only)
 S:	Maintained
 F:	drivers/net/wireless/zd1211rw/

+ZPOOL COMPRESSED PAGE STORAGE API
+M:	Dan Streetman <ddstreet@ieee.org>
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	mm/zpool.c
+F:	include/linux/zpool.h
+
 ZR36067 VIDEO FOR LINUX DRIVER
 L:	mjpeg-users@lists.sourceforge.net
 L:	linux-media@vger.kernel.org
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index fd6cdb56d4fd..2d28ba939d8e 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -157,22 +157,24 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 }

 static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
 	enum dma_data_direction dir)
 {
 	int i;
+	struct scatterlist *sg;

-	for (i = 0; i < nelems; i++, sg++)
+	for_each_sg(sglist, sg, nelems, i)
 		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
 }

 static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-	enum dma_data_direction dir)
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+	int nelems, enum dma_data_direction dir)
 {
 	int i;
+	struct scatterlist *sg;

-	for (i = 0; i < nelems; i++, sg++)
+	for_each_sg(sglist, sg, nelems, i)
 		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
 }

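Editor's note: the conversion above matters because the removed sg++ walk assumes the scatterlist is one flat array; for_each_sg() advances with sg_next() and therefore also handles chained scatterlists. A minimal sketch of the iteration pattern (the helper name is illustrative):

#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: visit every segment of a possibly-chained
 * scatterlist. for_each_sg() follows chain links via sg_next(),
 * which a bare sg++ would walk straight past. */
static void example_walk_sg(struct scatterlist *sglist, int nelems)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nelems, i)
		pr_debug("seg %d: len=%u\n", i, sg->length);
}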
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 1e184767c3be..e24df77abd79 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -369,7 +369,7 @@ static void __init at91_pm_sram_init(void)
 		return;
 	}

-	sram_pool = dev_get_gen_pool(&pdev->dev);
+	sram_pool = gen_pool_get(&pdev->dev);
 	if (!sram_pool) {
 		pr_warn("%s: sram pool unavailable!\n", __func__);
 		return;
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
index 0309ccda36a9..1885676c23c0 100644
--- a/arch/arm/mach-imx/pm-imx5.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -297,7 +297,7 @@ static int __init imx_suspend_alloc_ocram(
 		goto put_node;
 	}

-	ocram_pool = dev_get_gen_pool(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index b01650d94f91..93ecf559d06d 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -451,7 +451,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
 		goto put_node;
 	}

-	ocram_pool = dev_get_gen_pool(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c
index 1ed89fc2b7a8..6a4199f2bffb 100644
--- a/arch/arm/mach-socfpga/pm.c
+++ b/arch/arm/mach-socfpga/pm.c
@@ -56,7 +56,7 @@ static int socfpga_setup_ocram_self_refresh(void)
 		goto put_node;
 	}

-	ocram_pool = dev_get_gen_pool(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index ea21d4cad540..aa19b7ac8222 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -58,27 +58,22 @@ paddr_to_nid(unsigned long paddr)
  * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
  * the section resides.
  */
-int __meminit __early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn,
+			struct mminit_pfnnid_cache *state)
 {
 	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
-	/*
-	 * NOTE: The following SMP-unsafe globals are only used early in boot
-	 * when the kernel is running single-threaded.
-	 */
-	static int __meminitdata last_ssec, last_esec;
-	static int __meminitdata last_nid;

-	if (section >= last_ssec && section < last_esec)
-		return last_nid;
+	if (section >= state->last_start && section < state->last_end)
+		return state->last_nid;

 	for (i = 0; i < num_node_memblks; i++) {
 		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
 		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
 			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
 		if (section >= ssec && section < esec) {
-			last_ssec = ssec;
-			last_esec = esec;
-			last_nid = node_memblk[i].nid;
+			state->last_start = ssec;
+			state->last_end = esec;
+			state->last_nid = node_memblk[i].nid;
 			return node_memblk[i].nid;
 		}
 	}
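Editor's note: the new state argument replaces the SMP-unsafe function-local statics with a cache the caller owns. A sketch of the structure implied by the accesses above (the real definition added by this series lives in mm/internal.h; the exact field types here are inferred, so treat this as a sketch):

/* Per-caller cache of the last pfn-range-to-nid lookup. Field names
 * match the state->last_* accesses in the hunk above; types are
 * inferred rather than copied from the tree. */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};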
diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c
index 282a60ac82ba..a53343a90ca2 100644
--- a/arch/unicore32/kernel/fpu-ucf64.c
+++ b/arch/unicore32/kernel/fpu-ucf64.c
@@ -90,8 +90,8 @@ void ucf64_exchandler(u32 inst, u32 fpexc, struct pt_regs *regs)
 		tmp &= ~(FPSCR_CON);
 		exc &= ~(FPSCR_CMPINSTR_BIT | FPSCR_CON);
 	} else {
-		pr_debug(KERN_ERR "UniCore-F64 Error: unhandled exceptions\n");
-		pr_debug(KERN_ERR "UniCore-F64 FPSCR 0x%08x INST 0x%08x\n",
+		pr_debug("UniCore-F64 Error: unhandled exceptions\n");
+		pr_debug("UniCore-F64 FPSCR 0x%08x INST 0x%08x\n",
 			cff(FPSCR), inst);

 		ucf64_raise_sigfpe(0, regs);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d05a42357ef0..55bced17dc95 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -34,6 +34,7 @@ config X86
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	select ARCH_SUPPORTS_INT128 if X86_64
 	select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
 	select ARCH_USE_BUILTIN_BSWAP
@@ -87,6 +88,7 @@ config X86
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_CONTEXT_TRACKING if X86_64
+	select HAVE_COPY_THREAD_TLS
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DEBUG_STACKOVERFLOW
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index ca05f86481aa..ca83f7ac388b 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -72,15 +72,16 @@ static int setup_cmdline(struct kimage *image, struct boot_params *params,
 		unsigned long cmdline_len)
 {
 	char *cmdline_ptr = ((char *)params) + cmdline_offset;
-	unsigned long cmdline_ptr_phys, len;
+	unsigned long cmdline_ptr_phys, len = 0;
 	uint32_t cmdline_low_32, cmdline_ext_32;

-	memcpy(cmdline_ptr, cmdline, cmdline_len);
 	if (image->type == KEXEC_TYPE_CRASH) {
-		len = sprintf(cmdline_ptr + cmdline_len - 1,
-			" elfcorehdr=0x%lx", image->arch.elf_load_addr);
-		cmdline_len += len;
+		len = sprintf(cmdline_ptr,
+			"elfcorehdr=0x%lx ", image->arch.elf_load_addr);
 	}
+	memcpy(cmdline_ptr + len, cmdline, cmdline_len);
+	cmdline_len += len;
+
 	cmdline_ptr[cmdline_len - 1] = '\0';

 	pr_debug("Final command line is: %s\n", cmdline_ptr);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index c09c99ccf3e3..f73c962fe636 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -128,8 +128,8 @@ void release_thread(struct task_struct *dead_task)
 	release_vm86_irqs(dead_task);
 }

-int copy_thread(unsigned long clone_flags, unsigned long sp,
-	unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+	unsigned long arg, struct task_struct *p, unsigned long tls)
 {
 	struct pt_regs *childregs = task_pt_regs(p);
 	struct task_struct *tsk;
@@ -184,7 +184,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	 */
 	if (clone_flags & CLONE_SETTLS)
 		err = do_set_thread_area(p, -1,
-			(struct user_desc __user *)childregs->si, 0);
+			(struct user_desc __user *)tls, 0);

 	if (err && p->thread.io_bitmap_ptr) {
 		kfree(p->thread.io_bitmap_ptr);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 843f92e4c711..71d7849a07f7 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -150,8 +150,8 @@ static inline u32 read_32bit_tls(struct task_struct *t, int tls)
 	return get_desc_base(&t->thread.tls_array[tls]);
 }

-int copy_thread(unsigned long clone_flags, unsigned long sp,
-	unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+	unsigned long arg, struct task_struct *p, unsigned long tls)
 {
 	int err;
 	struct pt_regs *childregs;
@@ -207,10 +207,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 #ifdef CONFIG_IA32_EMULATION
 		if (is_ia32_task())
 			err = do_set_thread_area(p, -1,
-				(struct user_desc __user *)childregs->si, 0);
+				(struct user_desc __user *)tls, 0);
 		else
 #endif
-			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
+			err = do_arch_prctl(p, ARCH_SET_FS, tls);
 		if (err)
 			goto out;
 	}
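Editor's note: with HAVE_COPY_THREAD_TLS selected, the generic fork path hands the clone() tls argument to the architecture explicitly instead of the arch re-reading it from the child's register frame (childregs->si on 32-bit, childregs->r8 on 64-bit above). A hedged sketch of the compatibility shape for architectures that have not opted in, paraphrasing the generic code from this series rather than quoting it:

/* Sketch, not verbatim kernel code: opted-in architectures provide
 * copy_thread_tls() and receive tls as a real argument; the rest keep
 * copy_thread() and recover tls from register state themselves. */
#ifndef CONFIG_HAVE_COPY_THREAD_TLS
static inline int copy_thread_tls(unsigned long clone_flags,
		unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	/* Legacy path: tls travels implicitly via pt_regs. */
	return copy_thread(clone_flags, sp, arg, p);
}
#endif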
diff --git a/drivers/base/node.c b/drivers/base/node.c
index a2aa65b4215d..31df474d72f4 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -359,12 +359,16 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 #define page_initialized(page)  (page->lru.next)

-static int get_nid_for_pfn(unsigned long pfn)
+static int __init_refok get_nid_for_pfn(unsigned long pfn)
 {
 	struct page *page;

 	if (!pfn_valid_within(pfn))
 		return -1;
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+	if (system_state == SYSTEM_BOOTING)
+		return early_pfn_to_nid(pfn);
+#endif
 	page = pfn_to_page(pfn);
 	if (!page_initialized(page))
 		return -1;
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index a432633bced4..1c6f98dd88f4 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -321,9 +321,8 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
 	const char *res_name = "sram";
 	struct resource *res;

-	engine->pool = of_get_named_gen_pool(cesa->dev->of_node,
-					     "marvell,crypto-srams",
-					     idx);
+	engine->pool = of_gen_pool_get(cesa->dev->of_node,
+				       "marvell,crypto-srams", idx);
 	if (engine->pool) {
 		engine->sram = gen_pool_dma_alloc(engine->pool,
 						  cesa->sram_size,
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 449e785def17..e683761e0f8f 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -657,7 +657,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&tdev->device.channels);

 	if (pdev->dev.of_node)
-		pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);
+		pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
 	else
 		pool = sram_get_gpool("asram");
 	if (!pool) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0e690bf19fc9..af1ee517f372 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -555,10 +555,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 static inline void
 u_free(void *addr)
 {
-	if (!is_vmalloc_addr(addr))
-		kfree(addr);
-	else
-		vfree(addr);
+	kvfree(addr);
 }

 static inline void *
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index 8d594517cd29..7ffc748cb973 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -245,10 +245,7 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 ipz_queue_ctor_exit0:
 	ehca_gen_err("Couldn't alloc pages queue=%p "
 		 "nr_of_pages=%x", queue, nr_of_pages);
-	if (is_vmalloc_addr(queue->queue_pages))
-		vfree(queue->queue_pages);
-	else
-		kfree(queue->queue_pages);
+	kvfree(queue->queue_pages);

 	return 0;
 }
@@ -270,10 +267,7 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
 			free_page((unsigned long)queue->queue_pages[i]);
 	}

-	if (is_vmalloc_addr(queue->queue_pages))
-		vfree(queue->queue_pages);
-	else
-		kfree(queue->queue_pages);
+	kvfree(queue->queue_pages);

 	return 1;
 }
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index fe080ad0e558..ce64fc851251 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -157,7 +157,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)

 	for_each_cache(ca, c, iter) {
 		struct journal_device *ja = &ca->journal;
-		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
+		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
 		unsigned i, l, r, m;
 		uint64_t seq;

diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 4dd2bb7167f0..94980bfca434 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -760,14 +760,8 @@ static void bcache_device_free(struct bcache_device *d)
 	bio_split_pool_free(&d->bio_split_hook);
 	if (d->bio_split)
 		bioset_free(d->bio_split);
-	if (is_vmalloc_addr(d->full_dirty_stripes))
-		vfree(d->full_dirty_stripes);
-	else
-		kfree(d->full_dirty_stripes);
-	if (is_vmalloc_addr(d->stripe_sectors_dirty))
-		vfree(d->stripe_sectors_dirty);
-	else
-		kfree(d->stripe_sectors_dirty);
+	kvfree(d->full_dirty_stripes);
+	kvfree(d->stripe_sectors_dirty);

 	closure_debug_destroy(&d->cl);
 }
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 98df7572b5f7..1d04c4859c70 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -52,10 +52,7 @@ struct closure;

 #define free_heap(heap)						\
 do {								\
-	if (is_vmalloc_addr((heap)->data))			\
-		vfree((heap)->data);				\
-	else							\
-		kfree((heap)->data);				\
+	kvfree((heap)->data);					\
 	(heap)->data = NULL;					\
 } while (0)

@@ -163,10 +160,7 @@ do { \

 #define free_fifo(fifo)						\
 do {								\
-	if (is_vmalloc_addr((fifo)->data))			\
-		vfree((fifo)->data);				\
-	else							\
-		kfree((fifo)->data);				\
+	kvfree((fifo)->data);					\
 	(fifo)->data = NULL;					\
 } while (0)

diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 6d6e0ca91fb4..58f65486de33 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -2155,9 +2155,9 @@ static int coda_probe(struct platform_device *pdev)
 	}

 	/* Get IRAM pool from device tree or platform data */
-	pool = of_get_named_gen_pool(np, "iram", 0);
+	pool = of_gen_pool_get(np, "iram", 0);
 	if (!pool && pdata)
-		pool = dev_get_gen_pool(pdata->iram_dev);
+		pool = gen_pool_get(pdata->iram_dev);
 	if (!pool) {
 		dev_err(&pdev->dev, "iram pool not available\n");
 		return -ENOMEM;
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index aeabaa5aedf7..48db922075e2 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -419,10 +419,10 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
 	}

 	if (host->cmd_flags & DMA_DATA) {
-		if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1,
+		if (1 != dma_map_sg(&host->chip->pdev->dev, &host->req->sg, 1,
 				    host->req->data_dir == READ
-				    ? PCI_DMA_FROMDEVICE
-				    : PCI_DMA_TODEVICE)) {
+				    ? DMA_FROM_DEVICE
+				    : DMA_TO_DEVICE)) {
 			host->req->error = -ENOMEM;
 			return host->req->error;
 		}
@@ -487,9 +487,9 @@ static void jmb38x_ms_complete_cmd(struct memstick_host *msh, int last)
 	writel(0, host->addr + DMA_CONTROL);

 	if (host->cmd_flags & DMA_DATA) {
-		pci_unmap_sg(host->chip->pdev, &host->req->sg, 1,
+		dma_unmap_sg(&host->chip->pdev->dev, &host->req->sg, 1,
 			     host->req->data_dir == READ
-			     ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+			     ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	} else {
 		t_val = readl(host->addr + INT_STATUS_ENABLE);
 		if (host->req->data_dir == READ)
@@ -925,7 +925,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
 	int pci_dev_busy = 0;
 	int rc, cnt;

-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc)
 		return rc;

diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index e2a4f5f415b2..ef09ba0289d7 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -754,7 +754,7 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto error2;

 	pci_set_master(pdev);
-	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (error)
 		goto error3;

@@ -787,8 +787,8 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}

 	/* This is just a precation, so don't fail */
-	dev->dummy_dma_page = pci_alloc_consistent(pdev, PAGE_SIZE,
-		&dev->dummy_dma_page_physical_address);
+	dev->dummy_dma_page = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+		&dev->dummy_dma_page_physical_address, GFP_KERNEL);
 	r592_stop_dma(dev , 0);

 	if (request_irq(dev->irq, &r592_irq, IRQF_SHARED,
@@ -805,7 +805,7 @@ error7:
 	free_irq(dev->irq, dev);
 error6:
 	if (dev->dummy_dma_page)
-		pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page,
+		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
 			dev->dummy_dma_page_physical_address);

 	kthread_stop(dev->io_thread);
@@ -845,7 +845,7 @@ static void r592_remove(struct pci_dev *pdev)
 	memstick_free_host(dev->host);

 	if (dev->dummy_dma_page)
-		pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page,
+		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
 			dev->dummy_dma_page_physical_address);
 }

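Editor's note: the two memstick conversions above are mechanical moves from the legacy PCI DMA wrappers to the generic DMA API; the correspondence, taken directly from the hunks:

/* Legacy PCI DMA call                   -> generic DMA API equivalent
 * pci_set_dma_mask(pdev, mask)          -> dma_set_mask(&pdev->dev, mask)
 * pci_map_sg(pdev, ...) /
 * pci_unmap_sg(pdev, ...)               -> dma_map_sg(&pdev->dev, ...) /
 *                                          dma_unmap_sg(&pdev->dev, ...)
 * pci_alloc_consistent(pdev, sz, &h)    -> dma_alloc_coherent(&pdev->dev, sz,
 *                                                             &h, GFP_KERNEL)
 * pci_free_consistent(pdev, ...)        -> dma_free_coherent(&pdev->dev, ...)
 * PCI_DMA_FROMDEVICE / PCI_DMA_TODEVICE -> DMA_FROM_DEVICE / DMA_TO_DEVICE
 */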
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index b0cbb2b7fd48..76684dcb874c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1169,10 +1169,7 @@ void *cxgb_alloc_mem(unsigned long size)
  */
 void cxgb_free_mem(void *addr)
 {
-	if (is_vmalloc_addr(addr))
-		vfree(addr);
-	else
-		kfree(addr);
+	kvfree(addr);
 }

 /*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c64b5a99bfef..351f3b1bf800 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1150,10 +1150,7 @@ void *t4_alloc_mem(size_t size)
  */
 void t4_free_mem(void *addr)
 {
-	if (is_vmalloc_addr(addr))
-		vfree(addr);
-	else
-		kfree(addr);
+	kvfree(addr);
 }

 /* Send a Work Request to write the filter at a specified index. We construct
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index b3e5bd1d5d9c..9842301f7980 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -685,10 +685,7 @@ static inline void *cxgbi_alloc_big_mem(unsigned int size,

 static inline void cxgbi_free_big_mem(void *addr)
 {
-	if (is_vmalloc_addr(addr))
-		vfree(addr);
-	else
-		kfree(addr);
+	kvfree(addr);
 }

 static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1f8e2dc9c616..30268bb2ddb6 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2363,17 +2363,13 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
 	u64 block, rest = 0;
 	struct scsi_data_buffer *sdb;
 	enum dma_data_direction dir;
-	size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
-		       off_t);

 	if (do_write) {
 		sdb = scsi_out(scmd);
 		dir = DMA_TO_DEVICE;
-		func = sg_pcopy_to_buffer;
 	} else {
 		sdb = scsi_in(scmd);
 		dir = DMA_FROM_DEVICE;
-		func = sg_pcopy_from_buffer;
 	}

 	if (!sdb->length)
@@ -2385,16 +2381,16 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
 	if (block + num > sdebug_store_sectors)
 		rest = block + num - sdebug_store_sectors;

-	ret = func(sdb->table.sgl, sdb->table.nents,
+	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
 		   fake_storep + (block * scsi_debug_sector_size),
-		   (num - rest) * scsi_debug_sector_size, 0);
+		   (num - rest) * scsi_debug_sector_size, 0, do_write);
 	if (ret != (num - rest) * scsi_debug_sector_size)
 		return ret;

 	if (rest) {
-		ret += func(sdb->table.sgl, sdb->table.nents,
+		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
 			    fake_storep, rest * scsi_debug_sector_size,
-			    (num - rest) * scsi_debug_sector_size);
+			    (num - rest) * scsi_debug_sector_size, do_write);
 	}

 	return ret;
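Editor's note: dropping the function pointer works because sg_copy_buffer() folds both copy directions into one call selected by its final to_buffer flag, which do_device_access() now feeds with do_write. A hedged usage sketch (helper name illustrative; signature as used by the call sites above):

#include <linux/scatterlist.h>

/* Copy len bytes between a scatterlist and a linear buffer, choosing
 * the direction at runtime. The last argument of sg_copy_buffer() is
 * to_buffer: true copies sgl -> buf, false copies buf -> sgl. */
static size_t example_sg_xfer(struct scatterlist *sgl, unsigned int nents,
			      void *buf, size_t len, bool sg_to_buf)
{
	return sg_copy_buffer(sgl, nents, buf, len, 0, sg_to_buf);
}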
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0b4e24217564..cd3bfc16d25f 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -279,10 +279,7 @@ int transport_alloc_session_tags(struct se_session *se_sess,
 	if (rc < 0) {
 		pr_err("Unable to init se_sess->sess_tag_pool,"
 			" tag_num: %u\n", tag_num);
-		if (is_vmalloc_addr(se_sess->sess_cmd_map))
-			vfree(se_sess->sess_cmd_map);
-		else
-			kfree(se_sess->sess_cmd_map);
+		kvfree(se_sess->sess_cmd_map);
 		se_sess->sess_cmd_map = NULL;
 		return -ENOMEM;
 	}
@@ -489,10 +486,7 @@ void transport_free_session(struct se_session *se_sess)
 {
 	if (se_sess->sess_cmd_map) {
 		percpu_ida_destroy(&se_sess->sess_tag_pool);
-		if (is_vmalloc_addr(se_sess->sess_cmd_map))
-			vfree(se_sess->sess_cmd_map);
-		else
-			kfree(se_sess->sess_cmd_map);
+		kvfree(se_sess->sess_cmd_map);
 	}
 	kmem_cache_free(se_sess_cache, se_sess);
 }
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index a19c31d3f369..4d4a0df8344f 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -242,7 +242,7 @@ static struct kmem_cache *adfs_inode_cachep;
 static struct inode *adfs_alloc_inode(struct super_block *sb)
 {
 	struct adfs_inode_info *ei;
-	ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
+	ei = kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index a8f463c028ce..5fa92bc790ef 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -140,7 +140,7 @@ affs_remove_link(struct dentry *dentry)
 {
 	struct inode *dir, *inode = d_inode(dentry);
 	struct super_block *sb = inode->i_sb;
-	struct buffer_head *bh = NULL, *link_bh = NULL;
+	struct buffer_head *bh, *link_bh = NULL;
 	u32 link_ino, ino;
 	int retval;

diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index a022f4accd76..17349500592d 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -346,7 +346,7 @@ affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s3
 {
 	struct super_block *sb = dir->i_sb;
 	struct buffer_head *inode_bh = NULL;
-	struct buffer_head *bh = NULL;
+	struct buffer_head *bh;
 	u32 block = 0;
 	int retval;

diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
index f39b71c3981e..ea5b69a18ba9 100644
--- a/fs/affs/symlink.c
+++ b/fs/affs/symlink.c
@@ -16,14 +16,12 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
 	struct inode *inode = page->mapping->host;
 	char *link = kmap(page);
 	struct slink_front *lf;
-	int err;
 	int i, j;
 	char c;
 	char lc;

 	pr_debug("follow_link(ino=%lu)\n", inode->i_ino);

-	err = -EIO;
 	bh = affs_bread(inode->i_sb, inode->i_ino);
 	if (!bh)
 		goto fail;
@@ -66,7 +64,7 @@ fail:
 	SetPageError(page);
 	kunmap(page);
 	unlock_page(page);
-	return err;
+	return -EIO;
 }

 const struct address_space_operations affs_symlink_aops = {
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index add566303c68..c35ffdc12bba 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -142,6 +142,8 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode)
 	if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
 		return inode->i_sb;
 #endif
+	if (!devpts_mnt)
+		return NULL;
 	return devpts_mnt->mnt_sb;
 }

@@ -525,10 +527,14 @@ static struct file_system_type devpts_fs_type = {
 int devpts_new_index(struct inode *ptmx_inode)
 {
 	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-	struct pts_fs_info *fsi = DEVPTS_SB(sb);
+	struct pts_fs_info *fsi;
 	int index;
 	int ida_ret;

+	if (!sb)
+		return -ENODEV;
+
+	fsi = DEVPTS_SB(sb);
 retry:
 	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
 		return -ENOMEM;
@@ -584,11 +590,18 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
 	struct dentry *dentry;
 	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
 	struct inode *inode;
-	struct dentry *root = sb->s_root;
-	struct pts_fs_info *fsi = DEVPTS_SB(sb);
-	struct pts_mount_opts *opts = &fsi->mount_opts;
+	struct dentry *root;
+	struct pts_fs_info *fsi;
+	struct pts_mount_opts *opts;
 	char s[12];

+	if (!sb)
+		return ERR_PTR(-ENODEV);
+
+	root = sb->s_root;
+	fsi = DEVPTS_SB(sb);
+	opts = &fsi->mount_opts;
+
 	inode = new_inode(sb);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
@@ -676,12 +689,16 @@ static int __init init_devpts_fs(void)
 	struct ctl_table_header *table;

 	if (!err) {
+		struct vfsmount *mnt;
+
 		table = register_sysctl_table(pty_root_table);
-		devpts_mnt = kern_mount(&devpts_fs_type);
-		if (IS_ERR(devpts_mnt)) {
-			err = PTR_ERR(devpts_mnt);
+		mnt = kern_mount(&devpts_fs_type);
+		if (IS_ERR(mnt)) {
+			err = PTR_ERR(mnt);
 			unregister_filesystem(&devpts_fs_type);
 			unregister_sysctl_table(table);
+		} else {
+			devpts_mnt = mnt;
 		}
 	}
 	return err;
diff --git a/fs/mount.h b/fs/mount.h
index b5b8082bfa42..14db05d424f7 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -118,7 +118,6 @@ static inline void unlock_mount_hash(void)
 }

 struct proc_mounts {
-	struct seq_file m;
 	struct mnt_namespace *ns;
 	struct path root;
 	int (*show)(struct seq_file *, struct vfsmount *);
@@ -127,8 +126,6 @@ struct proc_mounts {
 	loff_t cached_index;
 };

-#define proc_mounts(p) (container_of((p), struct proc_mounts, m))
-
 extern const struct seq_operations mounts_op;

 extern bool __is_local_mountpoint(struct dentry *dentry);
diff --git a/fs/namespace.c b/fs/namespace.c
index 9c1c43d0d4f1..e99f1f4e00cd 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1226,7 +1226,7 @@ EXPORT_SYMBOL(replace_mount_options);
 /* iterator; we want it to have access to namespace_sem, thus here... */
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;

 	down_read(&namespace_sem);
 	if (p->cached_event == p->ns->event) {
@@ -1247,7 +1247,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)

 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;

 	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
 	p->cached_index = *pos;
@@ -1261,7 +1261,7 @@ static void m_stop(struct seq_file *m, void *v)

 static int m_show(struct seq_file *m, void *v)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;
 	struct mount *r = list_entry(v, struct mount, mnt_list);
 	return p->show(m, &r->mnt);
 }
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 8db932da4009..8ebd9a334085 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -17,7 +17,8 @@

 static unsigned mounts_poll(struct file *file, poll_table *wait)
 {
-	struct proc_mounts *p = proc_mounts(file->private_data);
+	struct seq_file *m = file->private_data;
+	struct proc_mounts *p = m->private;
 	struct mnt_namespace *ns = p->ns;
 	unsigned res = POLLIN | POLLRDNORM;
 	int event;
@@ -25,8 +26,8 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
 	poll_wait(file, &p->ns->poll, wait);

 	event = ACCESS_ONCE(ns->event);
-	if (p->m.poll_event != event) {
-		p->m.poll_event = event;
+	if (m->poll_event != event) {
+		m->poll_event = event;
 		res |= POLLERR | POLLPRI;
 	}

@@ -92,7 +93,7 @@ static void show_type(struct seq_file *m, struct super_block *sb)

 static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;
 	struct mount *r = real_mount(mnt);
 	int err = 0;
 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -126,7 +127,7 @@ out:

 static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;
 	struct mount *r = real_mount(mnt);
 	struct super_block *sb = mnt->mnt_sb;
 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -186,7 +187,7 @@ out:

 static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;
 	struct mount *r = real_mount(mnt);
 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
 	struct super_block *sb = mnt_path.dentry->d_sb;
@@ -236,6 +237,7 @@ static int mounts_open_common(struct inode *inode, struct file *file,
 	struct mnt_namespace *ns = NULL;
 	struct path root;
 	struct proc_mounts *p;
+	struct seq_file *m;
 	int ret = -EINVAL;

 	if (!task)
@@ -260,26 +262,21 @@ static int mounts_open_common(struct inode *inode, struct file *file,
 	task_unlock(task);
 	put_task_struct(task);

-	ret = -ENOMEM;
-	p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
-	if (!p)
+	ret = seq_open_private(file, &mounts_op, sizeof(struct proc_mounts));
+	if (ret)
 		goto err_put_path;

-	file->private_data = &p->m;
-	ret = seq_open(file, &mounts_op);
-	if (ret)
-		goto err_free;
+	m = file->private_data;
+	m->poll_event = ns->event;

+	p = m->private;
 	p->ns = ns;
 	p->root = root;
-	p->m.poll_event = ns->event;
 	p->show = show;
 	p->cached_event = ~0ULL;

 	return 0;

- err_free:
-	kfree(p);
  err_put_path:
 	path_put(&root);
  err_put_ns:
@@ -290,10 +287,11 @@ static int mounts_open_common(struct inode *inode, struct file *file,

 static int mounts_release(struct inode *inode, struct file *file)
 {
-	struct proc_mounts *p = proc_mounts(file->private_data);
+	struct seq_file *m = file->private_data;
+	struct proc_mounts *p = m->private;
 	path_put(&p->root);
 	put_mnt_ns(p->ns);
-	return seq_release(inode, file);
+	return seq_release_private(inode, file);
 }

 static int mounts_open(struct inode *inode, struct file *file)
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 52b492721603..1d9c1cbd4d0b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -48,18 +48,21 @@ static void *seq_buf_alloc(unsigned long size)
  * ERR_PTR(error). In the end of sequence they return %NULL. ->show()
  * returns 0 in case of success and negative number in case of error.
  * Returning SEQ_SKIP means "discard this element and move on".
+ * Note: seq_open() will allocate a struct seq_file and store its
+ * pointer in @file->private_data. This pointer should not be modified.
  */
 int seq_open(struct file *file, const struct seq_operations *op)
 {
-	struct seq_file *p = file->private_data;
+	struct seq_file *p;
+
+	WARN_ON(file->private_data);
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	file->private_data = p;

-	if (!p) {
-		p = kmalloc(sizeof(*p), GFP_KERNEL);
-		if (!p)
-			return -ENOMEM;
-		file->private_data = p;
-	}
-	memset(p, 0, sizeof(*p));
 	mutex_init(&p->lock);
 	p->op = op;
 #ifdef CONFIG_USER_NS
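Editor's note: the proc_namespace.c conversion earlier in this merge is the intended usage pattern for this change: let seq_open_private() allocate the seq_file together with a zeroed private struct, and free both with seq_release_private(). A minimal hedged sketch (struct and ops names are illustrative, and the seq_operations body is elided):

#include <linux/fs.h>
#include <linux/seq_file.h>

/* Illustrative private state, not a struct from the tree. */
struct example_iter_state {
	loff_t cached_pos;
};

/* start/next/stop/show callbacks elided for brevity. */
static const struct seq_operations example_seq_ops;

static int example_open(struct inode *inode, struct file *file)
{
	/* Allocates the struct seq_file into file->private_data and a
	 * zeroed example_iter_state hung off its ->private. */
	return seq_open_private(file, &example_seq_ops,
				sizeof(struct example_iter_state));
}

static int example_release(struct inode *inode, struct file *file)
{
	/* Frees the private state, then the seq_file itself. */
	return seq_release_private(inode, file);
}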
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 19a240446fca..e42495ad8136 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -56,10 +56,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)

 static __inline void drm_free_large(void *ptr)
 {
-	if (!is_vmalloc_addr(ptr))
-		return kfree(ptr);
-
-	vfree(ptr);
+	kvfree(ptr);
 }

 #endif
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 1ccaab44abcc..5383bb1394a1 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -119,16 +119,16 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,

 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
 		int min_alloc_order, int nid);
-extern struct gen_pool *dev_get_gen_pool(struct device *dev);
+extern struct gen_pool *gen_pool_get(struct device *dev);

 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
 			size_t size);

 #ifdef CONFIG_OF
-extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+extern struct gen_pool *of_gen_pool_get(struct device_node *np,
 	const char *propname, int index);
 #else
-static inline struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
 	const char *propname, int index)
 {
 	return NULL;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6ba7cf23748f..ad35f300b9a4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -384,6 +384,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(struct zone *zone);
 void drain_local_pages(struct zone *zone);

+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+void page_alloc_init_late(void);
+#else
+static inline void page_alloc_init_late(void)
+{
+}
+#endif
+
 /*
  * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
  * GFP flags are used before interrupts are enabled. Once interrupts are
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cfa9351c7536..5f0be58640ea 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -439,6 +439,9 @@ extern int panic_on_unrecovered_nmi;
 extern int panic_on_io_nmi;
 extern int panic_on_warn;
 extern int sysctl_panic_on_stackoverflow;
+
+extern bool crash_kexec_post_notifiers;
+
 /*
  * Only to be used by arch init code. If the user over-wrote the default
  * CONFIG_PANIC_TIMEOUT, honor it.
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 0215ffd63069..cc4b01972060 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -101,6 +101,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
101 struct memblock_type *type_b, phys_addr_t *out_start, 101 struct memblock_type *type_b, phys_addr_t *out_start,
102 phys_addr_t *out_end, int *out_nid); 102 phys_addr_t *out_end, int *out_nid);
103 103
104void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
105 phys_addr_t *out_end);
106
104/** 107/**
105 * for_each_mem_range - iterate through memblock areas from type_a and not 108 * for_each_mem_range - iterate through memblock areas from type_a and not
106 * included in type_b. Or just type_a if type_b is NULL. 109 * included in type_b. Or just type_a if type_b is NULL.
@@ -142,6 +145,21 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
142 __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ 145 __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
143 p_start, p_end, p_nid)) 146 p_start, p_end, p_nid))
144 147
148/**
149 * for_each_reserved_mem_region - iterate over all reserved memblock areas
150 * @i: u64 used as loop variable
151 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
152 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
153 *
154 * Walks over reserved areas of memblock. Available as soon as memblock
155 * is initialized.
156 */
157#define for_each_reserved_mem_region(i, p_start, p_end) \
158 for (i = 0UL, \
159 __next_reserved_mem_region(&i, p_start, p_end); \
160 i != (u64)ULLONG_MAX; \
161 __next_reserved_mem_region(&i, p_start, p_end))
162
145#ifdef CONFIG_MOVABLE_NODE 163#ifdef CONFIG_MOVABLE_NODE
146static inline bool memblock_is_hotpluggable(struct memblock_region *m) 164static inline bool memblock_is_hotpluggable(struct memblock_region *m)
147{ 165{
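A usage sketch for the new iterator, mirroring the call site this series adds in mm/nobootmem.c further down; the pr_info() is illustrative:

	u64 i;
	phys_addr_t start, end;

	/* walk every reserved memblock region; the end address is inclusive */
	for_each_reserved_mem_region(i, &start, &end)
		pr_info("reserved: %pa..%pa\n", &start, &end);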
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 99959a34f4f1..2e872f92dbac 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1635,6 +1635,8 @@ extern void free_highmem_page(struct page *page);
1635extern void adjust_managed_page_count(struct page *page, long count); 1635extern void adjust_managed_page_count(struct page *page, long count);
1636extern void mem_init_print_info(const char *str); 1636extern void mem_init_print_info(const char *str);
1637 1637
1638extern void reserve_bootmem_region(unsigned long start, unsigned long end);
1639
1638/* Free the reserved page into the buddy system, so it gets managed. */ 1640/* Free the reserved page into the buddy system, so it gets managed. */
1639static inline void __free_reserved_page(struct page *page) 1641static inline void __free_reserved_page(struct page *page)
1640{ 1642{
@@ -1724,7 +1726,8 @@ extern void sparse_memory_present_with_active_regions(int nid);
1724 1726
1725#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ 1727#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1726 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) 1728 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1727static inline int __early_pfn_to_nid(unsigned long pfn) 1729static inline int __early_pfn_to_nid(unsigned long pfn,
1730 struct mminit_pfnnid_cache *state)
1728{ 1731{
1729 return 0; 1732 return 0;
1730} 1733}
@@ -1732,7 +1735,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn)
1732/* please see mm/page_alloc.c */ 1735/* please see mm/page_alloc.c */
1733extern int __meminit early_pfn_to_nid(unsigned long pfn); 1736extern int __meminit early_pfn_to_nid(unsigned long pfn);
1734/* there is a per-arch backend function. */ 1737/* there is a per-arch backend function. */
1735extern int __meminit __early_pfn_to_nid(unsigned long pfn); 1738extern int __meminit __early_pfn_to_nid(unsigned long pfn,
1739 struct mminit_pfnnid_cache *state);
1736#endif 1740#endif
1737 1741
1738extern void set_dma_reserve(unsigned long new_dma_reserve); 1742extern void set_dma_reserve(unsigned long new_dma_reserve);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 54d74f6eb233..754c25966a0a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -762,6 +762,14 @@ typedef struct pglist_data {
762 /* Number of pages migrated during the rate limiting time interval */ 762 /* Number of pages migrated during the rate limiting time interval */
763 unsigned long numabalancing_migrate_nr_pages; 763 unsigned long numabalancing_migrate_nr_pages;
764#endif 764#endif
765
766#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
767 /*
768 * If memory initialisation on large machines is deferred then this
769 * is the first PFN that needs to be initialised.
770 */
771 unsigned long first_deferred_pfn;
772#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
765} pg_data_t; 773} pg_data_t;
766 774
767#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) 775#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
@@ -1216,11 +1224,16 @@ void sparse_init(void);
1216#define sparse_index_init(_sec, _nid) do {} while (0) 1224#define sparse_index_init(_sec, _nid) do {} while (0)
1217#endif /* CONFIG_SPARSEMEM */ 1225#endif /* CONFIG_SPARSEMEM */
1218 1226
1219#ifdef CONFIG_NODES_SPAN_OTHER_NODES 1227/*
1220bool early_pfn_in_nid(unsigned long pfn, int nid); 1228 * During memory init memblocks map pfns to nids. The search is expensive and
1221#else 1229 * this caches recent lookups. The implementation of __early_pfn_to_nid
1222#define early_pfn_in_nid(pfn, nid) (1) 1230 * may treat start/end as pfns or sections.
1223#endif 1231 */
1232struct mminit_pfnnid_cache {
1233 unsigned long last_start;
1234 unsigned long last_end;
1235 int last_nid;
1236};
1224 1237
1225#ifndef early_pfn_valid 1238#ifndef early_pfn_valid
1226#define early_pfn_valid(pfn) (1) 1239#define early_pfn_valid(pfn) (1)
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 50a8486c524b..9b1ef0c820a7 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -265,13 +265,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
265 unsigned long offset, unsigned long size, 265 unsigned long offset, unsigned long size,
266 gfp_t gfp_mask); 266 gfp_t gfp_mask);
267 267
268size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
269 size_t buflen, off_t skip, bool to_buffer);
270
268size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, 271size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
269 void *buf, size_t buflen); 272 const void *buf, size_t buflen);
270size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, 273size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
271 void *buf, size_t buflen); 274 void *buf, size_t buflen);
272 275
273size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, 276size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
274 void *buf, size_t buflen, off_t skip); 277 const void *buf, size_t buflen, off_t skip);
275size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, 278size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
276 void *buf, size_t buflen, off_t skip); 279 void *buf, size_t buflen, off_t skip);
277 280
diff --git a/init/main.c b/init/main.c
index c599aea23bb1..c5d5626289ce 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1004,6 +1004,8 @@ static noinline void __init kernel_init_freeable(void)
1004 smp_init(); 1004 smp_init();
1005 sched_init_smp(); 1005 sched_init_smp();
1006 1006
1007 page_alloc_init_late();
1008
1007 do_basic_setup(); 1009 do_basic_setup();
1008 1010
1009 /* Open the /dev/console on the rootfs, this should never fail */ 1011 /* Open the /dev/console on the rootfs, this should never fail */
diff --git a/ipc/msg.c b/ipc/msg.c
index 2b6fdbb9e0e9..66c4f567eb73 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -76,7 +76,7 @@ struct msg_sender {
76 76
77static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id) 77static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
78{ 78{
79 struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id); 79 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id);
80 80
81 if (IS_ERR(ipcp)) 81 if (IS_ERR(ipcp))
82 return ERR_CAST(ipcp); 82 return ERR_CAST(ipcp);
@@ -196,7 +196,7 @@ static void expunge_all(struct msg_queue *msq, int res)
196 * or dealing with -EAGAIN cases. See lockless receive part 1 196 * or dealing with -EAGAIN cases. See lockless receive part 1
197 * and 2 in do_msgrcv(). 197 * and 2 in do_msgrcv().
198 */ 198 */
199 smp_mb(); 199 smp_wmb(); /* barrier (B) */
200 msr->r_msg = ERR_PTR(res); 200 msr->r_msg = ERR_PTR(res);
201 } 201 }
202} 202}
@@ -580,7 +580,8 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
580 /* initialize pipelined send ordering */ 580 /* initialize pipelined send ordering */
581 msr->r_msg = NULL; 581 msr->r_msg = NULL;
582 wake_up_process(msr->r_tsk); 582 wake_up_process(msr->r_tsk);
583 smp_mb(); /* see barrier comment below */ 583 /* barrier (B) see barrier comment below */
584 smp_wmb();
584 msr->r_msg = ERR_PTR(-E2BIG); 585 msr->r_msg = ERR_PTR(-E2BIG);
585 } else { 586 } else {
586 msr->r_msg = NULL; 587 msr->r_msg = NULL;
@@ -589,11 +590,12 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
589 wake_up_process(msr->r_tsk); 590 wake_up_process(msr->r_tsk);
590 /* 591 /*
591 * Ensure that the wakeup is visible before 592 * Ensure that the wakeup is visible before
 592	 * setting r_msg, as the receiving end depends	 593	 * setting r_msg, as the receiving end can otherwise
593 * on it. See lockless receive part 1 and 2 in 594 * exit - once r_msg is set, the receiver can
594 * do_msgrcv(). 595 * continue. See lockless receive part 1 and 2
596 * in do_msgrcv(). Barrier (B).
595 */ 597 */
596 smp_mb(); 598 smp_wmb();
597 msr->r_msg = msg; 599 msr->r_msg = msg;
598 600
599 return 1; 601 return 1;
@@ -932,12 +934,38 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
932 /* Lockless receive, part 2: 934 /* Lockless receive, part 2:
933 * Wait until pipelined_send or expunge_all are outside of 935 * Wait until pipelined_send or expunge_all are outside of
934 * wake_up_process(). There is a race with exit(), see 936 * wake_up_process(). There is a race with exit(), see
935 * ipc/mqueue.c for the details. 937 * ipc/mqueue.c for the details. The correct serialization
938 * ensures that a receiver cannot continue without the wakeup
 939	 * being visible _before_ setting r_msg:
940 *
941 * CPU 0 CPU 1
942 * <loop receiver>
943 * smp_rmb(); (A) <-- pair -. <waker thread>
944 * <load ->r_msg> | msr->r_msg = NULL;
945 * | wake_up_process();
946 * <continue> `------> smp_wmb(); (B)
947 * msr->r_msg = msg;
948 *
949 * Where (A) orders the message value read and where (B) orders
950 * the write to the r_msg -- done in both pipelined_send and
951 * expunge_all.
936 */ 952 */
937 msg = (struct msg_msg *)msr_d.r_msg; 953 for (;;) {
938 while (msg == NULL) { 954 /*
939 cpu_relax(); 955 * Pairs with writer barrier in pipelined_send
956 * or expunge_all.
957 */
958 smp_rmb(); /* barrier (A) */
940 msg = (struct msg_msg *)msr_d.r_msg; 959 msg = (struct msg_msg *)msr_d.r_msg;
960 if (msg)
961 break;
962
963 /*
964 * The cpu_relax() call is a compiler barrier
965 * which forces everything in this loop to be
966 * re-loaded.
967 */
968 cpu_relax();
941 } 969 }
942 970
943 /* Lockless receive, part 3: 971 /* Lockless receive, part 3:
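The barrier pairing documented above is the usual publish/consume pattern. A stand-alone sketch of the same idea with illustrative names (the real code keeps barrier (A) inside the receive loop, which yields the same pairing):

	/* writer, the pipelined_send()/expunge_all() analogue */
	msr->payload = value;
	smp_wmb();			/* barrier (B): payload before r_msg */
	msr->r_msg = msg;

	/* reader, the do_msgrcv() analogue */
	while (READ_ONCE(msr->r_msg) == NULL)
		cpu_relax();		/* compiler barrier, forces a re-load */
	smp_rmb();			/* barrier (A): pairs with (B) */
	consume(msr->payload);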
diff --git a/ipc/sem.c b/ipc/sem.c
index d1a6edd17eba..bc3d530cb23e 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -391,7 +391,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
391 struct kern_ipc_perm *ipcp; 391 struct kern_ipc_perm *ipcp;
392 struct sem_array *sma; 392 struct sem_array *sma;
393 393
394 ipcp = ipc_obtain_object(&sem_ids(ns), id); 394 ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
395 if (IS_ERR(ipcp)) 395 if (IS_ERR(ipcp))
396 return ERR_CAST(ipcp); 396 return ERR_CAST(ipcp);
397 397
@@ -410,7 +410,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
410 410
411static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id) 411static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
412{ 412{
413 struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id); 413 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
414 414
415 if (IS_ERR(ipcp)) 415 if (IS_ERR(ipcp))
416 return ERR_CAST(ipcp); 416 return ERR_CAST(ipcp);
diff --git a/ipc/shm.c b/ipc/shm.c
index 6d767071c367..06e5cf2fe019 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -129,7 +129,7 @@ void __init shm_init(void)
129 129
130static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) 130static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
131{ 131{
132 struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id); 132 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
133 133
134 if (IS_ERR(ipcp)) 134 if (IS_ERR(ipcp))
135 return ERR_CAST(ipcp); 135 return ERR_CAST(ipcp);
@@ -155,8 +155,11 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
155{ 155{
156 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); 156 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
157 157
158 if (IS_ERR(ipcp)) 158 /*
159 return (struct shmid_kernel *)ipcp; 159 * We raced in the idr lookup or with shm_destroy(). Either way, the
160 * ID is busted.
161 */
162 BUG_ON(IS_ERR(ipcp));
160 163
161 return container_of(ipcp, struct shmid_kernel, shm_perm); 164 return container_of(ipcp, struct shmid_kernel, shm_perm);
162} 165}
@@ -191,7 +194,6 @@ static void shm_open(struct vm_area_struct *vma)
191 struct shmid_kernel *shp; 194 struct shmid_kernel *shp;
192 195
193 shp = shm_lock(sfd->ns, sfd->id); 196 shp = shm_lock(sfd->ns, sfd->id);
194 BUG_ON(IS_ERR(shp));
195 shp->shm_atim = get_seconds(); 197 shp->shm_atim = get_seconds();
196 shp->shm_lprid = task_tgid_vnr(current); 198 shp->shm_lprid = task_tgid_vnr(current);
197 shp->shm_nattch++; 199 shp->shm_nattch++;
@@ -258,7 +260,6 @@ static void shm_close(struct vm_area_struct *vma)
258 down_write(&shm_ids(ns).rwsem); 260 down_write(&shm_ids(ns).rwsem);
259 /* remove from the list of attaches of the shm segment */ 261 /* remove from the list of attaches of the shm segment */
260 shp = shm_lock(ns, sfd->id); 262 shp = shm_lock(ns, sfd->id);
261 BUG_ON(IS_ERR(shp));
262 shp->shm_lprid = task_tgid_vnr(current); 263 shp->shm_lprid = task_tgid_vnr(current);
263 shp->shm_dtim = get_seconds(); 264 shp->shm_dtim = get_seconds();
264 shp->shm_nattch--; 265 shp->shm_nattch--;
@@ -1191,7 +1192,6 @@ out_fput:
1191out_nattch: 1192out_nattch:
1192 down_write(&shm_ids(ns).rwsem); 1193 down_write(&shm_ids(ns).rwsem);
1193 shp = shm_lock(ns, shmid); 1194 shp = shm_lock(ns, shmid);
1194 BUG_ON(IS_ERR(shp));
1195 shp->shm_nattch--; 1195 shp->shm_nattch--;
1196 if (shm_may_destroy(ns, shp)) 1196 if (shm_may_destroy(ns, shp))
1197 shm_destroy(ns, shp); 1197 shm_destroy(ns, shp);
diff --git a/ipc/util.c b/ipc/util.c
index ff3323ef8d8b..be4230020a1f 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -467,10 +467,7 @@ void ipc_rcu_free(struct rcu_head *head)
467{ 467{
468 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); 468 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
469 469
470 if (is_vmalloc_addr(p)) 470 kvfree(p);
471 vfree(p);
472 else
473 kfree(p);
474} 471}
475 472
476/** 473/**
@@ -558,7 +555,7 @@ void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
558 * Call inside the RCU critical section. 555 * Call inside the RCU critical section.
559 * The ipc object is *not* locked on exit. 556 * The ipc object is *not* locked on exit.
560 */ 557 */
561struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id) 558struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id)
562{ 559{
563 struct kern_ipc_perm *out; 560 struct kern_ipc_perm *out;
564 int lid = ipcid_to_idx(id); 561 int lid = ipcid_to_idx(id);
@@ -584,21 +581,24 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
584 struct kern_ipc_perm *out; 581 struct kern_ipc_perm *out;
585 582
586 rcu_read_lock(); 583 rcu_read_lock();
587 out = ipc_obtain_object(ids, id); 584 out = ipc_obtain_object_idr(ids, id);
588 if (IS_ERR(out)) 585 if (IS_ERR(out))
589 goto err1; 586 goto err;
590 587
591 spin_lock(&out->lock); 588 spin_lock(&out->lock);
592 589
593 /* ipc_rmid() may have already freed the ID while ipc_lock 590 /*
594 * was spinning: here verify that the structure is still valid 591 * ipc_rmid() may have already freed the ID while ipc_lock()
592 * was spinning: here verify that the structure is still valid.
593 * Upon races with RMID, return -EIDRM, thus indicating that
594 * the ID points to a removed identifier.
595 */ 595 */
596 if (ipc_valid_object(out)) 596 if (ipc_valid_object(out))
597 return out; 597 return out;
598 598
599 spin_unlock(&out->lock); 599 spin_unlock(&out->lock);
600 out = ERR_PTR(-EINVAL); 600 out = ERR_PTR(-EIDRM);
601err1: 601err:
602 rcu_read_unlock(); 602 rcu_read_unlock();
603 return out; 603 return out;
604} 604}
@@ -608,7 +608,7 @@ err1:
608 * @ids: ipc identifier set 608 * @ids: ipc identifier set
609 * @id: ipc id to look for 609 * @id: ipc id to look for
610 * 610 *
611 * Similar to ipc_obtain_object() but also checks 611 * Similar to ipc_obtain_object_idr() but also checks
612 * the ipc object reference counter. 612 * the ipc object reference counter.
613 * 613 *
614 * Call inside the RCU critical section. 614 * Call inside the RCU critical section.
@@ -616,13 +616,13 @@ err1:
616 */ 616 */
617struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id) 617struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
618{ 618{
619 struct kern_ipc_perm *out = ipc_obtain_object(ids, id); 619 struct kern_ipc_perm *out = ipc_obtain_object_idr(ids, id);
620 620
621 if (IS_ERR(out)) 621 if (IS_ERR(out))
622 goto out; 622 goto out;
623 623
624 if (ipc_checkid(out, id)) 624 if (ipc_checkid(out, id))
625 return ERR_PTR(-EIDRM); 625 return ERR_PTR(-EINVAL);
626out: 626out:
627 return out; 627 return out;
628} 628}
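Both lookup helpers must be called inside an RCU read-side critical section, as the comments above require. A hedged caller sketch, with error handling trimmed to the essentials:

	rcu_read_lock();
	ipcp = ipc_obtain_object_check(&msg_ids(ns), id);
	if (IS_ERR(ipcp)) {
		rcu_read_unlock();
		return PTR_ERR(ipcp);
	}
	/* read-mostly access under RCU; take the object lock to modify */
	rcu_read_unlock();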
diff --git a/ipc/util.h b/ipc/util.h
index 1a5a0fcd099c..3a8a5a0eca62 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -132,7 +132,7 @@ void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
132void ipc_rcu_free(struct rcu_head *head); 132void ipc_rcu_free(struct rcu_head *head);
133 133
134struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); 134struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
135struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); 135struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id);
136 136
137void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); 137void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
138void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); 138void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index a744098e4eb7..7080ae1eb6c1 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -92,6 +92,12 @@ void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)
92} 92}
93EXPORT_SYMBOL(__gcov_merge_time_profile); 93EXPORT_SYMBOL(__gcov_merge_time_profile);
94 94
95void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
96{
97 /* Unused. */
98}
99EXPORT_SYMBOL(__gcov_merge_icall_topn);
100
95/** 101/**
96 * gcov_enable_events - enable event reporting through gcov_event() 102 * gcov_enable_events - enable event reporting through gcov_event()
97 * 103 *
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 826ba9fb5e32..e25e92fb44fa 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
18#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
19#include "gcov.h" 19#include "gcov.h"
20 20
21#if __GNUC__ == 4 && __GNUC_MINOR__ >= 9 21#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
22#define GCOV_COUNTERS 10
23#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
22#define GCOV_COUNTERS 9 24#define GCOV_COUNTERS 9
23#else 25#else
24#define GCOV_COUNTERS 8 26#define GCOV_COUNTERS 8
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 7a36fdcca5bf..a785c1015e25 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -84,6 +84,17 @@ struct resource crashk_low_res = {
84 84
85int kexec_should_crash(struct task_struct *p) 85int kexec_should_crash(struct task_struct *p)
86{ 86{
87 /*
88 * If crash_kexec_post_notifiers is enabled, don't run
 89	 * crash_kexec() here; it must run after the panic
90 * notifiers in panic().
91 */
92 if (crash_kexec_post_notifiers)
93 return 0;
94 /*
95 * There are 4 panic() calls in do_exit() path, each of which
96 * corresponds to each of these 4 conditions.
97 */
87 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops) 98 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
88 return 1; 99 return 1;
89 return 0; 100 return 0;
diff --git a/kernel/panic.c b/kernel/panic.c
index 8136ad76e5fd..04e91ff7560b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -32,7 +32,7 @@ static unsigned long tainted_mask;
32static int pause_on_oops; 32static int pause_on_oops;
33static int pause_on_oops_flag; 33static int pause_on_oops_flag;
34static DEFINE_SPINLOCK(pause_on_oops_lock); 34static DEFINE_SPINLOCK(pause_on_oops_lock);
35static bool crash_kexec_post_notifiers; 35bool crash_kexec_post_notifiers;
36int panic_on_warn __read_mostly; 36int panic_on_warn __read_mostly;
37 37
38int panic_timeout = CONFIG_PANIC_TIMEOUT; 38int panic_timeout = CONFIG_PANIC_TIMEOUT;
@@ -142,7 +142,8 @@ void panic(const char *fmt, ...)
142 * Note: since some panic_notifiers can make crashed kernel 142 * Note: since some panic_notifiers can make crashed kernel
143 * more unstable, it can increase risks of the kdump failure too. 143 * more unstable, it can increase risks of the kdump failure too.
144 */ 144 */
145 crash_kexec(NULL); 145 if (crash_kexec_post_notifiers)
146 crash_kexec(NULL);
146 147
147 bust_spinlocks(0); 148 bust_spinlocks(0);
148 149
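The flag is set by the existing crash_kexec_post_notifiers boot parameter. Exporting it lets kexec_should_crash() skip the early crash_kexec() path as well, so with

	crash_kexec_post_notifiers

on the kernel command line, the crash kernel is entered only after the panic notifiers and kmsg dumpers have run, for oopses as well as explicit panics.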
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index de553849f3ac..cf8c24203368 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -207,14 +207,14 @@ static int console_may_schedule;
207 * need to be changed in the future, when the requirements change. 207 * need to be changed in the future, when the requirements change.
208 * 208 *
209 * /dev/kmsg exports the structured data in the following line format: 209 * /dev/kmsg exports the structured data in the following line format:
210 * "<level>,<sequnum>,<timestamp>,<contflag>;<message text>\n" 210 * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
211 *
212 * Users of the export format should ignore possible additional values
213 * separated by ',', and find the message after the ';' character.
211 * 214 *
212 * The optional key/value pairs are attached as continuation lines starting 215 * The optional key/value pairs are attached as continuation lines starting
213 * with a space character and terminated by a newline. All possible 216 * with a space character and terminated by a newline. All possible
 214	 * non-printable characters are escaped in the "\xff" notation.	 217	 * non-printable characters are escaped in the "\xff" notation.
215 *
216 * Users of the export format should ignore possible additional values
217 * separated by ',', and find the message after the ';' character.
218 */ 218 */
219 219
220enum log_flags { 220enum log_flags {
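An illustrative /dev/kmsg record under this format (the values are invented for the example):

	6,339,5140900,-;NET: Registered protocol family 10

i.e. level 6, sequence number 339, a monotonic timestamp in microseconds, no continuation flag, then the message text. A robust consumer splits at the first ';' and ignores any commas beyond the third in the prefix.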
diff --git a/kernel/relay.c b/kernel/relay.c
index e9dbaeb8fd65..0b4570cfacae 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -81,10 +81,7 @@ static struct page **relay_alloc_page_array(unsigned int n_pages)
81 */ 81 */
82static void relay_free_page_array(struct page **array) 82static void relay_free_page_array(struct page **array)
83{ 83{
84 if (is_vmalloc_addr(array)) 84 kvfree(array);
85 vfree(array);
86 else
87 kfree(array);
88} 85}
89 86
90/** 87/**
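kvfree() folds the is_vmalloc_addr() test into the callee, so it is safe for the common kmalloc-with-vmalloc-fallback pattern these call sites use. A stand-alone sketch of the pairing (the size is illustrative):

	/* allocate: try the slab first, fall back to vmalloc for large arrays */
	array = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!array)
		array = vzalloc(size);

	/* free: kvfree() dispatches to vfree() or kfree() as appropriate */
	kvfree(array);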
diff --git a/lib/genalloc.c b/lib/genalloc.c
index d214866eeea2..daf0afb6d979 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -602,12 +602,12 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
602EXPORT_SYMBOL(devm_gen_pool_create); 602EXPORT_SYMBOL(devm_gen_pool_create);
603 603
604/** 604/**
605 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device 605 * gen_pool_get - Obtain the gen_pool (if any) for a device
606 * @dev: device to retrieve the gen_pool from 606 * @dev: device to retrieve the gen_pool from
607 * 607 *
608 * Returns the gen_pool for the device if one is present, or NULL. 608 * Returns the gen_pool for the device if one is present, or NULL.
609 */ 609 */
610struct gen_pool *dev_get_gen_pool(struct device *dev) 610struct gen_pool *gen_pool_get(struct device *dev)
611{ 611{
612 struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL, 612 struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
613 NULL); 613 NULL);
@@ -616,11 +616,11 @@ struct gen_pool *dev_get_gen_pool(struct device *dev)
616 return NULL; 616 return NULL;
617 return *p; 617 return *p;
618} 618}
619EXPORT_SYMBOL_GPL(dev_get_gen_pool); 619EXPORT_SYMBOL_GPL(gen_pool_get);
620 620
621#ifdef CONFIG_OF 621#ifdef CONFIG_OF
622/** 622/**
623 * of_get_named_gen_pool - find a pool by phandle property 623 * of_gen_pool_get - find a pool by phandle property
624 * @np: device node 624 * @np: device node
625 * @propname: property name containing phandle(s) 625 * @propname: property name containing phandle(s)
626 * @index: index into the phandle array 626 * @index: index into the phandle array
@@ -629,7 +629,7 @@ EXPORT_SYMBOL_GPL(dev_get_gen_pool);
629 * address of the device tree node pointed at by the phandle property, 629 * address of the device tree node pointed at by the phandle property,
630 * or NULL if not found. 630 * or NULL if not found.
631 */ 631 */
632struct gen_pool *of_get_named_gen_pool(struct device_node *np, 632struct gen_pool *of_gen_pool_get(struct device_node *np,
633 const char *propname, int index) 633 const char *propname, int index)
634{ 634{
635 struct platform_device *pdev; 635 struct platform_device *pdev;
@@ -642,7 +642,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
642 of_node_put(np_pool); 642 of_node_put(np_pool);
643 if (!pdev) 643 if (!pdev)
644 return NULL; 644 return NULL;
645 return dev_get_gen_pool(&pdev->dev); 645 return gen_pool_get(&pdev->dev);
646} 646}
647EXPORT_SYMBOL_GPL(of_get_named_gen_pool); 647EXPORT_SYMBOL_GPL(of_gen_pool_get);
648#endif /* CONFIG_OF */ 648#endif /* CONFIG_OF */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 99fbc2f238c4..d105a9f56878 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -650,9 +650,8 @@ EXPORT_SYMBOL(sg_miter_stop);
650 * Returns the number of copied bytes. 650 * Returns the number of copied bytes.
651 * 651 *
652 **/ 652 **/
653static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, 653size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
654 void *buf, size_t buflen, off_t skip, 654 size_t buflen, off_t skip, bool to_buffer)
655 bool to_buffer)
656{ 655{
657 unsigned int offset = 0; 656 unsigned int offset = 0;
658 struct sg_mapping_iter miter; 657 struct sg_mapping_iter miter;
@@ -689,6 +688,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
689 local_irq_restore(flags); 688 local_irq_restore(flags);
690 return offset; 689 return offset;
691} 690}
691EXPORT_SYMBOL(sg_copy_buffer);
692 692
693/** 693/**
694 * sg_copy_from_buffer - Copy from a linear buffer to an SG list 694 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
@@ -701,9 +701,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
701 * 701 *
702 **/ 702 **/
703size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, 703size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
704 void *buf, size_t buflen) 704 const void *buf, size_t buflen)
705{ 705{
706 return sg_copy_buffer(sgl, nents, buf, buflen, 0, false); 706 return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
707} 707}
708EXPORT_SYMBOL(sg_copy_from_buffer); 708EXPORT_SYMBOL(sg_copy_from_buffer);
709 709
@@ -729,16 +729,16 @@ EXPORT_SYMBOL(sg_copy_to_buffer);
729 * @sgl: The SG list 729 * @sgl: The SG list
730 * @nents: Number of SG entries 730 * @nents: Number of SG entries
731 * @buf: Where to copy from 731 * @buf: Where to copy from
732 * @skip: Number of bytes to skip before copying
733 * @buflen: The number of bytes to copy 732 * @buflen: The number of bytes to copy
733 * @skip: Number of bytes to skip before copying
734 * 734 *
735 * Returns the number of copied bytes. 735 * Returns the number of copied bytes.
736 * 736 *
737 **/ 737 **/
738size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, 738size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
739 void *buf, size_t buflen, off_t skip) 739 const void *buf, size_t buflen, off_t skip)
740{ 740{
741 return sg_copy_buffer(sgl, nents, buf, buflen, skip, false); 741 return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
742} 742}
743EXPORT_SYMBOL(sg_pcopy_from_buffer); 743EXPORT_SYMBOL(sg_pcopy_from_buffer);
744 744
@@ -747,8 +747,8 @@ EXPORT_SYMBOL(sg_pcopy_from_buffer);
747 * @sgl: The SG list 747 * @sgl: The SG list
748 * @nents: Number of SG entries 748 * @nents: Number of SG entries
749 * @buf: Where to copy to 749 * @buf: Where to copy to
750 * @skip: Number of bytes to skip before copying
751 * @buflen: The number of bytes to copy 750 * @buflen: The number of bytes to copy
751 * @skip: Number of bytes to skip before copying
752 * 752 *
753 * Returns the number of copied bytes. 753 * Returns the number of copied bytes.
754 * 754 *
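With sg_copy_buffer() exported and the from-buffer variants made const-correct, callers can feed a const blob into an SG list without casting. A minimal sketch using a one-entry table; the names and sizes are illustrative:

	struct scatterlist sg;
	static const char blob[] = "example payload";
	char dst[64];

	sg_init_one(&sg, dst, sizeof(dst));
	/* const-correct at the call site since this change; no cast needed */
	sg_copy_from_buffer(&sg, 1, blob, sizeof(blob));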
diff --git a/mm/Kconfig b/mm/Kconfig
index c180af880ed5..e79de2bd12cd 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -636,3 +636,21 @@ config MAX_STACK_SIZE_MB
636 changed to a smaller value in which case that is used. 636 changed to a smaller value in which case that is used.
637 637
638 A sane initial value is 80 MB. 638 A sane initial value is 80 MB.
639
640# For architectures that support deferred memory initialisation
641config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
642 bool
643
644config DEFERRED_STRUCT_PAGE_INIT
645 bool "Defer initialisation of struct pages to kswapd"
646 default n
647 depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
648 depends on MEMORY_HOTPLUG
649 help
650 Ordinarily all struct pages are initialised during early boot in a
651 single thread. On very large machines this can take a considerable
652 amount of time. If this option is set, large machines will bring up
653 a subset of memmap at boot and then initialise the rest in parallel
654 when kswapd starts. This has a potential performance impact on
 655	 processes running early in the lifetime of the system until kswapd
656 finishes the initialisation.
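A config fragment that enables the feature, assuming the architecture selects the non-user-visible gate symbol; both dependencies above are hard requirements:

	CONFIG_MEMORY_HOTPLUG=y
	# ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT is selected by the arch
	CONFIG_DEFERRED_STRUCT_PAGE_INIT=y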
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 477be696511d..a23dd1934654 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -164,7 +164,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
164 end = PFN_DOWN(physaddr + size); 164 end = PFN_DOWN(physaddr + size);
165 165
166 for (; cursor < end; cursor++) { 166 for (; cursor < end; cursor++) {
167 __free_pages_bootmem(pfn_to_page(cursor), 0); 167 __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
168 totalram_pages++; 168 totalram_pages++;
169 } 169 }
170} 170}
@@ -172,7 +172,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
172static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) 172static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
173{ 173{
174 struct page *page; 174 struct page *page;
175 unsigned long *map, start, end, pages, count = 0; 175 unsigned long *map, start, end, pages, cur, count = 0;
176 176
177 if (!bdata->node_bootmem_map) 177 if (!bdata->node_bootmem_map)
178 return 0; 178 return 0;
@@ -210,17 +210,17 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
210 if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) { 210 if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
211 int order = ilog2(BITS_PER_LONG); 211 int order = ilog2(BITS_PER_LONG);
212 212
213 __free_pages_bootmem(pfn_to_page(start), order); 213 __free_pages_bootmem(pfn_to_page(start), start, order);
214 count += BITS_PER_LONG; 214 count += BITS_PER_LONG;
215 start += BITS_PER_LONG; 215 start += BITS_PER_LONG;
216 } else { 216 } else {
217 unsigned long cur = start; 217 cur = start;
218 218
219 start = ALIGN(start + 1, BITS_PER_LONG); 219 start = ALIGN(start + 1, BITS_PER_LONG);
220 while (vec && cur != start) { 220 while (vec && cur != start) {
221 if (vec & 1) { 221 if (vec & 1) {
222 page = pfn_to_page(cur); 222 page = pfn_to_page(cur);
223 __free_pages_bootmem(page, 0); 223 __free_pages_bootmem(page, cur, 0);
224 count++; 224 count++;
225 } 225 }
226 vec >>= 1; 226 vec >>= 1;
@@ -229,12 +229,13 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
229 } 229 }
230 } 230 }
231 231
232 cur = bdata->node_min_pfn;
232 page = virt_to_page(bdata->node_bootmem_map); 233 page = virt_to_page(bdata->node_bootmem_map);
233 pages = bdata->node_low_pfn - bdata->node_min_pfn; 234 pages = bdata->node_low_pfn - bdata->node_min_pfn;
234 pages = bootmem_bootmap_pages(pages); 235 pages = bootmem_bootmap_pages(pages);
235 count += pages; 236 count += pages;
236 while (pages--) 237 while (pages--)
237 __free_pages_bootmem(page++, 0); 238 __free_pages_bootmem(page++, cur++, 0);
238 239
239 bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); 240 bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
240 241
diff --git a/mm/internal.h b/mm/internal.h
index a25e359a4039..36b23f1e2ca6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -155,7 +155,8 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
155} 155}
156 156
157extern int __isolate_free_page(struct page *page, unsigned int order); 157extern int __isolate_free_page(struct page *page, unsigned int order);
158extern void __free_pages_bootmem(struct page *page, unsigned int order); 158extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
159 unsigned int order);
159extern void prep_compound_page(struct page *page, unsigned long order); 160extern void prep_compound_page(struct page *page, unsigned long order);
160#ifdef CONFIG_MEMORY_FAILURE 161#ifdef CONFIG_MEMORY_FAILURE
161extern bool is_free_buddy_page(struct page *page); 162extern bool is_free_buddy_page(struct page *page);
@@ -361,10 +362,7 @@ do { \
361} while (0) 362} while (0)
362 363
363extern void mminit_verify_pageflags_layout(void); 364extern void mminit_verify_pageflags_layout(void);
364extern void mminit_verify_page_links(struct page *page,
365 enum zone_type zone, unsigned long nid, unsigned long pfn);
366extern void mminit_verify_zonelist(void); 365extern void mminit_verify_zonelist(void);
367
368#else 366#else
369 367
370static inline void mminit_dprintk(enum mminit_level level, 368static inline void mminit_dprintk(enum mminit_level level,
@@ -376,11 +374,6 @@ static inline void mminit_verify_pageflags_layout(void)
376{ 374{
377} 375}
378 376
379static inline void mminit_verify_page_links(struct page *page,
380 enum zone_type zone, unsigned long nid, unsigned long pfn)
381{
382}
383
384static inline void mminit_verify_zonelist(void) 377static inline void mminit_verify_zonelist(void)
385{ 378{
386} 379}
diff --git a/mm/memblock.c b/mm/memblock.c
index 1b444c730846..87108e77e476 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -820,6 +820,38 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
820 820
821 821
822/** 822/**
 823	 * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
824 * @idx: pointer to u64 loop variable
825 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
826 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
827 *
828 * Iterate over all reserved memory regions.
829 */
830void __init_memblock __next_reserved_mem_region(u64 *idx,
831 phys_addr_t *out_start,
832 phys_addr_t *out_end)
833{
834 struct memblock_type *rsv = &memblock.reserved;
835
836 if (*idx >= 0 && *idx < rsv->cnt) {
837 struct memblock_region *r = &rsv->regions[*idx];
838 phys_addr_t base = r->base;
839 phys_addr_t size = r->size;
840
841 if (out_start)
842 *out_start = base;
843 if (out_end)
844 *out_end = base + size - 1;
845
846 *idx += 1;
847 return;
848 }
849
850 /* signal end of iteration */
851 *idx = ULLONG_MAX;
852}
853
854/**
 823	 * __next_mem_range - next function for for_each_free_mem_range() etc.	 855	 * __next_mem_range - next function for for_each_free_mem_range() etc.
824 * @idx: pointer to u64 loop variable 856 * @idx: pointer to u64 loop variable
825 * @nid: node selector, %NUMA_NO_NODE for all nodes 857 * @nid: node selector, %NUMA_NO_NODE for all nodes
@@ -1387,7 +1419,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1387 end = PFN_DOWN(base + size); 1419 end = PFN_DOWN(base + size);
1388 1420
1389 for (; cursor < end; cursor++) { 1421 for (; cursor < end; cursor++) {
1390 __free_pages_bootmem(pfn_to_page(cursor), 0); 1422 __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
1391 totalram_pages++; 1423 totalram_pages++;
1392 } 1424 }
1393} 1425}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 5f420f7fafa1..fdadf918de76 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -11,6 +11,7 @@
11#include <linux/export.h> 11#include <linux/export.h>
12#include <linux/memory.h> 12#include <linux/memory.h>
13#include <linux/notifier.h> 13#include <linux/notifier.h>
14#include <linux/sched.h>
14#include "internal.h" 15#include "internal.h"
15 16
16#ifdef CONFIG_DEBUG_MEMORY_INIT 17#ifdef CONFIG_DEBUG_MEMORY_INIT
@@ -130,14 +131,6 @@ void __init mminit_verify_pageflags_layout(void)
130 BUG_ON(or_mask != add_mask); 131 BUG_ON(or_mask != add_mask);
131} 132}
132 133
133void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone,
134 unsigned long nid, unsigned long pfn)
135{
136 BUG_ON(page_to_nid(page) != nid);
137 BUG_ON(page_zonenum(page) != zone);
138 BUG_ON(page_to_pfn(page) != pfn);
139}
140
141static __init int set_mminit_loglevel(char *str) 134static __init int set_mminit_loglevel(char *str)
142{ 135{
143 get_option(&str, &mminit_loglevel); 136 get_option(&str, &mminit_loglevel);
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 5258386fa1be..e57cf24babd6 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -86,7 +86,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
86 end = PFN_DOWN(addr + size); 86 end = PFN_DOWN(addr + size);
87 87
88 for (; cursor < end; cursor++) { 88 for (; cursor < end; cursor++) {
89 __free_pages_bootmem(pfn_to_page(cursor), 0); 89 __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
90 totalram_pages++; 90 totalram_pages++;
91 } 91 }
92} 92}
@@ -101,7 +101,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
101 while (start + (1UL << order) > end) 101 while (start + (1UL << order) > end)
102 order--; 102 order--;
103 103
104 __free_pages_bootmem(pfn_to_page(start), order); 104 __free_pages_bootmem(pfn_to_page(start), start, order);
105 105
106 start += (1UL << order); 106 start += (1UL << order);
107 } 107 }
@@ -130,6 +130,9 @@ static unsigned long __init free_low_memory_core_early(void)
130 130
131 memblock_clear_hotplug(0, -1); 131 memblock_clear_hotplug(0, -1);
132 132
133 for_each_reserved_mem_region(i, &start, &end)
134 reserve_bootmem_region(start, end);
135
133 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, 136 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
134 NULL) 137 NULL)
135 count += __free_memory_core(start, end); 138 count += __free_memory_core(start, end);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5e6fa06f2784..506eac8b38af 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -18,6 +18,7 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/rwsem.h>
21#include <linux/pagemap.h> 22#include <linux/pagemap.h>
22#include <linux/jiffies.h> 23#include <linux/jiffies.h>
23#include <linux/bootmem.h> 24#include <linux/bootmem.h>
@@ -61,6 +62,7 @@
61#include <linux/hugetlb.h> 62#include <linux/hugetlb.h>
62#include <linux/sched/rt.h> 63#include <linux/sched/rt.h>
63#include <linux/page_owner.h> 64#include <linux/page_owner.h>
65#include <linux/kthread.h>
64 66
65#include <asm/sections.h> 67#include <asm/sections.h>
66#include <asm/tlbflush.h> 68#include <asm/tlbflush.h>
@@ -235,6 +237,77 @@ EXPORT_SYMBOL(nr_online_nodes);
235 237
236int page_group_by_mobility_disabled __read_mostly; 238int page_group_by_mobility_disabled __read_mostly;
237 239
240#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
241static inline void reset_deferred_meminit(pg_data_t *pgdat)
242{
243 pgdat->first_deferred_pfn = ULONG_MAX;
244}
245
246/* Returns true if the struct page for the pfn is uninitialised */
247static inline bool __meminit early_page_uninitialised(unsigned long pfn)
248{
249 int nid = early_pfn_to_nid(pfn);
250
251 if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
252 return true;
253
254 return false;
255}
256
257static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
258{
259 if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
260 return true;
261
262 return false;
263}
264
265/*
266 * Returns false when the remaining initialisation should be deferred until
267 * later in the boot cycle when it can be parallelised.
268 */
269static inline bool update_defer_init(pg_data_t *pgdat,
270 unsigned long pfn, unsigned long zone_end,
271 unsigned long *nr_initialised)
272{
 273	 /* Always populate low zones for address-constrained allocations */
274 if (zone_end < pgdat_end_pfn(pgdat))
275 return true;
276
277 /* Initialise at least 2G of the highest zone */
278 (*nr_initialised)++;
279 if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
280 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
281 pgdat->first_deferred_pfn = pfn;
282 return false;
283 }
284
285 return true;
286}
287#else
288static inline void reset_deferred_meminit(pg_data_t *pgdat)
289{
290}
291
292static inline bool early_page_uninitialised(unsigned long pfn)
293{
294 return false;
295}
296
297static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
298{
299 return false;
300}
301
302static inline bool update_defer_init(pg_data_t *pgdat,
303 unsigned long pfn, unsigned long zone_end,
304 unsigned long *nr_initialised)
305{
306 return true;
307}
308#endif
309
310
238void set_pageblock_migratetype(struct page *page, int migratetype) 311void set_pageblock_migratetype(struct page *page, int migratetype)
239{ 312{
240 if (unlikely(page_group_by_mobility_disabled && 313 if (unlikely(page_group_by_mobility_disabled &&
@@ -764,6 +837,75 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
764 return 0; 837 return 0;
765} 838}
766 839
840static void __meminit __init_single_page(struct page *page, unsigned long pfn,
841 unsigned long zone, int nid)
842{
843 set_page_links(page, zone, nid, pfn);
844 init_page_count(page);
845 page_mapcount_reset(page);
846 page_cpupid_reset_last(page);
847
848 INIT_LIST_HEAD(&page->lru);
849#ifdef WANT_PAGE_VIRTUAL
850 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
851 if (!is_highmem_idx(zone))
852 set_page_address(page, __va(pfn << PAGE_SHIFT));
853#endif
854}
855
856static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
857 int nid)
858{
859 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
860}
861
862#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
863static void init_reserved_page(unsigned long pfn)
864{
865 pg_data_t *pgdat;
866 int nid, zid;
867
868 if (!early_page_uninitialised(pfn))
869 return;
870
871 nid = early_pfn_to_nid(pfn);
872 pgdat = NODE_DATA(nid);
873
874 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
875 struct zone *zone = &pgdat->node_zones[zid];
876
877 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
878 break;
879 }
880 __init_single_pfn(pfn, zid, nid);
881}
882#else
883static inline void init_reserved_page(unsigned long pfn)
884{
885}
886#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
887
888/*
889 * Initialised pages do not have PageReserved set. This function is
890 * called for each range allocated by the bootmem allocator and
891 * marks the pages PageReserved. The remaining valid pages are later
892 * sent to the buddy page allocator.
893 */
894void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
895{
896 unsigned long start_pfn = PFN_DOWN(start);
897 unsigned long end_pfn = PFN_UP(end);
898
899 for (; start_pfn < end_pfn; start_pfn++) {
900 if (pfn_valid(start_pfn)) {
901 struct page *page = pfn_to_page(start_pfn);
902
903 init_reserved_page(start_pfn);
904 SetPageReserved(page);
905 }
906 }
907}
908
767static bool free_pages_prepare(struct page *page, unsigned int order) 909static bool free_pages_prepare(struct page *page, unsigned int order)
768{ 910{
769 bool compound = PageCompound(page); 911 bool compound = PageCompound(page);
@@ -818,7 +960,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
818 local_irq_restore(flags); 960 local_irq_restore(flags);
819} 961}
820 962
821void __init __free_pages_bootmem(struct page *page, unsigned int order) 963static void __init __free_pages_boot_core(struct page *page,
964 unsigned long pfn, unsigned int order)
822{ 965{
823 unsigned int nr_pages = 1 << order; 966 unsigned int nr_pages = 1 << order;
824 struct page *p = page; 967 struct page *p = page;
@@ -838,6 +981,223 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
838 __free_pages(page, order); 981 __free_pages(page, order);
839} 982}
840 983
984#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
985 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
986/* Only safe to use early in boot when initialisation is single-threaded */
987static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
988
989int __meminit early_pfn_to_nid(unsigned long pfn)
990{
991 int nid;
992
993 /* The system will behave unpredictably otherwise */
994 BUG_ON(system_state != SYSTEM_BOOTING);
995
996 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
997 if (nid >= 0)
998 return nid;
999 /* just returns 0 */
1000 return 0;
1001}
1002#endif
1003
1004#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1005static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1006 struct mminit_pfnnid_cache *state)
1007{
1008 int nid;
1009
1010 nid = __early_pfn_to_nid(pfn, state);
1011 if (nid >= 0 && nid != node)
1012 return false;
1013 return true;
1014}
1015
1016/* Only safe to use early in boot when initialisation is single-threaded */
1017static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1018{
1019 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1020}
1021
1022#else
1023
1024static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1025{
1026 return true;
1027}
1028static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1029 struct mminit_pfnnid_cache *state)
1030{
1031 return true;
1032}
1033#endif
1034
1035
1036void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
1037 unsigned int order)
1038{
1039 if (early_page_uninitialised(pfn))
1040 return;
1041 return __free_pages_boot_core(page, pfn, order);
1042}
1043
1044#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1045static void __init deferred_free_range(struct page *page,
1046 unsigned long pfn, int nr_pages)
1047{
1048 int i;
1049
1050 if (!page)
1051 return;
1052
1053 /* Free a large naturally-aligned chunk if possible */
1054 if (nr_pages == MAX_ORDER_NR_PAGES &&
1055 (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
1056 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1057 __free_pages_boot_core(page, pfn, MAX_ORDER-1);
1058 return;
1059 }
1060
1061 for (i = 0; i < nr_pages; i++, page++, pfn++)
1062 __free_pages_boot_core(page, pfn, 0);
1063}
1064
1065static __initdata DECLARE_RWSEM(pgdat_init_rwsem);
1066
1067/* Initialise remaining memory on a node */
1068static int __init deferred_init_memmap(void *data)
1069{
1070 pg_data_t *pgdat = data;
1071 int nid = pgdat->node_id;
1072 struct mminit_pfnnid_cache nid_init_state = { };
1073 unsigned long start = jiffies;
1074 unsigned long nr_pages = 0;
1075 unsigned long walk_start, walk_end;
1076 int i, zid;
1077 struct zone *zone;
1078 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
1079 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1080
1081 if (first_init_pfn == ULONG_MAX) {
1082 up_read(&pgdat_init_rwsem);
1083 return 0;
1084 }
1085
1086 /* Bind memory initialisation thread to a local node if possible */
1087 if (!cpumask_empty(cpumask))
1088 set_cpus_allowed_ptr(current, cpumask);
1089
1090 /* Sanity check boundaries */
1091 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1092 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1093 pgdat->first_deferred_pfn = ULONG_MAX;
1094
1095 /* Only the highest zone is deferred so find it */
1096 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1097 zone = pgdat->node_zones + zid;
1098 if (first_init_pfn < zone_end_pfn(zone))
1099 break;
1100 }
1101
1102 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1103 unsigned long pfn, end_pfn;
1104 struct page *page = NULL;
1105 struct page *free_base_page = NULL;
1106 unsigned long free_base_pfn = 0;
1107 int nr_to_free = 0;
1108
1109 end_pfn = min(walk_end, zone_end_pfn(zone));
1110 pfn = first_init_pfn;
1111 if (pfn < walk_start)
1112 pfn = walk_start;
1113 if (pfn < zone->zone_start_pfn)
1114 pfn = zone->zone_start_pfn;
1115
1116 for (; pfn < end_pfn; pfn++) {
1117 if (!pfn_valid_within(pfn))
1118 goto free_range;
1119
1120 /*
1121 * Ensure pfn_valid is checked every
1122 * MAX_ORDER_NR_PAGES for memory holes
1123 */
1124 if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
1125 if (!pfn_valid(pfn)) {
1126 page = NULL;
1127 goto free_range;
1128 }
1129 }
1130
1131 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1132 page = NULL;
1133 goto free_range;
1134 }
1135
1136 /* Minimise pfn page lookups and scheduler checks */
1137 if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
1138 page++;
1139 } else {
1140 nr_pages += nr_to_free;
1141 deferred_free_range(free_base_page,
1142 free_base_pfn, nr_to_free);
1143 free_base_page = NULL;
1144 free_base_pfn = nr_to_free = 0;
1145
1146 page = pfn_to_page(pfn);
1147 cond_resched();
1148 }
1149
1150 if (page->flags) {
1151 VM_BUG_ON(page_zone(page) != zone);
1152 goto free_range;
1153 }
1154
1155 __init_single_page(page, pfn, zid, nid);
1156 if (!free_base_page) {
1157 free_base_page = page;
1158 free_base_pfn = pfn;
1159 nr_to_free = 0;
1160 }
1161 nr_to_free++;
1162
1163 /* Where possible, batch up pages for a single free */
1164 continue;
1165free_range:
1166 /* Free the current block of pages to allocator */
1167 nr_pages += nr_to_free;
1168 deferred_free_range(free_base_page, free_base_pfn,
1169 nr_to_free);
1170 free_base_page = NULL;
1171 free_base_pfn = nr_to_free = 0;
1172 }
1173
1174 first_init_pfn = max(end_pfn, first_init_pfn);
1175 }
1176
1177 /* Sanity check that the next zone really is unpopulated */
1178 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1179
1180 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1181 jiffies_to_msecs(jiffies - start));
1182 up_read(&pgdat_init_rwsem);
1183 return 0;
1184}
1185
1186void __init page_alloc_init_late(void)
1187{
1188 int nid;
1189
1190 for_each_node_state(nid, N_MEMORY) {
1191 down_read(&pgdat_init_rwsem);
1192 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1193 }
1194
1195 /* Block until all are initialised */
1196 down_write(&pgdat_init_rwsem);
1197 up_write(&pgdat_init_rwsem);
1198}
1199#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1200
841#ifdef CONFIG_CMA 1201#ifdef CONFIG_CMA
842/* Free whole pageblock and set its migration type to MIGRATE_CMA. */ 1202/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
843void __init init_cma_reserved_pageblock(struct page *page) 1203void __init init_cma_reserved_pageblock(struct page *page)
@@ -4150,6 +4510,9 @@ static void setup_zone_migrate_reserve(struct zone *zone)
4150 zone->nr_migrate_reserve_block = reserve; 4510 zone->nr_migrate_reserve_block = reserve;
4151 4511
4152 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 4512 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
4513 if (!early_page_nid_uninitialised(pfn, zone_to_nid(zone)))
4514 return;
4515
4153 if (!pfn_valid(pfn)) 4516 if (!pfn_valid(pfn))
4154 continue; 4517 continue;
4155 page = pfn_to_page(pfn); 4518 page = pfn_to_page(pfn);
@@ -4212,15 +4575,16 @@ static void setup_zone_migrate_reserve(struct zone *zone)
4212void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 4575void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4213 unsigned long start_pfn, enum memmap_context context) 4576 unsigned long start_pfn, enum memmap_context context)
4214{ 4577{
4215 struct page *page; 4578 pg_data_t *pgdat = NODE_DATA(nid);
4216 unsigned long end_pfn = start_pfn + size; 4579 unsigned long end_pfn = start_pfn + size;
4217 unsigned long pfn; 4580 unsigned long pfn;
4218 struct zone *z; 4581 struct zone *z;
4582 unsigned long nr_initialised = 0;
4219 4583
4220 if (highest_memmap_pfn < end_pfn - 1) 4584 if (highest_memmap_pfn < end_pfn - 1)
4221 highest_memmap_pfn = end_pfn - 1; 4585 highest_memmap_pfn = end_pfn - 1;
4222 4586
4223 z = &NODE_DATA(nid)->node_zones[zone]; 4587 z = &pgdat->node_zones[zone];
4224 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 4588 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4225 /* 4589 /*
4226 * There can be holes in boot-time mem_map[]s 4590 * There can be holes in boot-time mem_map[]s
@@ -4232,14 +4596,11 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4232 continue; 4596 continue;
4233 if (!early_pfn_in_nid(pfn, nid)) 4597 if (!early_pfn_in_nid(pfn, nid))
4234 continue; 4598 continue;
4599 if (!update_defer_init(pgdat, pfn, end_pfn,
4600 &nr_initialised))
4601 break;
4235 } 4602 }
4236 page = pfn_to_page(pfn); 4603
4237 set_page_links(page, zone, nid, pfn);
4238 mminit_verify_page_links(page, zone, nid, pfn);
4239 init_page_count(page);
4240 page_mapcount_reset(page);
4241 page_cpupid_reset_last(page);
4242 SetPageReserved(page);
4243 /* 4604 /*
4244 * Mark the block movable so that blocks are reserved for 4605 * Mark the block movable so that blocks are reserved for
4245 * movable at startup. This will force kernel allocations 4606 * movable at startup. This will force kernel allocations
@@ -4254,17 +4615,14 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4254 * check here not to call set_pageblock_migratetype() against 4615 * check here not to call set_pageblock_migratetype() against
4255 * pfn out of zone. 4616 * pfn out of zone.
4256 */ 4617 */
4257 if ((z->zone_start_pfn <= pfn) 4618 if (!(pfn & (pageblock_nr_pages - 1))) {
4258 && (pfn < zone_end_pfn(z)) 4619 struct page *page = pfn_to_page(pfn);
4259 && !(pfn & (pageblock_nr_pages - 1)))
4260 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4261 4620
4262 INIT_LIST_HEAD(&page->lru); 4621 __init_single_page(page, pfn, zone, nid);
4263#ifdef WANT_PAGE_VIRTUAL 4622 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4264 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 4623 } else {
4265 if (!is_highmem_idx(zone)) 4624 __init_single_pfn(pfn, zone, nid);
4266 set_page_address(page, __va(pfn << PAGE_SHIFT)); 4625 }
4267#endif
4268 } 4626 }
4269} 4627}
4270 4628
@@ -4522,57 +4880,30 @@ int __meminit init_currently_empty_zone(struct zone *zone,
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+
 /*
  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
  */
-int __meminit __early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn,
+					struct mminit_pfnnid_cache *state)
 {
 	unsigned long start_pfn, end_pfn;
 	int nid;
-	/*
-	 * NOTE: The following SMP-unsafe globals are only used early in boot
-	 * when the kernel is running single-threaded.
-	 */
-	static unsigned long __meminitdata last_start_pfn, last_end_pfn;
-	static int __meminitdata last_nid;
 
-	if (last_start_pfn <= pfn && pfn < last_end_pfn)
-		return last_nid;
+	if (state->last_start <= pfn && pfn < state->last_end)
+		return state->last_nid;
 
 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
 	if (nid != -1) {
-		last_start_pfn = start_pfn;
-		last_end_pfn = end_pfn;
-		last_nid = nid;
+		state->last_start = start_pfn;
+		state->last_end = end_pfn;
+		state->last_nid = nid;
 	}
 
 	return nid;
 }
 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 
-int __meminit early_pfn_to_nid(unsigned long pfn)
-{
-	int nid;
-
-	nid = __early_pfn_to_nid(pfn);
-	if (nid >= 0)
-		return nid;
-	/* just returns 0 */
-	return 0;
-}
-
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
-{
-	int nid;
-
-	nid = __early_pfn_to_nid(pfn);
-	if (nid >= 0 && nid != node)
-		return false;
-	return true;
-}
-#endif
-
 /**
  * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
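Editorial note: the conversion above replaces the SMP-unsafe function-local statics called out in the deleted comment with caller-supplied state (struct mminit_pfnnid_cache), so each caller can keep its own cache rather than relying on single-threaded boot. The lookup itself is a one-entry range memoisation, modelled here in Python (an assumed sketch, not kernel code; search() stands in for memblock_search_pfn_nid()):

    class PfnNidCache(object):
        # Models struct mminit_pfnnid_cache: the last pfn range that matched.
        def __init__(self):
            self.last_start = 0
            self.last_end = 0
            self.last_nid = -1

    def early_pfn_to_nid(pfn, state, search):
        # Fast path: consecutive pfns usually hit the cached range.
        if state.last_start <= pfn < state.last_end:
            return state.last_nid
        nid, start, end = search(pfn)  # stand-in for memblock_search_pfn_nid()
        if nid != -1:
            state.last_start, state.last_end, state.last_nid = start, end, nid
        return nid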
@@ -5090,6 +5421,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 	/* pg_data_t should be reset to zero when it's allocated */
 	WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
 
+	reset_deferred_meminit(pgdat);
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
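Editorial note: reset_deferred_meminit() pairs with the update_defer_init() call added to memmap_init_zone() further up: boot initialises struct pages eagerly only until a per-node budget is spent, after which the loop breaks out and leaves the remainder for later initialisation. A toy sketch of that bookkeeping under those assumed semantics (the real accounting and thresholds live in parts of the patch not shown here):

    def update_defer_init(state, budget):
        # Returns False once the eager-init budget is exhausted, which
        # tells the caller to break out of its memmap-init loop.
        state['nr_initialised'] += 1
        return state['nr_initialised'] <= budget

    state = {'nr_initialised': 0}
    while update_defer_init(state, budget=4):
        pass  # initialise one more page eagerly; the rest are deferred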
diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py
index 3c947f0c5dad..927d0d2a3145 100644
--- a/scripts/gdb/linux/dmesg.py
+++ b/scripts/gdb/linux/dmesg.py
@@ -12,7 +12,6 @@
 #
 
 import gdb
-import string
 
 from linux import utils
 
diff --git a/scripts/gdb/linux/lists.py b/scripts/gdb/linux/lists.py
new file mode 100644
index 000000000000..3a3775bc162b
--- /dev/null
+++ b/scripts/gdb/linux/lists.py
@@ -0,0 +1,92 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+# list tools
+#
+# Copyright (c) Thiebaud Weksteen, 2015
+#
+# Authors:
+#  Thiebaud Weksteen <thiebaud@weksteen.fr>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import gdb
+
+from linux import utils
+
+list_head = utils.CachedType("struct list_head")
+
+
+def list_check(head):
+    nb = 0
+    if (head.type == list_head.get_type().pointer()):
+        head = head.dereference()
+    elif (head.type != list_head.get_type()):
+        raise gdb.GdbError('argument must be of type (struct list_head [*])')
+    c = head
+    try:
+        gdb.write("Starting with: {}\n".format(c))
+    except gdb.MemoryError:
+        gdb.write('head is not accessible\n')
+        return
+    while True:
+        p = c['prev'].dereference()
+        n = c['next'].dereference()
+        try:
+            if p['next'] != c.address:
+                gdb.write('prev.next != current: '
+                          'current@{current_addr}={current} '
+                          'prev@{p_addr}={p}\n'.format(
+                              current_addr=c.address,
+                              current=c,
+                              p_addr=p.address,
+                              p=p,
+                          ))
+                return
+        except gdb.MemoryError:
+            gdb.write('prev is not accessible: '
+                      'current@{current_addr}={current}\n'.format(
+                          current_addr=c.address,
+                          current=c
+                      ))
+            return
+        try:
+            if n['prev'] != c.address:
+                gdb.write('next.prev != current: '
+                          'current@{current_addr}={current} '
+                          'next@{n_addr}={n}\n'.format(
+                              current_addr=c.address,
+                              current=c,
+                              n_addr=n.address,
+                              n=n,
+                          ))
+                return
+        except gdb.MemoryError:
+            gdb.write('next is not accessible: '
+                      'current@{current_addr}={current}\n'.format(
+                          current_addr=c.address,
+                          current=c
+                      ))
+            return
+        c = n
+        nb += 1
+        if c == head:
+            gdb.write("list is consistent: {} node(s)\n".format(nb))
+            return
+
+
+class LxListChk(gdb.Command):
+    """Verify a list consistency"""
+
+    def __init__(self):
+        super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
+                                        gdb.COMPLETE_EXPRESSION)
+
+    def invoke(self, arg, from_tty):
+        argv = gdb.string_to_argv(arg)
+        if len(argv) != 1:
+            raise gdb.GdbError("lx-list-check takes one argument")
+        list_check(gdb.parse_and_eval(argv[0]))
+
+LxListChk()
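Editorial note: lx-list-check takes exactly one argument, an expression that evaluates to a struct list_head (or a pointer to one), and walks the list until it either returns to the head or reports the first broken prev/next link. A hypothetical session against the kernel's modules list head (the output shape follows the gdb.write() calls above; the struct rendering and node count are illustrative):

    (gdb) lx-list-check &modules
    Starting with: {next = 0x..., prev = 0x...}
    list is consistent: 3 node(s)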
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index cd5bea965d4e..627750cb420d 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -14,9 +14,8 @@
 import gdb
 import os
 import re
-import string
 
-from linux import modules, utils
+from linux import modules
 
 
 if hasattr(gdb, 'Breakpoint'):
@@ -97,7 +96,7 @@ lx-symbols command."""
            return ""
        attrs = sect_attrs['attrs']
        section_name_to_address = {
-            attrs[n]['name'].string() : attrs[n]['address']
+            attrs[n]['name'].string(): attrs[n]['address']
            for n in range(int(sect_attrs['nsections']))}
        args = []
        for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
@@ -124,7 +123,7 @@ lx-symbols command."""
                addr=module_addr,
                sections=self._section_arguments(module))
            gdb.execute(cmdline, to_string=True)
-            if not module_name in self.loaded_modules:
+            if module_name not in self.loaded_modules:
                self.loaded_modules.append(module_name)
        else:
            gdb.write("no module object found for '{0}'\n".format(module_name))
@@ -164,7 +163,7 @@ lx-symbols command."""
        self.load_all_symbols()
 
        if hasattr(gdb, 'Breakpoint'):
-            if not self.breakpoint is None:
+            if self.breakpoint is not None:
                self.breakpoint.delete()
                self.breakpoint = None
            self.breakpoint = LoadModuleBreakpoint(
diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
index e2037d9bb7eb..862a4ae24d49 100644
--- a/scripts/gdb/linux/tasks.py
+++ b/scripts/gdb/linux/tasks.py
@@ -18,8 +18,8 @@ from linux import utils
 
 task_type = utils.CachedType("struct task_struct")
 
+
 def task_lists():
-    global task_type
     task_ptr_type = task_type.get_type().pointer()
     init_task = gdb.parse_and_eval("init_task").address
     t = g = init_task
@@ -38,6 +38,7 @@ def task_lists():
         if t == init_task:
             return
 
+
 def get_task_by_pid(pid):
     for task in task_lists():
         if int(task['pid']) == pid:
@@ -65,13 +66,28 @@ return that task_struct variable which PID matches."""
 LxTaskByPidFunc()
 
 
+class LxPs(gdb.Command):
+    """Dump Linux tasks."""
+
+    def __init__(self):
+        super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA)
+
+    def invoke(self, arg, from_tty):
+        for task in task_lists():
+            gdb.write("{address} {pid} {comm}\n".format(
+                address=task,
+                pid=task["pid"],
+                comm=task["comm"].string()))
+
+LxPs()
+
+
 thread_info_type = utils.CachedType("struct thread_info")
 
 ia64_task_size = None
 
 
 def get_thread_info(task):
-    global thread_info_type
     thread_info_ptr_type = thread_info_type.get_type().pointer()
     if utils.is_target_arch("ia64"):
         global ia64_task_size
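Editorial note: the new lx-ps command walks task_lists() and prints one task per line in the "{address} {pid} {comm}" shape used by invoke() above. A hypothetical session (addresses and PIDs are illustrative):

    (gdb) lx-ps
    0xffff880007a30000 1 init
    0xffff880007a31000 2 kthreadd

The dropped `global task_type` and `global thread_info_type` statements were never needed: the functions only read those module-level names, and Python requires `global` only when assigning to them.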
diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
index 128c306db3ee..0893b326a28b 100644
--- a/scripts/gdb/linux/utils.py
+++ b/scripts/gdb/linux/utils.py
@@ -83,7 +83,7 @@ def get_target_endianness():
    elif "big endian" in endian:
        target_endianness = BIG_ENDIAN
    else:
-        raise gdb.GdgError("unknown endianness '{0}'".format(str(endian)))
+        raise gdb.GdbError("unknown endianness '{0}'".format(str(endian)))
    return target_endianness
 
 
@@ -151,6 +151,6 @@ def get_gdbserver_type():
        gdbserver_type = GDBSERVER_QEMU
    elif probe_kgdb():
        gdbserver_type = GDBSERVER_KGDB
-    if not gdbserver_type is None and hasattr(gdb, 'events'):
+    if gdbserver_type is not None and hasattr(gdb, 'events'):
        gdb.events.exited.connect(exit_handler)
    return gdbserver_type
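Editorial note: both utils.py changes matter beyond style. gdb.GdgError does not exist, so the old raise would itself fail with an AttributeError instead of reporting the unknown endianness; and `not x is None` is merely the unidiomatic spelling of `x is not None`. A minimal illustration of the first point, runnable only inside a gdb session where the gdb module is importable:

    import gdb

    try:
        raise gdb.GdgError("boom")   # typo: AttributeError, no such class
    except AttributeError:
        raise gdb.GdbError("boom")   # the exception class the code meant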
diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
index 48489285f119..ce82bf5c3943 100644
--- a/scripts/gdb/vmlinux-gdb.py
+++ b/scripts/gdb/vmlinux-gdb.py
@@ -28,3 +28,4 @@ else:
     import linux.dmesg
     import linux.tasks
     import linux.cpus
+    import linux.lists
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 082509eb805d..f05cb6a8cbe0 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -124,7 +124,7 @@ static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
 	dmab->addr = 0;
 
 	if (dev->of_node)
-		pool = of_get_named_gen_pool(dev->of_node, "iram", 0);
+		pool = of_gen_pool_get(dev->of_node, "iram", 0);
 
 	if (!pool)
 		return;