author	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 19:32:31 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 19:32:31 -0500
commit	d8fe4acc88da8fbbe360b6592c9d0abbb85117dc (patch)
tree	e3d5edc0ad3541a1daf37b60c5fe67b6fe347d22
parent	3aeb58ab6216d864821e8dafb248e8d77403f3e9 (diff)
parent	8d3ef556aba2b5b7d8b7144f7be1814d75ea3cc6 (diff)
Merge branch 'akpm' (patch-bomb from Andrew Morton)
Merge patches from Andrew Morton:
 - memstick fixes
 - the rest of MM
 - various misc bits that were awaiting merges from linux-next into
   mainline: seq_file, printk, rtc, completions, w1, softirqs, llist,
   kfifo, hfsplus

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (72 commits)
  cmdline-parser: fix build
  hfsplus: Fix undefined __divdi3 in hfsplus_init_header_node()
  kfifo API type safety
  kfifo: kfifo_copy_{to,from}_user: fix copied bytes calculation
  sound/core/memalloc.c: use gen_pool_dma_alloc() to allocate iram buffer
  llists-move-llist_reverse_order-from-raid5-to-llistc-fix
  llists: move llist_reverse_order from raid5 to llist.c
  kernel: fix generic_exec_single indentation
  kernel-provide-a-__smp_call_function_single-stub-for-config_smp-fix
  kernel: provide a __smp_call_function_single stub for !CONFIG_SMP
  kernel: remove CONFIG_USE_GENERIC_SMP_HELPERS
  revert "softirq: Add support for triggering softirq work on softirqs"
  drivers/w1/masters/w1-gpio.c: use dev_get_platdata()
  sched: remove INIT_COMPLETION
  tree-wide: use reinit_completion instead of INIT_COMPLETION
  sched: replace INIT_COMPLETION with reinit_completion
  drivers/rtc/rtc-hid-sensor-time.c: enable HID input processing early
  drivers/rtc/rtc-hid-sensor-time.c: use dev_get_platdata()
  vsprintf: ignore %n again
  seq_file: remove "%n" usage from seq_file users
  ...
-rw-r--r--  Documentation/vm/split_page_table_lock | 94
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/alpha/Kconfig | 1
-rw-r--r--  arch/alpha/include/asm/pgalloc.h | 5
-rw-r--r--  arch/arc/Kconfig | 1
-rw-r--r--  arch/arc/include/asm/pgalloc.h | 11
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/include/asm/pgalloc.h | 12
-rw-r--r--  arch/arm/mach-tegra/apbio.c | 2
-rw-r--r--  arch/arm/mm/fault-armv.c | 6
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 9
-rw-r--r--  arch/avr32/include/asm/pgalloc.h | 5
-rw-r--r--  arch/blackfin/Kconfig | 1
-rw-r--r--  arch/cris/include/asm/pgalloc.h | 7
-rw-r--r--  arch/frv/mm/pgalloc.c | 12
-rw-r--r--  arch/hexagon/Kconfig | 1
-rw-r--r--  arch/hexagon/include/asm/pgalloc.h | 10
-rw-r--r--  arch/ia64/Kconfig | 1
-rw-r--r--  arch/ia64/include/asm/pgalloc.h | 5
-rw-r--r--  arch/m32r/Kconfig | 1
-rw-r--r--  arch/m32r/include/asm/pgalloc.h | 7
-rw-r--r--  arch/m68k/include/asm/mcf_pgalloc.h | 4
-rw-r--r--  arch/m68k/include/asm/motorola_pgalloc.h | 8
-rw-r--r--  arch/m68k/include/asm/sun3_pgalloc.h | 5
-rw-r--r--  arch/metag/Kconfig | 1
-rw-r--r--  arch/metag/include/asm/pgalloc.h | 8
-rw-r--r--  arch/microblaze/include/asm/pgalloc.h | 12
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/include/asm/pgalloc.h | 9
-rw-r--r--  arch/mn10300/Kconfig | 1
-rw-r--r--  arch/mn10300/include/asm/pgalloc.h | 1
-rw-r--r--  arch/mn10300/mm/pgtable.c | 9
-rw-r--r--  arch/openrisc/include/asm/pgalloc.h | 10
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/include/asm/pgalloc.h | 8
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 5
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 5
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 7
-rw-r--r--  arch/powerpc/platforms/powermac/low_i2c.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/suspend.c | 2
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/mm/pgtable.c | 23
-rw-r--r--  arch/score/include/asm/pgalloc.h | 9
-rw-r--r--  arch/sh/Kconfig | 1
-rw-r--r--  arch/sh/include/asm/pgalloc.h | 5
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/sparc/mm/init_64.c | 11
-rw-r--r--  arch/sparc/mm/srmmu.c | 5
-rw-r--r--  arch/sparc/mm/tlb.c | 12
-rw-r--r--  arch/tile/Kconfig | 1
-rw-r--r--  arch/tile/mm/pgtable.c | 6
-rw-r--r--  arch/um/kernel/mem.c | 8
-rw-r--r--  arch/unicore32/include/asm/pgalloc.h | 14
-rw-r--r--  arch/x86/Kconfig | 5
-rw-r--r--  arch/x86/include/asm/pgalloc.h | 11
-rw-r--r--  arch/x86/mm/pgtable.c | 19
-rw-r--r--  arch/x86/xen/mmu.c | 8
-rw-r--r--  arch/xtensa/include/asm/pgalloc.h | 29
-rw-r--r--  arch/xtensa/include/asm/pgtable.h | 3
-rw-r--r--  arch/xtensa/mm/mmu.c | 20
-rw-r--r--  block/blk-mq.c | 4
-rw-r--r--  block/blk-softirq.c | 4
-rw-r--r--  block/blk-sysfs.c | 2
-rw-r--r--  crypto/af_alg.c | 2
-rw-r--r--  crypto/tcrypt.c | 4
-rw-r--r--  crypto/testmgr.c | 12
-rw-r--r--  drivers/ata/libata-eh.c | 4
-rw-r--r--  drivers/base/power/main.c | 4
-rw-r--r--  drivers/block/amiflop.c | 2
-rw-r--r--  drivers/block/cciss.c | 4
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c | 2
-rw-r--r--  drivers/crypto/tegra-aes.c | 2
-rw-r--r--  drivers/firewire/core-transaction.c | 2
-rw-r--r--  drivers/gpu/drm/drm_flip_work.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 2
-rw-r--r--  drivers/hid/hid-wiimote.h | 2
-rw-r--r--  drivers/hwmon/jz4740-hwmon.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-at91.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-bcm2835.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-mxs.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-wmt.c | 4
-rw-r--r--  drivers/iio/adc/ad_sigma_delta.c | 6
-rw-r--r--  drivers/iio/adc/nau7802.c | 2
-rw-r--r--  drivers/iio/industrialio-event.c | 2
-rw-r--r--  drivers/input/touchscreen/cyttsp_core.c | 2
-rw-r--r--  drivers/iommu/arm-smmu.c | 5
-rw-r--r--  drivers/md/dm-crypt.c | 2
-rw-r--r--  drivers/md/raid5.c | 14
-rw-r--r--  drivers/media/platform/blackfin/bfin_capture.c | 2
-rw-r--r--  drivers/media/radio/radio-wl1273.c | 10
-rw-r--r--  drivers/media/radio/si470x/radio-si470x-common.c | 4
-rw-r--r--  drivers/media/rc/iguanair.c | 2
-rw-r--r--  drivers/memstick/core/memstick.c | 2
-rw-r--r--  drivers/memstick/core/ms_block.c | 4
-rw-r--r--  drivers/memstick/core/ms_block.h | 2
-rw-r--r--  drivers/memstick/host/r592.c | 2
-rw-r--r--  drivers/misc/mic/card/mic_virtio.c | 2
-rw-r--r--  drivers/misc/mic/host/mic_boot.c | 2
-rw-r--r--  drivers/misc/ti-st/st_kim.c | 12
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 2
-rw-r--r--  drivers/mtd/nand/r852.c | 2
-rw-r--r--  drivers/mtd/onenand/omap2.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 2
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 2
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 18
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/p2p.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800mmio.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 2
-rw-r--r--  drivers/parport/parport_ip32.c | 4
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 2
-rw-r--r--  drivers/platform/x86/apple-gmux.c | 2
-rw-r--r--  drivers/power/ab8500_fg.c | 4
-rw-r--r--  drivers/power/jz4740-battery.c | 2
-rw-r--r--  drivers/rtc/rtc-hid-sensor-time.c | 13
-rw-r--r--  drivers/spi/spi-bcm2835.c | 2
-rw-r--r--  drivers/spi/spi-clps711x.c | 2
-rw-r--r--  drivers/spi/spi-davinci.c | 2
-rw-r--r--  drivers/spi/spi-fsl-espi.c | 2
-rw-r--r--  drivers/spi/spi-fsl-spi.c | 2
-rw-r--r--  drivers/spi/spi-mpc512x-psc.c | 2
-rw-r--r--  drivers/spi/spi-mxs.c | 2
-rw-r--r--  drivers/spi/spi-s3c64xx.c | 2
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 2
-rw-r--r--  drivers/spi/spi-sirf.c | 4
-rw-r--r--  drivers/spi/spi-tegra114.c | 6
-rw-r--r--  drivers/spi/spi-tegra20-sflash.c | 2
-rw-r--r--  drivers/spi/spi-tegra20-slink.c | 6
-rw-r--r--  drivers/spi/spi-xilinx.c | 2
-rw-r--r--  drivers/spi/spi.c | 2
-rw-r--r--  drivers/staging/iio/adc/mxs-lradc.c | 2
-rw-r--r--  drivers/staging/media/solo6x10/solo6x10-p2m.c | 2
-rw-r--r--  drivers/staging/tidspbridge/core/sync.c | 4
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/sync.h | 2
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv_interface.c | 6
-rw-r--r--  drivers/tty/metag_da.c | 2
-rw-r--r--  drivers/usb/c67x00/c67x00-sched.c | 2
-rw-r--r--  drivers/usb/gadget/f_fs.c | 2
-rw-r--r--  drivers/usb/serial/mos7720.c | 2
-rw-r--r--  drivers/video/exynos/exynos_mipi_dsi_common.c | 4
-rw-r--r--  drivers/video/omap2/displays-new/encoder-tpd12s015.c | 2
-rw-r--r--  drivers/w1/masters/w1-gpio.c | 10
-rw-r--r--  fs/ecryptfs/crypto.c | 2
-rw-r--r--  fs/hfsplus/xattr.c | 9
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/ocfs2/dlmglue.c | 4
-rw-r--r--  fs/proc/consoles.c | 10
-rw-r--r--  fs/proc/meminfo.c | 2
-rw-r--r--  fs/proc/nommu.c | 12
-rw-r--r--  fs/proc/task_mmu.c | 36
-rw-r--r--  fs/proc/task_nommu.c | 19
-rw-r--r--  fs/seq_file.c | 15
-rw-r--r--  include/linux/cmdline-parser.h | 2
-rw-r--r--  include/linux/completion.h | 28
-rw-r--r--  include/linux/huge_mm.h | 17
-rw-r--r--  include/linux/hugetlb.h | 26
-rw-r--r--  include/linux/interrupt.h | 22
-rw-r--r--  include/linux/kfifo.h | 47
-rw-r--r--  include/linux/llist.h | 2
-rw-r--r--  include/linux/lockref.h | 7
-rw-r--r--  include/linux/mm.h | 139
-rw-r--r--  include/linux/mm_types.h | 21
-rw-r--r--  include/linux/seq_file.h | 15
-rw-r--r--  include/linux/smp.h | 16
-rw-r--r--  include/linux/swapops.h | 7
-rw-r--r--  init/main.c | 2
-rw-r--r--  kernel/Kconfig.hz | 2
-rw-r--r--  kernel/bounds.c | 2
-rw-r--r--  kernel/fork.c | 6
-rw-r--r--  kernel/smp.c | 5
-rw-r--r--  kernel/softirq.c | 131
-rw-r--r--  kernel/up.c | 11
-rw-r--r--  lib/Kconfig | 7
-rw-r--r--  lib/kfifo.c | 4
-rw-r--r--  lib/llist.c | 22
-rw-r--r--  lib/lockref.c | 2
-rw-r--r--  lib/vsprintf.c | 20
-rw-r--r--  mm/Kconfig | 4
-rw-r--r--  mm/filemap.c | 10
-rw-r--r--  mm/huge_memory.c | 201
-rw-r--r--  mm/hugetlb.c | 110
-rw-r--r--  mm/memcontrol.c | 10
-rw-r--r--  mm/memory-failure.c | 2
-rw-r--r--  mm/memory.c | 42
-rw-r--r--  mm/mempolicy.c | 5
-rw-r--r--  mm/migrate.c | 14
-rw-r--r--  mm/mmap.c | 3
-rw-r--r--  mm/oom_kill.c | 6
-rw-r--r--  mm/pgtable-generic.c | 16
-rw-r--r--  mm/rmap.c | 15
-rw-r--r--  net/ipv4/fib_trie.c | 13
-rw-r--r--  net/ipv4/ping.c | 15
-rw-r--r--  net/ipv4/tcp_ipv4.c | 33
-rw-r--r--  net/ipv4/udp.c | 15
-rw-r--r--  net/phonet/socket.c | 24
-rw-r--r--  net/sctp/objcnt.c | 9
-rw-r--r--  samples/kfifo/bytestream-example.c | 4
-rw-r--r--  samples/kfifo/dma-example.c | 2
-rw-r--r--  samples/kfifo/inttype-example.c | 4
-rw-r--r--  sound/core/memalloc.c | 6
-rw-r--r--  sound/firewire/dice.c | 2
-rw-r--r--  sound/soc/samsung/ac97.c | 6
213 files changed, 1164 insertions(+), 889 deletions(-)
diff --git a/Documentation/vm/split_page_table_lock b/Documentation/vm/split_page_table_lock
new file mode 100644
index 000000000000..7521d367f21d
--- /dev/null
+++ b/Documentation/vm/split_page_table_lock
@@ -0,0 +1,94 @@
Split page table lock
=====================

Originally, the mm->page_table_lock spinlock protected all page tables of the
mm_struct. But this approach leads to poor page fault scalability of
multi-threaded applications due to high contention on the lock. To improve
scalability, split page table lock was introduced.

With split page table lock we have a separate per-table lock to serialize
access to the table. At the moment we use split lock for PTE and PMD
tables. Access to higher level tables is protected by mm->page_table_lock.

There are helpers to lock/unlock a table and other accessor functions
(a short usage sketch follows the list):
 - pte_offset_map_lock()
	maps pte and takes PTE table lock, returns pointer to the taken
	lock;
 - pte_unmap_unlock()
	unlocks and unmaps PTE table;
 - pte_alloc_map_lock()
	allocates PTE table if needed and takes the lock, returns pointer
	to taken lock or NULL if allocation failed;
 - pte_lockptr()
	returns pointer to PTE table lock;
 - pmd_lock()
	takes PMD table lock, returns pointer to taken lock;
 - pmd_lockptr()
	returns pointer to PMD table lock;

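For illustration, a minimal caller sketch (mm, pmd and addr are assumed to
be set up by the surrounding code):

	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	/* ... inspect or modify the entry under the per-table lock ... */
	pte_unmap_unlock(pte, ptl);
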
Split page table lock for PTE tables is enabled compile-time if
CONFIG_SPLIT_PTLOCK_CPUS (usually 4) is less than or equal to NR_CPUS.
If split lock is disabled, all tables are guarded by mm->page_table_lock.

Split page table lock for PMD tables is enabled if it's enabled for PTE
tables and the architecture supports it (see below).

Hugetlb and split page table lock
---------------------------------

Hugetlb can support several page sizes. We use split lock only for the PMD
level, but not for PUD.

Hugetlb-specific helpers (usage sketch below):
 - huge_pte_lock()
	takes pmd split lock for PMD_SIZE page, mm->page_table_lock
	otherwise;
 - huge_pte_lockptr()
	returns pointer to table lock;

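A sketch of the intended usage, assuming the huge_pte_lock(h, mm, pte)
signature introduced by this series (vma, mm and ptep come from the
caller):

	struct hstate *h = hstate_vma(vma);
	spinlock_t *ptl;

	ptl = huge_pte_lock(h, mm, ptep);	/* PMD split lock or page_table_lock */
	/* ... operate on the huge PTE ... */
	spin_unlock(ptl);
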
Support of split page table lock by an architecture
---------------------------------------------------

There is no need to specially enable PTE split page table lock:
everything required is done by pgtable_page_ctor() and pgtable_page_dtor(),
which must be called on PTE table allocation / freeing.

Make sure the architecture doesn't use the slab allocator for page table
allocation: slab uses page->slab_cache and page->first_page for its pages.
These fields share storage with page->ptl.

PMD split lock only makes sense if you have more than two page table
levels.

Enabling PMD split lock requires a pgtable_pmd_page_ctor() call on PMD table
allocation and pgtable_pmd_page_dtor() on freeing.

Allocation usually happens in pmd_alloc_one(), freeing in pmd_free(), but
make sure you cover all PMD table allocation / freeing paths: e.g., x86
with PAE preallocates a few PMDs on pgd_alloc().

With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.

NOTE: pgtable_page_ctor() and pgtable_pmd_page_ctor() can fail -- the
failure must be handled properly.

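The per-architecture conversions below all follow the same shape; a generic
sketch of the expected error handling (mirroring the um and x86 hunks in
this very merge):

	pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
	{
		struct page *pte;

		pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
		if (!pte)
			return NULL;
		if (!pgtable_page_ctor(pte)) {	/* may fail: lock allocation */
			__free_page(pte);
			return NULL;
		}
		return pte;
	}
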
page->ptl
---------

page->ptl is used to access the split page table lock, where 'page' is the
struct page of the page containing the table. It shares storage with
page->private (and a few other fields in the union).

To avoid increasing the size of struct page and to get the best
performance, we use a trick:
 - if spinlock_t fits into long, we use page->ptl as the spinlock itself,
   so we can avoid indirect access and save a cache line;
 - if the size of spinlock_t is bigger than the size of long, we use
   page->ptl as a pointer to spinlock_t and allocate it dynamically. This
   allows using split lock with DEBUG_SPINLOCK or DEBUG_LOCK_ALLOC
   enabled, but costs one more cache line for indirect access.

The spinlock_t is allocated in pgtable_page_ctor() for PTE tables and in
pgtable_pmd_page_ctor() for PMD tables.

Please, never access page->ptl directly -- use the appropriate helper.
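
A condensed sketch of how the trick reads in code -- paraphrasing, not
quoting, the mm.h helpers from this series (BLOATED_SPINLOCKS is assumed to
be true when sizeof(spinlock_t) > sizeof(long); the xen hunk below uses the
real ptlock_ptr()):

	#if BLOATED_SPINLOCKS
	static inline spinlock_t *ptlock_ptr(struct page *page)
	{
		return page->ptl;	/* dynamically allocated lock */
	}
	#else
	static inline spinlock_t *ptlock_ptr(struct page *page)
	{
		return &page->ptl;	/* lock embedded in struct page */
	}
	#endif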
diff --git a/arch/Kconfig b/arch/Kconfig
index ded747c7b74c..f1cf895c040f 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -207,9 +207,6 @@ config HAVE_DMA_ATTRS
 config HAVE_DMA_CONTIGUOUS
 	bool
 
-config USE_GENERIC_SMP_HELPERS
-	bool
-
 config GENERIC_SMP_IDLE_THREAD
 	bool
 
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 35a300d4a9fb..8d2a4833acda 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -522,7 +522,6 @@ config ARCH_MAY_HAVE_PC_FDC
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index bc2a0daf2d92..aab14a019c20 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -72,7 +72,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	if (!pte)
 		return NULL;
 	page = virt_to_page(pte);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 	return page;
 }
 
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 5ede5460c806..2ee0c9bfd032 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -125,7 +125,6 @@ config ARC_PLAT_NEEDS_CPU_TO_DMA
 config SMP
 	bool "Symmetric Multi-Processing (Incomplete)"
 	default n
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 36a9f20c21a3..81208bfd9dcb 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -105,11 +105,16 @@ static inline pgtable_t
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pgtable_t pte_pg;
+	struct page *page;
 
 	pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
-	if (pte_pg) {
-		memzero((void *)pte_pg, PTRS_PER_PTE * 4);
-		pgtable_page_ctor(virt_to_page(pte_pg));
+	if (!pte_pg)
+		return 0;
+	memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+	page = virt_to_page(pte_pg);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return 0;
 	}
 
 	return pte_pg;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 603d661b445d..00c1ff45a158 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1432,7 +1432,6 @@ config SMP
 	depends on GENERIC_CLOCKEVENTS
 	depends on HAVE_SMP
 	depends on MMU || ARM_MPU
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 943504f53f57..78a779361682 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -102,12 +102,14 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 #else
 	pte = alloc_pages(PGALLOC_GFP, 0);
 #endif
-	if (pte) {
-		if (!PageHighMem(pte))
-			clean_pte_table(page_address(pte));
-		pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	if (!PageHighMem(pte))
+		clean_pte_table(page_address(pte));
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
 	}
-
 	return pte;
 }
 
diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c
index d7aa52ea6cfc..bc471973cf04 100644
--- a/arch/arm/mach-tegra/apbio.c
+++ b/arch/arm/mach-tegra/apbio.c
@@ -114,7 +114,7 @@ static int do_dma_transfer(unsigned long apb_add,
114 dma_desc->callback = apb_dma_complete; 114 dma_desc->callback = apb_dma_complete;
115 dma_desc->callback_param = NULL; 115 dma_desc->callback_param = NULL;
116 116
117 INIT_COMPLETION(tegra_apb_wait); 117 reinit_completion(&tegra_apb_wait);
118 118
119 dmaengine_submit(dma_desc); 119 dmaengine_submit(dma_desc);
120 dma_async_issue_pending(tegra_apb_dma_chan); 120 dma_async_issue_pending(tegra_apb_dma_chan);
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 2a5907b5c8d2..ff379ac115df 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -65,7 +65,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
65 return ret; 65 return ret;
66} 66}
67 67
68#if USE_SPLIT_PTLOCKS 68#if USE_SPLIT_PTE_PTLOCKS
69/* 69/*
70 * If we are using split PTE locks, then we need to take the page 70 * If we are using split PTE locks, then we need to take the page
71 * lock here. Otherwise we are using shared mm->page_table_lock 71 * lock here. Otherwise we are using shared mm->page_table_lock
@@ -84,10 +84,10 @@ static inline void do_pte_unlock(spinlock_t *ptl)
 {
 	spin_unlock(ptl);
 }
-#else /* !USE_SPLIT_PTLOCKS */
+#else /* !USE_SPLIT_PTE_PTLOCKS */
 static inline void do_pte_lock(spinlock_t *ptl) {}
 static inline void do_pte_unlock(spinlock_t *ptl) {}
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 		      unsigned long pfn)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index bb0bf1bfc05d..9714fe0403b7 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -143,7 +143,6 @@ config CPU_BIG_ENDIAN
 
 config SMP
 	bool "Symmetric Multi-Processing"
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If
 	  you say N here, the kernel will run on single and
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index f214069ec5d5..9bea6e74a001 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -63,9 +63,12 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 	struct page *pte;
 
 	pte = alloc_pages(PGALLOC_GFP, 0);
-	if (pte)
-		pgtable_page_ctor(pte);
-
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
index bc7e8ae479ee..1aba19d68c5e 100644
--- a/arch/avr32/include/asm/pgalloc.h
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -68,7 +68,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 		return NULL;
 
 	page = virt_to_page(pg);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		quicklist_free(QUICK_PT, NULL, pg);
+		return NULL;
+	}
 
 	return page;
 }
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index e887b57c3176..9ceccef9c649 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -34,7 +34,6 @@ config BLACKFIN
34 select ARCH_WANT_IPC_PARSE_VERSION 34 select ARCH_WANT_IPC_PARSE_VERSION
35 select GENERIC_ATOMIC64 35 select GENERIC_ATOMIC64
36 select GENERIC_IRQ_PROBE 36 select GENERIC_IRQ_PROBE
37 select USE_GENERIC_SMP_HELPERS if SMP
38 select HAVE_NMI_WATCHDOG if NMI_WATCHDOG 37 select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
39 select GENERIC_SMP_IDLE_THREAD 38 select GENERIC_SMP_IDLE_THREAD
40 select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS 39 select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h
index 6da975db112f..235ece437ddd 100644
--- a/arch/cris/include/asm/pgalloc.h
+++ b/arch/cris/include/asm/pgalloc.h
@@ -32,7 +32,12 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
32{ 32{
33 struct page *pte; 33 struct page *pte;
34 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); 34 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
35 pgtable_page_ctor(pte); 35 if (!pte)
36 return NULL;
37 if (!pgtable_page_ctor(pte)) {
38 __free_page(pte);
39 return NULL;
40 }
36 return pte; 41 return pte;
37} 42}
38 43
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index f6084bc524e8..41907d25ed38 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -37,11 +37,15 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
37#else 37#else
38 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); 38 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
39#endif 39#endif
40 if (page) { 40 if (!page)
41 clear_highpage(page); 41 return NULL;
42 pgtable_page_ctor(page); 42
43 flush_dcache_page(page); 43 clear_highpage(page);
44 if (!pgtable_page_ctor(page)) {
45 __free_page(page);
46 return NULL;
44 } 47 }
48 flush_dcache_page(page);
45 return page; 49 return page;
46} 50}
47 51
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 99041b07e610..09df2608f40a 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -4,7 +4,6 @@ comment "Linux Kernel Configuration for Hexagon"
4config HEXAGON 4config HEXAGON
5 def_bool y 5 def_bool y
6 select HAVE_OPROFILE 6 select HAVE_OPROFILE
7 select USE_GENERIC_SMP_HELPERS if SMP
8 # Other pending projects/to-do items. 7 # Other pending projects/to-do items.
9 # select HAVE_REGS_AND_STACK_ACCESS_API 8 # select HAVE_REGS_AND_STACK_ACCESS_API
10 # select HAVE_HW_BREAKPOINT if PERF_EVENTS 9 # select HAVE_HW_BREAKPOINT if PERF_EVENTS
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 679bf6d66487..4c9d382d7798 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -65,10 +65,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
65 struct page *pte; 65 struct page *pte;
66 66
67 pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); 67 pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
68 68 if (!pte)
69 if (pte) 69 return NULL;
70 pgtable_page_ctor(pte); 70 if (!pgtable_page_ctor(pte)) {
71 71 __free_page(pte);
72 return NULL;
73 }
72 return pte; 74 return pte;
73} 75}
74 76
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 7740ab10a171..dfe85e92ca2e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -343,7 +343,6 @@ config FORCE_MAX_ZONEORDER
343 343
344config SMP 344config SMP
345 bool "Symmetric multi-processing support" 345 bool "Symmetric multi-processing support"
346 select USE_GENERIC_SMP_HELPERS
347 help 346 help
348 This enables support for systems with more than one CPU. If you have 347 This enables support for systems with more than one CPU. If you have
349 a system with only one CPU, say N. If you have a system with more 348 a system with only one CPU, say N. If you have a system with more
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index 96a8d927db28..5767cdfc08db 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -91,7 +91,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
91 if (!pg) 91 if (!pg)
92 return NULL; 92 return NULL;
93 page = virt_to_page(pg); 93 page = virt_to_page(pg);
94 pgtable_page_ctor(page); 94 if (!pgtable_page_ctor(page)) {
95 quicklist_free(0, NULL, pg);
96 return NULL;
97 }
95 return page; 98 return page;
96} 99}
97 100
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 75661fbf4529..09ef94a8a7c3 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -275,7 +275,6 @@ source "kernel/Kconfig.preempt"
275 275
276config SMP 276config SMP
277 bool "Symmetric multi-processing support" 277 bool "Symmetric multi-processing support"
278 select USE_GENERIC_SMP_HELPERS
279 ---help--- 278 ---help---
280 This enables support for systems with more than one CPU. If you have 279 This enables support for systems with more than one CPU. If you have
281 a system with only one CPU, like most personal computers, say N. If 280 a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h
index 0fc736198979..2d55a064ccac 100644
--- a/arch/m32r/include/asm/pgalloc.h
+++ b/arch/m32r/include/asm/pgalloc.h
@@ -43,7 +43,12 @@ static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm,
43{ 43{
44 struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO); 44 struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
45 45
46 pgtable_page_ctor(pte); 46 if (!pte)
47 return NULL;
48 if (!pgtable_page_ctor(pte)) {
49 __free_page(pte);
50 return NULL;
51 }
47 return pte; 52 return pte;
48} 53}
49 54
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 313f3dd23cdc..f9924fbcfe42 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -56,6 +56,10 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
56 56
57 if (!page) 57 if (!page)
58 return NULL; 58 return NULL;
59 if (!pgtable_page_ctor(page)) {
60 __free_page(page);
61 return NULL;
62 }
59 63
60 pte = kmap(page); 64 pte = kmap(page);
61 if (pte) { 65 if (pte) {
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index 2f02f264e694..24bcba496c75 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -29,18 +29,22 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
29 29
30static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) 30static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
31{ 31{
32 struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); 32 struct page *page;
33 pte_t *pte; 33 pte_t *pte;
34 34
35 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
35 if(!page) 36 if(!page)
36 return NULL; 37 return NULL;
38 if (!pgtable_page_ctor(page)) {
39 __free_page(page);
40 return NULL;
41 }
37 42
38 pte = kmap(page); 43 pte = kmap(page);
39 __flush_page_to_ram(pte); 44 __flush_page_to_ram(pte);
40 flush_tlb_kernel_page(pte); 45 flush_tlb_kernel_page(pte);
41 nocache_page(pte); 46 nocache_page(pte);
42 kunmap(page); 47 kunmap(page);
43 pgtable_page_ctor(page);
44 return page; 48 return page;
45} 49}
46 50
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 48d80d5a666f..f868506e3350 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -59,7 +59,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
59 return NULL; 59 return NULL;
60 60
61 clear_highpage(page); 61 clear_highpage(page);
62 pgtable_page_ctor(page); 62 if (!pgtable_page_ctor(page)) {
63 __free_page(page);
64 return NULL;
65 }
63 return page; 66 return page;
64 67
65} 68}
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index 36368eb07e13..e56abd2c1b4f 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -111,7 +111,6 @@ config METAG_META21
111config SMP 111config SMP
112 bool "Symmetric multi-processing support" 112 bool "Symmetric multi-processing support"
113 depends on METAG_META21 && METAG_META21_MMU 113 depends on METAG_META21 && METAG_META21_MMU
114 select USE_GENERIC_SMP_HELPERS
115 help 114 help
116 This enables support for systems with more than one thread running 115 This enables support for systems with more than one thread running
117 Linux. If you have a system with only one thread running Linux, 116 Linux. If you have a system with only one thread running Linux,
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h
index 275d9285141c..3104df0a4822 100644
--- a/arch/metag/include/asm/pgalloc.h
+++ b/arch/metag/include/asm/pgalloc.h
@@ -52,8 +52,12 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
52{ 52{
53 struct page *pte; 53 struct page *pte;
54 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0); 54 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
55 if (pte) 55 if (!pte)
56 pgtable_page_ctor(pte); 56 return NULL;
57 if (!pgtable_page_ctor(pte)) {
58 __free_page(pte);
59 return NULL;
60 }
57 return pte; 61 return pte;
58} 62}
59 63
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index ebd35792482c..7fdf7fabc7d7 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -122,8 +122,13 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
122#endif 122#endif
123 123
124 ptepage = alloc_pages(flags, 0); 124 ptepage = alloc_pages(flags, 0);
125 if (ptepage) 125 if (!ptepage)
126 clear_highpage(ptepage); 126 return NULL;
127 clear_highpage(ptepage);
128 if (!pgtable_page_ctor(ptepage)) {
129 __free_page(ptepage);
130 return NULL;
131 }
127 return ptepage; 132 return ptepage;
128} 133}
129 134
@@ -158,8 +163,9 @@ extern inline void pte_free_slow(struct page *ptepage)
158 __free_page(ptepage); 163 __free_page(ptepage);
159} 164}
160 165
161extern inline void pte_free(struct mm_struct *mm, struct page *ptepage) 166static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
162{ 167{
168 pgtable_page_dtor(ptepage);
163 __free_page(ptepage); 169 __free_page(ptepage);
164} 170}
165 171
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 17cc7ff8458c..867d7db11581 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2125,7 +2125,6 @@ source "mm/Kconfig"
2125config SMP 2125config SMP
2126 bool "Multi-Processing support" 2126 bool "Multi-Processing support"
2127 depends on SYS_SUPPORTS_SMP 2127 depends on SYS_SUPPORTS_SMP
2128 select USE_GENERIC_SMP_HELPERS
2129 help 2128 help
2130 This enables support for systems with more than one CPU. If you have 2129 This enables support for systems with more than one CPU. If you have
2131 a system with only one CPU, like most personal computers, say N. If 2130 a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 881d18b4e298..b336037e8768 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -80,9 +80,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
80 struct page *pte; 80 struct page *pte;
81 81
82 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); 82 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
83 if (pte) { 83 if (!pte)
84 clear_highpage(pte); 84 return NULL;
85 pgtable_page_ctor(pte); 85 clear_highpage(pte);
86 if (!pgtable_page_ctor(pte)) {
87 __free_page(pte);
88 return NULL;
86 } 89 }
87 return pte; 90 return pte;
88} 91}
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 6aaa1607001a..8bde9237d13b 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -181,7 +181,6 @@ endmenu
181config SMP 181config SMP
182 bool "Symmetric multi-processing support" 182 bool "Symmetric multi-processing support"
183 default y 183 default y
184 select USE_GENERIC_SMP_HELPERS
185 depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050 184 depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
186 ---help--- 185 ---help---
187 This enables support for systems with more than one CPU. If you have 186 This enables support for systems with more than one CPU. If you have
diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h
index 146bacf193ea..0f25d5fa86f3 100644
--- a/arch/mn10300/include/asm/pgalloc.h
+++ b/arch/mn10300/include/asm/pgalloc.h
@@ -46,6 +46,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
46 46
47static inline void pte_free(struct mm_struct *mm, struct page *pte) 47static inline void pte_free(struct mm_struct *mm, struct page *pte)
48{ 48{
49 pgtable_page_dtor(pte);
49 __free_page(pte); 50 __free_page(pte);
50} 51}
51 52
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index bd9ada693f95..e77a7c728081 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -78,8 +78,13 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
78#else 78#else
79 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); 79 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
80#endif 80#endif
81 if (pte) 81 if (!pte)
82 clear_highpage(pte); 82 return NULL;
83 clear_highpage(pte);
84 if (!pgtable_page_ctor(pte)) {
85 __free_page(pte);
86 return NULL;
87 }
83 return pte; 88 return pte;
84} 89}
85 90
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 05c39ecd2efd..21484e5b9e9a 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -78,8 +78,13 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
78{ 78{
79 struct page *pte; 79 struct page *pte;
80 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); 80 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
81 if (pte) 81 if (!pte)
82 clear_page(page_address(pte)); 82 return NULL;
83 clear_page(page_address(pte));
84 if (!pgtable_page_ctor(pte)) {
85 __free_page(pte);
86 return NULL;
87 }
83 return pte; 88 return pte;
84} 89}
85 90
@@ -90,6 +95,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
90 95
91static inline void pte_free(struct mm_struct *mm, struct page *pte) 96static inline void pte_free(struct mm_struct *mm, struct page *pte)
92{ 97{
98 pgtable_page_dtor(pte);
93 __free_page(pte); 99 __free_page(pte);
94} 100}
95 101
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 7dcde539d61e..c03567a9a915 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -226,7 +226,6 @@ endchoice
226 226
227config SMP 227config SMP
228 bool "Symmetric multi-processing support" 228 bool "Symmetric multi-processing support"
229 select USE_GENERIC_SMP_HELPERS
230 ---help--- 229 ---help---
231 This enables support for systems with more than one CPU. If you have 230 This enables support for systems with more than one CPU. If you have
232 a system with only one CPU, like most personal computers, say N. If 231 a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index fc987a1c12a8..f213f5b4c423 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -121,8 +121,12 @@ static inline pgtable_t
121pte_alloc_one(struct mm_struct *mm, unsigned long address) 121pte_alloc_one(struct mm_struct *mm, unsigned long address)
122{ 122{
123 struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 123 struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
124 if (page) 124 if (!page)
125 pgtable_page_ctor(page); 125 return NULL;
126 if (!pgtable_page_ctor(page)) {
127 __free_page(page);
128 return NULL;
129 }
126 return page; 130 return page;
127} 131}
128 132
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2f898d63eb96..4740b0a15fa8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -106,7 +106,6 @@ config PPC
106 select HAVE_MEMBLOCK_NODE_MAP 106 select HAVE_MEMBLOCK_NODE_MAP
107 select HAVE_DMA_ATTRS 107 select HAVE_DMA_ATTRS
108 select HAVE_DMA_API_DEBUG 108 select HAVE_DMA_API_DEBUG
109 select USE_GENERIC_SMP_HELPERS if SMP
110 select HAVE_OPROFILE 109 select HAVE_OPROFILE
111 select HAVE_DEBUG_KMEMLEAK 110 select HAVE_DEBUG_KMEMLEAK
112 select GENERIC_ATOMIC64 if PPC32 111 select GENERIC_ATOMIC64 if PPC32
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index f65e27b09bd3..16cb92d215d2 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -91,7 +91,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
91 if (!pte) 91 if (!pte)
92 return NULL; 92 return NULL;
93 page = virt_to_page(pte); 93 page = virt_to_page(pte);
94 pgtable_page_ctor(page); 94 if (!pgtable_page_ctor(page)) {
95 __free_page(page);
96 return NULL;
97 }
95 return page; 98 return page;
96} 99}
97 100
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 6c856fb8c15b..5b9601715289 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -121,7 +121,10 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
121 ptepage = alloc_pages(flags, 0); 121 ptepage = alloc_pages(flags, 0);
122 if (!ptepage) 122 if (!ptepage)
123 return NULL; 123 return NULL;
124 pgtable_page_ctor(ptepage); 124 if (!pgtable_page_ctor(ptepage)) {
125 __free_page(ptepage);
126 return NULL;
127 }
125 return ptepage; 128 return ptepage;
126} 129}
127 130
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 536eec72c0f7..9d95786aa80f 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -378,6 +378,10 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
378 __GFP_REPEAT | __GFP_ZERO); 378 __GFP_REPEAT | __GFP_ZERO);
379 if (!page) 379 if (!page)
380 return NULL; 380 return NULL;
381 if (!kernel && !pgtable_page_ctor(page)) {
382 __free_page(page);
383 return NULL;
384 }
381 385
382 ret = page_address(page); 386 ret = page_address(page);
383 spin_lock(&mm->page_table_lock); 387 spin_lock(&mm->page_table_lock);
@@ -392,9 +396,6 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
392 } 396 }
393 spin_unlock(&mm->page_table_lock); 397 spin_unlock(&mm->page_table_lock);
394 398
395 if (!kernel)
396 pgtable_page_ctor(page);
397
398 return (pte_t *)ret; 399 return (pte_t *)ret;
399} 400}
400 401
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index fc536f2971c0..7553b6a77c64 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -452,7 +452,7 @@ static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
452 */ 452 */
453 if (use_irq) { 453 if (use_irq) {
454 /* Clear completion */ 454 /* Clear completion */
455 INIT_COMPLETION(host->complete); 455 reinit_completion(&host->complete);
456 /* Ack stale interrupts */ 456 /* Ack stale interrupts */
457 kw_write_reg(reg_isr, kw_read_reg(reg_isr)); 457 kw_write_reg(reg_isr, kw_read_reg(reg_isr));
458 /* Arm timeout */ 458 /* Arm timeout */
@@ -717,7 +717,7 @@ static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
717 return -EINVAL; 717 return -EINVAL;
718 } 718 }
719 719
720 INIT_COMPLETION(comp); 720 reinit_completion(&comp);
721 req->data[0] = PMU_I2C_CMD; 721 req->data[0] = PMU_I2C_CMD;
722 req->reply[0] = 0xff; 722 req->reply[0] = 0xff;
723 req->nbytes = sizeof(struct pmu_i2c_hdr) + 1; 723 req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
@@ -748,7 +748,7 @@ static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
748 748
749 hdr->bus = PMU_I2C_BUS_STATUS; 749 hdr->bus = PMU_I2C_BUS_STATUS;
750 750
751 INIT_COMPLETION(comp); 751 reinit_completion(&comp);
752 req->data[0] = PMU_I2C_CMD; 752 req->data[0] = PMU_I2C_CMD;
753 req->reply[0] = 0xff; 753 req->reply[0] = 0xff;
754 req->nbytes = 2; 754 req->nbytes = 2;
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 5f997e79d570..16a255255d30 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -106,7 +106,7 @@ static int pseries_prepare_late(void)
106 atomic_set(&suspend_data.done, 0); 106 atomic_set(&suspend_data.done, 0);
107 atomic_set(&suspend_data.error, 0); 107 atomic_set(&suspend_data.error, 0);
108 suspend_data.complete = &suspend_work; 108 suspend_data.complete = &suspend_work;
109 INIT_COMPLETION(suspend_work); 109 reinit_completion(&suspend_work);
110 return 0; 110 return 0;
111} 111}
112 112
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f75d7e517927..314fced4fc14 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -141,7 +141,6 @@ config S390
141 select OLD_SIGACTION 141 select OLD_SIGACTION
142 select OLD_SIGSUSPEND3 142 select OLD_SIGSUSPEND3
143 select SYSCTL_EXCEPTION_TRACE 143 select SYSCTL_EXCEPTION_TRACE
144 select USE_GENERIC_SMP_HELPERS if SMP
145 select VIRT_CPU_ACCOUNTING 144 select VIRT_CPU_ACCOUNTING
146 select VIRT_TO_BUS 145 select VIRT_TO_BUS
147 146
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0a2e5e086749..e794c88f699a 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -772,7 +772,11 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
772 __free_page(page); 772 __free_page(page);
773 return NULL; 773 return NULL;
774 } 774 }
775 pgtable_page_ctor(page); 775 if (!pgtable_page_ctor(page)) {
776 kfree(mp);
777 __free_page(page);
778 return NULL;
779 }
776 mp->vmaddr = vmaddr & PMD_MASK; 780 mp->vmaddr = vmaddr & PMD_MASK;
777 INIT_LIST_HEAD(&mp->mapper); 781 INIT_LIST_HEAD(&mp->mapper);
778 page->index = (unsigned long) mp; 782 page->index = (unsigned long) mp;
@@ -902,7 +906,10 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
902 page = alloc_page(GFP_KERNEL|__GFP_REPEAT); 906 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
903 if (!page) 907 if (!page)
904 return NULL; 908 return NULL;
905 pgtable_page_ctor(page); 909 if (!pgtable_page_ctor(page)) {
910 __free_page(page);
911 return NULL;
912 }
906 atomic_set(&page->_mapcount, 1); 913 atomic_set(&page->_mapcount, 1);
907 table = (unsigned long *) page_to_phys(page); 914 table = (unsigned long *) page_to_phys(page);
908 clear_table(table, _PAGE_INVALID, PAGE_SIZE); 915 clear_table(table, _PAGE_INVALID, PAGE_SIZE);
@@ -1244,11 +1251,11 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1244 assert_spin_locked(&mm->page_table_lock); 1251 assert_spin_locked(&mm->page_table_lock);
1245 1252
1246 /* FIFO */ 1253 /* FIFO */
1247 if (!mm->pmd_huge_pte) 1254 if (!pmd_huge_pte(mm, pmdp))
1248 INIT_LIST_HEAD(lh); 1255 INIT_LIST_HEAD(lh);
1249 else 1256 else
1250 list_add(lh, (struct list_head *) mm->pmd_huge_pte); 1257 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1251 mm->pmd_huge_pte = pgtable; 1258 pmd_huge_pte(mm, pmdp) = pgtable;
1252} 1259}
1253 1260
1254pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) 1261pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
@@ -1260,12 +1267,12 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1260 assert_spin_locked(&mm->page_table_lock); 1267 assert_spin_locked(&mm->page_table_lock);
1261 1268
1262 /* FIFO */ 1269 /* FIFO */
1263 pgtable = mm->pmd_huge_pte; 1270 pgtable = pmd_huge_pte(mm, pmdp);
1264 lh = (struct list_head *) pgtable; 1271 lh = (struct list_head *) pgtable;
1265 if (list_empty(lh)) 1272 if (list_empty(lh))
1266 mm->pmd_huge_pte = NULL; 1273 pmd_huge_pte(mm, pmdp) = NULL;
1267 else { 1274 else {
1268 mm->pmd_huge_pte = (pgtable_t) lh->next; 1275 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1269 list_del(lh); 1276 list_del(lh);
1270 } 1277 }
1271 ptep = (pte_t *) pgtable; 1278 ptep = (pte_t *) pgtable;
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h
index 716b3fd1d863..2e067657db98 100644
--- a/arch/score/include/asm/pgalloc.h
+++ b/arch/score/include/asm/pgalloc.h
@@ -54,9 +54,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
54 struct page *pte; 54 struct page *pte;
55 55
56 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); 56 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
57 if (pte) { 57 if (!pte)
58 clear_highpage(pte); 58 return NULL;
59 pgtable_page_ctor(pte); 59 clear_highpage(pte);
60 if (!pgtable_page_ctor(pte)) {
61 __free_page(pte);
62 return NULL;
60 } 63 }
61 return pte; 64 return pte;
62} 65}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 224f4bc9925e..e78561bc30ef 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -711,7 +711,6 @@ config CC_STACKPROTECTOR
711config SMP 711config SMP
712 bool "Symmetric multi-processing support" 712 bool "Symmetric multi-processing support"
713 depends on SYS_SUPPORTS_SMP 713 depends on SYS_SUPPORTS_SMP
714 select USE_GENERIC_SMP_HELPERS
715 ---help--- 714 ---help---
716 This enables support for systems with more than one CPU. If you have 715 This enables support for systems with more than one CPU. If you have
717 a system with only one CPU, like most personal computers, say N. If 716 a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 8c00785c60d5..a33673b3687d 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -47,7 +47,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
47 if (!pg) 47 if (!pg)
48 return NULL; 48 return NULL;
49 page = virt_to_page(pg); 49 page = virt_to_page(pg);
50 pgtable_page_ctor(page); 50 if (!pgtable_page_ctor(page)) {
51 quicklist_free(QUICK_PT, NULL, pg);
52 return NULL;
53 }
51 return page; 54 return page;
52} 55}
53 56
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 78c4fdb91bc5..8591b201d9cc 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -28,7 +28,6 @@ config SPARC
28 select HAVE_ARCH_JUMP_LABEL 28 select HAVE_ARCH_JUMP_LABEL
29 select GENERIC_IRQ_SHOW 29 select GENERIC_IRQ_SHOW
30 select ARCH_WANT_IPC_PARSE_VERSION 30 select ARCH_WANT_IPC_PARSE_VERSION
31 select USE_GENERIC_SMP_HELPERS if SMP
32 select GENERIC_PCI_IOMAP 31 select GENERIC_PCI_IOMAP
33 select HAVE_NMI_WATCHDOG if SPARC64 32 select HAVE_NMI_WATCHDOG if SPARC64
34 select HAVE_BPF_JIT 33 select HAVE_BPF_JIT
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ed82edad1a39..d6de9353ee11 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2519,12 +2519,13 @@ pgtable_t pte_alloc_one(struct mm_struct *mm,
2519 return pte; 2519 return pte;
2520 2520
2521 page = __alloc_for_cache(mm); 2521 page = __alloc_for_cache(mm);
2522 if (page) { 2522 if (!page)
2523 pgtable_page_ctor(page); 2523 return NULL;
2524 pte = (pte_t *) page_address(page); 2524 if (!pgtable_page_ctor(page)) {
2525 free_hot_cold_page(page, 0);
2526 return NULL;
2525 } 2527 }
2526 2528 return (pte_t *) page_address(page);
2527 return pte;
2528} 2529}
2529 2530
2530void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 2531void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 5d721df48a72..869023abe5a4 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -345,7 +345,10 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
345 if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0) 345 if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
346 return NULL; 346 return NULL;
347 page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT); 347 page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
348 pgtable_page_ctor(page); 348 if (!pgtable_page_ctor(page)) {
349 __free_page(page);
350 return NULL;
351 }
349 return page; 352 return page;
350} 353}
351 354
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 7a91f288c708..656cc46a81f5 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -196,11 +196,11 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
196 assert_spin_locked(&mm->page_table_lock); 196 assert_spin_locked(&mm->page_table_lock);
197 197
198 /* FIFO */ 198 /* FIFO */
199 if (!mm->pmd_huge_pte) 199 if (!pmd_huge_pte(mm, pmdp))
200 INIT_LIST_HEAD(lh); 200 INIT_LIST_HEAD(lh);
201 else 201 else
202 list_add(lh, (struct list_head *) mm->pmd_huge_pte); 202 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
203 mm->pmd_huge_pte = pgtable; 203 pmd_huge_pte(mm, pmdp) = pgtable;
204} 204}
205 205
206pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) 206pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
@@ -211,12 +211,12 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
211 assert_spin_locked(&mm->page_table_lock); 211 assert_spin_locked(&mm->page_table_lock);
212 212
213 /* FIFO */ 213 /* FIFO */
214 pgtable = mm->pmd_huge_pte; 214 pgtable = pmd_huge_pte(mm, pmdp);
215 lh = (struct list_head *) pgtable; 215 lh = (struct list_head *) pgtable;
216 if (list_empty(lh)) 216 if (list_empty(lh))
217 mm->pmd_huge_pte = NULL; 217 pmd_huge_pte(mm, pmdp) = NULL;
218 else { 218 else {
219 mm->pmd_huge_pte = (pgtable_t) lh->next; 219 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
220 list_del(lh); 220 list_del(lh);
221 } 221 }
222 pte_val(pgtable[0]) = 0; 222 pte_val(pgtable[0]) = 0;
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index d45a2c48f185..b3692ce78f90 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -8,7 +8,6 @@ config TILE
8 select HAVE_KVM if !TILEGX 8 select HAVE_KVM if !TILEGX
9 select GENERIC_FIND_FIRST_BIT 9 select GENERIC_FIND_FIRST_BIT
10 select SYSCTL_EXCEPTION_TRACE 10 select SYSCTL_EXCEPTION_TRACE
11 select USE_GENERIC_SMP_HELPERS
12 select CC_OPTIMIZE_FOR_SIZE 11 select CC_OPTIMIZE_FOR_SIZE
13 select HAVE_DEBUG_KMEMLEAK 12 select HAVE_DEBUG_KMEMLEAK
14 select GENERIC_IRQ_PROBE 13 select GENERIC_IRQ_PROBE
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 4fd9ec0b58ed..5e86eac4bfae 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -241,6 +241,11 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
241 if (p == NULL) 241 if (p == NULL)
242 return NULL; 242 return NULL;
243 243
244 if (!pgtable_page_ctor(p)) {
245 __free_pages(p, L2_USER_PGTABLE_ORDER);
246 return NULL;
247 }
248
244 /* 249 /*
245 * Make every page have a page_count() of one, not just the first. 250 * Make every page have a page_count() of one, not just the first.
246 * We don't use __GFP_COMP since it doesn't look like it works 251 * We don't use __GFP_COMP since it doesn't look like it works
@@ -251,7 +256,6 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
251 inc_zone_page_state(p+i, NR_PAGETABLE); 256 inc_zone_page_state(p+i, NR_PAGETABLE);
252 } 257 }
253 258
254 pgtable_page_ctor(p);
255 return p; 259 return p;
256} 260}
257 261
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 7ddb64baf327..8636e905426f 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -279,8 +279,12 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
279 struct page *pte; 279 struct page *pte;
280 280
281 pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 281 pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
282 if (pte) 282 if (!pte)
283 pgtable_page_ctor(pte); 283 return NULL;
284 if (!pgtable_page_ctor(pte)) {
285 __free_page(pte);
286 return NULL;
287 }
284 return pte; 288 return pte;
285} 289}
286 290
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
index 0213e373a895..2e02d1356fdf 100644
--- a/arch/unicore32/include/asm/pgalloc.h
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -51,12 +51,14 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
51 struct page *pte; 51 struct page *pte;
52 52
53 pte = alloc_pages(PGALLOC_GFP, 0); 53 pte = alloc_pages(PGALLOC_GFP, 0);
54 if (pte) { 54 if (!pte)
55 if (!PageHighMem(pte)) { 55 return NULL;
56 void *page = page_address(pte); 56 if (!PageHighMem(pte)) {
57 clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t)); 57 void *page = page_address(pte);
58 } 58 clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
59 pgtable_page_ctor(pte); 59 }
60 if (!pgtable_page_ctor(pte)) {
61 __free_page(pte);
60 } 62 }
61 63
62 return pte; 64 return pte;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6e3e1cb3f6a0..83f521aa103f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -90,7 +90,6 @@ config X86
90 select GENERIC_IRQ_SHOW 90 select GENERIC_IRQ_SHOW
91 select GENERIC_CLOCKEVENTS_MIN_ADJUST 91 select GENERIC_CLOCKEVENTS_MIN_ADJUST
92 select IRQ_FORCED_THREADING 92 select IRQ_FORCED_THREADING
93 select USE_GENERIC_SMP_HELPERS if SMP
94 select HAVE_BPF_JIT if X86_64 93 select HAVE_BPF_JIT if X86_64
95 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 94 select HAVE_ARCH_TRANSPARENT_HUGEPAGE
96 select CLKEVT_I8253 95 select CLKEVT_I8253
@@ -1885,6 +1884,10 @@ config USE_PERCPU_NUMA_NODE_ID
 	def_bool y
 	depends on NUMA
 
+config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+	def_bool y
+	depends on X86_64 || X86_PAE
+
 menu "Power management and ACPI options"
 
 config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index b4389a468fb6..c4412e972bbd 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -80,12 +80,21 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
80#if PAGETABLE_LEVELS > 2 80#if PAGETABLE_LEVELS > 2
81static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 81static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
82{ 82{
83 return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); 83 struct page *page;
84 page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
85 if (!page)
86 return NULL;
87 if (!pgtable_pmd_page_ctor(page)) {
88 __free_pages(page, 0);
89 return NULL;
90 }
91 return (pmd_t *)page_address(page);
84} 92}
85 93
86static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 94static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
87{ 95{
88 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); 96 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
97 pgtable_pmd_page_dtor(virt_to_page(pmd));
89 free_page((unsigned long)pmd); 98 free_page((unsigned long)pmd);
90} 99}
91 100
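
The PMD-level ctor/dtor mirror their PTE-level counterparts: the ctor initializes the page's ptlock (plus the pmd_huge_pte pointer when THP is enabled) and can fail, the dtor releases the lock again. A sketch of what the pair presumably reduces to in the split-PMD-ptlock configuration:

    /* include/linux/mm.h, sketch (USE_SPLIT_PMD_PTLOCKS case) */
    static inline bool pgtable_pmd_page_ctor(struct page *page)
    {
    #ifdef CONFIG_TRANSPARENT_HUGEPAGE
            page->pmd_huge_pte = NULL;
    #endif
            return ptlock_init(page);       /* may fail: lock can be kmalloc'ed */
    }

    static inline void pgtable_pmd_page_dtor(struct page *page)
    {
    #ifdef CONFIG_TRANSPARENT_HUGEPAGE
            VM_BUG_ON(page->pmd_huge_pte);
    #endif
            ptlock_free(page);
    }
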
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index dfa537a03be1..a7cccb6d7fec 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -25,8 +25,12 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
25 struct page *pte; 25 struct page *pte;
26 26
27 pte = alloc_pages(__userpte_alloc_gfp, 0); 27 pte = alloc_pages(__userpte_alloc_gfp, 0);
28 if (pte) 28 if (!pte)
29 pgtable_page_ctor(pte); 29 return NULL;
30 if (!pgtable_page_ctor(pte)) {
31 __free_page(pte);
32 return NULL;
33 }
30 return pte; 34 return pte;
31} 35}
32 36
@@ -189,8 +193,10 @@ static void free_pmds(pmd_t *pmds[])
189 int i; 193 int i;
190 194
191 for(i = 0; i < PREALLOCATED_PMDS; i++) 195 for(i = 0; i < PREALLOCATED_PMDS; i++)
192 if (pmds[i]) 196 if (pmds[i]) {
197 pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
193 free_page((unsigned long)pmds[i]); 198 free_page((unsigned long)pmds[i]);
199 }
194} 200}
195 201
196static int preallocate_pmds(pmd_t *pmds[]) 202static int preallocate_pmds(pmd_t *pmds[])
@@ -200,8 +206,13 @@ static int preallocate_pmds(pmd_t *pmds[])
200 206
201 for(i = 0; i < PREALLOCATED_PMDS; i++) { 207 for(i = 0; i < PREALLOCATED_PMDS; i++) {
202 pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); 208 pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
203 if (pmd == NULL) 209 if (!pmd)
204 failed = true; 210 failed = true;
211 if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
212 free_page((unsigned long)pmd);
213 pmd = NULL;
214 failed = true;
215 }
205 pmds[i] = pmd; 216 pmds[i] = pmd;
206 } 217 }
207 218
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index fdc3ba28ca38..49c962fe7e62 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -796,8 +796,8 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
796{ 796{
797 spinlock_t *ptl = NULL; 797 spinlock_t *ptl = NULL;
798 798
799#if USE_SPLIT_PTLOCKS 799#if USE_SPLIT_PTE_PTLOCKS
800 ptl = __pte_lockptr(page); 800 ptl = ptlock_ptr(page);
801 spin_lock_nest_lock(ptl, &mm->page_table_lock); 801 spin_lock_nest_lock(ptl, &mm->page_table_lock);
802#endif 802#endif
803 803
@@ -1637,7 +1637,7 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1637 1637
1638 __set_pfn_prot(pfn, PAGE_KERNEL_RO); 1638 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1639 1639
1640 if (level == PT_PTE && USE_SPLIT_PTLOCKS) 1640 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1641 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); 1641 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1642 1642
1643 xen_mc_issue(PARAVIRT_LAZY_MMU); 1643 xen_mc_issue(PARAVIRT_LAZY_MMU);
@@ -1671,7 +1671,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1671 if (!PageHighMem(page)) { 1671 if (!PageHighMem(page)) {
1672 xen_mc_batch(); 1672 xen_mc_batch();
1673 1673
1674 if (level == PT_PTE && USE_SPLIT_PTLOCKS) 1674 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1675 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); 1675 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1676 1676
1677 __set_pfn_prot(pfn, PAGE_KERNEL); 1677 __set_pfn_prot(pfn, PAGE_KERNEL);
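
__pte_lockptr() is gone; ptlock_ptr() is the replacement accessor, and it hides whether the spinlock is embedded in struct page or, when it is too big to embed (with lockdep, for instance), lives behind a pointer. Roughly:

    /* include/linux/mm.h, sketch */
    #if ALLOC_SPLIT_PTLOCKS
    static inline spinlock_t *ptlock_ptr(struct page *page)
    {
            return page->ptl;               /* separately allocated lock */
    }
    #else
    static inline spinlock_t *ptlock_ptr(struct page *page)
    {
            return &page->ptl;              /* embedded in struct page */
    }
    #endif
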
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index cf914c8c249a..d38eb9237e64 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -38,35 +38,46 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
38 free_page((unsigned long)pgd); 38 free_page((unsigned long)pgd);
39} 39}
40 40
41/* Use a slab cache for the pte pages (see also sparc64 implementation) */
42
43extern struct kmem_cache *pgtable_cache;
44
45static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 41static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
46 unsigned long address) 42 unsigned long address)
47{ 43{
48 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT); 44 pte_t *ptep;
45 int i;
46
47 ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
48 if (!ptep)
49 return NULL;
50 for (i = 0; i < 1024; i++)
51 pte_clear(NULL, 0, ptep + i);
52 return ptep;
49} 53}
50 54
51static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 55static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
52 unsigned long addr) 56 unsigned long addr)
53{ 57{
58 pte_t *pte;
54 struct page *page; 59 struct page *page;
55 60
56 page = virt_to_page(pte_alloc_one_kernel(mm, addr)); 61 pte = pte_alloc_one_kernel(mm, addr);
57 pgtable_page_ctor(page); 62 if (!pte)
63 return NULL;
64 page = virt_to_page(pte);
65 if (!pgtable_page_ctor(page)) {
66 __free_page(page);
67 return NULL;
68 }
58 return page; 69 return page;
59} 70}
60 71
61static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 72static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
62{ 73{
63 kmem_cache_free(pgtable_cache, pte); 74 free_page((unsigned long)pte);
64} 75}
65 76
66static inline void pte_free(struct mm_struct *mm, pgtable_t pte) 77static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
67{ 78{
68 pgtable_page_dtor(pte); 79 pgtable_page_dtor(pte);
69 kmem_cache_free(pgtable_cache, page_address(pte)); 80 __free_page(pte);
70} 81}
71#define pmd_pgtable(pmd) pmd_page(pmd) 82#define pmd_pgtable(pmd) pmd_page(pmd)
72 83
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 0fdf5d043f82..216446295ada 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -220,12 +220,11 @@ extern unsigned long empty_zero_page[1024];
220#ifdef CONFIG_MMU 220#ifdef CONFIG_MMU
221extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)]; 221extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
222extern void paging_init(void); 222extern void paging_init(void);
223extern void pgtable_cache_init(void);
224#else 223#else
225# define swapper_pg_dir NULL 224# define swapper_pg_dir NULL
226static inline void paging_init(void) { } 225static inline void paging_init(void) { }
227static inline void pgtable_cache_init(void) { }
228#endif 226#endif
227static inline void pgtable_cache_init(void) { }
229 228
230/* 229/*
231 * The pmd contains the kernel virtual address of the pte page. 230 * The pmd contains the kernel virtual address of the pte page.
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index a1077570e383..c43771c974be 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -50,23 +50,3 @@ void __init init_mmu(void)
50 */ 50 */
51 set_ptevaddr_register(PGTABLE_START); 51 set_ptevaddr_register(PGTABLE_START);
52} 52}
53
54struct kmem_cache *pgtable_cache __read_mostly;
55
56static void pgd_ctor(void *addr)
57{
58 pte_t *ptep = (pte_t *)addr;
59 int i;
60
61 for (i = 0; i < 1024; i++, ptep++)
62 pte_clear(NULL, 0, ptep);
63
64}
65
66void __init pgtable_cache_init(void)
67{
68 pgtable_cache = kmem_cache_create("pgd",
69 PAGE_SIZE, PAGE_SIZE,
70 SLAB_HWCACHE_ALIGN,
71 pgd_ctor);
72}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 88d4e864d4c0..c661896e2465 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -319,7 +319,7 @@ void __blk_mq_end_io(struct request *rq, int error)
319 blk_mq_complete_request(rq, error); 319 blk_mq_complete_request(rq, error);
320} 320}
321 321
322#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) 322#if defined(CONFIG_SMP)
323 323
324/* 324/*
325 * Called with interrupts disabled. 325 * Called with interrupts disabled.
@@ -361,7 +361,7 @@ static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
361 361
362 return true; 362 return true;
363} 363}
364#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */ 364#else /* CONFIG_SMP */
365static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu, 365static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
366 struct request *rq, const int error) 366 struct request *rq, const int error)
367{ 367{
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ce4b8bfd3d27..57790c1a97eb 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -36,7 +36,7 @@ static void blk_done_softirq(struct softirq_action *h)
36 } 36 }
37} 37}
38 38
39#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) 39#ifdef CONFIG_SMP
40static void trigger_softirq(void *data) 40static void trigger_softirq(void *data)
41{ 41{
42 struct request *rq = data; 42 struct request *rq = data;
@@ -71,7 +71,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
71 71
72 return 1; 72 return 1;
73} 73}
74#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */ 74#else /* CONFIG_SMP */
75static int raise_blk_irq(int cpu, struct request *rq) 75static int raise_blk_irq(int cpu, struct request *rq)
76{ 76{
77 return 1; 77 return 1;
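
With every SMP architecture now selecting the generic helpers, the extra Kconfig symbol carried no information and plain CONFIG_SMP gates the remote-completion path. For reference, a sketch of what that path does when the submitting CPU is not the completing one (raise_blk_irq() as in this file; the csd fields follow the then-current struct call_single_data layout):

    static int raise_blk_irq(int cpu, struct request *rq)
    {
            if (cpu_online(cpu)) {
                    struct call_single_data *data = &rq->csd;

                    data->func = trigger_softirq;   /* re-raises BLOCK_SOFTIRQ remotely */
                    data->info = rq;
                    data->flags = 0;

                    __smp_call_function_single(cpu, data, 0);
                    return 0;
            }
            return 1;       /* caller completes the request locally instead */
    }
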
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 4f8c4d90ec73..97779522472f 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -288,7 +288,7 @@ static ssize_t
288queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) 288queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
289{ 289{
290 ssize_t ret = -EINVAL; 290 ssize_t ret = -EINVAL;
291#if defined(CONFIG_USE_GENERIC_SMP_HELPERS) 291#ifdef CONFIG_SMP
292 unsigned long val; 292 unsigned long val;
293 293
294 ret = queue_var_store(&val, page, count); 294 ret = queue_var_store(&val, page, count);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index ac33d5f30778..966f893711b3 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -434,7 +434,7 @@ int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
434 case -EINPROGRESS: 434 case -EINPROGRESS:
435 case -EBUSY: 435 case -EBUSY:
436 wait_for_completion(&completion->completion); 436 wait_for_completion(&completion->completion);
437 INIT_COMPLETION(completion->completion); 437 reinit_completion(&completion->completion);
438 err = completion->err; 438 err = completion->err;
439 break; 439 break;
440 }; 440 };
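
INIT_COMPLETION() was a macro that re-zeroed ->done through an lvalue; its replacement is an ordinary inline taking a pointer, which is why every conversion below also adds a '&'. The helper is essentially just:

    /* include/linux/completion.h, sketch */
    static inline void reinit_completion(struct completion *x)
    {
            x->done = 0;    /* reuse a completion without touching its waitqueue */
    }
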
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 25a5934f0e50..1ab8258fcf56 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -493,7 +493,7 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
493 ret = wait_for_completion_interruptible(&tr->completion); 493 ret = wait_for_completion_interruptible(&tr->completion);
494 if (!ret) 494 if (!ret)
495 ret = tr->err; 495 ret = tr->err;
496 INIT_COMPLETION(tr->completion); 496 reinit_completion(&tr->completion);
497 } 497 }
498 return ret; 498 return ret;
499} 499}
@@ -721,7 +721,7 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
721 ret = wait_for_completion_interruptible(&tr->completion); 721 ret = wait_for_completion_interruptible(&tr->completion);
722 if (!ret) 722 if (!ret)
723 ret = tr->err; 723 ret = tr->err;
724 INIT_COMPLETION(tr->completion); 724 reinit_completion(&tr->completion);
725 } 725 }
726 726
727 return ret; 727 return ret;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e091ef6e1791..432afc03e7c3 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -179,7 +179,7 @@ static int do_one_async_hash_op(struct ahash_request *req,
179 ret = wait_for_completion_interruptible(&tr->completion); 179 ret = wait_for_completion_interruptible(&tr->completion);
180 if (!ret) 180 if (!ret)
181 ret = tr->err; 181 ret = tr->err;
182 INIT_COMPLETION(tr->completion); 182 reinit_completion(&tr->completion);
183 } 183 }
184 return ret; 184 return ret;
185} 185}
@@ -336,7 +336,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
336 ret = wait_for_completion_interruptible( 336 ret = wait_for_completion_interruptible(
337 &tresult.completion); 337 &tresult.completion);
338 if (!ret && !(ret = tresult.err)) { 338 if (!ret && !(ret = tresult.err)) {
339 INIT_COMPLETION(tresult.completion); 339 reinit_completion(&tresult.completion);
340 break; 340 break;
341 } 341 }
342 /* fall through */ 342 /* fall through */
@@ -543,7 +543,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
543 ret = wait_for_completion_interruptible( 543 ret = wait_for_completion_interruptible(
544 &result.completion); 544 &result.completion);
545 if (!ret && !(ret = result.err)) { 545 if (!ret && !(ret = result.err)) {
546 INIT_COMPLETION(result.completion); 546 reinit_completion(&result.completion);
547 break; 547 break;
548 } 548 }
549 case -EBADMSG: 549 case -EBADMSG:
@@ -697,7 +697,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
697 ret = wait_for_completion_interruptible( 697 ret = wait_for_completion_interruptible(
698 &result.completion); 698 &result.completion);
699 if (!ret && !(ret = result.err)) { 699 if (!ret && !(ret = result.err)) {
700 INIT_COMPLETION(result.completion); 700 reinit_completion(&result.completion);
701 break; 701 break;
702 } 702 }
703 case -EBADMSG: 703 case -EBADMSG:
@@ -983,7 +983,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
983 ret = wait_for_completion_interruptible( 983 ret = wait_for_completion_interruptible(
984 &result.completion); 984 &result.completion);
985 if (!ret && !((ret = result.err))) { 985 if (!ret && !((ret = result.err))) {
986 INIT_COMPLETION(result.completion); 986 reinit_completion(&result.completion);
987 break; 987 break;
988 } 988 }
989 /* fall through */ 989 /* fall through */
@@ -1086,7 +1086,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
1086 ret = wait_for_completion_interruptible( 1086 ret = wait_for_completion_interruptible(
1087 &result.completion); 1087 &result.completion);
1088 if (!ret && !((ret = result.err))) { 1088 if (!ret && !((ret = result.err))) {
1089 INIT_COMPLETION(result.completion); 1089 reinit_completion(&result.completion);
1090 break; 1090 break;
1091 } 1091 }
1092 /* fall through */ 1092 /* fall through */
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 77bbc8266883..92d7797223be 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3017,7 +3017,7 @@ static inline void ata_eh_pull_park_action(struct ata_port *ap)
3017 * ourselves at the beginning of each pass over the loop. 3017 * ourselves at the beginning of each pass over the loop.
3018 * 3018 *
3019 * Additionally, all write accesses to &ap->park_req_pending 3019 * Additionally, all write accesses to &ap->park_req_pending
3020 * through INIT_COMPLETION() (see below) or complete_all() 3020 * through reinit_completion() (see below) or complete_all()
3021 * (see ata_scsi_park_store()) are protected by the host lock. 3021 * (see ata_scsi_park_store()) are protected by the host lock.
3022 * As a result we have that park_req_pending.done is zero on 3022 * As a result we have that park_req_pending.done is zero on
3023 * exit from this function, i.e. when ATA_EH_PARK actions for 3023 * exit from this function, i.e. when ATA_EH_PARK actions for
@@ -3031,7 +3031,7 @@ static inline void ata_eh_pull_park_action(struct ata_port *ap)
3031 */ 3031 */
3032 3032
3033 spin_lock_irqsave(ap->lock, flags); 3033 spin_lock_irqsave(ap->lock, flags);
3034 INIT_COMPLETION(ap->park_req_pending); 3034 reinit_completion(&ap->park_req_pending);
3035 ata_for_each_link(link, ap, EDGE) { 3035 ata_for_each_link(link, ap, EDGE) {
3036 ata_for_each_dev(dev, link, ALL) { 3036 ata_for_each_dev(dev, link, ALL) {
3037 struct ata_eh_info *ehi = &link->eh_info; 3037 struct ata_eh_info *ehi = &link->eh_info;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ee039afe9078..c12e9b9556be 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -757,7 +757,7 @@ void dpm_resume(pm_message_t state)
757 async_error = 0; 757 async_error = 0;
758 758
759 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { 759 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
760 INIT_COMPLETION(dev->power.completion); 760 reinit_completion(&dev->power.completion);
761 if (is_async(dev)) { 761 if (is_async(dev)) {
762 get_device(dev); 762 get_device(dev);
763 async_schedule(async_resume, dev); 763 async_schedule(async_resume, dev);
@@ -1237,7 +1237,7 @@ static void async_suspend(void *data, async_cookie_t cookie)
1237 1237
1238static int device_suspend(struct device *dev) 1238static int device_suspend(struct device *dev)
1239{ 1239{
1240 INIT_COMPLETION(dev->power.completion); 1240 reinit_completion(&dev->power.completion);
1241 1241
1242 if (pm_async_enabled && dev->power.async_suspend) { 1242 if (pm_async_enabled && dev->power.async_suspend) {
1243 get_device(dev); 1243 get_device(dev);
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 4ff85b8785ee..748dea4f34dc 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -343,7 +343,7 @@ static int fd_motor_on(int nr)
343 unit[nr].motor = 1; 343 unit[nr].motor = 1;
344 fd_select(nr); 344 fd_select(nr);
345 345
346 INIT_COMPLETION(motor_on_completion); 346 reinit_completion(&motor_on_completion);
347 motor_on_timer.data = nr; 347 motor_on_timer.data = nr;
348 mod_timer(&motor_on_timer, jiffies + HZ/2); 348 mod_timer(&motor_on_timer, jiffies + HZ/2);
349 349
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 0c004ac05811..b35fc4f5237c 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2808,7 +2808,7 @@ resend_cmd2:
2808 /* erase the old error information */ 2808 /* erase the old error information */
2809 memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 2809 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2810 return_status = IO_OK; 2810 return_status = IO_OK;
2811 INIT_COMPLETION(wait); 2811 reinit_completion(&wait);
2812 goto resend_cmd2; 2812 goto resend_cmd2;
2813 } 2813 }
2814 2814
@@ -3669,7 +3669,7 @@ static int add_to_scan_list(struct ctlr_info *h)
3669 } 3669 }
3670 } 3670 }
3671 if (!found && !h->busy_scanning) { 3671 if (!found && !h->busy_scanning) {
3672 INIT_COMPLETION(h->scan_wait); 3672 reinit_completion(&h->scan_wait);
3673 list_add_tail(&h->scan_list, &scan_q); 3673 list_add_tail(&h->scan_list, &scan_q);
3674 ret = 1; 3674 ret = 1;
3675 } 3675 }
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index d2120ba8f3f9..73ce739f8e19 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -79,7 +79,7 @@ static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data)
79 priv->expires = cur + delay; 79 priv->expires = cur + delay;
80 priv->present = 0; 80 priv->present = 0;
81 81
82 INIT_COMPLETION(priv->completion); 82 reinit_completion(&priv->completion);
83 mod_timer(&priv->timer, priv->expires); 83 mod_timer(&priv->timer, priv->expires);
84 84
85 return 4; 85 return 4;
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index 2d58da972ae2..fa05e3c329bd 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -268,7 +268,7 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
268 aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT); 268 aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);
269 269
270 aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR); 270 aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
271 INIT_COMPLETION(dd->op_complete); 271 reinit_completion(&dd->op_complete);
272 272
273 for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) { 273 for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) {
274 do { 274 do {
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index e5af0e3a26ec..0e799516a2ab 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -477,7 +477,7 @@ void fw_send_phy_config(struct fw_card *card,
477 phy_config_packet.header[1] = data; 477 phy_config_packet.header[1] = data;
478 phy_config_packet.header[2] = ~data; 478 phy_config_packet.header[2] = ~data;
479 phy_config_packet.generation = generation; 479 phy_config_packet.generation = generation;
480 INIT_COMPLETION(phy_config_done); 480 reinit_completion(&phy_config_done);
481 481
482 card->driver->send_request(card, &phy_config_packet); 482 card->driver->send_request(card, &phy_config_packet);
483 wait_for_completion_timeout(&phy_config_done, timeout); 483 wait_for_completion_timeout(&phy_config_done, timeout);
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index e788882d9021..f9c7fa3d0012 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -34,7 +34,7 @@
34 */ 34 */
35void drm_flip_work_queue(struct drm_flip_work *work, void *val) 35void drm_flip_work_queue(struct drm_flip_work *work, void *val)
36{ 36{
37 if (kfifo_put(&work->fifo, (const void **)&val)) { 37 if (kfifo_put(&work->fifo, val)) {
38 atomic_inc(&work->pending); 38 atomic_inc(&work->pending);
39 } else { 39 } else {
40 DRM_ERROR("%s fifo full!\n", work->name); 40 DRM_ERROR("%s fifo full!\n", work->name);
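
kfifo_put() used to take a pointer to the element, which defeated type checking — any pointer would do after a cast, as the (const void **) cast being deleted here shows. It now takes the element by value, so the compiler checks it against the fifo's declared element type. A usage sketch under the new API (the fifo of ints is hypothetical):

    #include <linux/kfifo.h>

    static DEFINE_KFIFO(events, int, 16);  /* fifo holding 16 ints */

    static void push_event(int v)
    {
            /* pass the value itself; passing &v no longer compiles */
            if (!kfifo_put(&events, v))
                    pr_warn("event fifo full, dropping %d\n", v);
    }
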
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 1eb86c79523e..e28107061148 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -99,7 +99,7 @@ static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
99 i2c_dev->status = I2C_STAT_INIT; 99 i2c_dev->status = I2C_STAT_INIT;
100 i2c_dev->msg = pmsg; 100 i2c_dev->msg = pmsg;
101 i2c_dev->buf_offset = 0; 101 i2c_dev->buf_offset = 0;
102 INIT_COMPLETION(i2c_dev->complete); 102 reinit_completion(&i2c_dev->complete);
103 103
104 /* Enable I2C transaction */ 104 /* Enable I2C transaction */
105 temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION; 105 temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
index 75db0c400037..cfa63b0825b0 100644
--- a/drivers/hid/hid-wiimote.h
+++ b/drivers/hid/hid-wiimote.h
@@ -327,7 +327,7 @@ static inline void wiimote_cmd_acquire_noint(struct wiimote_data *wdata)
327static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd, 327static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
328 __u32 opt) 328 __u32 opt)
329{ 329{
330 INIT_COMPLETION(wdata->state.ready); 330 reinit_completion(&wdata->state.ready);
331 wdata->state.cmd = cmd; 331 wdata->state.cmd = cmd;
332 wdata->state.opt = opt; 332 wdata->state.opt = opt;
333} 333}
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index e0d66b9590ab..a183e488db78 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -66,7 +66,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
66 66
67 mutex_lock(&hwmon->lock); 67 mutex_lock(&hwmon->lock);
68 68
69 INIT_COMPLETION(*completion); 69 reinit_completion(completion);
70 70
71 enable_irq(hwmon->irq); 71 enable_irq(hwmon->irq);
72 hwmon->cell->enable(to_platform_device(dev)); 72 hwmon->cell->enable(to_platform_device(dev));
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index fd059308affa..8edba9de76df 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -371,7 +371,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
371 dev_dbg(dev->dev, "transfer: %s %d bytes.\n", 371 dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
372 (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len); 372 (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
373 373
374 INIT_COMPLETION(dev->cmd_complete); 374 reinit_completion(&dev->cmd_complete);
375 dev->transfer_status = 0; 375 dev->transfer_status = 0;
376 376
377 if (!dev->buf_len) { 377 if (!dev->buf_len) {
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index ea4b08fc3353..d7e8600f31fb 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -151,7 +151,7 @@ static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
151 151
152 i2c_dev->msg_buf = msg->buf; 152 i2c_dev->msg_buf = msg->buf;
153 i2c_dev->msg_buf_remaining = msg->len; 153 i2c_dev->msg_buf_remaining = msg->len;
154 INIT_COMPLETION(i2c_dev->completion); 154 reinit_completion(&i2c_dev->completion);
155 155
156 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR); 156 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
157 157
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 132369fad4e0..960dec61c64e 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -323,7 +323,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
323 323
324 davinci_i2c_write_reg(dev, DAVINCI_I2C_CNT_REG, dev->buf_len); 324 davinci_i2c_write_reg(dev, DAVINCI_I2C_CNT_REG, dev->buf_len);
325 325
326 INIT_COMPLETION(dev->cmd_complete); 326 reinit_completion(&dev->cmd_complete);
327 dev->cmd_err = 0; 327 dev->cmd_err = 0;
328 328
329 /* Take I2C out of reset and configure it as master */ 329 /* Take I2C out of reset and configure it as master */
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 5888feef1ac5..e89e3e2145e5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -613,7 +613,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
613 mutex_lock(&dev->lock); 613 mutex_lock(&dev->lock);
614 pm_runtime_get_sync(dev->dev); 614 pm_runtime_get_sync(dev->dev);
615 615
616 INIT_COMPLETION(dev->cmd_complete); 616 reinit_completion(&dev->cmd_complete);
617 dev->msgs = msgs; 617 dev->msgs = msgs;
618 dev->msgs_num = num; 618 dev->msgs_num = num;
619 dev->cmd_err = 0; 619 dev->cmd_err = 0;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 1672effbcebb..0043ede234c2 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -541,7 +541,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
541 desc->dptr_high = upper_32_bits(dma_addr); 541 desc->dptr_high = upper_32_bits(dma_addr);
542 } 542 }
543 543
544 INIT_COMPLETION(priv->cmp); 544 reinit_completion(&priv->cmp);
545 545
546 /* Add the descriptor */ 546 /* Add the descriptor */
547 ismt_submit_desc(priv); 547 ismt_submit_desc(priv);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index b7c857774708..3aedd86a6468 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -505,7 +505,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
505 return err; 505 return err;
506 } 506 }
507 } else { 507 } else {
508 INIT_COMPLETION(i2c->cmd_complete); 508 reinit_completion(&i2c->cmd_complete);
509 ret = mxs_i2c_dma_setup_xfer(adap, msg, flags); 509 ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
510 if (ret) 510 if (ret)
511 return ret; 511 return ret;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 9967a6f9c2ff..a6a891d7970d 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -543,7 +543,7 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
543 w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR; 543 w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
544 omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w); 544 omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w);
545 545
546 INIT_COMPLETION(dev->cmd_complete); 546 reinit_completion(&dev->cmd_complete);
547 dev->cmd_err = 0; 547 dev->cmd_err = 0;
548 548
549 w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT; 549 w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index c457cb447c66..e661edee4d0c 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -544,7 +544,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
544 i2c_dev->msg_buf_remaining = msg->len; 544 i2c_dev->msg_buf_remaining = msg->len;
545 i2c_dev->msg_err = I2C_ERR_NONE; 545 i2c_dev->msg_err = I2C_ERR_NONE;
546 i2c_dev->msg_read = (msg->flags & I2C_M_RD); 546 i2c_dev->msg_read = (msg->flags & I2C_M_RD);
547 INIT_COMPLETION(i2c_dev->msg_complete); 547 reinit_completion(&i2c_dev->msg_complete);
548 548
549 packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) | 549 packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
550 PACKET_HEADER0_PROTOCOL_I2C | 550 PACKET_HEADER0_PROTOCOL_I2C |
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index c65da3d913a0..31395fa8121d 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -158,7 +158,7 @@ static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
158 writew(val, i2c_dev->base + REG_CR); 158 writew(val, i2c_dev->base + REG_CR);
159 } 159 }
160 160
161 INIT_COMPLETION(i2c_dev->complete); 161 reinit_completion(&i2c_dev->complete);
162 162
163 if (i2c_dev->mode == I2C_MODE_STANDARD) 163 if (i2c_dev->mode == I2C_MODE_STANDARD)
164 tcr_val = TCR_STANDARD_MODE; 164 tcr_val = TCR_STANDARD_MODE;
@@ -247,7 +247,7 @@ static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
247 writew(val, i2c_dev->base + REG_CR); 247 writew(val, i2c_dev->base + REG_CR);
248 } 248 }
249 249
250 INIT_COMPLETION(i2c_dev->complete); 250 reinit_completion(&i2c_dev->complete);
251 251
252 if (i2c_dev->mode == I2C_MODE_STANDARD) 252 if (i2c_dev->mode == I2C_MODE_STANDARD)
253 tcr_val = TCR_STANDARD_MODE; 253 tcr_val = TCR_STANDARD_MODE;
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index e6fbd3e70981..9a4e0e32a771 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -188,7 +188,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
188 188
189 spi_bus_lock(sigma_delta->spi->master); 189 spi_bus_lock(sigma_delta->spi->master);
190 sigma_delta->bus_locked = true; 190 sigma_delta->bus_locked = true;
191 INIT_COMPLETION(sigma_delta->completion); 191 reinit_completion(&sigma_delta->completion);
192 192
193 ret = ad_sigma_delta_set_mode(sigma_delta, mode); 193 ret = ad_sigma_delta_set_mode(sigma_delta, mode);
194 if (ret < 0) 194 if (ret < 0)
@@ -259,7 +259,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
259 259
260 spi_bus_lock(sigma_delta->spi->master); 260 spi_bus_lock(sigma_delta->spi->master);
261 sigma_delta->bus_locked = true; 261 sigma_delta->bus_locked = true;
262 INIT_COMPLETION(sigma_delta->completion); 262 reinit_completion(&sigma_delta->completion);
263 263
264 ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE); 264 ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
265 265
@@ -343,7 +343,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
343{ 343{
344 struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev); 344 struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
345 345
346 INIT_COMPLETION(sigma_delta->completion); 346 reinit_completion(&sigma_delta->completion);
347 wait_for_completion_timeout(&sigma_delta->completion, HZ); 347 wait_for_completion_timeout(&sigma_delta->completion, HZ);
348 348
349 if (!sigma_delta->irq_dis) { 349 if (!sigma_delta->irq_dis) {
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index 54c5babe6746..e525aa6475c4 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -190,7 +190,7 @@ static int nau7802_read_irq(struct iio_dev *indio_dev,
190 struct nau7802_state *st = iio_priv(indio_dev); 190 struct nau7802_state *st = iio_priv(indio_dev);
191 int ret; 191 int ret;
192 192
193 INIT_COMPLETION(st->value_ok); 193 reinit_completion(&st->value_ok);
194 enable_irq(st->client->irq); 194 enable_irq(st->client->irq);
195 195
196 nau7802_sync(st); 196 nau7802_sync(st);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index dac15b9f9df8..c10eab64bc05 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -56,7 +56,7 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
56 ev.id = ev_code; 56 ev.id = ev_code;
57 ev.timestamp = timestamp; 57 ev.timestamp = timestamp;
58 58
59 copied = kfifo_put(&ev_int->det_events, &ev); 59 copied = kfifo_put(&ev_int->det_events, ev);
60 if (copied != 0) 60 if (copied != 0)
61 wake_up_locked_poll(&ev_int->wait, POLLIN); 61 wake_up_locked_poll(&ev_int->wait, POLLIN);
62 } 62 }
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index d53e0b72a407..4204841cdc49 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -242,7 +242,7 @@ static int cyttsp_soft_reset(struct cyttsp *ts)
242 int retval; 242 int retval;
243 243
244 /* wait for interrupt to set ready completion */ 244 /* wait for interrupt to set ready completion */
245 INIT_COMPLETION(ts->bl_ready); 245 reinit_completion(&ts->bl_ready);
246 ts->state = CY_BL_STATE; 246 ts->state = CY_BL_STATE;
247 247
248 enable_irq(ts->irq); 248 enable_irq(ts->irq);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 181c9ba929cd..2349d6272aef 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1212,7 +1212,10 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
1212 1212
1213 arm_smmu_flush_pgtable(smmu, page_address(table), 1213 arm_smmu_flush_pgtable(smmu, page_address(table),
1214 ARM_SMMU_PTE_HWTABLE_SIZE); 1214 ARM_SMMU_PTE_HWTABLE_SIZE);
1215 pgtable_page_ctor(table); 1215 if (!pgtable_page_ctor(table)) {
1216 __free_page(table);
1217 return -ENOMEM;
1218 }
1216 pmd_populate(NULL, pmd, table); 1219 pmd_populate(NULL, pmd, table);
1217 arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); 1220 arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
1218 } 1221 }
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 50ea7ed24dce..81b0fa660452 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -950,7 +950,7 @@ static int crypt_convert(struct crypt_config *cc,
950 /* async */ 950 /* async */
951 case -EBUSY: 951 case -EBUSY:
952 wait_for_completion(&ctx->restart); 952 wait_for_completion(&ctx->restart);
953 INIT_COMPLETION(ctx->restart); 953 reinit_completion(&ctx->restart);
954 /* fall through*/ 954 /* fall through*/
955 case -EINPROGRESS: 955 case -EINPROGRESS:
956 this_cc->req = NULL; 956 this_cc->req = NULL;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f8b906843926..7f0e17a27aeb 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -293,20 +293,6 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
293 do_release_stripe(conf, sh); 293 do_release_stripe(conf, sh);
294} 294}
295 295
296static struct llist_node *llist_reverse_order(struct llist_node *head)
297{
298 struct llist_node *new_head = NULL;
299
300 while (head) {
301 struct llist_node *tmp = head;
302 head = head->next;
303 tmp->next = new_head;
304 new_head = tmp;
305 }
306
307 return new_head;
308}
309
310/* should hold conf->device_lock already */ 296/* should hold conf->device_lock already */
311static int release_stripe_list(struct r5conf *conf) 297static int release_stripe_list(struct r5conf *conf)
312{ 298{
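
The helper is useful beyond raid5: llist_del_all() hands back entries newest-first, so any consumer that wants FIFO order needs this reversal, hence the move into lib/llist.c. A usage sketch against raid5's own release list (field names as in raid5.h):

    #include <linux/llist.h>

    struct llist_node *pos, *node;

    /* detach the whole lock-free list, then walk it oldest-first */
    node = llist_reverse_order(llist_del_all(&conf->released_stripes));
    llist_for_each(pos, node) {
            struct stripe_head *sh =
                    llist_entry(pos, struct stripe_head, release_list);
            /* ... process sh ... */
    }
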
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 4c1105977090..281916591437 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -422,7 +422,7 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
422 return ret; 422 return ret;
423 } 423 }
424 424
425 INIT_COMPLETION(bcap_dev->comp); 425 reinit_completion(&bcap_dev->comp);
426 bcap_dev->stop = false; 426 bcap_dev->stop = false;
427 return 0; 427 return 0;
428} 428}
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 97c2c18803ef..9cf6731fb816 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -375,7 +375,7 @@ static int wl1273_fm_set_tx_freq(struct wl1273_device *radio, unsigned int freq)
375 if (r) 375 if (r)
376 return r; 376 return r;
377 377
378 INIT_COMPLETION(radio->busy); 378 reinit_completion(&radio->busy);
379 379
380 /* wait for the FR IRQ */ 380 /* wait for the FR IRQ */
381 r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000)); 381 r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
@@ -389,7 +389,7 @@ static int wl1273_fm_set_tx_freq(struct wl1273_device *radio, unsigned int freq)
389 if (r) 389 if (r)
390 return r; 390 return r;
391 391
392 INIT_COMPLETION(radio->busy); 392 reinit_completion(&radio->busy);
393 393
394 /* wait for the POWER_ENB IRQ */ 394 /* wait for the POWER_ENB IRQ */
395 r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000)); 395 r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
@@ -444,7 +444,7 @@ static int wl1273_fm_set_rx_freq(struct wl1273_device *radio, unsigned int freq)
444 goto err; 444 goto err;
445 } 445 }
446 446
447 INIT_COMPLETION(radio->busy); 447 reinit_completion(&radio->busy);
448 448
449 r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000)); 449 r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
450 if (!r) { 450 if (!r) {
@@ -805,7 +805,7 @@ static int wl1273_fm_set_seek(struct wl1273_device *radio,
805 if (level < SCHAR_MIN || level > SCHAR_MAX) 805 if (level < SCHAR_MIN || level > SCHAR_MAX)
806 return -EINVAL; 806 return -EINVAL;
807 807
808 INIT_COMPLETION(radio->busy); 808 reinit_completion(&radio->busy);
809 dev_dbg(radio->dev, "%s: BUSY\n", __func__); 809 dev_dbg(radio->dev, "%s: BUSY\n", __func__);
810 810
811 r = core->write(core, WL1273_INT_MASK_SET, radio->irq_flags); 811 r = core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
@@ -847,7 +847,7 @@ static int wl1273_fm_set_seek(struct wl1273_device *radio,
847 if (r) 847 if (r)
848 goto out; 848 goto out;
849 849
850 INIT_COMPLETION(radio->busy); 850 reinit_completion(&radio->busy);
851 dev_dbg(radio->dev, "%s: BUSY\n", __func__); 851 dev_dbg(radio->dev, "%s: BUSY\n", __func__);
852 852
853 r = core->write(core, WL1273_TUNER_MODE_SET, TUNER_MODE_AUTO_SEEK); 853 r = core->write(core, WL1273_TUNER_MODE_SET, TUNER_MODE_AUTO_SEEK);
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 5c57e5b0f949..0bd250068285 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -218,7 +218,7 @@ static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
218 goto done; 218 goto done;
219 219
220 /* wait till tune operation has completed */ 220 /* wait till tune operation has completed */
221 INIT_COMPLETION(radio->completion); 221 reinit_completion(&radio->completion);
222 retval = wait_for_completion_timeout(&radio->completion, 222 retval = wait_for_completion_timeout(&radio->completion,
223 msecs_to_jiffies(tune_timeout)); 223 msecs_to_jiffies(tune_timeout));
224 if (!retval) 224 if (!retval)
@@ -341,7 +341,7 @@ static int si470x_set_seek(struct si470x_device *radio,
341 return retval; 341 return retval;
342 342
343 /* wait till tune operation has completed */ 343 /* wait till tune operation has completed */
344 INIT_COMPLETION(radio->completion); 344 reinit_completion(&radio->completion);
345 retval = wait_for_completion_timeout(&radio->completion, 345 retval = wait_for_completion_timeout(&radio->completion,
346 msecs_to_jiffies(seek_timeout)); 346 msecs_to_jiffies(seek_timeout));
347 if (!retval) 347 if (!retval)
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 19632b1c2190..b53626ba6f49 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -207,7 +207,7 @@ static int iguanair_send(struct iguanair *ir, unsigned size)
207{ 207{
208 int rc; 208 int rc;
209 209
210 INIT_COMPLETION(ir->completion); 210 reinit_completion(&ir->completion);
211 211
212 ir->urb_out->transfer_buffer_length = size; 212 ir->urb_out->transfer_buffer_length = size;
213 rc = usb_submit_urb(ir->urb_out, GFP_KERNEL); 213 rc = usb_submit_urb(ir->urb_out, GFP_KERNEL);
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index bbf4aea1627d..a0547dbf9806 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -253,7 +253,7 @@ void memstick_new_req(struct memstick_host *host)
253{ 253{
254 if (host->card) { 254 if (host->card) {
255 host->retries = cmd_retries; 255 host->retries = cmd_retries;
256 INIT_COMPLETION(host->card->mrq_complete); 256 reinit_completion(&host->card->mrq_complete);
257 host->request(host); 257 host->request(host);
258 } 258 }
259} 259}
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 9188ef5d677e..24f2f8473dee 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -401,10 +401,10 @@ again:
401 sizeof(struct ms_status_register))) 401 sizeof(struct ms_status_register)))
402 return 0; 402 return 0;
403 403
404 msb->state = MSB_RP_RECIVE_STATUS_REG; 404 msb->state = MSB_RP_RECEIVE_STATUS_REG;
405 return 0; 405 return 0;
406 406
407 case MSB_RP_RECIVE_STATUS_REG: 407 case MSB_RP_RECEIVE_STATUS_REG:
408 msb->regs.status = *(struct ms_status_register *)mrq->data; 408 msb->regs.status = *(struct ms_status_register *)mrq->data;
409 msb->state = MSB_RP_SEND_OOB_READ; 409 msb->state = MSB_RP_SEND_OOB_READ;
410 /* fallthrough */ 410 /* fallthrough */
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
index 96e637550988..c75198dbf139 100644
--- a/drivers/memstick/core/ms_block.h
+++ b/drivers/memstick/core/ms_block.h
@@ -223,7 +223,7 @@ enum msb_readpage_states {
223 MSB_RP_RECEIVE_INT_REQ_RESULT, 223 MSB_RP_RECEIVE_INT_REQ_RESULT,
224 224
225 MSB_RP_SEND_READ_STATUS_REG, 225 MSB_RP_SEND_READ_STATUS_REG,
226 MSB_RP_RECIVE_STATUS_REG, 226 MSB_RP_RECEIVE_STATUS_REG,
227 227
228 MSB_RP_SEND_OOB_READ, 228 MSB_RP_SEND_OOB_READ,
229 MSB_RP_RECEIVE_OOB_READ, 229 MSB_RP_RECEIVE_OOB_READ,
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 1b6e91345222..31727bf285d0 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -290,7 +290,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev)
290 dbg_verbose("doing dma transfer"); 290 dbg_verbose("doing dma transfer");
291 291
292 dev->dma_error = 0; 292 dev->dma_error = 0;
293 INIT_COMPLETION(dev->dma_done); 293 reinit_completion(&dev->dma_done);
294 294
295 /* TODO: hidden assumption about nents being always 1 */ 295 /* TODO: hidden assumption about nents being always 1 */
296 sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? 296 sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index 914cc9b2caad..8aa42e738acc 100644
--- a/drivers/misc/mic/card/mic_virtio.c
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -493,7 +493,7 @@ static int mic_remove_device(struct mic_device_desc __iomem *d,
493 ioread8(&dc->config_change), ioread8(&d->type), mvdev); 493 ioread8(&dc->config_change), ioread8(&d->type), mvdev);
494 494
495 status = ioread8(&d->status); 495 status = ioread8(&d->status);
496 INIT_COMPLETION(mvdev->reset_done); 496 reinit_completion(&mvdev->reset_done);
497 unregister_virtio_device(&mvdev->vdev); 497 unregister_virtio_device(&mvdev->vdev);
498 mic_free_card_irq(mvdev->virtio_cookie, mvdev); 498 mic_free_card_irq(mvdev->virtio_cookie, mvdev);
499 if (status & VIRTIO_CONFIG_S_DRIVER_OK) 499 if (status & VIRTIO_CONFIG_S_DRIVER_OK)
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index b079c65eed6d..7558d9186438 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -38,7 +38,7 @@ static void mic_reset(struct mic_device *mdev)
38 38
39#define MIC_RESET_TO (45) 39#define MIC_RESET_TO (45)
40 40
41 INIT_COMPLETION(mdev->reset_wait); 41 reinit_completion(&mdev->reset_wait);
42 mdev->ops->reset_fw_ready(mdev); 42 mdev->ops->reset_fw_ready(mdev);
43 mdev->ops->reset(mdev); 43 mdev->ops->reset(mdev);
44 44
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 83907c720594..96853a09788a 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -218,7 +218,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
218 218
219 pr_debug("%s", __func__); 219 pr_debug("%s", __func__);
220 220
221 INIT_COMPLETION(kim_gdata->kim_rcvd); 221 reinit_completion(&kim_gdata->kim_rcvd);
222 if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) { 222 if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
223 pr_err("kim: couldn't write 4 bytes"); 223 pr_err("kim: couldn't write 4 bytes");
224 return -EIO; 224 return -EIO;
@@ -229,7 +229,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
229 pr_err(" waiting for ver info- timed out "); 229 pr_err(" waiting for ver info- timed out ");
230 return -ETIMEDOUT; 230 return -ETIMEDOUT;
231 } 231 }
232 INIT_COMPLETION(kim_gdata->kim_rcvd); 232 reinit_completion(&kim_gdata->kim_rcvd);
233 /* the positions 12 & 13 in the response buffer provide with the 233 /* the positions 12 & 13 in the response buffer provide with the
234 * chip, major & minor numbers 234 * chip, major & minor numbers
235 */ 235 */
@@ -362,7 +362,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
362 /* reinit completion before sending for the 362 /* reinit completion before sending for the
363 * relevant wait 363 * relevant wait
364 */ 364 */
365 INIT_COMPLETION(kim_gdata->kim_rcvd); 365 reinit_completion(&kim_gdata->kim_rcvd);
366 366
367 /* 367 /*
368 * Free space found in uart buffer, call st_int_write 368 * Free space found in uart buffer, call st_int_write
@@ -398,7 +398,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
398 release_firmware(kim_gdata->fw_entry); 398 release_firmware(kim_gdata->fw_entry);
399 return -ETIMEDOUT; 399 return -ETIMEDOUT;
400 } 400 }
401 INIT_COMPLETION(kim_gdata->kim_rcvd); 401 reinit_completion(&kim_gdata->kim_rcvd);
402 break; 402 break;
403 case ACTION_DELAY: /* sleep */ 403 case ACTION_DELAY: /* sleep */
404 pr_info("sleep command in scr"); 404 pr_info("sleep command in scr");
@@ -474,7 +474,7 @@ long st_kim_start(void *kim_data)
474 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); 474 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
475 mdelay(100); 475 mdelay(100);
476 /* re-initialize the completion */ 476 /* re-initialize the completion */
477 INIT_COMPLETION(kim_gdata->ldisc_installed); 477 reinit_completion(&kim_gdata->ldisc_installed);
478 /* send notification to UIM */ 478 /* send notification to UIM */
479 kim_gdata->ldisc_install = 1; 479 kim_gdata->ldisc_install = 1;
480 pr_info("ldisc_install = 1"); 480 pr_info("ldisc_install = 1");
@@ -525,7 +525,7 @@ long st_kim_stop(void *kim_data)
525 kim_gdata->kim_pdev->dev.platform_data; 525 kim_gdata->kim_pdev->dev.platform_data;
526 struct tty_struct *tty = kim_gdata->core_data->tty; 526 struct tty_struct *tty = kim_gdata->core_data->tty;
527 527
528 INIT_COMPLETION(kim_gdata->ldisc_installed); 528 reinit_completion(&kim_gdata->ldisc_installed);
529 529
530 if (tty) { /* can be called before ldisc is installed */ 530 if (tty) { /* can be called before ldisc is installed */
531 /* Flush any pending characters in the driver and discipline. */ 531 /* Flush any pending characters in the driver and discipline. */
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 4edea7f4462f..9dfdb06c508b 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -396,7 +396,7 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
396 396
397 if (useirq) { 397 if (useirq) {
398 if (!host->devtype_data->check_int(host)) { 398 if (!host->devtype_data->check_int(host)) {
399 INIT_COMPLETION(host->op_completion); 399 reinit_completion(&host->op_completion);
400 irq_control(host, 1); 400 irq_control(host, 1);
401 wait_for_completion(&host->op_completion); 401 wait_for_completion(&host->op_completion);
402 } 402 }
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 9dcf02d22aa8..325930db3f04 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -181,7 +181,7 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
181 /* Set dma direction */ 181 /* Set dma direction */
182 dev->dma_dir = do_read; 182 dev->dma_dir = do_read;
183 dev->dma_stage = 1; 183 dev->dma_stage = 1;
184 INIT_COMPLETION(dev->dma_done); 184 reinit_completion(&dev->dma_done);
185 185
186 dbg_verbose("doing dma %s ", do_read ? "read" : "write"); 186 dbg_verbose("doing dma %s ", do_read ? "read" : "write");
187 187
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 2362909d20c0..6547c84afc3a 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -159,7 +159,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
159 syscfg = read_reg(c, ONENAND_REG_SYS_CFG1); 159 syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
160 } 160 }
161 161
162 INIT_COMPLETION(c->irq_done); 162 reinit_completion(&c->irq_done);
163 if (c->gpio_irq) { 163 if (c->gpio_irq) {
164 result = gpio_get_value(c->gpio_irq); 164 result = gpio_get_value(c->gpio_irq);
165 if (result == -1) { 165 if (result == -1) {
@@ -349,7 +349,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
349 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, 349 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
350 dma_dst, 0, 0); 350 dma_dst, 0, 0);
351 351
352 INIT_COMPLETION(c->dma_done); 352 reinit_completion(&c->dma_done);
353 omap_start_dma(c->dma_channel); 353 omap_start_dma(c->dma_channel);
354 354
355 timeout = jiffies + msecs_to_jiffies(20); 355 timeout = jiffies + msecs_to_jiffies(20);
@@ -420,7 +420,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
420 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, 420 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
421 dma_dst, 0, 0); 421 dma_dst, 0, 0);
422 422
423 INIT_COMPLETION(c->dma_done); 423 reinit_completion(&c->dma_done);
424 omap_start_dma(c->dma_channel); 424 omap_start_dma(c->dma_channel);
425 425
426 timeout = jiffies + msecs_to_jiffies(20); 426 timeout = jiffies + msecs_to_jiffies(20);
@@ -499,7 +499,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
499 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, 499 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
500 dma_dst, 0, 0); 500 dma_dst, 0, 0);
501 501
502 INIT_COMPLETION(c->dma_done); 502 reinit_completion(&c->dma_done);
503 omap_start_dma(c->dma_channel); 503 omap_start_dma(c->dma_channel);
504 wait_for_completion(&c->dma_done); 504 wait_for_completion(&c->dma_done);
505 505
@@ -544,7 +544,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
544 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, 544 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
545 dma_dst, 0, 0); 545 dma_dst, 0, 0);
546 546
547 INIT_COMPLETION(c->dma_done); 547 reinit_completion(&c->dma_done);
548 omap_start_dma(c->dma_channel); 548 omap_start_dma(c->dma_channel);
549 wait_for_completion(&c->dma_done); 549 wait_for_completion(&c->dma_done);
550 550
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 09810ddd11ec..a01a6a74ee3a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3537,7 +3537,7 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
3537 3537
3538void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx) 3538void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
3539{ 3539{
3540 INIT_COMPLETION(mbx->completion); 3540 reinit_completion(&mbx->completion);
3541 set_bit(QLC_83XX_MBX_READY, &mbx->status); 3541 set_bit(QLC_83XX_MBX_READY, &mbx->status);
3542} 3542}
3543 3543
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 6f10b4964726..2cbe1c249996 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -561,7 +561,7 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
561 561
562 spin_lock_irqsave(&lp->lock, flags); 562 spin_lock_irqsave(&lp->lock, flags);
563 lp->is_tx = 1; 563 lp->is_tx = 1;
564 INIT_COMPLETION(lp->tx_complete); 564 reinit_completion(&lp->tx_complete);
565 spin_unlock_irqrestore(&lp->lock, flags); 565 spin_unlock_irqrestore(&lp->lock, flags);
566 566
567 rc = at86rf230_write_fbuf(lp, skb->data, skb->len); 567 rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 0632d34905c7..c6e46d6e9f75 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -343,7 +343,7 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
343 if (ret) 343 if (ret)
344 goto err; 344 goto err;
345 345
346 INIT_COMPLETION(devrec->tx_complete); 346 reinit_completion(&devrec->tx_complete);
347 347
348 /* Set TXNTRIG bit of TXNCON to send packet */ 348 /* Set TXNTRIG bit of TXNCON to send packet */
349 ret = read_short_reg(devrec, REG_TXNCON, &val); 349 ret = read_short_reg(devrec, REG_TXNCON, &val);
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 3118d7506734..edae50b52806 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -534,7 +534,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
534 u16 credit_count; 534 u16 credit_count;
535 u16 credit_size; 535 u16 credit_size;
536 536
537 INIT_COMPLETION(htc->ctl_resp); 537 reinit_completion(&htc->ctl_resp);
538 538
539 status = ath10k_hif_start(htc->ar); 539 status = ath10k_hif_start(htc->ar);
540 if (status) { 540 if (status) {
@@ -669,7 +669,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
669 req_msg->flags = __cpu_to_le16(flags); 669 req_msg->flags = __cpu_to_le16(flags);
670 req_msg->service_id = __cpu_to_le16(conn_req->service_id); 670 req_msg->service_id = __cpu_to_le16(conn_req->service_id);
671 671
672 INIT_COMPLETION(htc->ctl_resp); 672 reinit_completion(&htc->ctl_resp);
673 673
674 status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb); 674 status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
675 if (status) { 675 if (status) {
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0b1cc516e778..97ac8c87cba2 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -92,7 +92,7 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
92 92
93 lockdep_assert_held(&ar->conf_mutex); 93 lockdep_assert_held(&ar->conf_mutex);
94 94
95 INIT_COMPLETION(ar->install_key_done); 95 reinit_completion(&ar->install_key_done);
96 96
97 ret = ath10k_send_key(arvif, key, cmd, macaddr); 97 ret = ath10k_send_key(arvif, key, cmd, macaddr);
98 if (ret) 98 if (ret)
@@ -438,7 +438,7 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
438 438
439 lockdep_assert_held(&ar->conf_mutex); 439 lockdep_assert_held(&ar->conf_mutex);
440 440
441 INIT_COMPLETION(ar->vdev_setup_done); 441 reinit_completion(&ar->vdev_setup_done);
442 442
443 arg.vdev_id = arvif->vdev_id; 443 arg.vdev_id = arvif->vdev_id;
444 arg.dtim_period = arvif->dtim_period; 444 arg.dtim_period = arvif->dtim_period;
@@ -491,7 +491,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
491 491
492 lockdep_assert_held(&ar->conf_mutex); 492 lockdep_assert_held(&ar->conf_mutex);
493 493
494 INIT_COMPLETION(ar->vdev_setup_done); 494 reinit_completion(&ar->vdev_setup_done);
495 495
496 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); 496 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
497 if (ret) { 497 if (ret) {
@@ -1666,7 +1666,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
 		}
 
 		spin_lock_bh(&ar->data_lock);
-		INIT_COMPLETION(ar->offchan_tx_completed);
+		reinit_completion(&ar->offchan_tx_completed);
 		ar->offchan_tx_skb = skb;
 		spin_unlock_bh(&ar->data_lock);
 
@@ -2476,8 +2476,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
 		goto exit;
 	}
 
-	INIT_COMPLETION(ar->scan.started);
-	INIT_COMPLETION(ar->scan.completed);
+	reinit_completion(&ar->scan.started);
+	reinit_completion(&ar->scan.completed);
 	ar->scan.in_progress = true;
 	ar->scan.aborting = false;
 	ar->scan.is_roc = false;
@@ -2832,9 +2832,9 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
 		goto exit;
 	}
 
-	INIT_COMPLETION(ar->scan.started);
-	INIT_COMPLETION(ar->scan.completed);
-	INIT_COMPLETION(ar->scan.on_channel);
+	reinit_completion(&ar->scan.started);
+	reinit_completion(&ar->scan.completed);
+	reinit_completion(&ar->scan.on_channel);
 	ar->scan.in_progress = true;
 	ar->scan.aborting = false;
 	ar->scan.is_roc = true;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 307bc0ddff99..ca115f33746f 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -773,7 +773,7 @@ void carl9170_usb_stop(struct ar9170 *ar)
 	complete_all(&ar->cmd_wait);
 
 	/* This is required to prevent an early completion on _start */
-	INIT_COMPLETION(ar->cmd_wait);
+	reinit_completion(&ar->cmd_wait);
 
 	/*
 	 * Note:
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 0a2844c48a60..fd30cddd5882 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -250,7 +250,7 @@ int wil_reset(struct wil6210_priv *wil)
 
 	/* init after reset */
 	wil->pending_connect_cid = -1;
-	INIT_COMPLETION(wil->wmi_ready);
+	reinit_completion(&wil->wmi_ready);
 
 	/* TODO: release MAC reset */
 	wil6210_enable_irq(wil);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index d7a974532909..5b5b952d47b1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -1148,7 +1148,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
 
 	pri_vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
 
-	INIT_COMPLETION(afx_hdl->act_frm_scan);
+	reinit_completion(&afx_hdl->act_frm_scan);
 	set_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
 	afx_hdl->is_active = true;
 	afx_hdl->peer_chan = P2P_INVALID_CHANNEL;
@@ -1501,7 +1501,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	INIT_COMPLETION(p2p->send_af_done);
+	reinit_completion(&p2p->send_af_done);
 	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
 	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
 
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/rt2x00/rt2800mmio.c
index ae152280e071..a8cc736b5063 100644
--- a/drivers/net/wireless/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.c
@@ -446,7 +446,7 @@ static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
 		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
 			break;
 
-		if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
+		if (!kfifo_put(&rt2x00dev->txstatus_fifo, status)) {
 			rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
 			break;
 		}
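
The two rt2x00 hunks here and the AER change below are fallout from the type-safe kfifo rework in this series: kfifo_put() now takes the element by value and type-checks it against the fifo's declared element type, instead of taking a pointer. A minimal sketch of the new convention (the fifo and helper names are illustrative, not from these drivers):

	#include <linux/kfifo.h>

	/* Hypothetical fifo of u32 status words; size must be a power of two. */
	static DEFINE_KFIFO(status_fifo, u32, 16);

	static void push_status(u32 status)
	{
		/* Old API: kfifo_put(&status_fifo, &status) took a pointer;
		 * the reworked macro takes the element itself, checks its type
		 * against the fifo definition, and returns 0 when full. */
		if (!kfifo_put(&status_fifo, status))
			pr_warn("status fifo full, dropping entry\n");
	}
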
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 997df03a0c2e..a81ceb61d746 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -164,7 +164,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
 
 	valid = rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID);
 	if (valid) {
-		if (!kfifo_put(&rt2x00dev->txstatus_fifo, &tx_status))
+		if (!kfifo_put(&rt2x00dev->txstatus_fifo, tx_status))
 			rt2x00_warn(rt2x00dev, "TX status FIFO overrun\n");
 
 		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 7ef0b4a181e1..84d94f572a46 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1619,7 +1619,7 @@ static void prepare_read_regs_int(struct zd_usb *usb,
 	atomic_set(&intr->read_regs_enabled, 1);
 	intr->read_regs.req = req;
 	intr->read_regs.req_count = count;
-	INIT_COMPLETION(intr->read_regs.completion);
+	reinit_completion(&intr->read_regs.completion);
 	spin_unlock_irq(&intr->lock);
 }
 
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index d4716273651e..c864f82bd37d 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -1331,7 +1331,7 @@ static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
 			break;
 
 		/* Initialize mutex used to take interrupts into account */
-		INIT_COMPLETION(priv->irq_complete);
+		reinit_completion(&priv->irq_complete);
 
 		/* Enable serviceIntr */
 		parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
@@ -1446,7 +1446,7 @@ static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
 	priv->irq_mode = PARPORT_IP32_IRQ_HERE;
 
 	parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len);
-	INIT_COMPLETION(priv->irq_complete);
+	reinit_completion(&priv->irq_complete);
 	parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);
 
 	nfault_timeout = min((unsigned long)physport->cad->timeout,
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 85ca36f2136d..6b3a958e1be6 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -574,7 +574,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
 	};
 
 	spin_lock_irqsave(&aer_recover_ring_lock, flags);
-	if (kfifo_put(&aer_recover_ring, &entry))
+	if (kfifo_put(&aer_recover_ring, entry))
 		schedule_work(&aer_recover_work);
 	else
 		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 8eea2efbbb6d..605a9be55129 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -289,7 +289,7 @@ static int gmux_switchto(enum vga_switcheroo_client_id id)
 static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data,
 				   enum vga_switcheroo_state state)
 {
-	INIT_COMPLETION(gmux_data->powerchange_done);
+	reinit_completion(&gmux_data->powerchange_done);
 
 	if (state == VGA_SWITCHEROO_ON) {
 		gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 754970717c31..3cb4178e397c 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -574,8 +574,8 @@ int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
 	}
 
 	/* Return and WFI */
-	INIT_COMPLETION(di->ab8500_fg_started);
-	INIT_COMPLETION(di->ab8500_fg_complete);
+	reinit_completion(&di->ab8500_fg_started);
+	reinit_completion(&di->ab8500_fg_complete);
 	enable_irq(di->irq);
 
 	/* Note: cc_lock is still locked */
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index d9686aa9270a..6c8931d4ad62 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -73,7 +73,7 @@ static long jz_battery_read_voltage(struct jz_battery *battery)
 
 	mutex_lock(&battery->lock);
 
-	INIT_COMPLETION(battery->read_completion);
+	reinit_completion(&battery->read_completion);
 
 	enable_irq(battery->irq);
 	battery->cell->enable(battery->pdev);
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 45560ffb038d..965a9da70867 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -209,7 +209,7 @@ static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
 		platform_get_drvdata(to_platform_device(dev));
 	int ret;
 
-	INIT_COMPLETION(time_state->comp_last_time);
+	reinit_completion(&time_state->comp_last_time);
 	/* get a report with all values through requesting one value */
 	sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
 			HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
@@ -236,7 +236,7 @@ static const struct rtc_class_ops hid_time_rtc_ops = {
 static int hid_time_probe(struct platform_device *pdev)
 {
 	int ret = 0;
-	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
 	struct hid_time_state *time_state = devm_kzalloc(&pdev->dev,
 		sizeof(struct hid_time_state), GFP_KERNEL);
 
@@ -281,11 +281,18 @@ static int hid_time_probe(struct platform_device *pdev)
 		goto err_open;
 	}
 
+	/*
+	 * Enable HID input processing early in order to be able to read the
+	 * clock already in devm_rtc_device_register().
+	 */
+	hid_device_io_start(hsdev->hdev);
+
 	time_state->rtc = devm_rtc_device_register(&pdev->dev,
 				"hid-sensor-time", &hid_time_rtc_ops,
 				THIS_MODULE);
 
 	if (IS_ERR_OR_NULL(time_state->rtc)) {
+		hid_device_io_stop(hsdev->hdev);
 		ret = time_state->rtc ? PTR_ERR(time_state->rtc) : -ENODEV;
 		time_state->rtc = NULL;
 		dev_err(&pdev->dev, "rtc device register failed!\n");
@@ -303,7 +310,7 @@ err_open:
 
 static int hid_time_remove(struct platform_device *pdev)
 {
-	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
 
 	sensor_hub_device_close(hsdev);
 	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
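
The probe reordering above relies on hid_device_io_start(), which lifts the HID core's hold on input processing during ->probe() so that the report callback completing comp_last_time can run while devm_rtc_device_register() reads the clock; hid_device_io_stop() restores the hold on the error path. The general shape, sketched with a hypothetical synchronous read helper:

	/* Sketch only; hdev is the underlying struct hid_device, and
	 * read_clock_synchronously() stands in for any call that blocks
	 * waiting on an input report arriving during probe. */
	hid_device_io_start(hdev);		/* reports may now reach us */
	ret = read_clock_synchronously();
	if (ret)
		hid_device_io_stop(hdev);	/* back to the probe-time hold */
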
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 4c332143a310..3ed666fe840a 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -217,7 +217,7 @@ static int bcm2835_spi_start_transfer(struct spi_device *spi,
 		cs |= spi->chip_select;
 	}
 
-	INIT_COMPLETION(bs->done);
+	reinit_completion(&bs->done);
 	bs->tx_buf = tfr->tx_buf;
 	bs->rx_buf = tfr->rx_buf;
 	bs->len = tfr->len;
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index e2a5a426b2ef..6f03d7e6435d 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -105,7 +105,7 @@ static int spi_clps711x_transfer_one_message(struct spi_master *master,
 
 		gpio_set_value(cs, !!(msg->spi->mode & SPI_CS_HIGH));
 
-		INIT_COMPLETION(hw->done);
+		reinit_completion(&hw->done);
 
 		hw->count = 0;
 		hw->len = xfer->len;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index dd72445ba2ea..50b2d88c8190 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -554,7 +554,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
 	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
 
-	INIT_COMPLETION(dspi->done);
+	reinit_completion(&dspi->done);
 
 	if (spicfg->io_type == SPI_IO_TYPE_INTR)
 		set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 32200d4f8780..80d8f40f7e05 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -232,7 +232,7 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
 	mpc8xxx_spi->tx = t->tx_buf;
 	mpc8xxx_spi->rx = t->rx_buf;
 
-	INIT_COMPLETION(mpc8xxx_spi->done);
+	reinit_completion(&mpc8xxx_spi->done);
 
 	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
 	if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 2129fcd1c31b..119f7af94537 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -339,7 +339,7 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
 	mpc8xxx_spi->tx = t->tx_buf;
 	mpc8xxx_spi->rx = t->rx_buf;
 
-	INIT_COMPLETION(mpc8xxx_spi->done);
+	reinit_completion(&mpc8xxx_spi->done);
 
 	if (mpc8xxx_spi->flags & SPI_CPM_MODE)
 		ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 58d5ee0e4443..9602bbd8d7ea 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -167,7 +167,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
 	}
 
 	/* have the ISR trigger when the TX FIFO is empty */
-	INIT_COMPLETION(mps->txisrdone);
+	reinit_completion(&mps->txisrdone);
 	out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
 	out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
 	wait_for_completion(&mps->txisrdone);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index de333059a9a7..73afb56c08cc 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -202,7 +202,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
 	if (!dma_xfer)
 		return -ENOMEM;
 
-	INIT_COMPLETION(spi->c);
+	reinit_completion(&spi->c);
 
 	/* Chip select was already programmed into CTRL0 */
 	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 9e2020df9e0f..4c4b0a1219a7 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -890,7 +890,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 	unsigned long flags;
 	int use_dma;
 
-	INIT_COMPLETION(sdd->xfer_completion);
+	reinit_completion(&sdd->xfer_completion);
 
 	/* Only BPW and Speed may change across transfers */
 	bpw = xfer->bits_per_word;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 2a95435a6a11..c74298cf70e2 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -465,7 +465,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
 	ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
 
 	/* start by setting frame bit */
-	INIT_COMPLETION(p->done);
+	reinit_completion(&p->done);
 	ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
 	if (ret) {
 		dev_err(&p->pdev->dev, "failed to start hardware\n");
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 592b4aff651f..ed5e501c4652 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -305,8 +305,8 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
 	sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
 	sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
 	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
-	INIT_COMPLETION(sspi->rx_done);
-	INIT_COMPLETION(sspi->tx_done);
+	reinit_completion(&sspi->rx_done);
+	reinit_completion(&sspi->tx_done);
 
 	writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
 
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 9146bb3c2489..aaecfb3ebf58 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -451,7 +451,7 @@ static void tegra_spi_dma_complete(void *args)
 
 static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
 {
-	INIT_COMPLETION(tspi->tx_dma_complete);
+	reinit_completion(&tspi->tx_dma_complete);
 	tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
 				tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -470,7 +470,7 @@ static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
 
 static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
 {
-	INIT_COMPLETION(tspi->rx_dma_complete);
+	reinit_completion(&tspi->rx_dma_complete);
 	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
 				tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -844,7 +844,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		unsigned long cmd1;
 
-		INIT_COMPLETION(tspi->xfer_completion);
+		reinit_completion(&tspi->xfer_completion);
 
 		cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg);
 
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 79be8ce6a9d1..4dc8e8129459 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -339,7 +339,7 @@ static int tegra_sflash_transfer_one_message(struct spi_master *master,
 	msg->actual_length = 0;
 	single_xfer = list_is_singular(&msg->transfers);
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		INIT_COMPLETION(tsd->xfer_completion);
+		reinit_completion(&tsd->xfer_completion);
 		ret = tegra_sflash_start_transfer_one(spi, xfer,
 					is_first_msg, single_xfer);
 		if (ret < 0) {
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index af0a67886ae8..e66715ba37ed 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -462,7 +462,7 @@ static void tegra_slink_dma_complete(void *args)
 
 static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
 {
-	INIT_COMPLETION(tspi->tx_dma_complete);
+	reinit_completion(&tspi->tx_dma_complete);
 	tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
 				tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -481,7 +481,7 @@ static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
 
 static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
 {
-	INIT_COMPLETION(tspi->rx_dma_complete);
+	reinit_completion(&tspi->rx_dma_complete);
 	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
 				tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -836,7 +836,7 @@ static int tegra_slink_transfer_one(struct spi_master *master,
 	struct tegra_slink_data *tspi = spi_master_get_devdata(master);
 	int ret;
 
-	INIT_COMPLETION(tspi->xfer_completion);
+	reinit_completion(&tspi->xfer_completion);
 	ret = tegra_slink_start_transfer_one(spi, xfer);
 	if (ret < 0) {
 		dev_err(tspi->dev,
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index ec3a83f52ea2..6d4ce4615163 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -258,7 +258,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 	xspi->tx_ptr = t->tx_buf;
 	xspi->rx_ptr = t->rx_buf;
 	xspi->remaining_bytes = t->len;
-	INIT_COMPLETION(xspi->done);
+	reinit_completion(&xspi->done);
 
 
 	/* Enable the transmit empty interrupt, which we use to determine
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 927998aa5e71..8d85ddc46011 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -571,7 +571,7 @@ static int spi_transfer_one_message(struct spi_master *master,
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		trace_spi_transfer_start(msg, xfer);
 
-		INIT_COMPLETION(master->xfer_completion);
+		reinit_completion(&master->xfer_completion);
 
 		ret = master->transfer_one(master, msg->spi, xfer);
 		if (ret < 0) {
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index aeae76b77be5..e2dd7830b320 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -783,7 +783,7 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
 	if (!ret)
 		return -EBUSY;
 
-	INIT_COMPLETION(lradc->completion);
+	reinit_completion(&lradc->completion);
 
 	/*
 	 * No buffered operation in progress, map the channel and trigger it.
diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
index 333594189b81..7f2f2472655b 100644
--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
@@ -87,7 +87,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
 	if (mutex_lock_interruptible(&p2m_dev->mutex))
 		return -EINTR;
 
-	INIT_COMPLETION(p2m_dev->completion);
+	reinit_completion(&p2m_dev->completion);
 	p2m_dev->error = 0;
 
 	if (desc_cnt > 1 && solo_dev->type != SOLO_DEV_6110 && desc_mode) {
diff --git a/drivers/staging/tidspbridge/core/sync.c b/drivers/staging/tidspbridge/core/sync.c
index 7bb550acaf4a..743ff09d82d2 100644
--- a/drivers/staging/tidspbridge/core/sync.c
+++ b/drivers/staging/tidspbridge/core/sync.c
@@ -72,7 +72,7 @@ int sync_wait_on_multiple_events(struct sync_object **events,
 	spin_lock_bh(&sync_lock);
 	for (i = 0; i < count; i++) {
 		if (completion_done(&events[i]->comp)) {
-			INIT_COMPLETION(events[i]->comp);
+			reinit_completion(&events[i]->comp);
 			*index = i;
 			spin_unlock_bh(&sync_lock);
 			status = 0;
@@ -92,7 +92,7 @@ int sync_wait_on_multiple_events(struct sync_object **events,
 	spin_lock_bh(&sync_lock);
 	for (i = 0; i < count; i++) {
 		if (completion_done(&events[i]->comp)) {
-			INIT_COMPLETION(events[i]->comp);
+			reinit_completion(&events[i]->comp);
 			*index = i;
 			status = 0;
 		}
diff --git a/drivers/staging/tidspbridge/include/dspbridge/sync.h b/drivers/staging/tidspbridge/include/dspbridge/sync.h
index 58a0d5c5543d..fc19b9707087 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/sync.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/sync.h
@@ -59,7 +59,7 @@ static inline void sync_init_event(struct sync_object *event)
 
 static inline void sync_reset_event(struct sync_object *event)
 {
-	INIT_COMPLETION(event->comp);
+	reinit_completion(&event->comp);
 	event->multi_comp = NULL;
 }
 
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 6d04eb48bfbc..1aa4a3fd0f1b 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -332,7 +332,7 @@ static void bridge_recover(struct work_struct *work)
 	struct dev_object *dev;
 	struct cfg_devnode *dev_node;
 	if (atomic_read(&bridge_cref)) {
-		INIT_COMPLETION(bridge_comp);
+		reinit_completion(&bridge_comp);
 		while (!wait_for_completion_timeout(&bridge_comp,
 						msecs_to_jiffies(REC_TIMEOUT)))
 			pr_info("%s:%d handle(s) still opened\n",
@@ -348,7 +348,7 @@ static void bridge_recover(struct work_struct *work)
 
 void bridge_recover_schedule(void)
 {
-	INIT_COMPLETION(bridge_open_comp);
+	reinit_completion(&bridge_open_comp);
 	recover = true;
 	queue_work(bridge_rec_queue, &bridge_recovery_work);
 }
@@ -389,7 +389,7 @@ static int omap3_bridge_startup(struct platform_device *pdev)
 #ifdef CONFIG_TIDSPBRIDGE_RECOVERY
 	bridge_rec_queue = create_workqueue("bridge_rec_queue");
 	INIT_WORK(&bridge_recovery_work, bridge_recover);
-	INIT_COMPLETION(bridge_comp);
+	reinit_completion(&bridge_comp);
 #endif
 
 #ifdef CONFIG_PM
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 0e888621f484..7332e2ca4615 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -495,7 +495,7 @@ static int dashtty_write(struct tty_struct *tty, const unsigned char *buf,
 	count = dport->xmit_cnt;
 	/* xmit buffer no longer empty? */
 	if (count)
-		INIT_COMPLETION(dport->xmit_empty);
+		reinit_completion(&dport->xmit_empty);
 	mutex_unlock(&dport->xmit_lock);
 
 	if (total) {
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index aa491627a45b..892cc96466eb 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -344,7 +344,7 @@ void c67x00_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
 	/* it could happen that we reinitialize this completion, while
 	 * somebody was waiting for that completion.  The timeout and
 	 * while loop handle such cases, but this might be improved */
-	INIT_COMPLETION(c67x00->endpoint_disable);
+	reinit_completion(&c67x00->endpoint_disable);
 	c67x00_sched_kick(c67x00);
 	wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);
 
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 44cf775a8627..774e8b89cdb5 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -373,7 +373,7 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
 	if (req->buf == NULL)
 		req->buf = (void *)0xDEADBABE;
 
-	INIT_COMPLETION(ffs->ep0req_completion);
+	reinit_completion(&ffs->ep0req_completion);
 
 	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
 	if (unlikely(ret < 0))
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 84657e07dc5d..439c951f261b 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -455,7 +455,7 @@ static int parport_prologue(struct parport *pp)
 		return -1;
 	}
 	mos_parport->msg_pending = true;   /* synch usb call pending */
-	INIT_COMPLETION(mos_parport->syncmsg_compl);
+	reinit_completion(&mos_parport->syncmsg_compl);
 	spin_unlock(&release_lock);
 
 	mutex_lock(&mos_parport->serial->disc_mutex);
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 7eed957b6014..85edabfdef5a 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -220,7 +220,7 @@ int exynos_mipi_dsi_wr_data(struct mipi_dsim_device *dsim, unsigned int data_id,
 	case MIPI_DSI_DCS_LONG_WRITE:
 	{
 		unsigned int size, payload = 0;
-		INIT_COMPLETION(dsim_wr_comp);
+		reinit_completion(&dsim_wr_comp);
 
 		size = data_size * 4;
 
@@ -356,7 +356,7 @@ int exynos_mipi_dsi_rd_data(struct mipi_dsim_device *dsim, unsigned int data_id,
 	msleep(20);
 
 	mutex_lock(&dsim->lock);
-	INIT_COMPLETION(dsim_rd_comp);
+	reinit_completion(&dsim_rd_comp);
 	exynos_mipi_dsi_rd_tx_header(dsim,
 		MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, req_size);
 
diff --git a/drivers/video/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/omap2/displays-new/encoder-tpd12s015.c
index 798ef200b055..d5c936cb217f 100644
--- a/drivers/video/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/omap2/displays-new/encoder-tpd12s015.c
@@ -69,7 +69,7 @@ static int tpd_connect(struct omap_dss_device *dssdev,
 	dst->src = dssdev;
 	dssdev->dst = dst;
 
-	INIT_COMPLETION(ddata->hpd_completion);
+	reinit_completion(&ddata->hpd_completion);
 
 	gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
 	/* DC-DC converter needs at max 300us to get to 90% of 5V */
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 264ad1c583ab..e36b18b2817b 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -56,7 +56,7 @@ MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids);
 
 static int w1_gpio_probe_dt(struct platform_device *pdev)
 {
-	struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct device_node *np = pdev->dev.of_node;
 	int gpio;
 
@@ -92,7 +92,7 @@ static int w1_gpio_probe(struct platform_device *pdev)
 		}
 	}
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 
 	if (!pdata) {
 		dev_err(&pdev->dev, "No configuration data\n");
@@ -154,7 +154,7 @@ static int w1_gpio_probe(struct platform_device *pdev)
 static int w1_gpio_remove(struct platform_device *pdev)
 {
 	struct w1_bus_master *master = platform_get_drvdata(pdev);
-	struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
 	if (pdata->enable_external_pullup)
 		pdata->enable_external_pullup(0);
@@ -171,7 +171,7 @@ static int w1_gpio_remove(struct platform_device *pdev)
 
 static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state)
 {
-	struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
 	if (pdata->enable_external_pullup)
 		pdata->enable_external_pullup(0);
@@ -181,7 +181,7 @@ static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state)
 
 static int w1_gpio_resume(struct platform_device *pdev)
 {
-	struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
 	if (pdata->enable_external_pullup)
 		pdata->enable_external_pullup(1);
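
dev_get_platdata() is a one-line accessor for dev->platform_data, so the conversions in this driver (and in rtc-hid-sensor-time above) are behaviorally identical to the open-coded form; they just stop reaching into struct device directly. A hedged sketch with a made-up platform-data type:

	#include <linux/platform_device.h>

	struct foo_platform_data { int gpio; };	/* illustrative type only */

	static int foo_probe(struct platform_device *pdev)
	{
		/* Equivalent to pdev->dev.platform_data, via the accessor. */
		struct foo_platform_data *pdata = dev_get_platdata(&pdev->dev);

		if (!pdata)
			return -ENXIO;	/* no board-supplied configuration */
		return 0;
	}
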
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 000eae2782b6..2f6735dbf1a9 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -392,7 +392,7 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
 
 		wait_for_completion(&ecr->completion);
 		rc = ecr->rc;
-		INIT_COMPLETION(ecr->completion);
+		reinit_completion(&ecr->completion);
 	}
 out:
 	ablkcipher_request_free(req);
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index efc85b1377cc..3c6136f98c73 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -129,7 +129,7 @@ static int can_set_xattr(struct inode *inode, const char *name,
 
 static void hfsplus_init_header_node(struct inode *attr_file,
 					u32 clump_size,
-					char *buf, size_t node_size)
+					char *buf, u16 node_size)
 {
 	struct hfs_bnode_desc *desc;
 	struct hfs_btree_header_rec *head;
@@ -139,8 +139,9 @@ static void hfsplus_init_header_node(struct inode *attr_file,
 	char *bmp;
 	u32 used_nodes;
 	u32 used_bmp_bytes;
+	loff_t tmp;
 
-	hfs_dbg(ATTR_MOD, "init_hdr_attr_file: clump %u, node_size %zu\n",
+	hfs_dbg(ATTR_MOD, "init_hdr_attr_file: clump %u, node_size %u\n",
 		clump_size, node_size);
 
 	/* The end of the node contains list of record offsets */
@@ -154,7 +155,9 @@ static void hfsplus_init_header_node(struct inode *attr_file,
 
 	head = (struct hfs_btree_header_rec *)(buf + offset);
 	head->node_size = cpu_to_be16(node_size);
-	head->node_count = cpu_to_be32(i_size_read(attr_file) / node_size);
+	tmp = i_size_read(attr_file);
+	do_div(tmp, node_size);
+	head->node_count = cpu_to_be32(tmp);
 	head->free_nodes = cpu_to_be32(be32_to_cpu(head->node_count) - 1);
 	head->clump_size = cpu_to_be32(clump_size);
 	head->attributes |= cpu_to_be32(HFS_TREE_BIGKEYS | HFS_TREE_VARIDXKEYS);
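
This hunk is the __divdi3 fix named in the merge summary: i_size_read() returns a 64-bit loff_t, and dividing it by node_size with plain C division makes 32-bit gcc emit a call to the libgcc helper __divdi3, which the kernel does not link against. do_div() performs the 64-by-32 division in place without any libgcc call. The idiom, sketched as a standalone helper (the function name is illustrative):

	#include <asm/div64.h>

	/* do_div(n, base) divides the 64-bit lvalue n by a 32-bit base in
	 * place and evaluates to the remainder. */
	static u32 nodes_in_file(loff_t file_size, u16 node_size)
	{
		loff_t tmp = file_size;

		do_div(tmp, node_size);	/* tmp = file_size / node_size */
		return (u32)tmp;
	}
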
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index c8e729deb4f7..74a7e12e10df 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -244,7 +244,7 @@ static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
 	set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
 	spin_lock(&tbl->slot_tbl_lock);
 	if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
-		INIT_COMPLETION(tbl->complete);
+		reinit_completion(&tbl->complete);
 		spin_unlock(&tbl->slot_tbl_lock);
 		return wait_for_completion_interruptible(&tbl->complete);
 	}
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 3a44a648dae7..3407b2c62b21 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1304,7 +1304,7 @@ static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
 {
 	wait_for_completion(&mw->mw_complete);
 	/* Re-arm the completion in case we want to wait on it again */
-	INIT_COMPLETION(mw->mw_complete);
+	reinit_completion(&mw->mw_complete);
 	return mw->mw_status;
 }
 
@@ -1355,7 +1355,7 @@ static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
 	else
 		ret = mw->mw_status;
 	/* Re-arm the completion in case we want to wait on it again */
-	INIT_COMPLETION(mw->mw_complete);
+	reinit_completion(&mw->mw_complete);
 	return ret;
 }
 
diff --git a/fs/proc/consoles.c b/fs/proc/consoles.c
index b701eaa482bf..51942d5abcec 100644
--- a/fs/proc/consoles.c
+++ b/fs/proc/consoles.c
@@ -29,7 +29,6 @@ static int show_console_dev(struct seq_file *m, void *v)
 	char flags[ARRAY_SIZE(con_flags) + 1];
 	struct console *con = v;
 	unsigned int a;
-	int len;
 	dev_t dev = 0;
 
 	if (con->device) {
@@ -47,11 +46,10 @@ static int show_console_dev(struct seq_file *m, void *v)
 			con_flags[a].name : ' ';
 	flags[a] = 0;
 
-	seq_printf(m, "%s%d%n", con->name, con->index, &len);
-	len = 21 - len;
-	if (len < 1)
-		len = 1;
-	seq_printf(m, "%*c%c%c%c (%s)", len, ' ', con->read ? 'R' : '-',
+	seq_setwidth(m, 21 - 1);
+	seq_printf(m, "%s%d", con->name, con->index);
+	seq_pad(m, ' ');
+	seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-',
 		con->write ? 'W' : '-', con->unblank ? 'U' : '-',
 		flags);
 	if (dev)
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index c805d5b69ba1..a77d2b299199 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -1,8 +1,8 @@
 #include <linux/fs.h>
-#include <linux/hugetlb.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/hugetlb.h>
 #include <linux/mman.h>
 #include <linux/mmzone.h>
 #include <linux/proc_fs.h>
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index ccfd99bd1c5a..5f9bc8a746c9 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -39,7 +39,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
 	unsigned long ino = 0;
 	struct file *file;
 	dev_t dev = 0;
-	int flags, len;
+	int flags;
 
 	flags = region->vm_flags;
 	file = region->vm_file;
@@ -50,8 +50,9 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
 		ino = inode->i_ino;
 	}
 
+	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m,
-		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
 		   region->vm_start,
 		   region->vm_end,
 		   flags & VM_READ ? 'r' : '-',
@@ -59,13 +60,10 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
 		   flags & VM_EXEC ? 'x' : '-',
 		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
 		   ((loff_t)region->vm_pgoff) << PAGE_SHIFT,
-		   MAJOR(dev), MINOR(dev), ino, &len);
+		   MAJOR(dev), MINOR(dev), ino);
 
 	if (file) {
-		len = 25 + sizeof(void *) * 6 - len;
-		if (len < 1)
-			len = 1;
-		seq_printf(m, "%*c", len, ' ');
+		seq_pad(m, ' ');
 		seq_path(m, &file->f_path, "");
 	}
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index abbe825d20ff..fb52b548080d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -62,7 +62,8 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 		total_rss << (PAGE_SHIFT-10),
 		data << (PAGE_SHIFT-10),
 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
-		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
+		(PTRS_PER_PTE * sizeof(pte_t) *
+		 atomic_long_read(&mm->nr_ptes)) >> 10,
 		swap << (PAGE_SHIFT-10));
 }
 
@@ -83,14 +84,6 @@ unsigned long task_statm(struct mm_struct *mm,
 	return mm->total_vm;
 }
 
-static void pad_len_spaces(struct seq_file *m, int len)
-{
-	len = 25 + sizeof(void*) * 6 - len;
-	if (len < 1)
-		len = 1;
-	seq_printf(m, "%*c", len, ' ');
-}
-
 #ifdef CONFIG_NUMA
 /*
  * These functions are for numa_maps but called in generic **maps seq_file
@@ -268,7 +261,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 	unsigned long long pgoff = 0;
 	unsigned long start, end;
 	dev_t dev = 0;
-	int len;
 	const char *name = NULL;
 
 	if (file) {
@@ -286,7 +278,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 	if (stack_guard_page_end(vma, end))
 		end -= PAGE_SIZE;
 
-	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
+	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
 			start,
 			end,
 			flags & VM_READ ? 'r' : '-',
@@ -294,14 +287,14 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 			flags & VM_EXEC ? 'x' : '-',
 			flags & VM_MAYSHARE ? 's' : 'p',
 			pgoff,
-			MAJOR(dev), MINOR(dev), ino, &len);
+			MAJOR(dev), MINOR(dev), ino);
 
 	/*
 	 * Print the dentry name for named mappings, and a
 	 * special [heap] marker for the heap:
 	 */
 	if (file) {
-		pad_len_spaces(m, len);
+		seq_pad(m, ' ');
 		seq_path(m, &file->f_path, "\n");
 		goto done;
 	}
@@ -333,7 +326,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 			name = "[stack]";
 		} else {
 			/* Thread stack in /proc/PID/maps */
-			pad_len_spaces(m, len);
+			seq_pad(m, ' ');
 			seq_printf(m, "[stack:%d]", tid);
 		}
 	}
@@ -341,7 +334,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
 done:
 	if (name) {
-		pad_len_spaces(m, len);
+		seq_pad(m, ' ');
 		seq_puts(m, name);
 	}
 	seq_putc(m, '\n');
@@ -505,9 +498,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma) == 1) {
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
-		spin_unlock(&walk->mm->page_table_lock);
+		spin_unlock(ptl);
 		mss->anonymous_thp += HPAGE_PMD_SIZE;
 		return 0;
 	}
@@ -998,13 +991,14 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 {
 	struct vm_area_struct *vma;
 	struct pagemapread *pm = walk->private;
+	spinlock_t *ptl;
 	pte_t *pte;
 	int err = 0;
 	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
 
 	/* find the first VMA at or above 'addr' */
 	vma = find_vma(walk->mm, addr);
-	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
+	if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		int pmd_flags2;
 
 		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ -1022,7 +1016,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			if (err)
 				break;
 		}
-		spin_unlock(&walk->mm->page_table_lock);
+		spin_unlock(ptl);
 		return err;
 	}
 
@@ -1324,7 +1318,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 
 	md = walk->private;
 
-	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
+	if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
 		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
 
@@ -1332,7 +1326,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 		if (page)
 			gather_stats(page, md, pte_dirty(huge_pte),
 				     HPAGE_PMD_SIZE/PAGE_SIZE);
-		spin_unlock(&walk->mm->page_table_lock);
+		spin_unlock(ptl);
 		return 0;
 	}
 
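
These task_mmu.c call sites track the split page table lock rework elsewhere in this series: pmd_trans_huge_lock() now hands back the spinlock it actually took through a spinlock_t ** argument, and the caller unlocks that instead of assuming mm->page_table_lock, which no longer protects a PMD once PMD locks are split. The pattern, sketched outside any particular page walker (pmd and vma would come from a walk callback as above):

	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		/* ... inspect the stable huge pmd under ptl ... */
		spin_unlock(ptl);	/* not &mm->page_table_lock */
		return 0;
	}
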
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 56123a6f462e..678455d2d683 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -123,14 +123,6 @@ unsigned long task_statm(struct mm_struct *mm,
 	return size;
 }
 
-static void pad_len_spaces(struct seq_file *m, int len)
-{
-	len = 25 + sizeof(void*) * 6 - len;
-	if (len < 1)
-		len = 1;
-	seq_printf(m, "%*c", len, ' ');
-}
-
 /*
  * display a single VMA to a sequenced file
  */
@@ -142,7 +134,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
 	unsigned long ino = 0;
 	struct file *file;
 	dev_t dev = 0;
-	int flags, len;
+	int flags;
 	unsigned long long pgoff = 0;
 
 	flags = vma->vm_flags;
@@ -155,8 +147,9 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
 		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
 	}
 
+	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m,
-		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
 		   vma->vm_start,
 		   vma->vm_end,
 		   flags & VM_READ ? 'r' : '-',
@@ -164,16 +157,16 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
 		   flags & VM_EXEC ? 'x' : '-',
 		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
 		   pgoff,
-		   MAJOR(dev), MINOR(dev), ino, &len);
+		   MAJOR(dev), MINOR(dev), ino);
 
 	if (file) {
-		pad_len_spaces(m, len);
+		seq_pad(m, ' ');
 		seq_path(m, &file->f_path, "");
 	} else if (mm) {
 		pid_t tid = vm_is_stack(priv->task, vma, is_pid);
 
 		if (tid != 0) {
-			pad_len_spaces(m, len);
+			seq_pad(m, ' ');
 			/*
 			 * Thread stack in /proc/PID/task/TID/maps or
 			 * the main process stack.
diff --git a/fs/seq_file.c b/fs/seq_file.c
index a290157265ef..1cd2388ca5bd 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -766,6 +766,21 @@ int seq_write(struct seq_file *seq, const void *data, size_t len)
 }
 EXPORT_SYMBOL(seq_write);
 
+/**
+ * seq_pad - write padding spaces to buffer
+ * @m: seq_file identifying the buffer to which data should be written
+ * @c: the byte to append after padding if non-zero
+ */
+void seq_pad(struct seq_file *m, char c)
+{
+	int size = m->pad_until - m->count;
+	if (size > 0)
+		seq_printf(m, "%*s", size, "");
+	if (c)
+		seq_putc(m, c);
+}
+EXPORT_SYMBOL(seq_pad);
+
 struct list_head *seq_list_start(struct list_head *head, loff_t pos)
 {
 	struct list_head *lh;
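
seq_setwidth() and seq_pad() replace the "%n"-based column padding being removed from the seq_file users above: the show method declares the field width up front, prints normally, and then pads out to the declared width. Every converted /proc show method follows the same shape, mirrored here from the show_console_dev() hunk:

	/* Sketch of the fixed-width idiom; m is the struct seq_file *
	 * handed to a seq_operations ->show() callback. */
	seq_setwidth(m, 21 - 1);			/* field width, less the pad byte */
	seq_printf(m, "%s%d", con->name, con->index);	/* may fall short of the width */
	seq_pad(m, ' ');				/* space-fill, then emit one ' ' */
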
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
index 98e892ef6d5a..a0f9280421ec 100644
--- a/include/linux/cmdline-parser.h
+++ b/include/linux/cmdline-parser.h
@@ -8,6 +8,8 @@
8#define CMDLINEPARSEH 8#define CMDLINEPARSEH
9 9
10#include <linux/blkdev.h> 10#include <linux/blkdev.h>
11#include <linux/fs.h>
12#include <linux/slab.h>
11 13
12/* partition flags */ 14/* partition flags */
13#define PF_RDONLY 0x01 /* Device is read only */ 15#define PF_RDONLY 0x01 /* Device is read only */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 22c33e35bcb2..5d5aaae3af43 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -19,8 +19,8 @@
19 * 19 *
20 * See also: complete(), wait_for_completion() (and friends _timeout, 20 * See also: complete(), wait_for_completion() (and friends _timeout,
21 * _interruptible, _interruptible_timeout, and _killable), init_completion(), 21 * _interruptible, _interruptible_timeout, and _killable), init_completion(),
22 * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and 22 * reinit_completion(), and macros DECLARE_COMPLETION(),
23 * INIT_COMPLETION(). 23 * DECLARE_COMPLETION_ONSTACK().
24 */ 24 */
25struct completion { 25struct completion {
26 unsigned int done; 26 unsigned int done;
@@ -65,7 +65,7 @@ struct completion {
65 65
66/** 66/**
67 * init_completion - Initialize a dynamically allocated completion 67 * init_completion - Initialize a dynamically allocated completion
68 * @x: completion structure that is to be initialized 68 * @x: pointer to completion structure that is to be initialized
69 * 69 *
70 * This inline function will initialize a dynamically created completion 70 * This inline function will initialize a dynamically created completion
71 * structure. 71 * structure.
@@ -76,6 +76,18 @@ static inline void init_completion(struct completion *x)
76 init_waitqueue_head(&x->wait); 76 init_waitqueue_head(&x->wait);
77} 77}
78 78
79/**
80 * reinit_completion - reinitialize a completion structure
81 * @x: pointer to completion structure that is to be reinitialized
82 *
83 * This inline function should be used to reinitialize a completion structure so it can
84 * be reused. This is especially important after complete_all() is used.
85 */
86static inline void reinit_completion(struct completion *x)
87{
88 x->done = 0;
89}
90
79extern void wait_for_completion(struct completion *); 91extern void wait_for_completion(struct completion *);
80extern void wait_for_completion_io(struct completion *); 92extern void wait_for_completion_io(struct completion *);
81extern int wait_for_completion_interruptible(struct completion *x); 93extern int wait_for_completion_interruptible(struct completion *x);
@@ -94,14 +106,4 @@ extern bool completion_done(struct completion *x);
94extern void complete(struct completion *); 106extern void complete(struct completion *);
95extern void complete_all(struct completion *); 107extern void complete_all(struct completion *);
96 108
97/**
98 * INIT_COMPLETION - reinitialize a completion structure
99 * @x: completion structure to be reinitialized
100 *
101 * This macro should be used to reinitialize a completion structure so it can
102 * be reused. This is especially important after complete_all() is used.
103 */
104#define INIT_COMPLETION(x) ((x).done = 0)
105
106
107#endif 109#endif
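
The removed INIT_COMPLETION() macro took the structure itself; its replacement reinit_completion() takes a pointer, matching init_completion(). A minimal reuse sketch (the surrounding driver code is hypothetical, and something else is assumed to call complete() on demo_done):

    static struct completion demo_done;

    static void demo_wait_twice(void)
    {
            init_completion(&demo_done);
            wait_for_completion(&demo_done);

            reinit_completion(&demo_done);  /* was: INIT_COMPLETION(demo_done) */
            wait_for_completion(&demo_done);
    }
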
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 3935428c57cf..91672e2deec3 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -54,7 +54,8 @@ enum page_check_address_pmd_flag {
54extern pmd_t *page_check_address_pmd(struct page *page, 54extern pmd_t *page_check_address_pmd(struct page *page,
55 struct mm_struct *mm, 55 struct mm_struct *mm,
56 unsigned long address, 56 unsigned long address,
57 enum page_check_address_pmd_flag flag); 57 enum page_check_address_pmd_flag flag,
58 spinlock_t **ptl);
58 59
59#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) 60#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
60#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) 61#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
@@ -129,15 +130,15 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
129 unsigned long start, 130 unsigned long start,
130 unsigned long end, 131 unsigned long end,
131 long adjust_next); 132 long adjust_next);
132extern int __pmd_trans_huge_lock(pmd_t *pmd, 133extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
133 struct vm_area_struct *vma); 134 spinlock_t **ptl);
134/* mmap_sem must be held on entry */ 135/* mmap_sem must be held on entry */
135static inline int pmd_trans_huge_lock(pmd_t *pmd, 136static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
136 struct vm_area_struct *vma) 137 spinlock_t **ptl)
137{ 138{
138 VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); 139 VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
139 if (pmd_trans_huge(*pmd)) 140 if (pmd_trans_huge(*pmd))
140 return __pmd_trans_huge_lock(pmd, vma); 141 return __pmd_trans_huge_lock(pmd, vma, ptl);
141 else 142 else
142 return 0; 143 return 0;
143} 144}
@@ -215,8 +216,8 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
215 long adjust_next) 216 long adjust_next)
216{ 217{
217} 218}
218static inline int pmd_trans_huge_lock(pmd_t *pmd, 219static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
219 struct vm_area_struct *vma) 220 spinlock_t **ptl)
220{ 221{
221 return 0; 222 return 0;
222} 223}
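
Callers of pmd_trans_huge_lock() now receive the lock to drop through @ptl instead of assuming mm->page_table_lock. A sketch of the updated calling convention (the surrounding walker fragment is hypothetical):

    spinlock_t *ptl;

    if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
            /* *pmd is a stable huge pmd here; ptl is held */
            spin_unlock(ptl);
    }
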
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0393270466c3..acd2010328f3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -392,6 +392,15 @@ static inline int hugepage_migration_support(struct hstate *h)
392 return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT); 392 return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
393} 393}
394 394
395static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
396 struct mm_struct *mm, pte_t *pte)
397{
398 if (huge_page_size(h) == PMD_SIZE)
399 return pmd_lockptr(mm, (pmd_t *) pte);
400 VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
401 return &mm->page_table_lock;
402}
403
395#else /* CONFIG_HUGETLB_PAGE */ 404#else /* CONFIG_HUGETLB_PAGE */
396struct hstate {}; 405struct hstate {};
397#define alloc_huge_page_node(h, nid) NULL 406#define alloc_huge_page_node(h, nid) NULL
@@ -401,6 +410,7 @@ struct hstate {};
401#define hstate_sizelog(s) NULL 410#define hstate_sizelog(s) NULL
402#define hstate_vma(v) NULL 411#define hstate_vma(v) NULL
403#define hstate_inode(i) NULL 412#define hstate_inode(i) NULL
413#define page_hstate(page) NULL
404#define huge_page_size(h) PAGE_SIZE 414#define huge_page_size(h) PAGE_SIZE
405#define huge_page_mask(h) PAGE_MASK 415#define huge_page_mask(h) PAGE_MASK
406#define vma_kernel_pagesize(v) PAGE_SIZE 416#define vma_kernel_pagesize(v) PAGE_SIZE
@@ -421,6 +431,22 @@ static inline pgoff_t basepage_index(struct page *page)
421#define dissolve_free_huge_pages(s, e) do {} while (0) 431#define dissolve_free_huge_pages(s, e) do {} while (0)
422#define pmd_huge_support() 0 432#define pmd_huge_support() 0
423#define hugepage_migration_support(h) 0 433#define hugepage_migration_support(h) 0
434
435static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
436 struct mm_struct *mm, pte_t *pte)
437{
438 return &mm->page_table_lock;
439}
424#endif /* CONFIG_HUGETLB_PAGE */ 440#endif /* CONFIG_HUGETLB_PAGE */
425 441
442static inline spinlock_t *huge_pte_lock(struct hstate *h,
443 struct mm_struct *mm, pte_t *pte)
444{
445 spinlock_t *ptl;
446
447 ptl = huge_pte_lockptr(h, mm, pte);
448 spin_lock(ptl);
449 return ptl;
450}
451
426#endif /* _LINUX_HUGETLB_H */ 452#endif /* _LINUX_HUGETLB_H */
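
huge_pte_lockptr() picks the split pmd lock for PMD-sized pages and falls back to mm->page_table_lock otherwise; huge_pte_lock() wraps it together with the spin_lock(). A usage sketch (the variables are assumed to be set up by the caller):

    spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
    /* ... inspect or modify the huge pte ... */
    spin_unlock(ptl);
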
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c9e831dc80bc..db43b58a3355 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,8 +11,6 @@
11#include <linux/irqnr.h> 11#include <linux/irqnr.h>
12#include <linux/hardirq.h> 12#include <linux/hardirq.h>
13#include <linux/irqflags.h> 13#include <linux/irqflags.h>
14#include <linux/smp.h>
15#include <linux/percpu.h>
16#include <linux/hrtimer.h> 14#include <linux/hrtimer.h>
17#include <linux/kref.h> 15#include <linux/kref.h>
18#include <linux/workqueue.h> 16#include <linux/workqueue.h>
@@ -392,15 +390,6 @@ extern void __raise_softirq_irqoff(unsigned int nr);
392extern void raise_softirq_irqoff(unsigned int nr); 390extern void raise_softirq_irqoff(unsigned int nr);
393extern void raise_softirq(unsigned int nr); 391extern void raise_softirq(unsigned int nr);
394 392
395/* This is the worklist that queues up per-cpu softirq work.
396 *
397 * send_remote_sendirq() adds work to these lists, and
398 * the softirq handler itself dequeues from them. The queues
399 * are protected by disabling local cpu interrupts and they must
400 * only be accessed by the local cpu that they are for.
401 */
402DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
403
404DECLARE_PER_CPU(struct task_struct *, ksoftirqd); 393DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
405 394
406static inline struct task_struct *this_cpu_ksoftirqd(void) 395static inline struct task_struct *this_cpu_ksoftirqd(void)
@@ -408,17 +397,6 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
408 return this_cpu_read(ksoftirqd); 397 return this_cpu_read(ksoftirqd);
409} 398}
410 399
411/* Try to send a softirq to a remote cpu. If this cannot be done, the
412 * work will be queued to the local cpu.
413 */
414extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
415
416/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
417 * and compute the current cpu, passed in as 'this_cpu'.
418 */
419extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
420 int this_cpu, int softirq);
421
422/* Tasklets --- multithreaded analogue of BHs. 400/* Tasklets --- multithreaded analogue of BHs.
423 401
424 Main feature differing them of generic softirqs: tasklet 402 Main feature differing them of generic softirqs: tasklet
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 10308c6a3d1c..552d51efb429 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * A generic kernel FIFO implementation 2 * A generic kernel FIFO implementation
3 * 3 *
4 * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net> 4 * Copyright (C) 2013 Stefani Seibold <stefani@seibold.net>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -67,9 +67,10 @@ struct __kfifo {
67 union { \ 67 union { \
68 struct __kfifo kfifo; \ 68 struct __kfifo kfifo; \
69 datatype *type; \ 69 datatype *type; \
70 const datatype *const_type; \
70 char (*rectype)[recsize]; \ 71 char (*rectype)[recsize]; \
71 ptrtype *ptr; \ 72 ptrtype *ptr; \
72 const ptrtype *ptr_const; \ 73 ptrtype const *ptr_const; \
73 } 74 }
74 75
75#define __STRUCT_KFIFO(type, size, recsize, ptrtype) \ 76#define __STRUCT_KFIFO(type, size, recsize, ptrtype) \
@@ -386,16 +387,12 @@ __kfifo_int_must_check_helper( \
386#define kfifo_put(fifo, val) \ 387#define kfifo_put(fifo, val) \
387({ \ 388({ \
388 typeof((fifo) + 1) __tmp = (fifo); \ 389 typeof((fifo) + 1) __tmp = (fifo); \
389 typeof((val) + 1) __val = (val); \ 390 typeof(*__tmp->const_type) __val = (val); \
390 unsigned int __ret; \ 391 unsigned int __ret; \
391 const size_t __recsize = sizeof(*__tmp->rectype); \ 392 size_t __recsize = sizeof(*__tmp->rectype); \
392 struct __kfifo *__kfifo = &__tmp->kfifo; \ 393 struct __kfifo *__kfifo = &__tmp->kfifo; \
393 if (0) { \
394 typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \
395 __dummy = (typeof(__val))NULL; \
396 } \
397 if (__recsize) \ 394 if (__recsize) \
398 __ret = __kfifo_in_r(__kfifo, __val, sizeof(*__val), \ 395 __ret = __kfifo_in_r(__kfifo, &__val, sizeof(__val), \
399 __recsize); \ 396 __recsize); \
400 else { \ 397 else { \
401 __ret = !kfifo_is_full(__tmp); \ 398 __ret = !kfifo_is_full(__tmp); \
@@ -404,7 +401,7 @@ __kfifo_int_must_check_helper( \
404 ((typeof(__tmp->type))__kfifo->data) : \ 401 ((typeof(__tmp->type))__kfifo->data) : \
405 (__tmp->buf) \ 402 (__tmp->buf) \
406 )[__kfifo->in & __tmp->kfifo.mask] = \ 403 )[__kfifo->in & __tmp->kfifo.mask] = \
407 *(typeof(__tmp->type))__val; \ 404 (typeof(*__tmp->type))__val; \
408 smp_wmb(); \ 405 smp_wmb(); \
409 __kfifo->in++; \ 406 __kfifo->in++; \
410 } \ 407 } \
@@ -415,7 +412,7 @@ __kfifo_int_must_check_helper( \
415/** 412/**
416 * kfifo_get - get data from the fifo 413 * kfifo_get - get data from the fifo
417 * @fifo: address of the fifo to be used 414 * @fifo: address of the fifo to be used
418 * @val: the var where to store the data to be added 415 * @val: address where to store the data
419 * 416 *
420 * This macro reads the data from the fifo. 417 * This macro reads the data from the fifo.
421 * It returns 0 if the fifo was empty. Otherwise it returns the number 418 * It returns 0 if the fifo was empty. Otherwise it returns the number
@@ -428,12 +425,10 @@ __kfifo_int_must_check_helper( \
428__kfifo_uint_must_check_helper( \ 425__kfifo_uint_must_check_helper( \
429({ \ 426({ \
430 typeof((fifo) + 1) __tmp = (fifo); \ 427 typeof((fifo) + 1) __tmp = (fifo); \
431 typeof((val) + 1) __val = (val); \ 428 typeof(__tmp->ptr) __val = (val); \
432 unsigned int __ret; \ 429 unsigned int __ret; \
433 const size_t __recsize = sizeof(*__tmp->rectype); \ 430 const size_t __recsize = sizeof(*__tmp->rectype); \
434 struct __kfifo *__kfifo = &__tmp->kfifo; \ 431 struct __kfifo *__kfifo = &__tmp->kfifo; \
435 if (0) \
436 __val = (typeof(__tmp->ptr))0; \
437 if (__recsize) \ 432 if (__recsize) \
438 __ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \ 433 __ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \
439 __recsize); \ 434 __recsize); \
@@ -456,7 +451,7 @@ __kfifo_uint_must_check_helper( \
456/** 451/**
457 * kfifo_peek - get data from the fifo without removing 452 * kfifo_peek - get data from the fifo without removing
458 * @fifo: address of the fifo to be used 453 * @fifo: address of the fifo to be used
459 * @val: the var where to store the data to be added 454 * @val: address where to store the data
460 * 455 *
461 * This reads the data from the fifo without removing it from the fifo. 456 * This reads the data from the fifo without removing it from the fifo.
462 * It returns 0 if the fifo was empty. Otherwise it returns the number 457 * It returns 0 if the fifo was empty. Otherwise it returns the number
@@ -469,12 +464,10 @@ __kfifo_uint_must_check_helper( \
469__kfifo_uint_must_check_helper( \ 464__kfifo_uint_must_check_helper( \
470({ \ 465({ \
471 typeof((fifo) + 1) __tmp = (fifo); \ 466 typeof((fifo) + 1) __tmp = (fifo); \
472 typeof((val) + 1) __val = (val); \ 467 typeof(__tmp->ptr) __val = (val); \
473 unsigned int __ret; \ 468 unsigned int __ret; \
474 const size_t __recsize = sizeof(*__tmp->rectype); \ 469 const size_t __recsize = sizeof(*__tmp->rectype); \
475 struct __kfifo *__kfifo = &__tmp->kfifo; \ 470 struct __kfifo *__kfifo = &__tmp->kfifo; \
476 if (0) \
477 __val = (typeof(__tmp->ptr))NULL; \
478 if (__recsize) \ 471 if (__recsize) \
479 __ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \ 472 __ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \
480 __recsize); \ 473 __recsize); \
@@ -508,14 +501,10 @@ __kfifo_uint_must_check_helper( \
508#define kfifo_in(fifo, buf, n) \ 501#define kfifo_in(fifo, buf, n) \
509({ \ 502({ \
510 typeof((fifo) + 1) __tmp = (fifo); \ 503 typeof((fifo) + 1) __tmp = (fifo); \
511 typeof((buf) + 1) __buf = (buf); \ 504 typeof(__tmp->ptr_const) __buf = (buf); \
512 unsigned long __n = (n); \ 505 unsigned long __n = (n); \
513 const size_t __recsize = sizeof(*__tmp->rectype); \ 506 const size_t __recsize = sizeof(*__tmp->rectype); \
514 struct __kfifo *__kfifo = &__tmp->kfifo; \ 507 struct __kfifo *__kfifo = &__tmp->kfifo; \
515 if (0) { \
516 typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \
517 __dummy = (typeof(__buf))NULL; \
518 } \
519 (__recsize) ?\ 508 (__recsize) ?\
520 __kfifo_in_r(__kfifo, __buf, __n, __recsize) : \ 509 __kfifo_in_r(__kfifo, __buf, __n, __recsize) : \
521 __kfifo_in(__kfifo, __buf, __n); \ 510 __kfifo_in(__kfifo, __buf, __n); \
@@ -561,14 +550,10 @@ __kfifo_uint_must_check_helper( \
561__kfifo_uint_must_check_helper( \ 550__kfifo_uint_must_check_helper( \
562({ \ 551({ \
563 typeof((fifo) + 1) __tmp = (fifo); \ 552 typeof((fifo) + 1) __tmp = (fifo); \
564 typeof((buf) + 1) __buf = (buf); \ 553 typeof(__tmp->ptr) __buf = (buf); \
565 unsigned long __n = (n); \ 554 unsigned long __n = (n); \
566 const size_t __recsize = sizeof(*__tmp->rectype); \ 555 const size_t __recsize = sizeof(*__tmp->rectype); \
567 struct __kfifo *__kfifo = &__tmp->kfifo; \ 556 struct __kfifo *__kfifo = &__tmp->kfifo; \
568 if (0) { \
569 typeof(__tmp->ptr) __dummy = NULL; \
570 __buf = __dummy; \
571 } \
572 (__recsize) ?\ 557 (__recsize) ?\
573 __kfifo_out_r(__kfifo, __buf, __n, __recsize) : \ 558 __kfifo_out_r(__kfifo, __buf, __n, __recsize) : \
574 __kfifo_out(__kfifo, __buf, __n); \ 559 __kfifo_out(__kfifo, __buf, __n); \
@@ -773,14 +758,10 @@ __kfifo_uint_must_check_helper( \
773__kfifo_uint_must_check_helper( \ 758__kfifo_uint_must_check_helper( \
774({ \ 759({ \
775 typeof((fifo) + 1) __tmp = (fifo); \ 760 typeof((fifo) + 1) __tmp = (fifo); \
776 typeof((buf) + 1) __buf = (buf); \ 761 typeof(__tmp->ptr) __buf = (buf); \
777 unsigned long __n = (n); \ 762 unsigned long __n = (n); \
778 const size_t __recsize = sizeof(*__tmp->rectype); \ 763 const size_t __recsize = sizeof(*__tmp->rectype); \
779 struct __kfifo *__kfifo = &__tmp->kfifo; \ 764 struct __kfifo *__kfifo = &__tmp->kfifo; \
780 if (0) { \
781 typeof(__tmp->ptr) __dummy __attribute__ ((unused)) = NULL; \
782 __buf = __dummy; \
783 } \
784 (__recsize) ? \ 765 (__recsize) ? \
785 __kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \ 766 __kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \
786 __kfifo_out_peek(__kfifo, __buf, __n); \ 767 __kfifo_out_peek(__kfifo, __buf, __n); \
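
With the union gaining const_type, kfifo_put() now takes the element by value and type-checks it against the fifo's element type; the old pointer-passing form no longer compiles. A before/after sketch on a hypothetical int fifo:

    DEFINE_KFIFO(demo_fifo, int, 16);

    void demo(void)
    {
            int v = 42, out;

            kfifo_put(&demo_fifo, v);       /* was: kfifo_put(&demo_fifo, &v) */
            if (kfifo_get(&demo_fifo, &out))
                    pr_info("got %d\n", out);       /* 42 */
    }

kfifo_get() and kfifo_peek() still take an address, but the typeof(__tmp->ptr) assignment now rejects pointers of the wrong type at compile time, replacing the old dead-code dummy-assignment trick.
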
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 8828a78dec9a..fbf10a0bc095 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -195,4 +195,6 @@ static inline struct llist_node *llist_del_all(struct llist_head *head)
195 195
196extern struct llist_node *llist_del_first(struct llist_head *head); 196extern struct llist_node *llist_del_first(struct llist_head *head);
197 197
198struct llist_node *llist_reverse_order(struct llist_node *head);
199
198#endif /* LLIST_H */ 200#endif /* LLIST_H */
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 13dfd36a3294..c8929c3832db 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -15,10 +15,15 @@
15 */ 15 */
16 16
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <generated/bounds.h>
19
20#define USE_CMPXCHG_LOCKREF \
21 (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
22 IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)
18 23
19struct lockref { 24struct lockref {
20 union { 25 union {
21#ifdef CONFIG_CMPXCHG_LOCKREF 26#if USE_CMPXCHG_LOCKREF
22 aligned_u64 lock_count; 27 aligned_u64 lock_count;
23#endif 28#endif
24 struct { 29 struct {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 42a35d94b82c..0548eb201e05 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1316,32 +1316,85 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
1316} 1316}
1317#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ 1317#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1318 1318
1319#if USE_SPLIT_PTLOCKS 1319#if USE_SPLIT_PTE_PTLOCKS
1320/* 1320#if BLOATED_SPINLOCKS
1321 * We tuck a spinlock to guard each pagetable page into its struct page, 1321void __init ptlock_cache_init(void);
1322 * at page->private, with BUILD_BUG_ON to make sure that this will not 1322extern bool ptlock_alloc(struct page *page);
1323 * overflow into the next struct page (as it might with DEBUG_SPINLOCK). 1323extern void ptlock_free(struct page *page);
1324 * When freeing, reset page->mapping so free_pages_check won't complain. 1324
1325 */ 1325static inline spinlock_t *ptlock_ptr(struct page *page)
1326#define __pte_lockptr(page) &((page)->ptl) 1326{
1327#define pte_lock_init(_page) do { \ 1327 return page->ptl;
1328 spin_lock_init(__pte_lockptr(_page)); \ 1328}
1329} while (0) 1329#else /* BLOATED_SPINLOCKS */
1330#define pte_lock_deinit(page) ((page)->mapping = NULL) 1330static inline void ptlock_cache_init(void) {}
1331#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));}) 1331static inline bool ptlock_alloc(struct page *page)
1332#else /* !USE_SPLIT_PTLOCKS */ 1332{
1333 return true;
1334}
1335
1336static inline void ptlock_free(struct page *page)
1337{
1338}
1339
1340static inline spinlock_t *ptlock_ptr(struct page *page)
1341{
1342 return &page->ptl;
1343}
1344#endif /* BLOATED_SPINLOCKS */
1345
1346static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1347{
1348 return ptlock_ptr(pmd_page(*pmd));
1349}
1350
1351static inline bool ptlock_init(struct page *page)
1352{
1353 /*
1354 * prep_new_page() initialize page->private (and therefore page->ptl)
1355 * with 0. Make sure nobody took it in use in between.
1356 *
1357 * It can happen if arch try to use slab for page table allocation:
1358 * slab code uses page->slab_cache and page->first_page (for tail
1359 * pages), which share storage with page->ptl.
1360 */
1361 VM_BUG_ON(*(unsigned long *)&page->ptl);
1362 if (!ptlock_alloc(page))
1363 return false;
1364 spin_lock_init(ptlock_ptr(page));
1365 return true;
1366}
1367
1368/* Reset page->mapping so free_pages_check won't complain. */
1369static inline void pte_lock_deinit(struct page *page)
1370{
1371 page->mapping = NULL;
1372 ptlock_free(page);
1373}
1374
1375#else /* !USE_SPLIT_PTE_PTLOCKS */
1333/* 1376/*
1334 * We use mm->page_table_lock to guard all pagetable pages of the mm. 1377 * We use mm->page_table_lock to guard all pagetable pages of the mm.
1335 */ 1378 */
1336#define pte_lock_init(page) do {} while (0) 1379static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1337#define pte_lock_deinit(page) do {} while (0) 1380{
1338#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) 1381 return &mm->page_table_lock;
1339#endif /* USE_SPLIT_PTLOCKS */ 1382}
1383static inline void ptlock_cache_init(void) {}
1384static inline bool ptlock_init(struct page *page) { return true; }
1385static inline void pte_lock_deinit(struct page *page) {}
1386#endif /* USE_SPLIT_PTE_PTLOCKS */
1387
1388static inline void pgtable_init(void)
1389{
1390 ptlock_cache_init();
1391 pgtable_cache_init();
1392}
1340 1393
1341static inline void pgtable_page_ctor(struct page *page) 1394static inline bool pgtable_page_ctor(struct page *page)
1342{ 1395{
1343 pte_lock_init(page);
1344 inc_zone_page_state(page, NR_PAGETABLE); 1396 inc_zone_page_state(page, NR_PAGETABLE);
1397 return ptlock_init(page);
1345} 1398}
1346 1399
1347static inline void pgtable_page_dtor(struct page *page) 1400static inline void pgtable_page_dtor(struct page *page)
@@ -1378,6 +1431,52 @@ static inline void pgtable_page_dtor(struct page *page)
1378 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ 1431 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1379 NULL: pte_offset_kernel(pmd, address)) 1432 NULL: pte_offset_kernel(pmd, address))
1380 1433
1434#if USE_SPLIT_PMD_PTLOCKS
1435
1436static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1437{
1438 return ptlock_ptr(virt_to_page(pmd));
1439}
1440
1441static inline bool pgtable_pmd_page_ctor(struct page *page)
1442{
1443#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1444 page->pmd_huge_pte = NULL;
1445#endif
1446 return ptlock_init(page);
1447}
1448
1449static inline void pgtable_pmd_page_dtor(struct page *page)
1450{
1451#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1452 VM_BUG_ON(page->pmd_huge_pte);
1453#endif
1454 ptlock_free(page);
1455}
1456
1457#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)
1458
1459#else
1460
1461static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1462{
1463 return &mm->page_table_lock;
1464}
1465
1466static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1467static inline void pgtable_pmd_page_dtor(struct page *page) {}
1468
1469#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1470
1471#endif
1472
1473static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1474{
1475 spinlock_t *ptl = pmd_lockptr(mm, pmd);
1476 spin_lock(ptl);
1477 return ptl;
1478}
1479
1381extern void free_area_init(unsigned long * zones_size); 1480extern void free_area_init(unsigned long * zones_size);
1382extern void free_area_init_node(int nid, unsigned long * zones_size, 1481extern void free_area_init_node(int nid, unsigned long * zones_size,
1383 unsigned long zone_start_pfn, unsigned long *zholes_size); 1482 unsigned long zone_start_pfn, unsigned long *zholes_size);
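
pte_lockptr() and the new pmd_lock()/pmd_lockptr() helpers hide whether the lock is split per page-table page or is mm->page_table_lock, so callers are written the same way in both configurations. A minimal sketch (the surrounding fault-path fragment is hypothetical):

    spinlock_t *ptl;

    ptl = pmd_lock(mm, pmd);                /* pmd-level lock, taken */
    /* ... operate on *pmd ... */
    spin_unlock(ptl);

    ptl = pte_lockptr(mm, pmd);             /* pte-level lock, caller locks */
    spin_lock(ptl);
    /* ... operate on the ptes under pmd ... */
    spin_unlock(ptl);
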
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index a3198e5aaf4e..10f5a7272b80 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -23,7 +23,9 @@
23 23
24struct address_space; 24struct address_space;
25 25
26#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) 26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
28 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
27 29
28/* 30/*
29 * Each physical page in the system has a struct page associated with 31 * Each physical page in the system has a struct page associated with
@@ -63,6 +65,9 @@ struct page {
63 * this page is only used to 65 * this page is only used to
64 * free other pages. 66 * free other pages.
65 */ 67 */
68#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
69 pgtable_t pmd_huge_pte; /* protected by page->ptl */
70#endif
66 }; 71 };
67 72
68 union { 73 union {
@@ -141,9 +146,13 @@ struct page {
141 * indicates order in the buddy 146 * indicates order in the buddy
142 * system if PG_buddy is set. 147 * system if PG_buddy is set.
143 */ 148 */
144#if USE_SPLIT_PTLOCKS 149#if USE_SPLIT_PTE_PTLOCKS
150#if BLOATED_SPINLOCKS
151 spinlock_t *ptl;
152#else
145 spinlock_t ptl; 153 spinlock_t ptl;
146#endif 154#endif
155#endif
147 struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ 156 struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
148 struct page *first_page; /* Compound tail pages */ 157 struct page *first_page; /* Compound tail pages */
149 }; 158 };
@@ -309,14 +318,14 @@ enum {
309 NR_MM_COUNTERS 318 NR_MM_COUNTERS
310}; 319};
311 320
312#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU) 321#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
313#define SPLIT_RSS_COUNTING 322#define SPLIT_RSS_COUNTING
314/* per-thread cached information, */ 323/* per-thread cached information, */
315struct task_rss_stat { 324struct task_rss_stat {
316 int events; /* for synchronization threshold */ 325 int events; /* for synchronization threshold */
317 int count[NR_MM_COUNTERS]; 326 int count[NR_MM_COUNTERS];
318}; 327};
319#endif /* USE_SPLIT_PTLOCKS */ 328#endif /* USE_SPLIT_PTE_PTLOCKS */
320 329
321struct mm_rss_stat { 330struct mm_rss_stat {
322 atomic_long_t count[NR_MM_COUNTERS]; 331 atomic_long_t count[NR_MM_COUNTERS];
@@ -339,6 +348,7 @@ struct mm_struct {
339 pgd_t * pgd; 348 pgd_t * pgd;
340 atomic_t mm_users; /* How many users with user space? */ 349 atomic_t mm_users; /* How many users with user space? */
341 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ 350 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
351 atomic_long_t nr_ptes; /* Page table pages */
342 int map_count; /* number of VMAs */ 352 int map_count; /* number of VMAs */
343 353
344 spinlock_t page_table_lock; /* Protects page tables and some counters */ 354 spinlock_t page_table_lock; /* Protects page tables and some counters */
@@ -360,7 +370,6 @@ struct mm_struct {
360 unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */ 370 unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
361 unsigned long stack_vm; /* VM_GROWSUP/DOWN */ 371 unsigned long stack_vm; /* VM_GROWSUP/DOWN */
362 unsigned long def_flags; 372 unsigned long def_flags;
363 unsigned long nr_ptes; /* Page table pages */
364 unsigned long start_code, end_code, start_data, end_data; 373 unsigned long start_code, end_code, start_data, end_data;
365 unsigned long start_brk, brk, start_stack; 374 unsigned long start_brk, brk, start_stack;
366 unsigned long arg_start, arg_end, env_start, env_end; 375 unsigned long arg_start, arg_end, env_start, env_end;
@@ -406,7 +415,7 @@ struct mm_struct {
406#ifdef CONFIG_MMU_NOTIFIER 415#ifdef CONFIG_MMU_NOTIFIER
407 struct mmu_notifier_mm *mmu_notifier_mm; 416 struct mmu_notifier_mm *mmu_notifier_mm;
408#endif 417#endif
409#ifdef CONFIG_TRANSPARENT_HUGEPAGE 418#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
410 pgtable_t pmd_huge_pte; /* protected by page_table_lock */ 419 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
411#endif 420#endif
412#ifdef CONFIG_CPUMASK_OFFSTACK 421#ifdef CONFIG_CPUMASK_OFFSTACK
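
With nr_ptes now an atomic_long_t in the always-present counter block, page-table accounting no longer relies on page_table_lock. A sketch of the accessor pattern (the helper itself is hypothetical):

    static unsigned long demo_pte_pages(struct mm_struct *mm)
    {
            atomic_long_inc(&mm->nr_ptes);  /* a page table page was allocated */
            atomic_long_dec(&mm->nr_ptes);  /* ... and freed again */
            return atomic_long_read(&mm->nr_ptes);
    }
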
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 4e32edc8f506..52e0097f61f0 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -20,6 +20,7 @@ struct seq_file {
20 size_t size; 20 size_t size;
21 size_t from; 21 size_t from;
22 size_t count; 22 size_t count;
23 size_t pad_until;
23 loff_t index; 24 loff_t index;
24 loff_t read_pos; 25 loff_t read_pos;
25 u64 version; 26 u64 version;
@@ -79,6 +80,20 @@ static inline void seq_commit(struct seq_file *m, int num)
79 } 80 }
80} 81}
81 82
83/**
84 * seq_setwidth - set padding width
85 * @m: the seq_file handle
86 * @size: the max number of bytes to pad.
87 *
88 * Call seq_setwidth() for setting max width, then call seq_printf() etc. and
89 * finally call seq_pad() to pad the remaining bytes.
90 */
91static inline void seq_setwidth(struct seq_file *m, size_t size)
92{
93 m->pad_until = m->count + size;
94}
95void seq_pad(struct seq_file *m, char c);
96
82char *mangle_path(char *s, const char *p, const char *esc); 97char *mangle_path(char *s, const char *p, const char *esc);
83int seq_open(struct file *, const struct seq_operations *); 98int seq_open(struct file *, const struct seq_operations *);
84ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); 99ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 731f5237d5f4..5da22ee42e16 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -49,6 +49,9 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
49 smp_call_func_t func, void *info, bool wait, 49 smp_call_func_t func, void *info, bool wait,
50 gfp_t gfp_flags); 50 gfp_t gfp_flags);
51 51
52void __smp_call_function_single(int cpuid, struct call_single_data *data,
53 int wait);
54
52#ifdef CONFIG_SMP 55#ifdef CONFIG_SMP
53 56
54#include <linux/preempt.h> 57#include <linux/preempt.h>
@@ -95,9 +98,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait);
95void smp_call_function_many(const struct cpumask *mask, 98void smp_call_function_many(const struct cpumask *mask,
96 smp_call_func_t func, void *info, bool wait); 99 smp_call_func_t func, void *info, bool wait);
97 100
98void __smp_call_function_single(int cpuid, struct call_single_data *data,
99 int wait);
100
101int smp_call_function_any(const struct cpumask *mask, 101int smp_call_function_any(const struct cpumask *mask,
102 smp_call_func_t func, void *info, int wait); 102 smp_call_func_t func, void *info, int wait);
103 103
@@ -106,14 +106,10 @@ void kick_all_cpus_sync(void);
106/* 106/*
107 * Generic and arch helpers 107 * Generic and arch helpers
108 */ 108 */
109#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
110void __init call_function_init(void); 109void __init call_function_init(void);
111void generic_smp_call_function_single_interrupt(void); 110void generic_smp_call_function_single_interrupt(void);
112#define generic_smp_call_function_interrupt \ 111#define generic_smp_call_function_interrupt \
113 generic_smp_call_function_single_interrupt 112 generic_smp_call_function_single_interrupt
114#else
115static inline void call_function_init(void) { }
116#endif
117 113
118/* 114/*
119 * Mark the boot cpu "online" so that it can call console drivers in 115 * Mark the boot cpu "online" so that it can call console drivers in
@@ -155,12 +151,6 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
155 151
156static inline void kick_all_cpus_sync(void) { } 152static inline void kick_all_cpus_sync(void) { }
157 153
158static inline void __smp_call_function_single(int cpuid,
159 struct call_single_data *data, int wait)
160{
161 on_each_cpu(data->func, data->info, wait);
162}
163
164#endif /* !SMP */ 154#endif /* !SMP */
165 155
166/* 156/*
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 8d4fa82bfb91..c0f75261a728 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -139,7 +139,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
139 139
140extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 140extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
141 unsigned long address); 141 unsigned long address);
142extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte); 142extern void migration_entry_wait_huge(struct vm_area_struct *vma,
143 struct mm_struct *mm, pte_t *pte);
143#else 144#else
144 145
145#define make_migration_entry(page, write) swp_entry(0, 0) 146#define make_migration_entry(page, write) swp_entry(0, 0)
@@ -151,8 +152,8 @@ static inline int is_migration_entry(swp_entry_t swp)
151static inline void make_migration_entry_read(swp_entry_t *entryp) { } 152static inline void make_migration_entry_read(swp_entry_t *entryp) { }
152static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 153static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
153 unsigned long address) { } 154 unsigned long address) { }
154static inline void migration_entry_wait_huge(struct mm_struct *mm, 155static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
155 pte_t *pte) { } 156 struct mm_struct *mm, pte_t *pte) { }
156static inline int is_write_migration_entry(swp_entry_t entry) 157static inline int is_write_migration_entry(swp_entry_t entry)
157{ 158{
158 return 0; 159 return 0;
diff --git a/init/main.c b/init/main.c
index 6ad1a533a8c7..5f191133376f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -473,7 +473,7 @@ static void __init mm_init(void)
473 mem_init(); 473 mem_init();
474 kmem_cache_init(); 474 kmem_cache_init();
475 percpu_init_late(); 475 percpu_init_late();
476 pgtable_cache_init(); 476 pgtable_init();
477 vmalloc_init(); 477 vmalloc_init();
478} 478}
479 479
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 94fabd534b03..2a202a846757 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -55,4 +55,4 @@ config HZ
55 default 1000 if HZ_1000 55 default 1000 if HZ_1000
56 56
57config SCHED_HRTICK 57config SCHED_HRTICK
58 def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS) 58 def_bool HIGH_RES_TIMERS
diff --git a/kernel/bounds.c b/kernel/bounds.c
index e8ca97b5c386..578782ef6ae1 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -11,6 +11,7 @@
11#include <linux/kbuild.h> 11#include <linux/kbuild.h>
12#include <linux/page_cgroup.h> 12#include <linux/page_cgroup.h>
13#include <linux/log2.h> 13#include <linux/log2.h>
14#include <linux/spinlock.h>
14 15
15void foo(void) 16void foo(void)
16{ 17{
@@ -21,5 +22,6 @@ void foo(void)
21#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
22 DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); 23 DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
23#endif 24#endif
25 DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int));
24 /* End of constants */ 26 /* End of constants */
25} 27}
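
The DEFINE() above is harvested by kbuild into <generated/bounds.h>, so BLOATED_SPINLOCKS is an ordinary preprocessor constant by the time lockref.h and mm_types.h test it. Roughly what the generated header contains (the exact formatting of the generated line is illustrative):

    #define BLOATED_SPINLOCKS 1 /* sizeof(spinlock_t) > sizeof(int) */

On configurations with spinlock debugging enabled, spinlock_t outgrows an int, page->ptl becomes a pointer, and ptlock_alloc() allocates the lock separately rather than bloating every struct page.
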
diff --git a/kernel/fork.c b/kernel/fork.c
index f6d11fc67f72..728d5be9548c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -532,7 +532,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
532 mm->flags = (current->mm) ? 532 mm->flags = (current->mm) ?
533 (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; 533 (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
534 mm->core_state = NULL; 534 mm->core_state = NULL;
535 mm->nr_ptes = 0; 535 atomic_long_set(&mm->nr_ptes, 0);
536 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); 536 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
537 spin_lock_init(&mm->page_table_lock); 537 spin_lock_init(&mm->page_table_lock);
538 mm_init_aio(mm); 538 mm_init_aio(mm);
@@ -560,7 +560,7 @@ static void check_mm(struct mm_struct *mm)
560 "mm:%p idx:%d val:%ld\n", mm, i, x); 560 "mm:%p idx:%d val:%ld\n", mm, i, x);
561 } 561 }
562 562
563#ifdef CONFIG_TRANSPARENT_HUGEPAGE 563#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
564 VM_BUG_ON(mm->pmd_huge_pte); 564 VM_BUG_ON(mm->pmd_huge_pte);
565#endif 565#endif
566} 566}
@@ -814,7 +814,7 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
814 memcpy(mm, oldmm, sizeof(*mm)); 814 memcpy(mm, oldmm, sizeof(*mm));
815 mm_init_cpumask(mm); 815 mm_init_cpumask(mm);
816 816
817#ifdef CONFIG_TRANSPARENT_HUGEPAGE 817#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
818 mm->pmd_huge_pte = NULL; 818 mm->pmd_huge_pte = NULL;
819#endif 819#endif
820 if (!mm_init(mm, tsk)) 820 if (!mm_init(mm, tsk))
diff --git a/kernel/smp.c b/kernel/smp.c
index 46116100f0ee..bd9f94028838 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -15,7 +15,6 @@
15 15
16#include "smpboot.h" 16#include "smpboot.h"
17 17
18#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
19enum { 18enum {
20 CSD_FLAG_LOCK = 0x01, 19 CSD_FLAG_LOCK = 0x01,
21 CSD_FLAG_WAIT = 0x02, 20 CSD_FLAG_WAIT = 0x02,
@@ -140,8 +139,7 @@ static void csd_unlock(struct call_single_data *csd)
140 * for execution on the given CPU. data must already have 139 * for execution on the given CPU. data must already have
141 * ->func, ->info, and ->flags set. 140 * ->func, ->info, and ->flags set.
142 */ 141 */
143static 142static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
144void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
145{ 143{
146 struct call_single_queue *dst = &per_cpu(call_single_queue, cpu); 144 struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
147 unsigned long flags; 145 unsigned long flags;
@@ -464,7 +462,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
464 return 0; 462 return 0;
465} 463}
466EXPORT_SYMBOL(smp_call_function); 464EXPORT_SYMBOL(smp_call_function);
467#endif /* USE_GENERIC_SMP_HELPERS */
468 465
469/* Setup configured maximum number of CPUs to activate */ 466/* Setup configured maximum number of CPUs to activate */
470unsigned int setup_max_cpus = NR_CPUS; 467unsigned int setup_max_cpus = NR_CPUS;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b24988353458..11025ccc06dd 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,8 +6,6 @@
6 * Distribute under GPLv2. 6 * Distribute under GPLv2.
7 * 7 *
8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) 8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
9 *
10 * Remote softirq infrastructure is by Jens Axboe.
11 */ 9 */
12 10
13#include <linux/export.h> 11#include <linux/export.h>
@@ -627,146 +625,17 @@ void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
627} 625}
628EXPORT_SYMBOL_GPL(tasklet_hrtimer_init); 626EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
629 627
630/*
631 * Remote softirq bits
632 */
633
634DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
635EXPORT_PER_CPU_SYMBOL(softirq_work_list);
636
637static void __local_trigger(struct call_single_data *cp, int softirq)
638{
639 struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
640
641 list_add_tail(&cp->list, head);
642
643 /* Trigger the softirq only if the list was previously empty. */
644 if (head->next == &cp->list)
645 raise_softirq_irqoff(softirq);
646}
647
648#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
649static void remote_softirq_receive(void *data)
650{
651 struct call_single_data *cp = data;
652 unsigned long flags;
653 int softirq;
654
655 softirq = *(int *)cp->info;
656 local_irq_save(flags);
657 __local_trigger(cp, softirq);
658 local_irq_restore(flags);
659}
660
661static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
662{
663 if (cpu_online(cpu)) {
664 cp->func = remote_softirq_receive;
665 cp->info = &softirq;
666 cp->flags = 0;
667
668 __smp_call_function_single(cpu, cp, 0);
669 return 0;
670 }
671 return 1;
672}
673#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
674static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
675{
676 return 1;
677}
678#endif
679
680/**
681 * __send_remote_softirq - try to schedule softirq work on a remote cpu
682 * @cp: private SMP call function data area
683 * @cpu: the remote cpu
684 * @this_cpu: the currently executing cpu
685 * @softirq: the softirq for the work
686 *
687 * Attempt to schedule softirq work on a remote cpu. If this cannot be
688 * done, the work is instead queued up on the local cpu.
689 *
690 * Interrupts must be disabled.
691 */
692void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
693{
694 if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
695 __local_trigger(cp, softirq);
696}
697EXPORT_SYMBOL(__send_remote_softirq);
698
699/**
700 * send_remote_softirq - try to schedule softirq work on a remote cpu
701 * @cp: private SMP call function data area
702 * @cpu: the remote cpu
703 * @softirq: the softirq for the work
704 *
705 * Like __send_remote_softirq except that disabling interrupts and
706 * computing the current cpu is done for the caller.
707 */
708void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
709{
710 unsigned long flags;
711 int this_cpu;
712
713 local_irq_save(flags);
714 this_cpu = smp_processor_id();
715 __send_remote_softirq(cp, cpu, this_cpu, softirq);
716 local_irq_restore(flags);
717}
718EXPORT_SYMBOL(send_remote_softirq);
719
720static int remote_softirq_cpu_notify(struct notifier_block *self,
721 unsigned long action, void *hcpu)
722{
723 /*
724 * If a CPU goes away, splice its entries to the current CPU
725 * and trigger a run of the softirq
726 */
727 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
728 int cpu = (unsigned long) hcpu;
729 int i;
730
731 local_irq_disable();
732 for (i = 0; i < NR_SOFTIRQS; i++) {
733 struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
734 struct list_head *local_head;
735
736 if (list_empty(head))
737 continue;
738
739 local_head = &__get_cpu_var(softirq_work_list[i]);
740 list_splice_init(head, local_head);
741 raise_softirq_irqoff(i);
742 }
743 local_irq_enable();
744 }
745
746 return NOTIFY_OK;
747}
748
749static struct notifier_block remote_softirq_cpu_notifier = {
750 .notifier_call = remote_softirq_cpu_notify,
751};
752
753void __init softirq_init(void) 628void __init softirq_init(void)
754{ 629{
755 int cpu; 630 int cpu;
756 631
757 for_each_possible_cpu(cpu) { 632 for_each_possible_cpu(cpu) {
758 int i;
759
760 per_cpu(tasklet_vec, cpu).tail = 633 per_cpu(tasklet_vec, cpu).tail =
761 &per_cpu(tasklet_vec, cpu).head; 634 &per_cpu(tasklet_vec, cpu).head;
762 per_cpu(tasklet_hi_vec, cpu).tail = 635 per_cpu(tasklet_hi_vec, cpu).tail =
763 &per_cpu(tasklet_hi_vec, cpu).head; 636 &per_cpu(tasklet_hi_vec, cpu).head;
764 for (i = 0; i < NR_SOFTIRQS; i++)
765 INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
766 } 637 }
767 638
768 register_hotcpu_notifier(&remote_softirq_cpu_notifier);
769
770 open_softirq(TASKLET_SOFTIRQ, tasklet_action); 639 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
771 open_softirq(HI_SOFTIRQ, tasklet_hi_action); 640 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
772} 641}
diff --git a/kernel/up.c b/kernel/up.c
index 630d72bf7e41..509403e3fbc6 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -22,6 +22,17 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
22} 22}
23EXPORT_SYMBOL(smp_call_function_single); 23EXPORT_SYMBOL(smp_call_function_single);
24 24
25void __smp_call_function_single(int cpu, struct call_single_data *csd,
26 int wait)
27{
28 unsigned long flags;
29
30 local_irq_save(flags);
31 csd->func(csd->info);
32 local_irq_restore(flags);
33}
34EXPORT_SYMBOL(__smp_call_function_single);
35
25int on_each_cpu(smp_call_func_t func, void *info, int wait) 36int on_each_cpu(smp_call_func_t func, void *info, int wait)
26{ 37{
27 unsigned long flags; 38 unsigned long flags;
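
The UP stub simply runs the callback locally with interrupts disabled, which lets callers use a single code path regardless of CONFIG_SMP. A hypothetical caller sketch (the csd fields follow what kernel/smp.c expects to be set before the call):

    static void demo_func(void *info)
    {
            /* runs on the target cpu, or locally on UP */
    }

    static struct call_single_data demo_csd = {
            .func = demo_func,
    };

    void demo(void)
    {
            __smp_call_function_single(0, &demo_csd, 0);    /* cpu 0, no wait */
    }
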
diff --git a/lib/Kconfig b/lib/Kconfig
index 75485e163ca3..06dc74200a51 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,13 +51,6 @@ config PERCPU_RWSEM
51config ARCH_USE_CMPXCHG_LOCKREF 51config ARCH_USE_CMPXCHG_LOCKREF
52 bool 52 bool
53 53
54config CMPXCHG_LOCKREF
55 def_bool y if ARCH_USE_CMPXCHG_LOCKREF
56 depends on SMP
57 depends on !GENERIC_LOCKBREAK
58 depends on !DEBUG_SPINLOCK
59 depends on !DEBUG_LOCK_ALLOC
60
61config CRC_CCITT 54config CRC_CCITT
62 tristate "CRC-CCITT functions" 55 tristate "CRC-CCITT functions"
63 help 56 help
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 7b7f83027b7b..d79b9d222065 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -215,7 +215,7 @@ static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
215 * incrementing the fifo->in index counter 215 * incrementing the fifo->in index counter
216 */ 216 */
217 smp_wmb(); 217 smp_wmb();
218 *copied = len - ret; 218 *copied = len - ret * esize;
219 /* return the number of elements which are not copied */ 219 /* return the number of elements which are not copied */
220 return ret; 220 return ret;
221} 221}
@@ -275,7 +275,7 @@ static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
275 * incrementing the fifo->out index counter 275 * incrementing the fifo->out index counter
276 */ 276 */
277 smp_wmb(); 277 smp_wmb();
278 *copied = len - ret; 278 *copied = len - ret * esize;
279 /* return the number of elements which are not copied */ 279 /* return the number of elements which are not copied */
280 return ret; 280 return ret;
281} 281}
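
The fix above keeps the units consistent: ret counts uncopied elements while *copied reports bytes, so the shortfall must be scaled by the element size. A worked example with illustrative numbers:

    unsigned int esize = 4;                 /* bytes per element */
    unsigned int len = 12;                  /* bytes requested */
    unsigned long ret = 1;                  /* elements left uncopied */
    unsigned int copied = len - ret * esize;        /* 8 bytes, not 11 */
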
diff --git a/lib/llist.c b/lib/llist.c
index 4a70d120138c..f76196d07409 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -81,3 +81,25 @@ struct llist_node *llist_del_first(struct llist_head *head)
81 return entry; 81 return entry;
82} 82}
83EXPORT_SYMBOL_GPL(llist_del_first); 83EXPORT_SYMBOL_GPL(llist_del_first);
84
85/**
86 * llist_reverse_order - reverse order of a llist chain
87 * @head: first item of the list to be reversed
88 *
89 * Reverse the order of a chain of llist entries and return the
90 * new first entry.
91 */
92struct llist_node *llist_reverse_order(struct llist_node *head)
93{
94 struct llist_node *new_head = NULL;
95
96 while (head) {
97 struct llist_node *tmp = head;
98 head = head->next;
99 tmp->next = new_head;
100 new_head = tmp;
101 }
102
103 return new_head;
104}
105EXPORT_SYMBOL_GPL(llist_reverse_order);
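
llist_add() builds chains in LIFO order, so a consumer that wants arrival order deletes the whole chain and reverses it once. A typical drain sketch (the node type is hypothetical):

    struct demo_node {
            struct llist_node node;
            int payload;
    };

    static void demo_drain(struct llist_head *head)
    {
            struct llist_node *first = llist_del_all(head);
            struct demo_node *pos;

            first = llist_reverse_order(first);     /* oldest entry first */
            llist_for_each_entry(pos, first, node)
                    pr_info("payload %d\n", pos->payload);
    }
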
diff --git a/lib/lockref.c b/lib/lockref.c
index af6e95d0bed6..d2b123f8456b 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,7 +1,7 @@
1#include <linux/export.h> 1#include <linux/export.h>
2#include <linux/lockref.h> 2#include <linux/lockref.h>
3 3
4#ifdef CONFIG_CMPXCHG_LOCKREF 4#if USE_CMPXCHG_LOCKREF
5 5
6/* 6/*
7 * Allow weakly-ordered memory architectures to provide barrier-less 7 * Allow weakly-ordered memory architectures to provide barrier-less
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 48586ac3a62e..10909c571494 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1712,18 +1712,16 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1712 break; 1712 break;
1713 1713
1714 case FORMAT_TYPE_NRCHARS: { 1714 case FORMAT_TYPE_NRCHARS: {
1715 u8 qualifier = spec.qualifier; 1715 /*
1716 * Since %n poses a greater security risk than
1717 * utility, ignore %n and skip its argument.
1718 */
1719 void *skip_arg;
1716 1720
1717 if (qualifier == 'l') { 1721 WARN_ONCE(1, "Please remove ignored %%n in '%s'\n",
1718 long *ip = va_arg(args, long *); 1722 old_fmt);
1719 *ip = (str - buf); 1723
1720 } else if (_tolower(qualifier) == 'z') { 1724 skip_arg = va_arg(args, void *);
1721 size_t *ip = va_arg(args, size_t *);
1722 *ip = (str - buf);
1723 } else {
1724 int *ip = va_arg(args, int *);
1725 *ip = (str - buf);
1726 }
1727 break; 1725 break;
1728 } 1726 }
1729 1727
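
Since %n is now consumed but ignored, in-kernel format strings track the write position through the return value instead. A sketch of the replacement idiom (the buffer contents are illustrative):

    char buf[64];
    int pos;

    pos = scnprintf(buf, sizeof(buf), "%08lx-%08lx ", 0UL, 4096UL);
    /* pos is what "%n" used to report; buf + pos is the tail */
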
diff --git a/mm/Kconfig b/mm/Kconfig
index 3f4ffda152bb..de31af256207 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -218,9 +218,11 @@ config SPLIT_PTLOCK_CPUS
218 int 218 int
219 default "999999" if ARM && !CPU_CACHE_VIPT 219 default "999999" if ARM && !CPU_CACHE_VIPT
220 default "999999" if PARISC && !PA20 220 default "999999" if PARISC && !PA20
221 default "999999" if DEBUG_SPINLOCK || DEBUG_LOCK_ALLOC
222 default "4" 221 default "4"
223 222
223config ARCH_ENABLE_SPLIT_PMD_PTLOCK
224 boolean
225
224# 226#
225# support for memory balloon compaction 227# support for memory balloon compaction
226config BALLOON_COMPACTION 228config BALLOON_COMPACTION
diff --git a/mm/filemap.c b/mm/filemap.c
index ae4846ff4849..b7749a92021c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1090,7 +1090,6 @@ static void shrink_readahead_size_eio(struct file *filp,
1090 * @filp: the file to read 1090 * @filp: the file to read
1091 * @ppos: current file position 1091 * @ppos: current file position
1092 * @desc: read_descriptor 1092 * @desc: read_descriptor
1093 * @actor: read method
1094 * 1093 *
1095 * This is a generic file read routine, and uses the 1094 * This is a generic file read routine, and uses the
1096 * mapping->a_ops->readpage() function for the actual low-level stuff. 1095 * mapping->a_ops->readpage() function for the actual low-level stuff.
@@ -1099,7 +1098,7 @@ static void shrink_readahead_size_eio(struct file *filp,
1099 * of the logic when it comes to error handling etc. 1098 * of the logic when it comes to error handling etc.
1100 */ 1099 */
1101static void do_generic_file_read(struct file *filp, loff_t *ppos, 1100static void do_generic_file_read(struct file *filp, loff_t *ppos,
1102 read_descriptor_t *desc, read_actor_t actor) 1101 read_descriptor_t *desc)
1103{ 1102{
1104 struct address_space *mapping = filp->f_mapping; 1103 struct address_space *mapping = filp->f_mapping;
1105 struct inode *inode = mapping->host; 1104 struct inode *inode = mapping->host;
@@ -1200,13 +1199,14 @@ page_ok:
1200 * Ok, we have the page, and it's up-to-date, so 1199 * Ok, we have the page, and it's up-to-date, so
1201 * now we can copy it to user space... 1200 * now we can copy it to user space...
1202 * 1201 *
1203 * The actor routine returns how many bytes were actually used.. 1202 * The file_read_actor routine returns how many bytes were
1203 * actually used..
1204 * NOTE! This may not be the same as how much of a user buffer 1204 * NOTE! This may not be the same as how much of a user buffer
1205 * we filled up (we may be padding etc), so we can only update 1205 * we filled up (we may be padding etc), so we can only update
1206 * "pos" here (the actor routine has to update the user buffer 1206 * "pos" here (the actor routine has to update the user buffer
1207 * pointers and the remaining count). 1207 * pointers and the remaining count).
1208 */ 1208 */
1209 ret = actor(desc, page, offset, nr); 1209 ret = file_read_actor(desc, page, offset, nr);
1210 offset += ret; 1210 offset += ret;
1211 index += offset >> PAGE_CACHE_SHIFT; 1211 index += offset >> PAGE_CACHE_SHIFT;
1212 offset &= ~PAGE_CACHE_MASK; 1212 offset &= ~PAGE_CACHE_MASK;
@@ -1479,7 +1479,7 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1479 if (desc.count == 0) 1479 if (desc.count == 0)
1480 continue; 1480 continue;
1481 desc.error = 0; 1481 desc.error = 0;
1482 do_generic_file_read(filp, ppos, &desc, file_read_actor); 1482 do_generic_file_read(filp, ppos, &desc);
1483 retval += desc.written; 1483 retval += desc.written;
1484 if (desc.error) { 1484 if (desc.error) {
1485 retval = retval ?: desc.error; 1485 retval = retval ?: desc.error;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0556c6a44959..bccd5a628ea6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -710,6 +710,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
710 struct page *page) 710 struct page *page)
711{ 711{
712 pgtable_t pgtable; 712 pgtable_t pgtable;
713 spinlock_t *ptl;
713 714
714 VM_BUG_ON(!PageCompound(page)); 715 VM_BUG_ON(!PageCompound(page));
715 pgtable = pte_alloc_one(mm, haddr); 716 pgtable = pte_alloc_one(mm, haddr);
@@ -724,9 +725,9 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
724 */ 725 */
725 __SetPageUptodate(page); 726 __SetPageUptodate(page);
726 727
727 spin_lock(&mm->page_table_lock); 728 ptl = pmd_lock(mm, pmd);
728 if (unlikely(!pmd_none(*pmd))) { 729 if (unlikely(!pmd_none(*pmd))) {
729 spin_unlock(&mm->page_table_lock); 730 spin_unlock(ptl);
730 mem_cgroup_uncharge_page(page); 731 mem_cgroup_uncharge_page(page);
731 put_page(page); 732 put_page(page);
732 pte_free(mm, pgtable); 733 pte_free(mm, pgtable);
@@ -738,8 +739,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
738 pgtable_trans_huge_deposit(mm, pmd, pgtable); 739 pgtable_trans_huge_deposit(mm, pmd, pgtable);
739 set_pmd_at(mm, haddr, pmd, entry); 740 set_pmd_at(mm, haddr, pmd, entry);
740 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 741 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
741 mm->nr_ptes++; 742 atomic_long_inc(&mm->nr_ptes);
742 spin_unlock(&mm->page_table_lock); 743 spin_unlock(ptl);
743 } 744 }
744 745
745 return 0; 746 return 0;
@@ -759,6 +760,7 @@ static inline struct page *alloc_hugepage_vma(int defrag,
759 HPAGE_PMD_ORDER, vma, haddr, nd); 760 HPAGE_PMD_ORDER, vma, haddr, nd);
760} 761}
761 762
763/* Caller must hold page table lock. */
762static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, 764static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
763 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, 765 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
764 struct page *zero_page) 766 struct page *zero_page)
@@ -771,7 +773,7 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
771 entry = pmd_mkhuge(entry); 773 entry = pmd_mkhuge(entry);
772 pgtable_trans_huge_deposit(mm, pmd, pgtable); 774 pgtable_trans_huge_deposit(mm, pmd, pgtable);
773 set_pmd_at(mm, haddr, pmd, entry); 775 set_pmd_at(mm, haddr, pmd, entry);
774 mm->nr_ptes++; 776 atomic_long_inc(&mm->nr_ptes);
775 return true; 777 return true;
776} 778}
777 779
@@ -790,6 +792,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
790 return VM_FAULT_OOM; 792 return VM_FAULT_OOM;
791 if (!(flags & FAULT_FLAG_WRITE) && 793 if (!(flags & FAULT_FLAG_WRITE) &&
792 transparent_hugepage_use_zero_page()) { 794 transparent_hugepage_use_zero_page()) {
795 spinlock_t *ptl;
793 pgtable_t pgtable; 796 pgtable_t pgtable;
794 struct page *zero_page; 797 struct page *zero_page;
795 bool set; 798 bool set;
@@ -802,10 +805,10 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
802 count_vm_event(THP_FAULT_FALLBACK); 805 count_vm_event(THP_FAULT_FALLBACK);
803 return VM_FAULT_FALLBACK; 806 return VM_FAULT_FALLBACK;
804 } 807 }
805 spin_lock(&mm->page_table_lock); 808 ptl = pmd_lock(mm, pmd);
806 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd, 809 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
807 zero_page); 810 zero_page);
808 spin_unlock(&mm->page_table_lock); 811 spin_unlock(ptl);
809 if (!set) { 812 if (!set) {
810 pte_free(mm, pgtable); 813 pte_free(mm, pgtable);
811 put_huge_zero_page(); 814 put_huge_zero_page();
@@ -838,6 +841,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
838 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 841 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
839 struct vm_area_struct *vma) 842 struct vm_area_struct *vma)
840{ 843{
844 spinlock_t *dst_ptl, *src_ptl;
841 struct page *src_page; 845 struct page *src_page;
842 pmd_t pmd; 846 pmd_t pmd;
843 pgtable_t pgtable; 847 pgtable_t pgtable;
@@ -848,8 +852,9 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
848 if (unlikely(!pgtable)) 852 if (unlikely(!pgtable))
849 goto out; 853 goto out;
850 854
851 spin_lock(&dst_mm->page_table_lock); 855 dst_ptl = pmd_lock(dst_mm, dst_pmd);
852 spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING); 856 src_ptl = pmd_lockptr(src_mm, src_pmd);
857 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
853 858
854 ret = -EAGAIN; 859 ret = -EAGAIN;
855 pmd = *src_pmd; 860 pmd = *src_pmd;
@@ -858,7 +863,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
858 goto out_unlock; 863 goto out_unlock;
859 } 864 }
860 /* 865 /*
861 * mm->page_table_lock is enough to be sure that huge zero pmd is not 866 * When page table lock is held, the huge zero pmd should not be
862 * under splitting since we don't split the page itself, only pmd to 867 * under splitting since we don't split the page itself, only pmd to
863 * a page table. 868 * a page table.
864 */ 869 */
@@ -879,8 +884,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
879 } 884 }
880 if (unlikely(pmd_trans_splitting(pmd))) { 885 if (unlikely(pmd_trans_splitting(pmd))) {
881 /* split huge page running from under us */ 886 /* split huge page running from under us */
882 spin_unlock(&src_mm->page_table_lock); 887 spin_unlock(src_ptl);
883 spin_unlock(&dst_mm->page_table_lock); 888 spin_unlock(dst_ptl);
884 pte_free(dst_mm, pgtable); 889 pte_free(dst_mm, pgtable);
885 890
886 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ 891 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
@@ -896,12 +901,12 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
896 pmd = pmd_mkold(pmd_wrprotect(pmd)); 901 pmd = pmd_mkold(pmd_wrprotect(pmd));
897 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 902 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
898 set_pmd_at(dst_mm, addr, dst_pmd, pmd); 903 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
899 dst_mm->nr_ptes++; 904 atomic_long_inc(&dst_mm->nr_ptes);
900 905
901 ret = 0; 906 ret = 0;
902out_unlock: 907out_unlock:
903 spin_unlock(&src_mm->page_table_lock); 908 spin_unlock(src_ptl);
904 spin_unlock(&dst_mm->page_table_lock); 909 spin_unlock(dst_ptl);
905out: 910out:
906 return ret; 911 return ret;
907} 912}
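
copy_huge_pmd() now has to take two of these locks, one per mm. The destination lock is taken first with pmd_lock() and the source lock is nested under it; as a comment in a later hunk notes for move_huge_pmd(), exclusive mmap_sem makes the ordering safe. A condensed sketch of the pairing, with lock_src_dst() as an illustrative name:

#include <linux/mm.h>

static void lock_src_dst(struct mm_struct *dst_mm, pmd_t *dst_pmd,
			 struct mm_struct *src_mm, pmd_t *src_pmd,
			 spinlock_t **dst_ptl, spinlock_t **src_ptl)
{
	*dst_ptl = pmd_lock(dst_mm, dst_pmd);		/* lock dst pmd */
	*src_ptl = pmd_lockptr(src_mm, src_pmd);	/* look up src lock */
	spin_lock_nested(*src_ptl, SINGLE_DEPTH_NESTING); /* nest src under dst */
}

spin_lock_nested() only tells lockdep the nesting is intentional; callers still unlock in reverse order, src first, then dst, as the hunk above does.
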
@@ -912,10 +917,11 @@ void huge_pmd_set_accessed(struct mm_struct *mm,
912 pmd_t *pmd, pmd_t orig_pmd, 917 pmd_t *pmd, pmd_t orig_pmd,
913 int dirty) 918 int dirty)
914{ 919{
920 spinlock_t *ptl;
915 pmd_t entry; 921 pmd_t entry;
916 unsigned long haddr; 922 unsigned long haddr;
917 923
918 spin_lock(&mm->page_table_lock); 924 ptl = pmd_lock(mm, pmd);
919 if (unlikely(!pmd_same(*pmd, orig_pmd))) 925 if (unlikely(!pmd_same(*pmd, orig_pmd)))
920 goto unlock; 926 goto unlock;
921 927
@@ -925,13 +931,14 @@ void huge_pmd_set_accessed(struct mm_struct *mm,
925 update_mmu_cache_pmd(vma, address, pmd); 931 update_mmu_cache_pmd(vma, address, pmd);
926 932
927unlock: 933unlock:
928 spin_unlock(&mm->page_table_lock); 934 spin_unlock(ptl);
929} 935}
930 936
931static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm, 937static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
932 struct vm_area_struct *vma, unsigned long address, 938 struct vm_area_struct *vma, unsigned long address,
933 pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr) 939 pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
934{ 940{
941 spinlock_t *ptl;
935 pgtable_t pgtable; 942 pgtable_t pgtable;
936 pmd_t _pmd; 943 pmd_t _pmd;
937 struct page *page; 944 struct page *page;
@@ -958,7 +965,7 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
958 mmun_end = haddr + HPAGE_PMD_SIZE; 965 mmun_end = haddr + HPAGE_PMD_SIZE;
959 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 966 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
960 967
961 spin_lock(&mm->page_table_lock); 968 ptl = pmd_lock(mm, pmd);
962 if (unlikely(!pmd_same(*pmd, orig_pmd))) 969 if (unlikely(!pmd_same(*pmd, orig_pmd)))
963 goto out_free_page; 970 goto out_free_page;
964 971
@@ -985,7 +992,7 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
985 } 992 }
986 smp_wmb(); /* make pte visible before pmd */ 993 smp_wmb(); /* make pte visible before pmd */
987 pmd_populate(mm, pmd, pgtable); 994 pmd_populate(mm, pmd, pgtable);
988 spin_unlock(&mm->page_table_lock); 995 spin_unlock(ptl);
989 put_huge_zero_page(); 996 put_huge_zero_page();
990 inc_mm_counter(mm, MM_ANONPAGES); 997 inc_mm_counter(mm, MM_ANONPAGES);
991 998
@@ -995,7 +1002,7 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
995out: 1002out:
996 return ret; 1003 return ret;
997out_free_page: 1004out_free_page:
998 spin_unlock(&mm->page_table_lock); 1005 spin_unlock(ptl);
999 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1006 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1000 mem_cgroup_uncharge_page(page); 1007 mem_cgroup_uncharge_page(page);
1001 put_page(page); 1008 put_page(page);
@@ -1009,6 +1016,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1009 struct page *page, 1016 struct page *page,
1010 unsigned long haddr) 1017 unsigned long haddr)
1011{ 1018{
1019 spinlock_t *ptl;
1012 pgtable_t pgtable; 1020 pgtable_t pgtable;
1013 pmd_t _pmd; 1021 pmd_t _pmd;
1014 int ret = 0, i; 1022 int ret = 0, i;
@@ -1055,7 +1063,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1055 mmun_end = haddr + HPAGE_PMD_SIZE; 1063 mmun_end = haddr + HPAGE_PMD_SIZE;
1056 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1064 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1057 1065
1058 spin_lock(&mm->page_table_lock); 1066 ptl = pmd_lock(mm, pmd);
1059 if (unlikely(!pmd_same(*pmd, orig_pmd))) 1067 if (unlikely(!pmd_same(*pmd, orig_pmd)))
1060 goto out_free_pages; 1068 goto out_free_pages;
1061 VM_BUG_ON(!PageHead(page)); 1069 VM_BUG_ON(!PageHead(page));
@@ -1081,7 +1089,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1081 smp_wmb(); /* make pte visible before pmd */ 1089 smp_wmb(); /* make pte visible before pmd */
1082 pmd_populate(mm, pmd, pgtable); 1090 pmd_populate(mm, pmd, pgtable);
1083 page_remove_rmap(page); 1091 page_remove_rmap(page);
1084 spin_unlock(&mm->page_table_lock); 1092 spin_unlock(ptl);
1085 1093
1086 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1094 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1087 1095
@@ -1092,7 +1100,7 @@ out:
1092 return ret; 1100 return ret;
1093 1101
1094out_free_pages: 1102out_free_pages:
1095 spin_unlock(&mm->page_table_lock); 1103 spin_unlock(ptl);
1096 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1104 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1097 mem_cgroup_uncharge_start(); 1105 mem_cgroup_uncharge_start();
1098 for (i = 0; i < HPAGE_PMD_NR; i++) { 1106 for (i = 0; i < HPAGE_PMD_NR; i++) {
@@ -1107,17 +1115,19 @@ out_free_pages:
1107int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 1115int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1108 unsigned long address, pmd_t *pmd, pmd_t orig_pmd) 1116 unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
1109{ 1117{
1118 spinlock_t *ptl;
1110 int ret = 0; 1119 int ret = 0;
1111 struct page *page = NULL, *new_page; 1120 struct page *page = NULL, *new_page;
1112 unsigned long haddr; 1121 unsigned long haddr;
1113 unsigned long mmun_start; /* For mmu_notifiers */ 1122 unsigned long mmun_start; /* For mmu_notifiers */
1114 unsigned long mmun_end; /* For mmu_notifiers */ 1123 unsigned long mmun_end; /* For mmu_notifiers */
1115 1124
1125 ptl = pmd_lockptr(mm, pmd);
1116 VM_BUG_ON(!vma->anon_vma); 1126 VM_BUG_ON(!vma->anon_vma);
1117 haddr = address & HPAGE_PMD_MASK; 1127 haddr = address & HPAGE_PMD_MASK;
1118 if (is_huge_zero_pmd(orig_pmd)) 1128 if (is_huge_zero_pmd(orig_pmd))
1119 goto alloc; 1129 goto alloc;
1120 spin_lock(&mm->page_table_lock); 1130 spin_lock(ptl);
1121 if (unlikely(!pmd_same(*pmd, orig_pmd))) 1131 if (unlikely(!pmd_same(*pmd, orig_pmd)))
1122 goto out_unlock; 1132 goto out_unlock;
1123 1133
@@ -1133,7 +1143,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1133 goto out_unlock; 1143 goto out_unlock;
1134 } 1144 }
1135 get_page(page); 1145 get_page(page);
1136 spin_unlock(&mm->page_table_lock); 1146 spin_unlock(ptl);
1137alloc: 1147alloc:
1138 if (transparent_hugepage_enabled(vma) && 1148 if (transparent_hugepage_enabled(vma) &&
1139 !transparent_hugepage_debug_cow()) 1149 !transparent_hugepage_debug_cow())
@@ -1180,11 +1190,11 @@ alloc:
1180 mmun_end = haddr + HPAGE_PMD_SIZE; 1190 mmun_end = haddr + HPAGE_PMD_SIZE;
1181 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1191 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1182 1192
1183 spin_lock(&mm->page_table_lock); 1193 spin_lock(ptl);
1184 if (page) 1194 if (page)
1185 put_page(page); 1195 put_page(page);
1186 if (unlikely(!pmd_same(*pmd, orig_pmd))) { 1196 if (unlikely(!pmd_same(*pmd, orig_pmd))) {
1187 spin_unlock(&mm->page_table_lock); 1197 spin_unlock(ptl);
1188 mem_cgroup_uncharge_page(new_page); 1198 mem_cgroup_uncharge_page(new_page);
1189 put_page(new_page); 1199 put_page(new_page);
1190 goto out_mn; 1200 goto out_mn;
@@ -1206,13 +1216,13 @@ alloc:
1206 } 1216 }
1207 ret |= VM_FAULT_WRITE; 1217 ret |= VM_FAULT_WRITE;
1208 } 1218 }
1209 spin_unlock(&mm->page_table_lock); 1219 spin_unlock(ptl);
1210out_mn: 1220out_mn:
1211 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1221 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1212out: 1222out:
1213 return ret; 1223 return ret;
1214out_unlock: 1224out_unlock:
1215 spin_unlock(&mm->page_table_lock); 1225 spin_unlock(ptl);
1216 return ret; 1226 return ret;
1217} 1227}
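
Note the pmd_lockptr()/pmd_lock() distinction used here: do_huge_pmd_wp_page() resolves the lock pointer once at entry and then locks and unlocks that same lock across the allocation and retry paths. A reduced sketch of the shape, with cow_retry_shape() as an illustrative name:

#include <linux/mm.h>

static void cow_retry_shape(struct mm_struct *mm, pmd_t *pmd, pmd_t orig_pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);	/* lookup only, no locking */

	spin_lock(ptl);
	/* ... inspect *pmd, drop the lock before allocating ... */
	spin_unlock(ptl);

	/* ... allocate the new page, run mmu notifiers ... */

	spin_lock(ptl);				/* same lock instance */
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		/* pmd changed while the lock was dropped; back out */
	}
	spin_unlock(ptl);
}
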
1218 1228
@@ -1224,7 +1234,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1224 struct mm_struct *mm = vma->vm_mm; 1234 struct mm_struct *mm = vma->vm_mm;
1225 struct page *page = NULL; 1235 struct page *page = NULL;
1226 1236
1227 assert_spin_locked(&mm->page_table_lock); 1237 assert_spin_locked(pmd_lockptr(mm, pmd));
1228 1238
1229 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 1239 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1230 goto out; 1240 goto out;
@@ -1271,6 +1281,7 @@ out:
1271int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, 1281int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1272 unsigned long addr, pmd_t pmd, pmd_t *pmdp) 1282 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
1273{ 1283{
1284 spinlock_t *ptl;
1274 struct anon_vma *anon_vma = NULL; 1285 struct anon_vma *anon_vma = NULL;
1275 struct page *page; 1286 struct page *page;
1276 unsigned long haddr = addr & HPAGE_PMD_MASK; 1287 unsigned long haddr = addr & HPAGE_PMD_MASK;
@@ -1280,7 +1291,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1280 bool migrated = false; 1291 bool migrated = false;
1281 int flags = 0; 1292 int flags = 0;
1282 1293
1283 spin_lock(&mm->page_table_lock); 1294 ptl = pmd_lock(mm, pmdp);
1284 if (unlikely(!pmd_same(pmd, *pmdp))) 1295 if (unlikely(!pmd_same(pmd, *pmdp)))
1285 goto out_unlock; 1296 goto out_unlock;
1286 1297
@@ -1318,7 +1329,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1318 * relock and check_same as the page may no longer be mapped. 1329 * relock and check_same as the page may no longer be mapped.
1319 * As the fault is being retried, do not account for it. 1330 * As the fault is being retried, do not account for it.
1320 */ 1331 */
1321 spin_unlock(&mm->page_table_lock); 1332 spin_unlock(ptl);
1322 wait_on_page_locked(page); 1333 wait_on_page_locked(page);
1323 page_nid = -1; 1334 page_nid = -1;
1324 goto out; 1335 goto out;
@@ -1326,13 +1337,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1326 1337
1327 /* Page is misplaced, serialise migrations and parallel THP splits */ 1338 /* Page is misplaced, serialise migrations and parallel THP splits */
1328 get_page(page); 1339 get_page(page);
1329 spin_unlock(&mm->page_table_lock); 1340 spin_unlock(ptl);
1330 if (!page_locked) 1341 if (!page_locked)
1331 lock_page(page); 1342 lock_page(page);
1332 anon_vma = page_lock_anon_vma_read(page); 1343 anon_vma = page_lock_anon_vma_read(page);
1333 1344
1334 /* Confirm the PMD did not change while page_table_lock was released */ 1345 /* Confirm the PMD did not change while page_table_lock was released */
1335 spin_lock(&mm->page_table_lock); 1346 spin_lock(ptl);
1336 if (unlikely(!pmd_same(pmd, *pmdp))) { 1347 if (unlikely(!pmd_same(pmd, *pmdp))) {
1337 unlock_page(page); 1348 unlock_page(page);
1338 put_page(page); 1349 put_page(page);
@@ -1344,7 +1355,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1344 * Migrate the THP to the requested node, returns with page unlocked 1355 * Migrate the THP to the requested node, returns with page unlocked
1345 * and pmd_numa cleared. 1356 * and pmd_numa cleared.
1346 */ 1357 */
1347 spin_unlock(&mm->page_table_lock); 1358 spin_unlock(ptl);
1348 migrated = migrate_misplaced_transhuge_page(mm, vma, 1359 migrated = migrate_misplaced_transhuge_page(mm, vma,
1349 pmdp, pmd, addr, page, target_nid); 1360 pmdp, pmd, addr, page, target_nid);
1350 if (migrated) { 1361 if (migrated) {
@@ -1361,7 +1372,7 @@ clear_pmdnuma:
1361 update_mmu_cache_pmd(vma, addr, pmdp); 1372 update_mmu_cache_pmd(vma, addr, pmdp);
1362 unlock_page(page); 1373 unlock_page(page);
1363out_unlock: 1374out_unlock:
1364 spin_unlock(&mm->page_table_lock); 1375 spin_unlock(ptl);
1365 1376
1366out: 1377out:
1367 if (anon_vma) 1378 if (anon_vma)
@@ -1376,9 +1387,10 @@ out:
1376int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1387int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1377 pmd_t *pmd, unsigned long addr) 1388 pmd_t *pmd, unsigned long addr)
1378{ 1389{
1390 spinlock_t *ptl;
1379 int ret = 0; 1391 int ret = 0;
1380 1392
1381 if (__pmd_trans_huge_lock(pmd, vma) == 1) { 1393 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1382 struct page *page; 1394 struct page *page;
1383 pgtable_t pgtable; 1395 pgtable_t pgtable;
1384 pmd_t orig_pmd; 1396 pmd_t orig_pmd;
@@ -1392,8 +1404,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1392 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1404 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1393 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); 1405 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
1394 if (is_huge_zero_pmd(orig_pmd)) { 1406 if (is_huge_zero_pmd(orig_pmd)) {
1395 tlb->mm->nr_ptes--; 1407 atomic_long_dec(&tlb->mm->nr_ptes);
1396 spin_unlock(&tlb->mm->page_table_lock); 1408 spin_unlock(ptl);
1397 put_huge_zero_page(); 1409 put_huge_zero_page();
1398 } else { 1410 } else {
1399 page = pmd_page(orig_pmd); 1411 page = pmd_page(orig_pmd);
@@ -1401,8 +1413,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1401 VM_BUG_ON(page_mapcount(page) < 0); 1413 VM_BUG_ON(page_mapcount(page) < 0);
1402 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1414 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1403 VM_BUG_ON(!PageHead(page)); 1415 VM_BUG_ON(!PageHead(page));
1404 tlb->mm->nr_ptes--; 1416 atomic_long_dec(&tlb->mm->nr_ptes);
1405 spin_unlock(&tlb->mm->page_table_lock); 1417 spin_unlock(ptl);
1406 tlb_remove_page(tlb, page); 1418 tlb_remove_page(tlb, page);
1407 } 1419 }
1408 pte_free(tlb->mm, pgtable); 1420 pte_free(tlb->mm, pgtable);
@@ -1415,14 +1427,15 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1415 unsigned long addr, unsigned long end, 1427 unsigned long addr, unsigned long end,
1416 unsigned char *vec) 1428 unsigned char *vec)
1417{ 1429{
1430 spinlock_t *ptl;
1418 int ret = 0; 1431 int ret = 0;
1419 1432
1420 if (__pmd_trans_huge_lock(pmd, vma) == 1) { 1433 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1421 /* 1434 /*
1422 * All logical pages in the range are present 1435 * All logical pages in the range are present
1423 * if backed by a huge page. 1436 * if backed by a huge page.
1424 */ 1437 */
1425 spin_unlock(&vma->vm_mm->page_table_lock); 1438 spin_unlock(ptl);
1426 memset(vec, 1, (end - addr) >> PAGE_SHIFT); 1439 memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1427 ret = 1; 1440 ret = 1;
1428 } 1441 }
@@ -1435,6 +1448,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1435 unsigned long new_addr, unsigned long old_end, 1448 unsigned long new_addr, unsigned long old_end,
1436 pmd_t *old_pmd, pmd_t *new_pmd) 1449 pmd_t *old_pmd, pmd_t *new_pmd)
1437{ 1450{
1451 spinlock_t *old_ptl, *new_ptl;
1438 int ret = 0; 1452 int ret = 0;
1439 pmd_t pmd; 1453 pmd_t pmd;
1440 1454
@@ -1455,12 +1469,21 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1455 goto out; 1469 goto out;
1456 } 1470 }
1457 1471
1458 ret = __pmd_trans_huge_lock(old_pmd, vma); 1472 /*
1473 * We don't have to worry about the ordering of src and dst
1474 * ptlocks because exclusive mmap_sem prevents deadlock.
1475 */
1476 ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
1459 if (ret == 1) { 1477 if (ret == 1) {
1478 new_ptl = pmd_lockptr(mm, new_pmd);
1479 if (new_ptl != old_ptl)
1480 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1460 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); 1481 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1461 VM_BUG_ON(!pmd_none(*new_pmd)); 1482 VM_BUG_ON(!pmd_none(*new_pmd));
1462 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1483 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1463 spin_unlock(&mm->page_table_lock); 1484 if (new_ptl != old_ptl)
1485 spin_unlock(new_ptl);
1486 spin_unlock(old_ptl);
1464 } 1487 }
1465out: 1488out:
1466 return ret; 1489 return ret;
@@ -1476,9 +1499,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1476 unsigned long addr, pgprot_t newprot, int prot_numa) 1499 unsigned long addr, pgprot_t newprot, int prot_numa)
1477{ 1500{
1478 struct mm_struct *mm = vma->vm_mm; 1501 struct mm_struct *mm = vma->vm_mm;
1502 spinlock_t *ptl;
1479 int ret = 0; 1503 int ret = 0;
1480 1504
1481 if (__pmd_trans_huge_lock(pmd, vma) == 1) { 1505 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1482 pmd_t entry; 1506 pmd_t entry;
1483 ret = 1; 1507 ret = 1;
1484 if (!prot_numa) { 1508 if (!prot_numa) {
@@ -1507,7 +1531,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1507 if (ret == HPAGE_PMD_NR) 1531 if (ret == HPAGE_PMD_NR)
1508 set_pmd_at(mm, addr, pmd, entry); 1532 set_pmd_at(mm, addr, pmd, entry);
1509 1533
1510 spin_unlock(&vma->vm_mm->page_table_lock); 1534 spin_unlock(ptl);
1511 } 1535 }
1512 1536
1513 return ret; 1537 return ret;
@@ -1520,12 +1544,13 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1520 * Note that if it returns 1, this routine returns without unlocking page 1544 * Note that if it returns 1, this routine returns without unlocking page
1521 * table locks. So callers must unlock them. 1545 * table locks. So callers must unlock them.
1522 */ 1546 */
1523int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1547int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
1548 spinlock_t **ptl)
1524{ 1549{
1525 spin_lock(&vma->vm_mm->page_table_lock); 1550 *ptl = pmd_lock(vma->vm_mm, pmd);
1526 if (likely(pmd_trans_huge(*pmd))) { 1551 if (likely(pmd_trans_huge(*pmd))) {
1527 if (unlikely(pmd_trans_splitting(*pmd))) { 1552 if (unlikely(pmd_trans_splitting(*pmd))) {
1528 spin_unlock(&vma->vm_mm->page_table_lock); 1553 spin_unlock(*ptl);
1529 wait_split_huge_page(vma->anon_vma, pmd); 1554 wait_split_huge_page(vma->anon_vma, pmd);
1530 return -1; 1555 return -1;
1531 } else { 1556 } else {
@@ -1534,27 +1559,37 @@ int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1534 return 1; 1559 return 1;
1535 } 1560 }
1536 } 1561 }
1537 spin_unlock(&vma->vm_mm->page_table_lock); 1562 spin_unlock(*ptl);
1538 return 0; 1563 return 0;
1539} 1564}
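
__pmd_trans_huge_lock() keeps its three-way return value but now hands the acquired lock back through the new spinlock_t **ptl argument, so callers unlock whatever lock the helper chose. A sketch of the calling convention, with walk_one_huge_pmd() as an illustrative name:

#include <linux/mm.h>

static int walk_one_huge_pmd(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		/* *pmd is a stable, non-splitting huge pmd; ptl is held */
		spin_unlock(ptl);
		return 1;
	}
	/* 0: not a huge pmd; -1: a split was waited for; no lock held */
	return 0;
}
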
1540 1565
1566/*
1567 * This function returns whether the given @page is mapped at @address
1568 * in the virtual address space of @mm.
1569 *
1570 * If it is, the pmd is returned with the page table lock held, and that
1571 * lock is passed back to the caller via @ptl.
1572 * If it is not, the function returns NULL without taking the lock.
1573 */
1541pmd_t *page_check_address_pmd(struct page *page, 1574pmd_t *page_check_address_pmd(struct page *page,
1542 struct mm_struct *mm, 1575 struct mm_struct *mm,
1543 unsigned long address, 1576 unsigned long address,
1544 enum page_check_address_pmd_flag flag) 1577 enum page_check_address_pmd_flag flag,
1578 spinlock_t **ptl)
1545{ 1579{
1546 pmd_t *pmd, *ret = NULL; 1580 pmd_t *pmd;
1547 1581
1548 if (address & ~HPAGE_PMD_MASK) 1582 if (address & ~HPAGE_PMD_MASK)
1549 goto out; 1583 return NULL;
1550 1584
1551 pmd = mm_find_pmd(mm, address); 1585 pmd = mm_find_pmd(mm, address);
1552 if (!pmd) 1586 if (!pmd)
1553 goto out; 1587 return NULL;
1588 *ptl = pmd_lock(mm, pmd);
1554 if (pmd_none(*pmd)) 1589 if (pmd_none(*pmd))
1555 goto out; 1590 goto unlock;
1556 if (pmd_page(*pmd) != page) 1591 if (pmd_page(*pmd) != page)
1557 goto out; 1592 goto unlock;
1558 /* 1593 /*
1559 * split_vma() may create temporary aliased mappings. There is 1594 * split_vma() may create temporary aliased mappings. There is
1560 * no risk as long as all huge pmd are found and have their 1595 * no risk as long as all huge pmd are found and have their
@@ -1564,14 +1599,15 @@ pmd_t *page_check_address_pmd(struct page *page,
1564 */ 1599 */
1565 if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG && 1600 if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1566 pmd_trans_splitting(*pmd)) 1601 pmd_trans_splitting(*pmd))
1567 goto out; 1602 goto unlock;
1568 if (pmd_trans_huge(*pmd)) { 1603 if (pmd_trans_huge(*pmd)) {
1569 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG && 1604 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1570 !pmd_trans_splitting(*pmd)); 1605 !pmd_trans_splitting(*pmd));
1571 ret = pmd; 1606 return pmd;
1572 } 1607 }
1573out: 1608unlock:
1574 return ret; 1609 spin_unlock(*ptl);
1610 return NULL;
1575} 1611}
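
page_check_address_pmd() follows the same convention: on success it returns the pmd with the page table lock held and passes that lock back via @ptl; on failure it returns NULL with no lock held. A sketch of a caller, with check_mapping() as an illustrative name:

#include <linux/huge_mm.h>

static void check_mapping(struct page *page, struct mm_struct *mm,
			  unsigned long address)
{
	spinlock_t *ptl;
	pmd_t *pmd;

	pmd = page_check_address_pmd(page, mm, address,
			PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
	if (pmd) {
		/* @page is mapped huge at @address; *pmd is stable here */
		spin_unlock(ptl);
	}
}
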
1576 1612
1577static int __split_huge_page_splitting(struct page *page, 1613static int __split_huge_page_splitting(struct page *page,
@@ -1579,6 +1615,7 @@ static int __split_huge_page_splitting(struct page *page,
1579 unsigned long address) 1615 unsigned long address)
1580{ 1616{
1581 struct mm_struct *mm = vma->vm_mm; 1617 struct mm_struct *mm = vma->vm_mm;
1618 spinlock_t *ptl;
1582 pmd_t *pmd; 1619 pmd_t *pmd;
1583 int ret = 0; 1620 int ret = 0;
1584 /* For mmu_notifiers */ 1621 /* For mmu_notifiers */
@@ -1586,9 +1623,8 @@ static int __split_huge_page_splitting(struct page *page,
1586 const unsigned long mmun_end = address + HPAGE_PMD_SIZE; 1623 const unsigned long mmun_end = address + HPAGE_PMD_SIZE;
1587 1624
1588 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1625 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1589 spin_lock(&mm->page_table_lock);
1590 pmd = page_check_address_pmd(page, mm, address, 1626 pmd = page_check_address_pmd(page, mm, address,
1591 PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG); 1627 PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
1592 if (pmd) { 1628 if (pmd) {
1593 /* 1629 /*
1594 * We can't temporarily set the pmd to null in order 1630 * We can't temporarily set the pmd to null in order
@@ -1599,8 +1635,8 @@ static int __split_huge_page_splitting(struct page *page,
1599 */ 1635 */
1600 pmdp_splitting_flush(vma, address, pmd); 1636 pmdp_splitting_flush(vma, address, pmd);
1601 ret = 1; 1637 ret = 1;
1638 spin_unlock(ptl);
1602 } 1639 }
1603 spin_unlock(&mm->page_table_lock);
1604 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1640 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1605 1641
1606 return ret; 1642 return ret;
@@ -1731,14 +1767,14 @@ static int __split_huge_page_map(struct page *page,
1731 unsigned long address) 1767 unsigned long address)
1732{ 1768{
1733 struct mm_struct *mm = vma->vm_mm; 1769 struct mm_struct *mm = vma->vm_mm;
1770 spinlock_t *ptl;
1734 pmd_t *pmd, _pmd; 1771 pmd_t *pmd, _pmd;
1735 int ret = 0, i; 1772 int ret = 0, i;
1736 pgtable_t pgtable; 1773 pgtable_t pgtable;
1737 unsigned long haddr; 1774 unsigned long haddr;
1738 1775
1739 spin_lock(&mm->page_table_lock);
1740 pmd = page_check_address_pmd(page, mm, address, 1776 pmd = page_check_address_pmd(page, mm, address,
1741 PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG); 1777 PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
1742 if (pmd) { 1778 if (pmd) {
1743 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1779 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1744 pmd_populate(mm, &_pmd, pgtable); 1780 pmd_populate(mm, &_pmd, pgtable);
@@ -1793,8 +1829,8 @@ static int __split_huge_page_map(struct page *page,
1793 pmdp_invalidate(vma, address, pmd); 1829 pmdp_invalidate(vma, address, pmd);
1794 pmd_populate(mm, pmd, pgtable); 1830 pmd_populate(mm, pmd, pgtable);
1795 ret = 1; 1831 ret = 1;
1832 spin_unlock(ptl);
1796 } 1833 }
1797 spin_unlock(&mm->page_table_lock);
1798 1834
1799 return ret; 1835 return ret;
1800} 1836}
@@ -2346,7 +2382,7 @@ static void collapse_huge_page(struct mm_struct *mm,
2346 pte_t *pte; 2382 pte_t *pte;
2347 pgtable_t pgtable; 2383 pgtable_t pgtable;
2348 struct page *new_page; 2384 struct page *new_page;
2349 spinlock_t *ptl; 2385 spinlock_t *pmd_ptl, *pte_ptl;
2350 int isolated; 2386 int isolated;
2351 unsigned long hstart, hend; 2387 unsigned long hstart, hend;
2352 unsigned long mmun_start; /* For mmu_notifiers */ 2388 unsigned long mmun_start; /* For mmu_notifiers */
@@ -2389,12 +2425,12 @@ static void collapse_huge_page(struct mm_struct *mm,
2389 anon_vma_lock_write(vma->anon_vma); 2425 anon_vma_lock_write(vma->anon_vma);
2390 2426
2391 pte = pte_offset_map(pmd, address); 2427 pte = pte_offset_map(pmd, address);
2392 ptl = pte_lockptr(mm, pmd); 2428 pte_ptl = pte_lockptr(mm, pmd);
2393 2429
2394 mmun_start = address; 2430 mmun_start = address;
2395 mmun_end = address + HPAGE_PMD_SIZE; 2431 mmun_end = address + HPAGE_PMD_SIZE;
2396 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2432 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2397 spin_lock(&mm->page_table_lock); /* probably unnecessary */ 2433 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
2398 /* 2434 /*
2399 * After this gup_fast can't run anymore. This also removes 2435 * After this gup_fast can't run anymore. This also removes
2400 * any huge TLB entry from the CPU so we won't allow 2436 * any huge TLB entry from the CPU so we won't allow
@@ -2402,16 +2438,16 @@ static void collapse_huge_page(struct mm_struct *mm,
2402 * to avoid the risk of CPU bugs in that area. 2438 * to avoid the risk of CPU bugs in that area.
2403 */ 2439 */
2404 _pmd = pmdp_clear_flush(vma, address, pmd); 2440 _pmd = pmdp_clear_flush(vma, address, pmd);
2405 spin_unlock(&mm->page_table_lock); 2441 spin_unlock(pmd_ptl);
2406 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2442 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2407 2443
2408 spin_lock(ptl); 2444 spin_lock(pte_ptl);
2409 isolated = __collapse_huge_page_isolate(vma, address, pte); 2445 isolated = __collapse_huge_page_isolate(vma, address, pte);
2410 spin_unlock(ptl); 2446 spin_unlock(pte_ptl);
2411 2447
2412 if (unlikely(!isolated)) { 2448 if (unlikely(!isolated)) {
2413 pte_unmap(pte); 2449 pte_unmap(pte);
2414 spin_lock(&mm->page_table_lock); 2450 spin_lock(pmd_ptl);
2415 BUG_ON(!pmd_none(*pmd)); 2451 BUG_ON(!pmd_none(*pmd));
2416 /* 2452 /*
2417 * We can only use set_pmd_at when establishing 2453 * We can only use set_pmd_at when establishing
@@ -2419,7 +2455,7 @@ static void collapse_huge_page(struct mm_struct *mm,
2419 * points to regular pagetables. Use pmd_populate for that 2455 * points to regular pagetables. Use pmd_populate for that
2420 */ 2456 */
2421 pmd_populate(mm, pmd, pmd_pgtable(_pmd)); 2457 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
2422 spin_unlock(&mm->page_table_lock); 2458 spin_unlock(pmd_ptl);
2423 anon_vma_unlock_write(vma->anon_vma); 2459 anon_vma_unlock_write(vma->anon_vma);
2424 goto out; 2460 goto out;
2425 } 2461 }
@@ -2430,7 +2466,7 @@ static void collapse_huge_page(struct mm_struct *mm,
2430 */ 2466 */
2431 anon_vma_unlock_write(vma->anon_vma); 2467 anon_vma_unlock_write(vma->anon_vma);
2432 2468
2433 __collapse_huge_page_copy(pte, new_page, vma, address, ptl); 2469 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
2434 pte_unmap(pte); 2470 pte_unmap(pte);
2435 __SetPageUptodate(new_page); 2471 __SetPageUptodate(new_page);
2436 pgtable = pmd_pgtable(_pmd); 2472 pgtable = pmd_pgtable(_pmd);
@@ -2445,13 +2481,13 @@ static void collapse_huge_page(struct mm_struct *mm,
2445 */ 2481 */
2446 smp_wmb(); 2482 smp_wmb();
2447 2483
2448 spin_lock(&mm->page_table_lock); 2484 spin_lock(pmd_ptl);
2449 BUG_ON(!pmd_none(*pmd)); 2485 BUG_ON(!pmd_none(*pmd));
2450 page_add_new_anon_rmap(new_page, vma, address); 2486 page_add_new_anon_rmap(new_page, vma, address);
2451 pgtable_trans_huge_deposit(mm, pmd, pgtable); 2487 pgtable_trans_huge_deposit(mm, pmd, pgtable);
2452 set_pmd_at(mm, address, pmd, _pmd); 2488 set_pmd_at(mm, address, pmd, _pmd);
2453 update_mmu_cache_pmd(vma, address, pmd); 2489 update_mmu_cache_pmd(vma, address, pmd);
2454 spin_unlock(&mm->page_table_lock); 2490 spin_unlock(pmd_ptl);
2455 2491
2456 *hpage = NULL; 2492 *hpage = NULL;
2457 2493
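
collapse_huge_page() is the one place that now works with two different locks at once: the PMD-level lock while clearing and later re-installing the pmd, and the pre-existing split PTE-level lock while isolating the small pages, hence the rename from ptl to pmd_ptl/pte_ptl. The sequence, condensed into an illustrative collapse_lock_shape():

#include <linux/mm.h>

static void collapse_lock_shape(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *pmd_ptl, *pte_ptl;
	pte_t *pte = pte_offset_map(pmd, address);

	pte_ptl = pte_lockptr(mm, pmd);	/* split PTE lock, pre-existing */
	pmd_ptl = pmd_lock(mm, pmd);	/* split PMD lock, new in this series */
	/* ... pmdp_clear_flush() stops gup_fast ... */
	spin_unlock(pmd_ptl);

	spin_lock(pte_ptl);
	/* ... isolate the HPAGE_PMD_NR small pages ... */
	spin_unlock(pte_ptl);
	pte_unmap(pte);
}
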
@@ -2780,6 +2816,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2780void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, 2816void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
2781 pmd_t *pmd) 2817 pmd_t *pmd)
2782{ 2818{
2819 spinlock_t *ptl;
2783 struct page *page; 2820 struct page *page;
2784 struct mm_struct *mm = vma->vm_mm; 2821 struct mm_struct *mm = vma->vm_mm;
2785 unsigned long haddr = address & HPAGE_PMD_MASK; 2822 unsigned long haddr = address & HPAGE_PMD_MASK;
@@ -2792,22 +2829,22 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
2792 mmun_end = haddr + HPAGE_PMD_SIZE; 2829 mmun_end = haddr + HPAGE_PMD_SIZE;
2793again: 2830again:
2794 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2831 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2795 spin_lock(&mm->page_table_lock); 2832 ptl = pmd_lock(mm, pmd);
2796 if (unlikely(!pmd_trans_huge(*pmd))) { 2833 if (unlikely(!pmd_trans_huge(*pmd))) {
2797 spin_unlock(&mm->page_table_lock); 2834 spin_unlock(ptl);
2798 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2835 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2799 return; 2836 return;
2800 } 2837 }
2801 if (is_huge_zero_pmd(*pmd)) { 2838 if (is_huge_zero_pmd(*pmd)) {
2802 __split_huge_zero_page_pmd(vma, haddr, pmd); 2839 __split_huge_zero_page_pmd(vma, haddr, pmd);
2803 spin_unlock(&mm->page_table_lock); 2840 spin_unlock(ptl);
2804 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2841 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2805 return; 2842 return;
2806 } 2843 }
2807 page = pmd_page(*pmd); 2844 page = pmd_page(*pmd);
2808 VM_BUG_ON(!page_count(page)); 2845 VM_BUG_ON(!page_count(page));
2809 get_page(page); 2846 get_page(page);
2810 spin_unlock(&mm->page_table_lock); 2847 spin_unlock(ptl);
2811 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2848 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2812 2849
2813 split_huge_page(page); 2850 split_huge_page(page);
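
That completes mm/huge_memory.c. The other recurring change in these hunks is mm->nr_ptes: with pmd-level updates no longer serialized by a single lock, the counter becomes an atomic_long_t and all accesses change mechanically:

#include <linux/mm_types.h>
#include <linux/atomic.h>

static void nr_ptes_shape(struct mm_struct *mm)
{
	atomic_long_inc(&mm->nr_ptes);		/* was: mm->nr_ptes++ */
	atomic_long_dec(&mm->nr_ptes);		/* was: mm->nr_ptes-- */
	(void)atomic_long_read(&mm->nr_ptes);	/* readers use atomic_long_read() */
}
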
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0b7656e804d1..7d57af21f49e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2376,6 +2376,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2376 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 2376 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2377 2377
2378 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 2378 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2379 spinlock_t *src_ptl, *dst_ptl;
2379 src_pte = huge_pte_offset(src, addr); 2380 src_pte = huge_pte_offset(src, addr);
2380 if (!src_pte) 2381 if (!src_pte)
2381 continue; 2382 continue;
@@ -2387,8 +2388,9 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2387 if (dst_pte == src_pte) 2388 if (dst_pte == src_pte)
2388 continue; 2389 continue;
2389 2390
2390 spin_lock(&dst->page_table_lock); 2391 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2391 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); 2392 src_ptl = huge_pte_lockptr(h, src, src_pte);
2393 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2392 if (!huge_pte_none(huge_ptep_get(src_pte))) { 2394 if (!huge_pte_none(huge_ptep_get(src_pte))) {
2393 if (cow) 2395 if (cow)
2394 huge_ptep_set_wrprotect(src, addr, src_pte); 2396 huge_ptep_set_wrprotect(src, addr, src_pte);
@@ -2398,8 +2400,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2398 page_dup_rmap(ptepage); 2400 page_dup_rmap(ptepage);
2399 set_huge_pte_at(dst, addr, dst_pte, entry); 2401 set_huge_pte_at(dst, addr, dst_pte, entry);
2400 } 2402 }
2401 spin_unlock(&src->page_table_lock); 2403 spin_unlock(src_ptl);
2402 spin_unlock(&dst->page_table_lock); 2404 spin_unlock(dst_ptl);
2403 } 2405 }
2404 return 0; 2406 return 0;
2405 2407
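
hugetlb gets matching helpers: huge_pte_lock() acquires the lock for a hugepage pte and huge_pte_lockptr() only resolves it, both taking the hstate so lock granularity can depend on the huge page size. The fork-time pairing above, sketched as an illustrative hugetlb_copy_lock_shape():

#include <linux/hugetlb.h>

static void hugetlb_copy_lock_shape(struct hstate *h,
				    struct mm_struct *dst, pte_t *dst_pte,
				    struct mm_struct *src, pte_t *src_pte)
{
	spinlock_t *dst_ptl, *src_ptl;

	dst_ptl = huge_pte_lock(h, dst, dst_pte);
	src_ptl = huge_pte_lockptr(h, src, src_pte);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	/* ... copy or write-protect the entry ... */
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
}
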
@@ -2442,6 +2444,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2442 unsigned long address; 2444 unsigned long address;
2443 pte_t *ptep; 2445 pte_t *ptep;
2444 pte_t pte; 2446 pte_t pte;
2447 spinlock_t *ptl;
2445 struct page *page; 2448 struct page *page;
2446 struct hstate *h = hstate_vma(vma); 2449 struct hstate *h = hstate_vma(vma);
2447 unsigned long sz = huge_page_size(h); 2450 unsigned long sz = huge_page_size(h);
@@ -2455,25 +2458,25 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2455 tlb_start_vma(tlb, vma); 2458 tlb_start_vma(tlb, vma);
2456 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2459 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2457again: 2460again:
2458 spin_lock(&mm->page_table_lock);
2459 for (address = start; address < end; address += sz) { 2461 for (address = start; address < end; address += sz) {
2460 ptep = huge_pte_offset(mm, address); 2462 ptep = huge_pte_offset(mm, address);
2461 if (!ptep) 2463 if (!ptep)
2462 continue; 2464 continue;
2463 2465
2466 ptl = huge_pte_lock(h, mm, ptep);
2464 if (huge_pmd_unshare(mm, &address, ptep)) 2467 if (huge_pmd_unshare(mm, &address, ptep))
2465 continue; 2468 goto unlock;
2466 2469
2467 pte = huge_ptep_get(ptep); 2470 pte = huge_ptep_get(ptep);
2468 if (huge_pte_none(pte)) 2471 if (huge_pte_none(pte))
2469 continue; 2472 goto unlock;
2470 2473
2471 /* 2474 /*
2472 * HWPoisoned hugepage is already unmapped and dropped reference 2475 * HWPoisoned hugepage is already unmapped and dropped reference
2473 */ 2476 */
2474 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 2477 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
2475 huge_pte_clear(mm, address, ptep); 2478 huge_pte_clear(mm, address, ptep);
2476 continue; 2479 goto unlock;
2477 } 2480 }
2478 2481
2479 page = pte_page(pte); 2482 page = pte_page(pte);
@@ -2484,7 +2487,7 @@ again:
2484 */ 2487 */
2485 if (ref_page) { 2488 if (ref_page) {
2486 if (page != ref_page) 2489 if (page != ref_page)
2487 continue; 2490 goto unlock;
2488 2491
2489 /* 2492 /*
2490 * Mark the VMA as having unmapped its page so that 2493 * Mark the VMA as having unmapped its page so that
@@ -2501,13 +2504,18 @@ again:
2501 2504
2502 page_remove_rmap(page); 2505 page_remove_rmap(page);
2503 force_flush = !__tlb_remove_page(tlb, page); 2506 force_flush = !__tlb_remove_page(tlb, page);
2504 if (force_flush) 2507 if (force_flush) {
2508 spin_unlock(ptl);
2505 break; 2509 break;
2510 }
2506 /* Bail out after unmapping reference page if supplied */ 2511 /* Bail out after unmapping reference page if supplied */
2507 if (ref_page) 2512 if (ref_page) {
2513 spin_unlock(ptl);
2508 break; 2514 break;
2515 }
2516unlock:
2517 spin_unlock(ptl);
2509 } 2518 }
2510 spin_unlock(&mm->page_table_lock);
2511 /* 2519 /*
2512 * mmu_gather ran out of room to batch pages, we break out of 2520 * mmu_gather ran out of room to batch pages, we break out of
2513 * the PTE lock to avoid doing the potential expensive TLB invalidate 2521 * the PTE lock to avoid doing the potential expensive TLB invalidate
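
Because the lock is now per entry rather than around the whole walk, every early `continue` inside the loop has to become a `goto unlock` so the just-taken lock is dropped; only the pre-lock `!ptep` check may still continue directly. The loop shape, reduced to an illustrative unmap_loop_shape():

#include <linux/hugetlb.h>

static void unmap_loop_shape(struct hstate *h, struct mm_struct *mm,
			     unsigned long start, unsigned long end)
{
	unsigned long sz = huge_page_size(h);
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;		/* no lock taken yet */
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pte_none(huge_ptep_get(ptep)))
			goto unlock;		/* was: continue */
		/* ... tear down the mapping ... */
unlock:
		spin_unlock(ptl);
	}
}
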
@@ -2613,7 +2621,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2613 */ 2621 */
2614static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 2622static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2615 unsigned long address, pte_t *ptep, pte_t pte, 2623 unsigned long address, pte_t *ptep, pte_t pte,
2616 struct page *pagecache_page) 2624 struct page *pagecache_page, spinlock_t *ptl)
2617{ 2625{
2618 struct hstate *h = hstate_vma(vma); 2626 struct hstate *h = hstate_vma(vma);
2619 struct page *old_page, *new_page; 2627 struct page *old_page, *new_page;
@@ -2647,8 +2655,8 @@ retry_avoidcopy:
2647 2655
2648 page_cache_get(old_page); 2656 page_cache_get(old_page);
2649 2657
2650 /* Drop page_table_lock as buddy allocator may be called */ 2658 /* Drop page table lock as buddy allocator may be called */
2651 spin_unlock(&mm->page_table_lock); 2659 spin_unlock(ptl);
2652 new_page = alloc_huge_page(vma, address, outside_reserve); 2660 new_page = alloc_huge_page(vma, address, outside_reserve);
2653 2661
2654 if (IS_ERR(new_page)) { 2662 if (IS_ERR(new_page)) {
@@ -2666,13 +2674,13 @@ retry_avoidcopy:
2666 BUG_ON(huge_pte_none(pte)); 2674 BUG_ON(huge_pte_none(pte));
2667 if (unmap_ref_private(mm, vma, old_page, address)) { 2675 if (unmap_ref_private(mm, vma, old_page, address)) {
2668 BUG_ON(huge_pte_none(pte)); 2676 BUG_ON(huge_pte_none(pte));
2669 spin_lock(&mm->page_table_lock); 2677 spin_lock(ptl);
2670 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2678 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2671 if (likely(pte_same(huge_ptep_get(ptep), pte))) 2679 if (likely(pte_same(huge_ptep_get(ptep), pte)))
2672 goto retry_avoidcopy; 2680 goto retry_avoidcopy;
2673 /* 2681 /*
2674 * race occurs while re-acquiring page_table_lock, and 2682 * race occurs while re-acquiring page table
2675 * our job is done. 2683 * lock, and our job is done.
2676 */ 2684 */
2677 return 0; 2685 return 0;
2678 } 2686 }
@@ -2680,7 +2688,7 @@ retry_avoidcopy:
2680 } 2688 }
2681 2689
2682 /* Caller expects lock to be held */ 2690 /* Caller expects lock to be held */
2683 spin_lock(&mm->page_table_lock); 2691 spin_lock(ptl);
2684 if (err == -ENOMEM) 2692 if (err == -ENOMEM)
2685 return VM_FAULT_OOM; 2693 return VM_FAULT_OOM;
2686 else 2694 else
@@ -2695,7 +2703,7 @@ retry_avoidcopy:
2695 page_cache_release(new_page); 2703 page_cache_release(new_page);
2696 page_cache_release(old_page); 2704 page_cache_release(old_page);
2697 /* Caller expects lock to be held */ 2705 /* Caller expects lock to be held */
2698 spin_lock(&mm->page_table_lock); 2706 spin_lock(ptl);
2699 return VM_FAULT_OOM; 2707 return VM_FAULT_OOM;
2700 } 2708 }
2701 2709
@@ -2707,10 +2715,10 @@ retry_avoidcopy:
2707 mmun_end = mmun_start + huge_page_size(h); 2715 mmun_end = mmun_start + huge_page_size(h);
2708 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2716 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2709 /* 2717 /*
2710 * Retake the page_table_lock to check for racing updates 2718 * Retake the page table lock to check for racing updates
2711 * before the page tables are altered 2719 * before the page tables are altered
2712 */ 2720 */
2713 spin_lock(&mm->page_table_lock); 2721 spin_lock(ptl);
2714 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2722 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2715 if (likely(pte_same(huge_ptep_get(ptep), pte))) { 2723 if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2716 ClearPagePrivate(new_page); 2724 ClearPagePrivate(new_page);
@@ -2724,13 +2732,13 @@ retry_avoidcopy:
2724 /* Make the old page be freed below */ 2732 /* Make the old page be freed below */
2725 new_page = old_page; 2733 new_page = old_page;
2726 } 2734 }
2727 spin_unlock(&mm->page_table_lock); 2735 spin_unlock(ptl);
2728 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2736 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2729 page_cache_release(new_page); 2737 page_cache_release(new_page);
2730 page_cache_release(old_page); 2738 page_cache_release(old_page);
2731 2739
2732 /* Caller expects lock to be held */ 2740 /* Caller expects lock to be held */
2733 spin_lock(&mm->page_table_lock); 2741 spin_lock(ptl);
2734 return 0; 2742 return 0;
2735} 2743}
2736 2744
@@ -2778,6 +2786,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2778 struct page *page; 2786 struct page *page;
2779 struct address_space *mapping; 2787 struct address_space *mapping;
2780 pte_t new_pte; 2788 pte_t new_pte;
2789 spinlock_t *ptl;
2781 2790
2782 /* 2791 /*
2783 * Currently, we are forced to kill the process in the event the 2792 * Currently, we are forced to kill the process in the event the
@@ -2864,7 +2873,8 @@ retry:
2864 goto backout_unlocked; 2873 goto backout_unlocked;
2865 } 2874 }
2866 2875
2867 spin_lock(&mm->page_table_lock); 2876 ptl = huge_pte_lockptr(h, mm, ptep);
2877 spin_lock(ptl);
2868 size = i_size_read(mapping->host) >> huge_page_shift(h); 2878 size = i_size_read(mapping->host) >> huge_page_shift(h);
2869 if (idx >= size) 2879 if (idx >= size)
2870 goto backout; 2880 goto backout;
@@ -2885,16 +2895,16 @@ retry:
2885 2895
2886 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 2896 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2887 /* Optimization, do the COW without a second fault */ 2897 /* Optimization, do the COW without a second fault */
2888 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 2898 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
2889 } 2899 }
2890 2900
2891 spin_unlock(&mm->page_table_lock); 2901 spin_unlock(ptl);
2892 unlock_page(page); 2902 unlock_page(page);
2893out: 2903out:
2894 return ret; 2904 return ret;
2895 2905
2896backout: 2906backout:
2897 spin_unlock(&mm->page_table_lock); 2907 spin_unlock(ptl);
2898backout_unlocked: 2908backout_unlocked:
2899 unlock_page(page); 2909 unlock_page(page);
2900 put_page(page); 2910 put_page(page);
@@ -2906,6 +2916,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2906{ 2916{
2907 pte_t *ptep; 2917 pte_t *ptep;
2908 pte_t entry; 2918 pte_t entry;
2919 spinlock_t *ptl;
2909 int ret; 2920 int ret;
2910 struct page *page = NULL; 2921 struct page *page = NULL;
2911 struct page *pagecache_page = NULL; 2922 struct page *pagecache_page = NULL;
@@ -2918,7 +2929,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2918 if (ptep) { 2929 if (ptep) {
2919 entry = huge_ptep_get(ptep); 2930 entry = huge_ptep_get(ptep);
2920 if (unlikely(is_hugetlb_entry_migration(entry))) { 2931 if (unlikely(is_hugetlb_entry_migration(entry))) {
2921 migration_entry_wait_huge(mm, ptep); 2932 migration_entry_wait_huge(vma, mm, ptep);
2922 return 0; 2933 return 0;
2923 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 2934 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2924 return VM_FAULT_HWPOISON_LARGE | 2935 return VM_FAULT_HWPOISON_LARGE |
@@ -2974,17 +2985,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2974 if (page != pagecache_page) 2985 if (page != pagecache_page)
2975 lock_page(page); 2986 lock_page(page);
2976 2987
2977 spin_lock(&mm->page_table_lock); 2988 ptl = huge_pte_lockptr(h, mm, ptep);
2989 spin_lock(ptl);
2978 /* Check for a racing update before calling hugetlb_cow */ 2990 /* Check for a racing update before calling hugetlb_cow */
2979 if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 2991 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2980 goto out_page_table_lock; 2992 goto out_ptl;
2981 2993
2982 2994
2983 if (flags & FAULT_FLAG_WRITE) { 2995 if (flags & FAULT_FLAG_WRITE) {
2984 if (!huge_pte_write(entry)) { 2996 if (!huge_pte_write(entry)) {
2985 ret = hugetlb_cow(mm, vma, address, ptep, entry, 2997 ret = hugetlb_cow(mm, vma, address, ptep, entry,
2986 pagecache_page); 2998 pagecache_page, ptl);
2987 goto out_page_table_lock; 2999 goto out_ptl;
2988 } 3000 }
2989 entry = huge_pte_mkdirty(entry); 3001 entry = huge_pte_mkdirty(entry);
2990 } 3002 }
@@ -2993,8 +3005,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2993 flags & FAULT_FLAG_WRITE)) 3005 flags & FAULT_FLAG_WRITE))
2994 update_mmu_cache(vma, address, ptep); 3006 update_mmu_cache(vma, address, ptep);
2995 3007
2996out_page_table_lock: 3008out_ptl:
2997 spin_unlock(&mm->page_table_lock); 3009 spin_unlock(ptl);
2998 3010
2999 if (pagecache_page) { 3011 if (pagecache_page) {
3000 unlock_page(pagecache_page); 3012 unlock_page(pagecache_page);
@@ -3020,9 +3032,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3020 unsigned long remainder = *nr_pages; 3032 unsigned long remainder = *nr_pages;
3021 struct hstate *h = hstate_vma(vma); 3033 struct hstate *h = hstate_vma(vma);
3022 3034
3023 spin_lock(&mm->page_table_lock);
3024 while (vaddr < vma->vm_end && remainder) { 3035 while (vaddr < vma->vm_end && remainder) {
3025 pte_t *pte; 3036 pte_t *pte;
3037 spinlock_t *ptl = NULL;
3026 int absent; 3038 int absent;
3027 struct page *page; 3039 struct page *page;
3028 3040
@@ -3030,8 +3042,12 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3030 * Some archs (sparc64, sh*) have multiple pte_ts to 3042 * Some archs (sparc64, sh*) have multiple pte_ts to
3031 * each hugepage. We have to make sure we get the 3043 * each hugepage. We have to make sure we get the
3032 * first, for the page indexing below to work. 3044 * first, for the page indexing below to work.
3045 *
3046 * Note that the page table lock is not held when pte is NULL.
3033 */ 3047 */
3034 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); 3048 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3049 if (pte)
3050 ptl = huge_pte_lock(h, mm, pte);
3035 absent = !pte || huge_pte_none(huge_ptep_get(pte)); 3051 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3036 3052
3037 /* 3053 /*
@@ -3043,6 +3059,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3043 */ 3059 */
3044 if (absent && (flags & FOLL_DUMP) && 3060 if (absent && (flags & FOLL_DUMP) &&
3045 !hugetlbfs_pagecache_present(h, vma, vaddr)) { 3061 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3062 if (pte)
3063 spin_unlock(ptl);
3046 remainder = 0; 3064 remainder = 0;
3047 break; 3065 break;
3048 } 3066 }
@@ -3062,10 +3080,10 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3062 !huge_pte_write(huge_ptep_get(pte)))) { 3080 !huge_pte_write(huge_ptep_get(pte)))) {
3063 int ret; 3081 int ret;
3064 3082
3065 spin_unlock(&mm->page_table_lock); 3083 if (pte)
3084 spin_unlock(ptl);
3066 ret = hugetlb_fault(mm, vma, vaddr, 3085 ret = hugetlb_fault(mm, vma, vaddr,
3067 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0); 3086 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3068 spin_lock(&mm->page_table_lock);
3069 if (!(ret & VM_FAULT_ERROR)) 3087 if (!(ret & VM_FAULT_ERROR))
3070 continue; 3088 continue;
3071 3089
@@ -3096,8 +3114,8 @@ same_page:
3096 */ 3114 */
3097 goto same_page; 3115 goto same_page;
3098 } 3116 }
3117 spin_unlock(ptl);
3099 } 3118 }
3100 spin_unlock(&mm->page_table_lock);
3101 *nr_pages = remainder; 3119 *nr_pages = remainder;
3102 *position = vaddr; 3120 *position = vaddr;
3103 3121
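
In follow_hugetlb_page() the pte may be absent entirely, so the lock is taken only when huge_pte_offset() finds an entry, and each exit path must mirror that conditional. A reduced sketch, with follow_one() as an illustrative name:

#include <linux/hugetlb.h>

static int follow_one(struct hstate *h, struct mm_struct *mm,
		      unsigned long vaddr)
{
	spinlock_t *ptl = NULL;
	pte_t *pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));

	if (pte)
		ptl = huge_pte_lock(h, mm, pte);
	if (!pte || huge_pte_none(huge_ptep_get(pte))) {
		if (pte)
			spin_unlock(ptl);
		return 0;	/* nothing mapped at this address */
	}
	/* ... pick up the page under ptl ... */
	spin_unlock(ptl);
	return 1;
}
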
@@ -3118,13 +3136,15 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3118 flush_cache_range(vma, address, end); 3136 flush_cache_range(vma, address, end);
3119 3137
3120 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); 3138 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
3121 spin_lock(&mm->page_table_lock);
3122 for (; address < end; address += huge_page_size(h)) { 3139 for (; address < end; address += huge_page_size(h)) {
3140 spinlock_t *ptl;
3123 ptep = huge_pte_offset(mm, address); 3141 ptep = huge_pte_offset(mm, address);
3124 if (!ptep) 3142 if (!ptep)
3125 continue; 3143 continue;
3144 ptl = huge_pte_lock(h, mm, ptep);
3126 if (huge_pmd_unshare(mm, &address, ptep)) { 3145 if (huge_pmd_unshare(mm, &address, ptep)) {
3127 pages++; 3146 pages++;
3147 spin_unlock(ptl);
3128 continue; 3148 continue;
3129 } 3149 }
3130 if (!huge_pte_none(huge_ptep_get(ptep))) { 3150 if (!huge_pte_none(huge_ptep_get(ptep))) {
@@ -3134,8 +3154,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3134 set_huge_pte_at(mm, address, ptep, pte); 3154 set_huge_pte_at(mm, address, ptep, pte);
3135 pages++; 3155 pages++;
3136 } 3156 }
3157 spin_unlock(ptl);
3137 } 3158 }
3138 spin_unlock(&mm->page_table_lock);
3139 /* 3159 /*
3140 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare 3160 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
3141 * may have cleared our pud entry and done put_page on the page table: 3161 * may have cleared our pud entry and done put_page on the page table:
@@ -3298,6 +3318,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3298 unsigned long saddr; 3318 unsigned long saddr;
3299 pte_t *spte = NULL; 3319 pte_t *spte = NULL;
3300 pte_t *pte; 3320 pte_t *pte;
3321 spinlock_t *ptl;
3301 3322
3302 if (!vma_shareable(vma, addr)) 3323 if (!vma_shareable(vma, addr))
3303 return (pte_t *)pmd_alloc(mm, pud, addr); 3324 return (pte_t *)pmd_alloc(mm, pud, addr);
@@ -3320,13 +3341,14 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3320 if (!spte) 3341 if (!spte)
3321 goto out; 3342 goto out;
3322 3343
3323 spin_lock(&mm->page_table_lock); 3344 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3345 spin_lock(ptl);
3324 if (pud_none(*pud)) 3346 if (pud_none(*pud))
3325 pud_populate(mm, pud, 3347 pud_populate(mm, pud,
3326 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 3348 (pmd_t *)((unsigned long)spte & PAGE_MASK));
3327 else 3349 else
3328 put_page(virt_to_page(spte)); 3350 put_page(virt_to_page(spte));
3329 spin_unlock(&mm->page_table_lock); 3351 spin_unlock(ptl);
3330out: 3352out:
3331 pte = (pte_t *)pmd_alloc(mm, pud, addr); 3353 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3332 mutex_unlock(&mapping->i_mmap_mutex); 3354 mutex_unlock(&mapping->i_mmap_mutex);
@@ -3340,7 +3362,7 @@ out:
3340 * indicated by page_count > 1, unmap is achieved by clearing pud and 3362 * indicated by page_count > 1, unmap is achieved by clearing pud and
3341 * decrementing the ref count. If count == 1, the pte page is not shared. 3363 * decrementing the ref count. If count == 1, the pte page is not shared.
3342 * 3364 *
3343 * called with vma->vm_mm->page_table_lock held. 3365 * called with page table lock held.
3344 * 3366 *
3345 * returns: 1 successfully unmapped a shared pte page 3367 * returns: 1 successfully unmapped a shared pte page
3346 * 0 the underlying pte page is not shared, or it is the last user 3368 * 0 the underlying pte page is not shared, or it is the last user
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e3cd40b2d5d9..f1a0ae6e11b8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6605,10 +6605,10 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6605 pte_t *pte; 6605 pte_t *pte;
6606 spinlock_t *ptl; 6606 spinlock_t *ptl;
6607 6607
6608 if (pmd_trans_huge_lock(pmd, vma) == 1) { 6608 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6609 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 6609 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6610 mc.precharge += HPAGE_PMD_NR; 6610 mc.precharge += HPAGE_PMD_NR;
6611 spin_unlock(&vma->vm_mm->page_table_lock); 6611 spin_unlock(ptl);
6612 return 0; 6612 return 0;
6613 } 6613 }
6614 6614
@@ -6797,9 +6797,9 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6797 * to be unlocked in __split_huge_page_splitting(), where the main 6797 * to be unlocked in __split_huge_page_splitting(), where the main
6798 * part of thp split is not executed yet. 6798 * part of thp split is not executed yet.
6799 */ 6799 */
6800 if (pmd_trans_huge_lock(pmd, vma) == 1) { 6800 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6801 if (mc.precharge < HPAGE_PMD_NR) { 6801 if (mc.precharge < HPAGE_PMD_NR) {
6802 spin_unlock(&vma->vm_mm->page_table_lock); 6802 spin_unlock(ptl);
6803 return 0; 6803 return 0;
6804 } 6804 }
6805 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6805 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
@@ -6816,7 +6816,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6816 } 6816 }
6817 put_page(page); 6817 put_page(page);
6818 } 6818 }
6819 spin_unlock(&vma->vm_mm->page_table_lock); 6819 spin_unlock(ptl);
6820 return 0; 6820 return 0;
6821 } 6821 }
6822 6822
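
The memcg walkers use the pmd_trans_huge_lock() wrapper around __pmd_trans_huge_lock(), so they pick up the same extra argument; on a return of 1 the whole huge pmd is handled under the returned lock, otherwise the walker falls back to the pte path. A sketch, with walk_pmd() as an illustrative name:

#include <linux/huge_mm.h>

static int walk_pmd(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		/* handle HPAGE_PMD_NR pages in one go */
		spin_unlock(ptl);
		return 0;
	}
	/* fall through to the pte-by-pte path */
	return 0;
}
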
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index f9d78ec7831f..b7c171602ba1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1269,7 +1269,7 @@ void memory_failure_queue(unsigned long pfn, int trapno, int flags)
1269 1269
1270 mf_cpu = &get_cpu_var(memory_failure_cpu); 1270 mf_cpu = &get_cpu_var(memory_failure_cpu);
1271 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 1271 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1272 if (kfifo_put(&mf_cpu->fifo, &entry)) 1272 if (kfifo_put(&mf_cpu->fifo, entry))
1273 schedule_work_on(smp_processor_id(), &mf_cpu->work); 1273 schedule_work_on(smp_processor_id(), &mf_cpu->work);
1274 else 1274 else
1275 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n", 1275 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
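
This one-liner is unrelated to the locking work: it adopts the kfifo type-safety change merged in the same batch, after which kfifo_put() takes the element by value instead of by pointer. A minimal sketch with a toy fifo of ints:

#include <linux/kfifo.h>

static DEFINE_KFIFO(test_fifo, int, 16);	/* toy fifo, for illustration */

static void kfifo_put_shape(void)
{
	int v = 42;

	kfifo_put(&test_fifo, v);	/* was: kfifo_put(&test_fifo, &v) */
}
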
diff --git a/mm/memory.c b/mm/memory.c
index bf8665849a5f..0409e8f43fa0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -382,7 +382,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
382 pgtable_t token = pmd_pgtable(*pmd); 382 pgtable_t token = pmd_pgtable(*pmd);
383 pmd_clear(pmd); 383 pmd_clear(pmd);
384 pte_free_tlb(tlb, token, addr); 384 pte_free_tlb(tlb, token, addr);
385 tlb->mm->nr_ptes--; 385 atomic_long_dec(&tlb->mm->nr_ptes);
386} 386}
387 387
388static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, 388static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -550,6 +550,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
550int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 550int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
551 pmd_t *pmd, unsigned long address) 551 pmd_t *pmd, unsigned long address)
552{ 552{
553 spinlock_t *ptl;
553 pgtable_t new = pte_alloc_one(mm, address); 554 pgtable_t new = pte_alloc_one(mm, address);
554 int wait_split_huge_page; 555 int wait_split_huge_page;
555 if (!new) 556 if (!new)
@@ -570,15 +571,15 @@ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
570 */ 571 */
571 smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */ 572 smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
572 573
573 spin_lock(&mm->page_table_lock); 574 ptl = pmd_lock(mm, pmd);
574 wait_split_huge_page = 0; 575 wait_split_huge_page = 0;
575 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ 576 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
576 mm->nr_ptes++; 577 atomic_long_inc(&mm->nr_ptes);
577 pmd_populate(mm, pmd, new); 578 pmd_populate(mm, pmd, new);
578 new = NULL; 579 new = NULL;
579 } else if (unlikely(pmd_trans_splitting(*pmd))) 580 } else if (unlikely(pmd_trans_splitting(*pmd)))
580 wait_split_huge_page = 1; 581 wait_split_huge_page = 1;
581 spin_unlock(&mm->page_table_lock); 582 spin_unlock(ptl);
582 if (new) 583 if (new)
583 pte_free(mm, new); 584 pte_free(mm, new);
584 if (wait_split_huge_page) 585 if (wait_split_huge_page)
@@ -1516,20 +1517,20 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 		split_huge_page_pmd(vma, address, pmd);
 		goto split_fallthrough;
 	}
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	if (likely(pmd_trans_huge(*pmd))) {
 		if (unlikely(pmd_trans_splitting(*pmd))) {
-			spin_unlock(&mm->page_table_lock);
+			spin_unlock(ptl);
 			wait_split_huge_page(vma->anon_vma, pmd);
 		} else {
 			page = follow_trans_huge_pmd(vma, address,
 						     pmd, flags);
-			spin_unlock(&mm->page_table_lock);
+			spin_unlock(ptl);
 			*page_mask = HPAGE_PMD_NR - 1;
 			goto out;
 		}
 	} else
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 	/* fall through */
 	}
 split_fallthrough:
@@ -4269,3 +4270,28 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 	}
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
+#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
+static struct kmem_cache *page_ptl_cachep;
+void __init ptlock_cache_init(void)
+{
+	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
+			SLAB_PANIC, NULL);
+}
+
+bool ptlock_alloc(struct page *page)
+{
+	spinlock_t *ptl;
+
+	ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+	if (!ptl)
+		return false;
+	page->ptl = ptl;
+	return true;
+}
+
+void ptlock_free(struct page *page)
+{
+	kfree(page->ptl);
+}
+#endif
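
The mm/memory.c changes above introduce the split-PMD-lock idiom: take the lock
covering a particular page-table page via pmd_lock() and release it through the
returned pointer, instead of serializing everything on mm->page_table_lock. A
minimal sketch of the pattern (function name is illustrative):

	static void pmd_lock_example(struct mm_struct *mm, pmd_t *pmd)
	{
		spinlock_t *ptl;

		ptl = pmd_lock(mm, pmd);	/* was: spin_lock(&mm->page_table_lock) */
		/* ... inspect or modify *pmd under the per-table lock ... */
		spin_unlock(ptl);		/* was: spin_unlock(&mm->page_table_lock) */
	}
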
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4cc19f6ab6c6..c4403cdf3433 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -525,8 +525,9 @@ static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
 #ifdef CONFIG_HUGETLB_PAGE
 	int nid;
 	struct page *page;
+	spinlock_t *ptl;
 
-	spin_lock(&vma->vm_mm->page_table_lock);
+	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
 	page = pte_page(huge_ptep_get((pte_t *)pmd));
 	nid = page_to_nid(page);
 	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
@@ -536,7 +537,7 @@ static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
 		isolate_huge_page(page, private);
 unlock:
-	spin_unlock(&vma->vm_mm->page_table_lock);
+	spin_unlock(ptl);
 #else
 	BUG();
 #endif
diff --git a/mm/migrate.c b/mm/migrate.c
index dfc8300ecbb2..316e720a2023 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -130,7 +130,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 		ptep = huge_pte_offset(mm, addr);
 		if (!ptep)
 			goto out;
-		ptl = &mm->page_table_lock;
+		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
 	} else {
 		pmd = mm_find_pmd(mm, addr);
 		if (!pmd)
@@ -249,9 +249,10 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	__migration_entry_wait(mm, ptep, ptl);
 }
 
-void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+void migration_entry_wait_huge(struct vm_area_struct *vma,
+			       struct mm_struct *mm, pte_t *pte)
 {
-	spinlock_t *ptl = &(mm)->page_table_lock;
+	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
 	__migration_entry_wait(mm, pte, ptl);
 }
 
@@ -1666,6 +1667,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 				unsigned long address,
 				struct page *page, int node)
 {
+	spinlock_t *ptl;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated = 0;
@@ -1705,9 +1707,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	WARN_ON(PageLRU(new_page));
 
 	/* Recheck the target PMD */
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_same(*pmd, entry))) {
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 
 		/* Reverse changes made by migrate_page_copy() */
 		if (TestClearPageActive(new_page))
@@ -1752,7 +1754,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	 * before it's fully transferred to the new page.
 	 */
 	mem_cgroup_end_migration(memcg, page, new_page, true);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 
 	unlock_page(new_page);
 	unlock_page(page);
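
For hugetlb mappings the same split-lock scheme is reached through the hstate
helpers seen above: huge_pte_lockptr() returns the lock covering a hugetlb PTE,
and huge_pte_lock() additionally acquires it. A small illustrative sketch:

	static void huge_lock_example(struct vm_area_struct *vma,
				      struct mm_struct *mm, pte_t *pte)
	{
		spinlock_t *ptl;

		ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
		spin_lock(ptl);
		/* ... operate on the hugetlb entry under its own lock ... */
		spin_unlock(ptl);
	}
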
diff --git a/mm/mmap.c b/mm/mmap.c
index 5a6baddde15d..834b2d785f1e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2724,7 +2724,8 @@ void exit_mmap(struct mm_struct *mm)
 	}
 	vm_unacct_memory(nr_accounted);
 
-	WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+	WARN_ON(atomic_long_read(&mm->nr_ptes) >
+			(FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 
 /* Insert vm structure into process list sorted by address
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6738c47f1f72..1e4a600a6163 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -161,7 +161,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 	 * The baseline for the badness score is the proportion of RAM that each
 	 * task's rss, pagetable and swap space use.
 	 */
-	points = get_mm_rss(p->mm) + p->mm->nr_ptes +
+	points = get_mm_rss(p->mm) + atomic_long_read(&p->mm->nr_ptes) +
 		 get_mm_counter(p->mm, MM_SWAPENTS);
 	task_unlock(p);
 
@@ -364,10 +364,10 @@ static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemas
 			continue;
 		}
 
-		pr_info("[%5d] %5d %5d %8lu %8lu %7lu %8lu %5hd %s\n",
+		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %8lu %5hd %s\n",
 			task->pid, from_kuid(&init_user_ns, task_uid(task)),
 			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
-			task->mm->nr_ptes,
+			atomic_long_read(&task->mm->nr_ptes),
 			get_mm_counter(task->mm, MM_SWAPENTS),
 			task->signal->oom_score_adj, task->comm);
 		task_unlock(task);
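
Since mm->nr_ptes is now an atomic_long_t (visible in the hunks above), all
readers and writers go through the atomic helpers. A minimal sketch:

	static long nr_ptes_example(struct mm_struct *mm)
	{
		atomic_long_inc(&mm->nr_ptes);		/* was: mm->nr_ptes++ */
		atomic_long_dec(&mm->nr_ptes);		/* was: mm->nr_ptes-- */
		return atomic_long_read(&mm->nr_ptes);	/* was: mm->nr_ptes */
	}
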
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 3929a40bd6c0..cbb38545d9d6 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -151,14 +151,14 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pgtable)
 {
-	assert_spin_locked(&mm->page_table_lock);
+	assert_spin_locked(pmd_lockptr(mm, pmdp));
 
 	/* FIFO */
-	if (!mm->pmd_huge_pte)
+	if (!pmd_huge_pte(mm, pmdp))
 		INIT_LIST_HEAD(&pgtable->lru);
 	else
-		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
-	mm->pmd_huge_pte = pgtable;
+		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
+	pmd_huge_pte(mm, pmdp) = pgtable;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
@@ -170,14 +170,14 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 {
 	pgtable_t pgtable;
 
-	assert_spin_locked(&mm->page_table_lock);
+	assert_spin_locked(pmd_lockptr(mm, pmdp));
 
 	/* FIFO */
-	pgtable = mm->pmd_huge_pte;
+	pgtable = pmd_huge_pte(mm, pmdp);
 	if (list_empty(&pgtable->lru))
-		mm->pmd_huge_pte = NULL;
+		pmd_huge_pte(mm, pmdp) = NULL;
 	else {
-		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
+		pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
 					      struct page, lru);
 		list_del(&pgtable->lru);
 	}
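
The deposited page table is now reached through the pmd_huge_pte(mm, pmdp)
accessor rather than mm->pmd_huge_pte directly, and the lockdep assertion
checks the PMD's own lock. An illustrative check under those assumptions:

	static void deposit_check_example(struct mm_struct *mm, pmd_t *pmdp)
	{
		/* the deposit list must be manipulated under this PMD's lock */
		assert_spin_locked(pmd_lockptr(mm, pmdp));
		if (!pmd_huge_pte(mm, pmdp))
			pr_debug("no deposited page table for this pmd\n");
	}
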
diff --git a/mm/rmap.c b/mm/rmap.c
index fd3ee7a54a13..55c8b8dc9ffb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -601,7 +601,7 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 
 	if (unlikely(PageHuge(page))) {
 		pte = huge_pte_offset(mm, address);
-		ptl = &mm->page_table_lock;
+		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
 		goto check;
 	}
 
@@ -665,25 +665,23 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 			  unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
 	int referenced = 0;
 
 	if (unlikely(PageTransHuge(page))) {
 		pmd_t *pmd;
 
-		spin_lock(&mm->page_table_lock);
 		/*
 		 * rmap might return false positives; we must filter
 		 * these out using page_check_address_pmd().
 		 */
 		pmd = page_check_address_pmd(page, mm, address,
-					     PAGE_CHECK_ADDRESS_PMD_FLAG);
-		if (!pmd) {
-			spin_unlock(&mm->page_table_lock);
+					     PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
+		if (!pmd)
 			goto out;
-		}
 
 		if (vma->vm_flags & VM_LOCKED) {
-			spin_unlock(&mm->page_table_lock);
+			spin_unlock(ptl);
 			*mapcount = 0;	/* break early from loop */
 			*vm_flags |= VM_LOCKED;
 			goto out;
@@ -692,10 +690,9 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		/* go ahead even if the pmd is pmd_trans_splitting() */
 		if (pmdp_clear_flush_young_notify(vma, address, pmd))
 			referenced++;
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 	} else {
 		pte_t *pte;
-		spinlock_t *ptl;
 
 		/*
 		 * rmap might return false positives; we must filter
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index ec9a9ef4ce50..5afeb5aa4c7c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2523,16 +2523,17 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
 		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
 			const struct fib_info *fi = fa->fa_info;
 			unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
-			int len;
 
 			if (fa->fa_type == RTN_BROADCAST
 			    || fa->fa_type == RTN_MULTICAST)
 				continue;
 
+			seq_setwidth(seq, 127);
+
 			if (fi)
 				seq_printf(seq,
 					   "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
-					   "%d\t%08X\t%d\t%u\t%u%n",
+					   "%d\t%08X\t%d\t%u\t%u",
 					   fi->fib_dev ? fi->fib_dev->name : "*",
 					   prefix,
 					   fi->fib_nh->nh_gw, flags, 0, 0,
@@ -2541,15 +2542,15 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
 					   (fi->fib_advmss ?
 					    fi->fib_advmss + 40 : 0),
 					   fi->fib_window,
-					   fi->fib_rtt >> 3, &len);
+					   fi->fib_rtt >> 3);
 			else
 				seq_printf(seq,
 					   "*\t%08X\t%08X\t%04X\t%d\t%u\t"
-					   "%d\t%08X\t%d\t%u\t%u%n",
+					   "%d\t%08X\t%d\t%u\t%u",
 					   prefix, 0, flags, 0, 0, 0,
-					   mask, 0, 0, 0, &len);
+					   mask, 0, 0, 0);
 
-			seq_printf(seq, "%*s\n", 127 - len, "");
+			seq_pad(seq, '\n');
 		}
 	}
 
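
This and the following seq_file conversions share one idiom: instead of using
the (now-rejected) "%n" specifier to learn the record length and padding by
hand, the show function declares the target width with seq_setwidth() and
closes the record with seq_pad(). A minimal sketch of a converted show function
(label and width values are illustrative):

	static int padded_seq_show_example(struct seq_file *seq, void *v)
	{
		seq_setwidth(seq, 127);			/* pad each record to 127 columns */
		seq_printf(seq, "%s: %d", "label", 42);	/* no "%n", no manual length */
		seq_pad(seq, '\n');			/* space-pad, then emit '\n' */
		return 0;
	}
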
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 9afbdb19f4a2..cbc85f660d54 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -1076,7 +1076,7 @@ void ping_seq_stop(struct seq_file *seq, void *v)
 EXPORT_SYMBOL_GPL(ping_seq_stop);
 
 static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
-		int bucket, int *len)
+		int bucket)
 {
 	struct inet_sock *inet = inet_sk(sp);
 	__be32 dest = inet->inet_daddr;
@@ -1085,7 +1085,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
 	__u16 srcp = ntohs(inet->inet_sport);
 
 	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
 		bucket, src, srcp, dest, destp, sp->sk_state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
@@ -1093,23 +1093,22 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
 		0, sock_i_ino(sp),
 		atomic_read(&sp->sk_refcnt), sp,
-		atomic_read(&sp->sk_drops), len);
+		atomic_read(&sp->sk_drops));
 }
 
 static int ping_v4_seq_show(struct seq_file *seq, void *v)
 {
+	seq_setwidth(seq, 127);
 	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%-127s\n",
-			   "  sl  local_address rem_address   st tx_queue "
+		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
 			   "inode ref pointer drops");
 	else {
 		struct ping_iter_state *state = seq->private;
-		int len;
 
-		ping_v4_format_sock(v, seq, state->bucket, &len);
-		seq_printf(seq, "%*s\n", 127 - len, "");
+		ping_v4_format_sock(v, seq, state->bucket);
 	}
+	seq_pad(seq, '\n');
 	return 0;
 }
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 14bba8a1c5a7..59a6f8b90cd9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2541,13 +2541,13 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 EXPORT_SYMBOL(tcp_proc_unregister);
 
 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
-			 struct seq_file *f, int i, kuid_t uid, int *len)
+			 struct seq_file *f, int i, kuid_t uid)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	long delta = req->expires - jiffies;
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
 		i,
 		ireq->ir_loc_addr,
 		ntohs(inet_sk(sk)->inet_sport),
@@ -2562,11 +2562,10 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
 		0, /* non standard timer */
 		0, /* open_requests have no inode */
 		atomic_read(&sk->sk_refcnt),
-		req,
-		len);
+		req);
 }
 
-static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
+static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 {
 	int timer_active;
 	unsigned long timer_expires;
@@ -2605,7 +2604,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
-			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d%n",
+			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
 		i, src, srcp, dest, destp, sk->sk_state,
 		tp->write_seq - tp->snd_una,
 		rx_queue,
@@ -2622,12 +2621,11 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 		tp->snd_cwnd,
 		sk->sk_state == TCP_LISTEN ?
 		    (fastopenq ? fastopenq->max_qlen : 0) :
-		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
-		len);
+		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
 }
 
 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
-			       struct seq_file *f, int i, int *len)
+			       struct seq_file *f, int i)
 {
 	__be32 dest, src;
 	__u16 destp, srcp;
@@ -2639,10 +2637,10 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 	srcp  = ntohs(tw->tw_sport);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
-		atomic_read(&tw->tw_refcnt), tw, len);
+		atomic_read(&tw->tw_refcnt), tw);
 }
 
 #define TMPSZ 150
@@ -2651,11 +2649,10 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
 {
 	struct tcp_iter_state *st;
 	struct sock *sk = v;
-	int len;
 
+	seq_setwidth(seq, TMPSZ - 1);
 	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq, "%-*s\n", TMPSZ - 1,
-			   "  sl  local_address rem_address   st tx_queue "
+		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
 			   "inode");
 		goto out;
@@ -2666,16 +2663,16 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
 	case TCP_SEQ_STATE_LISTENING:
 	case TCP_SEQ_STATE_ESTABLISHED:
 		if (sk->sk_state == TCP_TIME_WAIT)
-			get_timewait4_sock(v, seq, st->num, &len);
+			get_timewait4_sock(v, seq, st->num);
 		else
-			get_tcp4_sock(v, seq, st->num, &len);
+			get_tcp4_sock(v, seq, st->num);
 		break;
 	case TCP_SEQ_STATE_OPENREQ:
-		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
+		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
 		break;
 	}
-	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
 out:
+	seq_pad(seq, '\n');
 	return 0;
 }
 
2681 2678
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 89909dd730dd..de86e5bc4462 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2331,7 +2331,7 @@ EXPORT_SYMBOL(udp_proc_unregister);
 
 /* ------------------------------------------------------------------------ */
 static void udp4_format_sock(struct sock *sp, struct seq_file *f,
-		int bucket, int *len)
+		int bucket)
 {
 	struct inet_sock *inet = inet_sk(sp);
 	__be32 dest = inet->inet_daddr;
@@ -2340,7 +2340,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
 	__u16 srcp = ntohs(inet->inet_sport);
 
 	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
 		bucket, src, srcp, dest, destp, sp->sk_state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
@@ -2348,23 +2348,22 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
 		0, sock_i_ino(sp),
 		atomic_read(&sp->sk_refcnt), sp,
-		atomic_read(&sp->sk_drops), len);
+		atomic_read(&sp->sk_drops));
 }
 
 int udp4_seq_show(struct seq_file *seq, void *v)
 {
+	seq_setwidth(seq, 127);
 	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%-127s\n",
-			   "  sl  local_address rem_address   st tx_queue "
+		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
 			   "inode ref pointer drops");
 	else {
 		struct udp_iter_state *state = seq->private;
-		int len;
 
-		udp4_format_sock(v, seq, state->bucket, &len);
-		seq_printf(seq, "%*s\n", 127 - len, "");
+		udp4_format_sock(v, seq, state->bucket);
 	}
+	seq_pad(seq, '\n');
 	return 0;
 }
 
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 77e38f733496..008214a3d5eb 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -595,26 +595,25 @@ static void pn_sock_seq_stop(struct seq_file *seq, void *v)
 
 static int pn_sock_seq_show(struct seq_file *seq, void *v)
 {
-	int len;
-
+	seq_setwidth(seq, 127);
 	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%s%n", "pt  loc  rem rs st tx_queue rx_queue "
-			"  uid inode ref pointer drops", &len);
+		seq_puts(seq, "pt  loc  rem rs st tx_queue rx_queue "
+			"  uid inode ref pointer drops");
 	else {
 		struct sock *sk = v;
 		struct pn_sock *pn = pn_sk(sk);
 
 		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
-			"%d %pK %d%n",
+			"%d %pK %d",
 			sk->sk_protocol, pn->sobject, pn->dobject,
 			pn->resource, sk->sk_state,
 			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
 			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
 			sock_i_ino(sk),
 			atomic_read(&sk->sk_refcnt), sk,
-			atomic_read(&sk->sk_drops), &len);
+			atomic_read(&sk->sk_drops));
 	}
-	seq_printf(seq, "%*s\n", 127 - len, "");
+	seq_pad(seq, '\n');
 	return 0;
 }
 
@@ -785,20 +784,19 @@ static void pn_res_seq_stop(struct seq_file *seq, void *v)
 
 static int pn_res_seq_show(struct seq_file *seq, void *v)
 {
-	int len;
-
+	seq_setwidth(seq, 63);
 	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%s%n", "rs   uid inode", &len);
+		seq_puts(seq, "rs   uid inode");
 	else {
 		struct sock **psk = v;
 		struct sock *sk = *psk;
 
-		seq_printf(seq, "%02X %5u %lu%n",
+		seq_printf(seq, "%02X %5u %lu",
 			   (int) (psk - pnres.sk),
 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
-			   sock_i_ino(sk), &len);
+			   sock_i_ino(sk));
 	}
-	seq_printf(seq, "%*s\n", 63 - len, "");
+	seq_pad(seq, '\n');
 	return 0;
 }
 
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index 5ea573b37648..647396baa56f 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -79,12 +79,13 @@ static sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = {
  */
 static int sctp_objcnt_seq_show(struct seq_file *seq, void *v)
 {
-	int i, len;
+	int i;
 
 	i = (int)*(loff_t *)v;
-	seq_printf(seq, "%s: %d%n", sctp_dbg_objcnt[i].label,
-		   atomic_read(sctp_dbg_objcnt[i].counter), &len);
-	seq_printf(seq, "%*s\n", 127 - len, "");
+	seq_setwidth(seq, 127);
+	seq_printf(seq, "%s: %d", sctp_dbg_objcnt[i].label,
+		   atomic_read(sctp_dbg_objcnt[i].counter));
+	seq_pad(seq, '\n');
 	return 0;
 }
 
diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
index cfe40addda76..2fca916d9edf 100644
--- a/samples/kfifo/bytestream-example.c
+++ b/samples/kfifo/bytestream-example.c
@@ -64,7 +64,7 @@ static int __init testfunc(void)
 
 	/* put values into the fifo */
 	for (i = 0; i != 10; i++)
-		kfifo_put(&test, &i);
+		kfifo_put(&test, i);
 
 	/* show the number of used elements */
 	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
@@ -85,7 +85,7 @@ static int __init testfunc(void)
 	kfifo_skip(&test);
 
 	/* put values into the fifo until is full */
-	for (i = 20; kfifo_put(&test, &i); i++)
+	for (i = 20; kfifo_put(&test, i); i++)
 		;
 
 	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c
index 06473791c08a..aa243db93f01 100644
--- a/samples/kfifo/dma-example.c
+++ b/samples/kfifo/dma-example.c
@@ -39,7 +39,7 @@ static int __init example_init(void)
 	kfifo_in(&fifo, "test", 4);
 
 	for (i = 0; i != 9; i++)
-		kfifo_put(&fifo, &i);
+		kfifo_put(&fifo, i);
 
 	/* kick away first byte */
 	kfifo_skip(&fifo);
diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
index 6f8e79e76c9e..8dc3c2e7105a 100644
--- a/samples/kfifo/inttype-example.c
+++ b/samples/kfifo/inttype-example.c
@@ -61,7 +61,7 @@ static int __init testfunc(void)
 
 	/* put values into the fifo */
 	for (i = 0; i != 10; i++)
-		kfifo_put(&test, &i);
+		kfifo_put(&test, i);
 
 	/* show the number of used elements */
 	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
@@ -78,7 +78,7 @@ static int __init testfunc(void)
 	kfifo_skip(&test);
 
 	/* put values into the fifo until is full */
-	for (i = 20; kfifo_put(&test, &i); i++)
+	for (i = 20; kfifo_put(&test, i); i++)
 		;
 
 	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 9d93f02c6285..5e1c7bc73b29 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -184,11 +184,7 @@ static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
 	/* Assign the pool into private_data field */
 	dmab->private_data = pool;
 
-	dmab->area = (void *)gen_pool_alloc(pool, size);
-	if (!dmab->area)
-		return;
-
-	dmab->addr = gen_pool_virt_to_phys(pool, (unsigned long)dmab->area);
+	dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr);
 }
 
 /**
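
gen_pool_dma_alloc() folds the old two-step sequence (gen_pool_alloc() followed
by gen_pool_virt_to_phys()) into a single call that returns the CPU pointer,
fills in the DMA address, and returns NULL on failure. A hedged sketch of a
caller (names are illustrative):

	static void *iram_alloc_example(struct gen_pool *pool, size_t size,
					dma_addr_t *addr)
	{
		/* returns the virtual address; *addr receives the DMA address */
		return gen_pool_dma_alloc(pool, size, addr);
	}
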
diff --git a/sound/firewire/dice.c b/sound/firewire/dice.c
index 6feee6614193..57bcd31fcc12 100644
--- a/sound/firewire/dice.c
+++ b/sound/firewire/dice.c
@@ -543,7 +543,7 @@ static int dice_change_rate(struct dice *dice, unsigned int clock_rate)
 	__be32 value;
 	int err;
 
-	INIT_COMPLETION(dice->clock_accepted);
+	reinit_completion(&dice->clock_accepted);
 
 	value = cpu_to_be32(clock_rate | CLOCK_SOURCE_ARX1);
 	err = snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
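
These sound conversions are part of the tree-wide INIT_COMPLETION() ->
reinit_completion() rename: the macro taking the struct becomes a function
taking its address, and it only resets the done counter of an
already-initialized completion. A minimal sketch (names are illustrative):

	#include <linux/completion.h>

	static struct completion example_done;

	static void completion_reuse_example(void)
	{
		init_completion(&example_done);		/* first-time initialization */
		/* ... complete()/wait_for_completion() cycle ... */
		reinit_completion(&example_done);	/* was: INIT_COMPLETION(example_done) */
	}
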
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 2acf987844e8..350ba23a9893 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -74,7 +74,7 @@ static void s3c_ac97_activate(struct snd_ac97 *ac97)
 	if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
 		return; /* Return if already active */
 
-	INIT_COMPLETION(s3c_ac97.done);
+	reinit_completion(&s3c_ac97.done);
 
 	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
 	ac_glbctrl = S3C_AC97_GLBCTRL_ACLINKON;
@@ -103,7 +103,7 @@ static unsigned short s3c_ac97_read(struct snd_ac97 *ac97,
 
 	s3c_ac97_activate(ac97);
 
-	INIT_COMPLETION(s3c_ac97.done);
+	reinit_completion(&s3c_ac97.done);
 
 	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
 	ac_codec_cmd = S3C_AC97_CODEC_CMD_READ | AC_CMD_ADDR(reg);
@@ -140,7 +140,7 @@ static void s3c_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
 
 	s3c_ac97_activate(ac97);
 
-	INIT_COMPLETION(s3c_ac97.done);
+	reinit_completion(&s3c_ac97.done);
 
 	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
 	ac_codec_cmd = AC_CMD_ADDR(reg) | AC_CMD_DATA(val);