author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-29 19:38:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-29 19:38:48 -0400
commit		a7ca10f263d7e673c74d8e0946d6b9993405cc9c (patch)
tree		7c50f0e728ca1a426235356acba1115c45dfe809
parent		d506aa68c23db708ad45ca8c17f0d7f5d7029a37 (diff)
parent		4d88e6f7d5ffc84e6094a47925870f4a130555c2 (diff)
Merge branch 'akpm' (incoming from Andrew Morton)
Merge misc fixes from Andrew Morton:
 "21 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (21 commits)
  mm/balloon_compaction: fix deflation when compaction is disabled
  sh: fix sh770x SCIF memory regions
  zram: avoid NULL pointer access in concurrent situation
  mm/slab_common: don't check for duplicate cache names
  ocfs2: fix d_splice_alias() return code checking
  mm: rmap: split out page_remove_file_rmap()
  mm: memcontrol: fix missed end-writeback page accounting
  mm: page-writeback: inline account_page_dirtied() into single caller
  lib/bitmap.c: fix undefined shift in __bitmap_shift_{left|right}()
  drivers/rtc/rtc-bq32k.c: fix register value
  memory-hotplug: clear pgdat which is allocated by bootmem in try_offline_node()
  drivers/rtc/rtc-s3c.c: fix initialization failure without rtc source clock
  kernel/kmod: fix use-after-free of the sub_info structure
  drivers/rtc/rtc-pm8xxx.c: rework to support pm8941 rtc
  mm, thp: fix collapsing of hugepages on madvise
  drivers: of: add return value to of_reserved_mem_device_init()
  mm: free compound page with correct order
  gcov: add ARM64 to GCOV_PROFILE_ALL
  fsnotify: next_i is freed during fsnotify_unmount_inodes.
  mm/compaction.c: avoid premature range skip in isolate_migratepages_range
  ...
 arch/sh/kernel/cpu/sh3/setup-sh770x.c |   6
 drivers/base/dma-contiguous.c         |   3
 drivers/block/zram/zram_drv.c         |  10
 drivers/of/of_reserved_mem.c          |  14
 drivers/rtc/Kconfig                   |   2
 drivers/rtc/rtc-bq32k.c               |   2
 drivers/rtc/rtc-pm8xxx.c              | 222
 drivers/rtc/rtc-s3c.c                 |  14
 fs/notify/inode_mark.c                |  17
 fs/ocfs2/namei.c                      |   2
 include/linux/khugepaged.h            |  17
 include/linux/memcontrol.h            |  58
 include/linux/mm.h                    |   1
 include/linux/of_reserved_mem.h       |   9
 kernel/gcov/Kconfig                   |   2
 kernel/kmod.c                         |  76
 lib/bitmap.c                          |   8
 mm/balloon_compaction.c               |   2
 mm/compaction.c                       |   3
 mm/huge_memory.c                      |  15
 mm/memcontrol.c                       | 105
 mm/memory_hotplug.c                   |   5
 mm/mmap.c                             |   8
 mm/page-writeback.c                   |  43
 mm/page_cgroup.c                      |   1
 mm/rmap.c                             |  88
 mm/slab_common.c                      |  10
 27 files changed, 394 insertions(+), 349 deletions(-)
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 9139d14b9c53..538c10db3537 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = {
 };
 
 static struct resource scif0_resources[] = {
-	DEFINE_RES_MEM(0xfffffe80, 0x100),
+	DEFINE_RES_MEM(0xfffffe80, 0x10),
 	DEFINE_RES_IRQ(evt2irq(0x4e0)),
 };
 
@@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = {
 };
 
 static struct resource scif1_resources[] = {
-	DEFINE_RES_MEM(0xa4000150, 0x100),
+	DEFINE_RES_MEM(0xa4000150, 0x10),
 	DEFINE_RES_IRQ(evt2irq(0x900)),
 };
 
@@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = {
 };
 
 static struct resource scif2_resources[] = {
-	DEFINE_RES_MEM(0xa4000140, 0x100),
+	DEFINE_RES_MEM(0xa4000140, 0x10),
 	DEFINE_RES_IRQ(evt2irq(0x880)),
 };
 
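The size change is easy to sanity-check from the addresses in the hunks themselves: the scif2 and scif1 windows start only 0x10 bytes apart, so any region larger than 0x10 makes them overlap and resource registration fails. A self-contained sketch of the arithmetic (plain C, illustrative only, not kernel code):

#include <stdio.h>

/* Overlap check for the DEFINE_RES_MEM windows above: with the old
 * 0x100 size the scif2 region runs into scif1; with 0x10 it does not. */
int main(void)
{
	unsigned long scif2 = 0xa4000140, scif1 = 0xa4000150;

	printf("size 0x100 overlaps scif1: %d\n", scif2 + 0x100 > scif1); /* 1 */
	printf("size 0x10  overlaps scif1: %d\n", scif2 + 0x10  > scif1); /* 0 */
	return 0;
}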
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 473ff4892401..950fff9ce453 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -223,9 +223,10 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 #undef pr_fmt
 #define pr_fmt(fmt) fmt
 
-static void rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
+static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
 {
 	dev_set_cma_area(dev, rmem->priv);
+	return 0;
 }
 
 static void rmem_cma_device_release(struct reserved_mem *rmem,
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0e63e8aa8279..2ad0b5bce44b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -99,11 +99,12 @@ static ssize_t mem_used_total_show(struct device *dev,
 {
 	u64 val = 0;
 	struct zram *zram = dev_to_zram(dev);
-	struct zram_meta *meta = zram->meta;
 
 	down_read(&zram->init_lock);
-	if (init_done(zram))
+	if (init_done(zram)) {
+		struct zram_meta *meta = zram->meta;
 		val = zs_get_total_pages(meta->mem_pool);
+	}
 	up_read(&zram->init_lock);
 
 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
@@ -173,16 +174,17 @@ static ssize_t mem_used_max_store(struct device *dev,
 	int err;
 	unsigned long val;
 	struct zram *zram = dev_to_zram(dev);
-	struct zram_meta *meta = zram->meta;
 
 	err = kstrtoul(buf, 10, &val);
 	if (err || val != 0)
 		return -EINVAL;
 
 	down_read(&zram->init_lock);
-	if (init_done(zram))
+	if (init_done(zram)) {
+		struct zram_meta *meta = zram->meta;
 		atomic_long_set(&zram->stats.max_used_pages,
 				zs_get_total_pages(meta->mem_pool));
+	}
 	up_read(&zram->init_lock);
 
 	return len;
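The bug pattern being removed is dereferencing zram->meta before confirming, under init_lock, that initialization has completed; a concurrent reset can leave meta NULL. A minimal userspace analogue of the corrected pattern (pthreads stand in for the kernel rwsem; names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct meta { long pages; };

static pthread_rwlock_t init_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct meta *meta;	/* NULL until init, NULL again after reset */

/* Safe reader: the NULL check and the dereference happen under the
 * same read lock, so a concurrent reset cannot free meta in between. */
static long read_pages(void)
{
	long val = 0;

	pthread_rwlock_rdlock(&init_lock);
	if (meta)		/* plays the role of init_done() */
		val = meta->pages;
	pthread_rwlock_unlock(&init_lock);
	return val;
}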
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 59fb12e84e6b..dc566b38645f 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -243,23 +243,27 @@ static inline struct reserved_mem *__find_rmem(struct device_node *node)
  * This function assign memory region pointed by "memory-region" device tree
  * property to the given device.
  */
-void of_reserved_mem_device_init(struct device *dev)
+int of_reserved_mem_device_init(struct device *dev)
 {
 	struct reserved_mem *rmem;
 	struct device_node *np;
+	int ret;
 
 	np = of_parse_phandle(dev->of_node, "memory-region", 0);
 	if (!np)
-		return;
+		return -ENODEV;
 
 	rmem = __find_rmem(np);
 	of_node_put(np);
 
 	if (!rmem || !rmem->ops || !rmem->ops->device_init)
-		return;
+		return -EINVAL;
+
+	ret = rmem->ops->device_init(rmem, dev);
+	if (ret == 0)
+		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
 
-	rmem->ops->device_init(rmem, dev);
-	dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
+	return ret;
 }
 
 /**
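With an int return, a caller can finally tell "no memory-region property" apart from a real assignment failure. A hypothetical consumer (foo_probe and its optional-region policy are illustrative, not part of this patch):

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Attach the device's "memory-region" node, if any. */
	ret = of_reserved_mem_device_init(&pdev->dev);
	if (ret == -ENODEV)
		dev_dbg(&pdev->dev, "no reserved memory region, continuing\n");
	else if (ret)
		return ret;	/* region exists but could not be assigned */

	/* ... rest of probe ... */
	return 0;
}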
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 94ae1798d48a..6dd12ddbabc6 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1320,7 +1320,7 @@ config RTC_DRV_LPC32XX
 
 config RTC_DRV_PM8XXX
 	tristate "Qualcomm PMIC8XXX RTC"
-	depends on MFD_PM8XXX
+	depends on MFD_PM8XXX || MFD_SPMI_PMIC
 	help
 	  If you say yes here you get support for the
 	  Qualcomm PMIC8XXX RTC.
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 314129e66d6e..92679df6d6e2 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -160,7 +160,7 @@ static int trickle_charger_of_init(struct device *dev, struct device_node *node)
 			dev_err(dev, "bq32k: diode and resistor mismatch\n");
 			return -EINVAL;
 		}
-		reg = 0x25;
+		reg = 0x45;
 		break;
 
 	default:
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 197699f358c7..5adcf111fc14 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -27,21 +27,36 @@
 
 /* RTC_CTRL register bit fields */
 #define PM8xxx_RTC_ENABLE		BIT(7)
-#define PM8xxx_RTC_ALARM_ENABLE		BIT(1)
 #define PM8xxx_RTC_ALARM_CLEAR		BIT(0)
 
 #define NUM_8_BIT_RTC_REGS		0x4
 
 /**
+ * struct pm8xxx_rtc_regs - describe RTC registers per PMIC versions
+ * @ctrl: base address of control register
+ * @write: base address of write register
+ * @read: base address of read register
+ * @alarm_ctrl: base address of alarm control register
+ * @alarm_ctrl2: base address of alarm control2 register
+ * @alarm_rw: base address of alarm read-write register
+ * @alarm_en: alarm enable mask
+ */
+struct pm8xxx_rtc_regs {
+	unsigned int ctrl;
+	unsigned int write;
+	unsigned int read;
+	unsigned int alarm_ctrl;
+	unsigned int alarm_ctrl2;
+	unsigned int alarm_rw;
+	unsigned int alarm_en;
+};
+
+/**
  * struct pm8xxx_rtc - rtc driver internal structure
  * @rtc: rtc device for this driver.
  * @regmap: regmap used to access RTC registers
 * @allow_set_time: indicates whether writing to the RTC is allowed
 * @rtc_alarm_irq: rtc alarm irq number.
- * @rtc_base: address of rtc control register.
- * @rtc_read_base: base address of read registers.
- * @rtc_write_base: base address of write registers.
- * @alarm_rw_base: base address of alarm registers.
 * @ctrl_reg: rtc control register.
 * @rtc_dev: device structure.
 * @ctrl_reg_lock: spinlock protecting access to ctrl_reg.
@@ -51,11 +66,7 @@ struct pm8xxx_rtc {
 	struct regmap *regmap;
 	bool allow_set_time;
 	int rtc_alarm_irq;
-	int rtc_base;
-	int rtc_read_base;
-	int rtc_write_base;
-	int alarm_rw_base;
-	u8 ctrl_reg;
+	const struct pm8xxx_rtc_regs *regs;
 	struct device *rtc_dev;
 	spinlock_t ctrl_reg_lock;
 };
@@ -71,8 +82,10 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
 	int rc, i;
 	unsigned long secs, irq_flags;
-	u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, ctrl_reg;
+	u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0;
+	unsigned int ctrl_reg;
 	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+	const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
 	if (!rtc_dd->allow_set_time)
 		return -EACCES;
@@ -87,30 +100,30 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
 
 	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
-	ctrl_reg = rtc_dd->ctrl_reg;
 
-	if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) {
+	rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
+	if (rc)
+		goto rtc_rw_fail;
+
+	if (ctrl_reg & regs->alarm_en) {
 		alarm_enabled = 1;
-		ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
-		rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+		ctrl_reg &= ~regs->alarm_en;
+		rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
 		if (rc) {
 			dev_err(dev, "Write to RTC control register failed\n");
 			goto rtc_rw_fail;
 		}
-		rtc_dd->ctrl_reg = ctrl_reg;
-	} else {
-		spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
 	}
 
 	/* Write 0 to Byte[0] */
-	rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, 0);
+	rc = regmap_write(rtc_dd->regmap, regs->write, 0);
 	if (rc) {
 		dev_err(dev, "Write to RTC write data register failed\n");
 		goto rtc_rw_fail;
 	}
 
 	/* Write Byte[1], Byte[2], Byte[3] */
-	rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->rtc_write_base + 1,
+	rc = regmap_bulk_write(rtc_dd->regmap, regs->write + 1,
 			       &value[1], sizeof(value) - 1);
 	if (rc) {
 		dev_err(dev, "Write to RTC write data register failed\n");
@@ -118,25 +131,23 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	}
 
 	/* Write Byte[0] */
-	rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, value[0]);
+	rc = regmap_write(rtc_dd->regmap, regs->write, value[0]);
 	if (rc) {
 		dev_err(dev, "Write to RTC write data register failed\n");
 		goto rtc_rw_fail;
 	}
 
 	if (alarm_enabled) {
-		ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
-		rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+		ctrl_reg |= regs->alarm_en;
+		rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
 		if (rc) {
 			dev_err(dev, "Write to RTC control register failed\n");
 			goto rtc_rw_fail;
 		}
-		rtc_dd->ctrl_reg = ctrl_reg;
 	}
 
 rtc_rw_fail:
-	if (alarm_enabled)
-		spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+	spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
 
 	return rc;
 }
@@ -148,9 +159,9 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
 	unsigned long secs;
 	unsigned int reg;
 	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+	const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
-	rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base,
-			value, sizeof(value));
+	rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value));
 	if (rc) {
 		dev_err(dev, "RTC read data register failed\n");
 		return rc;
@@ -160,14 +171,14 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
 	 * Read the LSB again and check if there has been a carry over.
 	 * If there is, redo the read operation.
 	 */
-	rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_read_base, &reg);
+	rc = regmap_read(rtc_dd->regmap, regs->read, &reg);
 	if (rc < 0) {
 		dev_err(dev, "RTC read data register failed\n");
 		return rc;
 	}
 
 	if (unlikely(reg < value[0])) {
-		rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base,
+		rc = regmap_bulk_read(rtc_dd->regmap, regs->read,
 				      value, sizeof(value));
 		if (rc) {
 			dev_err(dev, "RTC read data register failed\n");
@@ -195,9 +206,11 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
 static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
 	int rc, i;
-	u8 value[NUM_8_BIT_RTC_REGS], ctrl_reg;
+	u8 value[NUM_8_BIT_RTC_REGS];
+	unsigned int ctrl_reg;
 	unsigned long secs, irq_flags;
 	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+	const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
 	rtc_tm_to_time(&alarm->time, &secs);
 
@@ -208,28 +221,28 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 
 	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
 
-	rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->alarm_rw_base, value,
+	rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
 			       sizeof(value));
 	if (rc) {
 		dev_err(dev, "Write to RTC ALARM register failed\n");
 		goto rtc_rw_fail;
 	}
 
-	ctrl_reg = rtc_dd->ctrl_reg;
+	rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+	if (rc)
+		goto rtc_rw_fail;
 
 	if (alarm->enabled)
-		ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
+		ctrl_reg |= regs->alarm_en;
 	else
-		ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+		ctrl_reg &= ~regs->alarm_en;
 
-	rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+	rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
 	if (rc) {
-		dev_err(dev, "Write to RTC control register failed\n");
+		dev_err(dev, "Write to RTC alarm control register failed\n");
 		goto rtc_rw_fail;
 	}
 
-	rtc_dd->ctrl_reg = ctrl_reg;
-
 	dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
 		alarm->time.tm_hour, alarm->time.tm_min,
 		alarm->time.tm_sec, alarm->time.tm_mday,
@@ -245,8 +258,9 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 	u8 value[NUM_8_BIT_RTC_REGS];
 	unsigned long secs;
 	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+	const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
-	rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->alarm_rw_base, value,
+	rc = regmap_bulk_read(rtc_dd->regmap, regs->alarm_rw, value,
 			      sizeof(value));
 	if (rc) {
 		dev_err(dev, "RTC alarm time read failed\n");
@@ -276,25 +290,26 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
 	int rc;
 	unsigned long irq_flags;
 	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
-	u8 ctrl_reg;
+	const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+	unsigned int ctrl_reg;
 
 	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
 
-	ctrl_reg = rtc_dd->ctrl_reg;
+	rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+	if (rc)
+		goto rtc_rw_fail;
 
 	if (enable)
-		ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
+		ctrl_reg |= regs->alarm_en;
 	else
-		ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+		ctrl_reg &= ~regs->alarm_en;
 
-	rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+	rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
 	if (rc) {
 		dev_err(dev, "Write to RTC control register failed\n");
 		goto rtc_rw_fail;
 	}
 
-	rtc_dd->ctrl_reg = ctrl_reg;
-
 rtc_rw_fail:
 	spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
 	return rc;
@@ -311,6 +326,7 @@ static const struct rtc_class_ops pm8xxx_rtc_ops = {
 static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
 {
 	struct pm8xxx_rtc *rtc_dd = dev_id;
+	const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 	unsigned int ctrl_reg;
 	int rc;
 	unsigned long irq_flags;
@@ -320,48 +336,100 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
 	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
 
 	/* Clear the alarm enable bit */
-	ctrl_reg = rtc_dd->ctrl_reg;
-	ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+	rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+	if (rc) {
+		spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+		goto rtc_alarm_handled;
+	}
+
+	ctrl_reg &= ~regs->alarm_en;
 
-	rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+	rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
 	if (rc) {
 		spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
 		dev_err(rtc_dd->rtc_dev,
-			"Write to RTC control register failed\n");
+			"Write to alarm control register failed\n");
 		goto rtc_alarm_handled;
 	}
 
-	rtc_dd->ctrl_reg = ctrl_reg;
 	spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
 
 	/* Clear RTC alarm register */
-	rc = regmap_read(rtc_dd->regmap,
-			 rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
-			 &ctrl_reg);
+	rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl2, &ctrl_reg);
 	if (rc) {
 		dev_err(rtc_dd->rtc_dev,
-			"RTC Alarm control register read failed\n");
+			"RTC Alarm control2 register read failed\n");
 		goto rtc_alarm_handled;
 	}
 
-	ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR;
-	rc = regmap_write(rtc_dd->regmap,
-			  rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
-			  ctrl_reg);
+	ctrl_reg |= PM8xxx_RTC_ALARM_CLEAR;
+	rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl2, ctrl_reg);
 	if (rc)
 		dev_err(rtc_dd->rtc_dev,
-			"Write to RTC Alarm control register failed\n");
+			"Write to RTC Alarm control2 register failed\n");
 
 rtc_alarm_handled:
 	return IRQ_HANDLED;
 }
 
+static int pm8xxx_rtc_enable(struct pm8xxx_rtc *rtc_dd)
+{
+	const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+	unsigned int ctrl_reg;
+	int rc;
+
+	/* Check if the RTC is on, else turn it on */
+	rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
+	if (rc)
+		return rc;
+
+	if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
+		ctrl_reg |= PM8xxx_RTC_ENABLE;
+		rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static const struct pm8xxx_rtc_regs pm8921_regs = {
+	.ctrl = 0x11d,
+	.write = 0x11f,
+	.read = 0x123,
+	.alarm_rw = 0x127,
+	.alarm_ctrl = 0x11d,
+	.alarm_ctrl2 = 0x11e,
+	.alarm_en = BIT(1),
+};
+
+static const struct pm8xxx_rtc_regs pm8058_regs = {
+	.ctrl = 0x1e8,
+	.write = 0x1ea,
+	.read = 0x1ee,
+	.alarm_rw = 0x1f2,
+	.alarm_ctrl = 0x1e8,
+	.alarm_ctrl2 = 0x1e9,
+	.alarm_en = BIT(1),
+};
+
+static const struct pm8xxx_rtc_regs pm8941_regs = {
+	.ctrl = 0x6046,
+	.write = 0x6040,
+	.read = 0x6048,
+	.alarm_rw = 0x6140,
+	.alarm_ctrl = 0x6146,
+	.alarm_ctrl2 = 0x6148,
+	.alarm_en = BIT(7),
+};
+
 /*
  * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out
  */
 static const struct of_device_id pm8xxx_id_table[] = {
-	{ .compatible = "qcom,pm8921-rtc", .data = (void *) 0x11D },
-	{ .compatible = "qcom,pm8058-rtc", .data = (void *) 0x1E8 },
+	{ .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs },
+	{ .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs },
+	{ .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
@@ -369,7 +437,6 @@ MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
 static int pm8xxx_rtc_probe(struct platform_device *pdev)
 {
 	int rc;
-	unsigned int ctrl_reg;
 	struct pm8xxx_rtc *rtc_dd;
 	const struct of_device_id *match;
 
@@ -399,33 +466,12 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
 	rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node,
 						      "allow-set-time");
 
-	rtc_dd->rtc_base = (long) match->data;
-
-	/* Setup RTC register addresses */
-	rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET;
-	rtc_dd->rtc_read_base = rtc_dd->rtc_base + PM8XXX_RTC_READ_OFFSET;
-	rtc_dd->alarm_rw_base = rtc_dd->rtc_base + PM8XXX_ALARM_RW_OFFSET;
-
+	rtc_dd->regs = match->data;
 	rtc_dd->rtc_dev = &pdev->dev;
 
-	/* Check if the RTC is on, else turn it on */
-	rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_base, &ctrl_reg);
-	if (rc) {
-		dev_err(&pdev->dev, "RTC control register read failed!\n");
+	rc = pm8xxx_rtc_enable(rtc_dd);
+	if (rc)
 		return rc;
-	}
-
-	if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
-		ctrl_reg |= PM8xxx_RTC_ENABLE;
-		rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
-		if (rc) {
-			dev_err(&pdev->dev,
-				"Write to RTC control register failed\n");
-			return rc;
-		}
-	}
-
-	rtc_dd->ctrl_reg = ctrl_reg;
 
 	platform_set_drvdata(pdev, rtc_dd);
 
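The rework also drops the cached ctrl_reg byte: on pm8941 the alarm-enable bit lives in a separate alarm control register, so one cached copy can no longer describe the hardware, and every update becomes a read-modify-write through regmap under ctrl_reg_lock. Condensed from the alarm_irq_enable path above (sketch, not a complete function):

	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);

	/* Re-read instead of trusting a cached value ... */
	rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
	if (!rc) {
		if (enable)
			ctrl_reg |= regs->alarm_en;
		else
			ctrl_reg &= ~regs->alarm_en;
		/* ... and write only the modified bits back. */
		rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
	}

	spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);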
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a6b1252c9941..806072238c00 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -535,13 +535,15 @@ static int s3c_rtc_probe(struct platform_device *pdev)
 	}
 	clk_prepare_enable(info->rtc_clk);
 
-	info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
-	if (IS_ERR(info->rtc_src_clk)) {
-		dev_err(&pdev->dev, "failed to find rtc source clock\n");
-		return PTR_ERR(info->rtc_src_clk);
+	if (info->data->needs_src_clk) {
+		info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
+		if (IS_ERR(info->rtc_src_clk)) {
+			dev_err(&pdev->dev,
+				"failed to find rtc source clock\n");
+			return PTR_ERR(info->rtc_src_clk);
+		}
+		clk_prepare_enable(info->rtc_src_clk);
 	}
-	clk_prepare_enable(info->rtc_src_clk);
-
 
 	/* check to see if everything is setup correctly */
 	if (info->data->enable)
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 9ce062218de9..e8497144b323 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -288,20 +288,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
 		spin_unlock(&inode->i_lock);
 
 		/* In case the dropping of a reference would nuke next_i. */
-		if ((&next_i->i_sb_list != list) &&
-		    atomic_read(&next_i->i_count)) {
+		while (&next_i->i_sb_list != list) {
 			spin_lock(&next_i->i_lock);
-			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
+			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
+			    atomic_read(&next_i->i_count)) {
 				__iget(next_i);
 				need_iput = next_i;
+				spin_unlock(&next_i->i_lock);
+				break;
 			}
 			spin_unlock(&next_i->i_lock);
+			next_i = list_entry(next_i->i_sb_list.next,
+					    struct inode, i_sb_list);
 		}
 
 		/*
-		 * We can safely drop inode_sb_list_lock here because we hold
-		 * references on both inode and next_i. Also no new inodes
-		 * will be added since the umount has begun.
+		 * We can safely drop inode_sb_list_lock here because either
+		 * we actually hold references on both inode and next_i or
+		 * end of list. Also no new inodes will be added since the
+		 * umount has begun.
 		 */
 		spin_unlock(&inode_sb_list_lock);
 
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 8add6f1030d7..b931e04e3388 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -158,7 +158,7 @@ bail_add:
 		 * NOTE: This dentry already has ->d_op set from
 		 * ocfs2_get_parent() and ocfs2_get_dentry()
 		 */
-		if (ret)
+		if (!IS_ERR_OR_NULL(ret))
 			dentry = ret;
 
 		status = ocfs2_dentry_attach_lock(dentry, inode,
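d_splice_alias() has a three-way return contract: NULL (the passed-in dentry was used), a different dentry (an existing alias), or an ERR_PTR on failure; the old `if (ret)` happily treated an error pointer as a dentry. The checking pattern the fix adopts, spelled out as a sketch:

	struct dentry *ret = d_splice_alias(inode, dentry);

	if (IS_ERR(ret))	/* error encoded in the pointer: don't use it */
		return ret;
	if (ret)		/* an existing alias was found: switch to it */
		dentry = ret;
	/* ret == NULL: the original dentry was spliced in and stays valid */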
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 6b394f0b5148..eeb307985715 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -6,7 +6,8 @@
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern int __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
+extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+				      unsigned long vm_flags);
 
 #define khugepaged_enabled()					\
 	(transparent_hugepage_flags &				\
@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
 	__khugepaged_exit(mm);
 }
 
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+				   unsigned long vm_flags)
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
 		if ((khugepaged_always() ||
-		     (khugepaged_req_madv() &&
-		      vma->vm_flags & VM_HUGEPAGE)) &&
-		    !(vma->vm_flags & VM_NOHUGEPAGE))
+		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
+		    !(vm_flags & VM_NOHUGEPAGE))
 			if (__khugepaged_enter(vma->vm_mm))
 				return -ENOMEM;
 	return 0;
@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
 }
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+				   unsigned long vm_flags)
 {
 	return 0;
 }
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+					     unsigned long vm_flags)
 {
 	return 0;
 }
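The extra vm_flags parameter exists because hugepage_madvise() computes the new flags into *vm_flags before the caller stores them back into vma->vm_flags; testing the stale vma->vm_flags is exactly the madvise-collapse bug being fixed here. Roughly how the madvise path calls it after this change (sketch):

	case MADV_HUGEPAGE:
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/* vma->vm_flags still holds the old value at this point, so
		 * the freshly computed flags are passed in explicitly. */
		if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
			return -ENOMEM;
		break;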
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 19df5d857411..6b75640ef5ab 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -139,48 +139,23 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
-void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
-					 unsigned long *flags);
-
-extern atomic_t memcg_moving;
-
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
-{
-	if (mem_cgroup_disabled())
-		return;
-	rcu_read_lock();
-	*locked = false;
-	if (atomic_read(&memcg_moving))
-		__mem_cgroup_begin_update_page_stat(page, locked, flags);
-}
-
-void __mem_cgroup_end_update_page_stat(struct page *page,
-					unsigned long *flags);
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
-{
-	if (mem_cgroup_disabled())
-		return;
-	if (*locked)
-		__mem_cgroup_end_update_page_stat(page, flags);
-	rcu_read_unlock();
-}
-
-void mem_cgroup_update_page_stat(struct page *page,
-				 enum mem_cgroup_stat_index idx,
-				 int val);
-
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
+					      unsigned long *flags);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
+			      unsigned long flags);
+void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+				 enum mem_cgroup_stat_index idx, int val);
+
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(page, idx, 1);
+	mem_cgroup_update_page_stat(memcg, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(page, idx, -1);
+	mem_cgroup_update_page_stat(memcg, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -315,13 +290,14 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
 					bool *locked, unsigned long *flags)
 {
+	return NULL;
 }
 
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
+					    bool locked, unsigned long flags)
 {
 }
 
@@ -343,12 +319,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
 	return false;
 }
 
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 27eb1bfbe704..b46461116cd2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1235,7 +1235,6 @@ int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
 				struct page *page);
 void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 5b5efae09135..ad2f67054372 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -16,7 +16,7 @@ struct reserved_mem {
 };
 
 struct reserved_mem_ops {
-	void	(*device_init)(struct reserved_mem *rmem,
+	int	(*device_init)(struct reserved_mem *rmem,
 			       struct device *dev);
 	void	(*device_release)(struct reserved_mem *rmem,
 				  struct device *dev);
@@ -28,14 +28,17 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
 	_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
 
 #ifdef CONFIG_OF_RESERVED_MEM
-void of_reserved_mem_device_init(struct device *dev);
+int of_reserved_mem_device_init(struct device *dev);
 void of_reserved_mem_device_release(struct device *dev);
 
 void fdt_init_reserved_mem(void);
 void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
 			       phys_addr_t base, phys_addr_t size);
 #else
-static inline void of_reserved_mem_device_init(struct device *dev) { }
+static inline int of_reserved_mem_device_init(struct device *dev)
+{
+	return -ENOSYS;
+}
 static inline void of_reserved_mem_device_release(struct device *pdev) { }
 
 static inline void fdt_init_reserved_mem(void) { }
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index cf66c5c8458e..3b7408759bdf 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -35,7 +35,7 @@ config GCOV_KERNEL
 config GCOV_PROFILE_ALL
 	bool "Profile entire Kernel"
 	depends on GCOV_KERNEL
-	depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM
+	depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM || ARM64
 	default n
 	---help---
 	This options activates profiling for the entire kernel.
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 8637e041a247..80f7a6d00519 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -196,12 +196,34 @@ int __request_module(bool wait, const char *fmt, ...)
 EXPORT_SYMBOL(__request_module);
 #endif /* CONFIG_MODULES */
 
+static void call_usermodehelper_freeinfo(struct subprocess_info *info)
+{
+	if (info->cleanup)
+		(*info->cleanup)(info);
+	kfree(info);
+}
+
+static void umh_complete(struct subprocess_info *sub_info)
+{
+	struct completion *comp = xchg(&sub_info->complete, NULL);
+	/*
+	 * See call_usermodehelper_exec(). If xchg() returns NULL
+	 * we own sub_info, the UMH_KILLABLE caller has gone away
+	 * or the caller used UMH_NO_WAIT.
+	 */
+	if (comp)
+		complete(comp);
+	else
+		call_usermodehelper_freeinfo(sub_info);
+}
+
 /*
  * This is the task which runs the usermode application
  */
 static int ____call_usermodehelper(void *data)
 {
 	struct subprocess_info *sub_info = data;
+	int wait = sub_info->wait & ~UMH_KILLABLE;
 	struct cred *new;
 	int retval;
 
@@ -221,7 +243,7 @@ static int ____call_usermodehelper(void *data)
 	retval = -ENOMEM;
 	new = prepare_kernel_cred(current);
 	if (!new)
-		goto fail;
+		goto out;
 
 	spin_lock(&umh_sysctl_lock);
 	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
@@ -233,7 +255,7 @@ static int ____call_usermodehelper(void *data)
 		retval = sub_info->init(sub_info, new);
 		if (retval) {
 			abort_creds(new);
-			goto fail;
+			goto out;
 		}
 	}
 
@@ -242,12 +264,13 @@ static int ____call_usermodehelper(void *data)
 	retval = do_execve(getname_kernel(sub_info->path),
 			   (const char __user *const __user *)sub_info->argv,
 			   (const char __user *const __user *)sub_info->envp);
+out:
+	sub_info->retval = retval;
+	/* wait_for_helper() will call umh_complete if UMH_WAIT_PROC. */
+	if (wait != UMH_WAIT_PROC)
+		umh_complete(sub_info);
 	if (!retval)
 		return 0;
-
-	/* Exec failed? */
-fail:
-	sub_info->retval = retval;
 	do_exit(0);
 }
 
@@ -258,26 +281,6 @@ static int call_helper(void *data)
 	return ____call_usermodehelper(data);
 }
 
-static void call_usermodehelper_freeinfo(struct subprocess_info *info)
-{
-	if (info->cleanup)
-		(*info->cleanup)(info);
-	kfree(info);
-}
-
-static void umh_complete(struct subprocess_info *sub_info)
-{
-	struct completion *comp = xchg(&sub_info->complete, NULL);
-	/*
-	 * See call_usermodehelper_exec(). If xchg() returns NULL
-	 * we own sub_info, the UMH_KILLABLE caller has gone away.
-	 */
-	if (comp)
-		complete(comp);
-	else
-		call_usermodehelper_freeinfo(sub_info);
-}
-
 /* Keventd can't block, but this (a child) can. */
 static int wait_for_helper(void *data)
 {
@@ -336,18 +339,8 @@ static void __call_usermodehelper(struct work_struct *work)
 		kmod_thread_locker = NULL;
 	}
 
-	switch (wait) {
-	case UMH_NO_WAIT:
-		call_usermodehelper_freeinfo(sub_info);
-		break;
-
-	case UMH_WAIT_PROC:
-		if (pid > 0)
-			break;
-		/* FALLTHROUGH */
-	case UMH_WAIT_EXEC:
-		if (pid < 0)
-			sub_info->retval = pid;
+	if (pid < 0) {
+		sub_info->retval = pid;
 		umh_complete(sub_info);
 	}
 }
@@ -588,7 +581,12 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 		goto out;
 	}
 
-	sub_info->complete = &done;
+	/*
+	 * Set the completion pointer only if there is a waiter.
+	 * This makes it possible to use umh_complete to free
+	 * the data structure in case of UMH_NO_WAIT.
+	 */
+	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
 	sub_info->wait = wait;
 
 	queue_work(khelper_wq, &sub_info->work);
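The heart of the fix is that the atomic xchg() in umh_complete() now also covers UMH_NO_WAIT: whichever side swaps the completion pointer to NULL first owns the final action, so the structure is freed exactly once. A self-contained userspace rendering of that handoff (POSIX semaphores stand in for struct completion; illustrative only):

#include <semaphore.h>
#include <stdatomic.h>
#include <stdlib.h>

struct sub_info {
	_Atomic(sem_t *) complete;	/* NULL when nobody is waiting */
	int retval;
};

static void umh_complete(struct sub_info *s)
{
	/* Exactly one caller sees the non-NULL value; the other sees NULL. */
	sem_t *comp = atomic_exchange(&s->complete, NULL);

	if (comp)
		sem_post(comp);	/* a waiter exists; it consumes and frees s */
	else
		free(s);	/* UMH_NO_WAIT or the waiter was killed: we free */
}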
diff --git a/lib/bitmap.c b/lib/bitmap.c
index cd250a2e14cb..b499ab6ada29 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
 		lower = src[off + k];
 		if (left && off + k == lim - 1)
 			lower &= mask;
-		dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
+		dst[k] = lower >> rem;
+		if (rem)
+			dst[k] |= upper << (BITS_PER_LONG - rem);
 		if (left && k == lim - 1)
 			dst[k] &= mask;
 	}
@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
 		upper = src[k];
 		if (left && k == lim - 1)
 			upper &= (1UL << left) - 1;
-		dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
+		dst[k + off] = upper << rem;
+		if (rem)
+			dst[k + off] |= lower >> (BITS_PER_LONG - rem);
 		if (left && k + off == lim - 1)
 			dst[k + off] &= (1UL << left) - 1;
 	}
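The removed expressions shift by BITS_PER_LONG - rem, which equals the full word width when rem == 0; in C, shifting by a count greater than or equal to the type's width is undefined behavior (C11 6.5.7p3), not a guaranteed zero, and some architectures really do produce the unshifted value. A self-contained illustration of the guarded form:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Combine two source words shifted right by 'rem' bits while keeping
 * every shift count inside [0, BITS_PER_LONG - 1]. */
static unsigned long merge_shift_right(unsigned long upper,
				       unsigned long lower, int rem)
{
	unsigned long dst = lower >> rem;

	if (rem)	/* guard: 'upper << BITS_PER_LONG' would be UB */
		dst |= upper << (BITS_PER_LONG - rem);
	return dst;
}

int main(void)
{
	printf("%#lx\n", merge_shift_right(0x1UL, 0xf0UL, 4));	/* 0x100000000000000f on 64-bit */
	printf("%#lx\n", merge_shift_right(0x1UL, 0xf0UL, 0));	/* 0xf0, and no UB */
	return 0;
}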
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index b3cbe19f71b5..fcad8322ef36 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -68,11 +68,13 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 		 * to be released by the balloon driver.
 		 */
 		if (trylock_page(page)) {
+#ifdef CONFIG_BALLOON_COMPACTION
 			if (!PagePrivate(page)) {
 				/* raced with isolation */
 				unlock_page(page);
 				continue;
 			}
+#endif
 			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 			balloon_page_delete(page);
 			__count_vm_event(BALLOON_DEFLATE);
diff --git a/mm/compaction.c b/mm/compaction.c
index edba18aed173..ec74cf0123ef 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -784,6 +784,9 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 			cc->nr_migratepages = 0;
 			break;
 		}
+
+		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+			break;
 	}
 	acct_isolated(cc->zone, cc);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 74c78aa8bc2f..de984159cf0b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -200,7 +200,7 @@ retry:
 	preempt_disable();
 	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
 		preempt_enable();
-		__free_page(zero_page);
+		__free_pages(zero_page, compound_order(zero_page));
 		goto retry;
 	}
 
@@ -232,7 +232,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
 		struct page *zero_page = xchg(&huge_zero_page, NULL);
 		BUG_ON(zero_page == NULL);
-		__free_page(zero_page);
+		__free_pages(zero_page, compound_order(zero_page));
 		return HPAGE_PMD_NR;
 	}
 
@@ -803,7 +803,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_FALLBACK;
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
-	if (unlikely(khugepaged_enter(vma)))
+	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
 		return VM_FAULT_OOM;
 	if (!(flags & FAULT_FLAG_WRITE) &&
 			transparent_hugepage_use_zero_page()) {
@@ -1970,7 +1970,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		 * register it here without waiting a page fault that
 		 * may not happen any time soon.
 		 */
-		if (unlikely(khugepaged_enter_vma_merge(vma)))
+		if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
 			return -ENOMEM;
 		break;
 	case MADV_NOHUGEPAGE:
@@ -2071,7 +2071,8 @@ int __khugepaged_enter(struct mm_struct *mm)
 	return 0;
 }
 
-int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+			       unsigned long vm_flags)
 {
 	unsigned long hstart, hend;
 	if (!vma->anon_vma)
@@ -2083,11 +2084,11 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
 	if (vma->vm_ops)
 		/* khugepaged not yet working on file or special mappings */
 		return 0;
-	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (hstart < hend)
-		return khugepaged_enter(vma);
+		return khugepaged_enter(vma, vm_flags);
 	return 0;
 }
 
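__free_page() is simply the order-0 case; a compound allocation must be released with the order it was allocated with, which compound_order() recovers from the page itself. Paired with the allocation site in the same file (simplified sketch):

	/* allocation: one compound page of HPAGE_PMD_ORDER */
	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
				HPAGE_PMD_ORDER);
	/* ... */
	/* release: must name the same order, or the tail pages leak */
	__free_pages(zero_page, compound_order(zero_page));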
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 23976fd885fd..d6ac0e33e150 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1536,12 +1536,8 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
  * start move here.
  */
 
-/* for quick checking without looking up memcg */
-atomic_t memcg_moving __read_mostly;
-
 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
 {
-	atomic_inc(&memcg_moving);
 	atomic_inc(&memcg->moving_account);
 	synchronize_rcu();
 }
@@ -1552,10 +1548,8 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
 	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
 	 * We check NULL in callee rather than caller.
 	 */
-	if (memcg) {
-		atomic_dec(&memcg_moving);
+	if (memcg)
 		atomic_dec(&memcg->moving_account);
-	}
 }
 
 /*
@@ -2204,41 +2198,52 @@ cleanup:
 	return true;
 }
 
-/*
- * Used to update mapped file or writeback or other statistics.
+/**
+ * mem_cgroup_begin_page_stat - begin a page state statistics transaction
+ * @page: page that is going to change accounted state
+ * @locked: &memcg->move_lock slowpath was taken
+ * @flags: IRQ-state flags for &memcg->move_lock
  *
- * Notes: Race condition
+ * This function must mark the beginning of an accounted page state
+ * change to prevent double accounting when the page is concurrently
+ * being moved to another memcg:
  *
- * Charging occurs during page instantiation, while the page is
- * unmapped and locked in page migration, or while the page table is
- * locked in THP migration. No race is possible.
+ *   memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+ *   if (TestClearPageState(page))
+ *     mem_cgroup_update_page_stat(memcg, state, -1);
+ *   mem_cgroup_end_page_stat(memcg, locked, flags);
  *
- * Uncharge happens to pages with zero references, no race possible.
+ * The RCU lock is held throughout the transaction. The fast path can
+ * get away without acquiring the memcg->move_lock (@locked is false)
+ * because page moving starts with an RCU grace period.
  *
- * Charge moving between groups is protected by checking mm->moving
- * account and taking the move_lock in the slowpath.
+ * The RCU lock also protects the memcg from being freed when the page
+ * state that is going to change is the only thing preventing the page
+ * from being uncharged. E.g. end-writeback clearing PageWriteback(),
+ * which allows migration to go ahead and uncharge the page before the
+ * account transaction might be complete.
  */
-
-void __mem_cgroup_begin_update_page_stat(struct page *page,
-					 bool *locked, unsigned long *flags)
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
+					      bool *locked,
+					      unsigned long *flags)
 {
 	struct mem_cgroup *memcg;
 	struct page_cgroup *pc;
 
+	rcu_read_lock();
+
+	if (mem_cgroup_disabled())
+		return NULL;
+
 	pc = lookup_page_cgroup(page);
 again:
 	memcg = pc->mem_cgroup;
 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
-		return;
-	/*
-	 * If this memory cgroup is not under account moving, we don't
-	 * need to take move_lock_mem_cgroup(). Because we already hold
-	 * rcu_read_lock(), any calls to move_account will be delayed until
-	 * rcu_read_unlock().
-	 */
-	VM_BUG_ON(!rcu_read_lock_held());
+		return NULL;
+
+	*locked = false;
 	if (atomic_read(&memcg->moving_account) <= 0)
-		return;
+		return memcg;
 
 	move_lock_mem_cgroup(memcg, flags);
 	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
@@ -2246,36 +2251,40 @@ again:
2246 goto again; 2251 goto again;
2247 } 2252 }
2248 *locked = true; 2253 *locked = true;
2254
2255 return memcg;
2249} 2256}
2250 2257
2251void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags) 2258/**
2259 * mem_cgroup_end_page_stat - finish a page state statistics transaction
2260 * @memcg: the memcg that was accounted against
2261 * @locked: value received from mem_cgroup_begin_page_stat()
2262 * @flags: value received from mem_cgroup_begin_page_stat()
2263 */
2264void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
2265 unsigned long flags)
2252{ 2266{
2253 struct page_cgroup *pc = lookup_page_cgroup(page); 2267 if (memcg && locked)
2268 move_unlock_mem_cgroup(memcg, &flags);
2254 2269
2255 /* 2270 rcu_read_unlock();
2256 * It's guaranteed that pc->mem_cgroup never changes while
2257 * lock is held because a routine modifies pc->mem_cgroup
2258 * should take move_lock_mem_cgroup().
2259 */
2260 move_unlock_mem_cgroup(pc->mem_cgroup, flags);
2261} 2271}
2262 2272
2263void mem_cgroup_update_page_stat(struct page *page, 2273/**
2274 * mem_cgroup_update_page_stat - update page state statistics
2275 * @memcg: memcg to account against
2276 * @idx: page state item to account
2277 * @val: number of pages (positive or negative)
2278 *
2279 * See mem_cgroup_begin_page_stat() for locking requirements.
2280 */
2281void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
2264 enum mem_cgroup_stat_index idx, int val) 2282 enum mem_cgroup_stat_index idx, int val)
2265{ 2283{
2266 struct mem_cgroup *memcg;
2267 struct page_cgroup *pc = lookup_page_cgroup(page);
2268 unsigned long uninitialized_var(flags);
2269
2270 if (mem_cgroup_disabled())
2271 return;
2272
2273 VM_BUG_ON(!rcu_read_lock_held()); 2284 VM_BUG_ON(!rcu_read_lock_held());
2274 memcg = pc->mem_cgroup;
2275 if (unlikely(!memcg || !PageCgroupUsed(pc)))
2276 return;
2277 2285
2278 this_cpu_add(memcg->stat->count[idx], val); 2286 if (memcg)
2287 this_cpu_add(memcg->stat->count[idx], val);
2279} 2288}
2280 2289
2281/* 2290/*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 29d8693d0c61..252e1dbbed86 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1912,7 +1912,6 @@ void try_offline_node(int nid)
 	unsigned long start_pfn = pgdat->node_start_pfn;
 	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
 	unsigned long pfn;
-	struct page *pgdat_page = virt_to_page(pgdat);
 	int i;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
@@ -1941,10 +1940,6 @@ void try_offline_node(int nid)
 	node_set_offline(nid);
 	unregister_one_node(nid);
 
-	if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page))
-		/* node data is allocated from boot memory */
-		return;
-
 	/* free waittable in each zone */
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		struct zone *zone = pgdat->node_zones + i;
diff --git a/mm/mmap.c b/mm/mmap.c
index 7f855206e7fb..87e82b38453c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1080,7 +1080,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 					end, prev->vm_pgoff, NULL);
 		if (err)
 			return NULL;
-		khugepaged_enter_vma_merge(prev);
+		khugepaged_enter_vma_merge(prev, vm_flags);
 		return prev;
 	}
 
@@ -1099,7 +1099,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 					next->vm_pgoff - pglen, NULL);
 		if (err)
 			return NULL;
-		khugepaged_enter_vma_merge(area);
+		khugepaged_enter_vma_merge(area, vm_flags);
 		return area;
 	}
 
@@ -2208,7 +2208,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		}
 	}
 	vma_unlock_anon_vma(vma);
-	khugepaged_enter_vma_merge(vma);
+	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(vma->vm_mm);
 	return error;
 }
@@ -2277,7 +2277,7 @@ int expand_downwards(struct vm_area_struct *vma,
 		}
 	}
 	vma_unlock_anon_vma(vma);
-	khugepaged_enter_vma_merge(vma);
+	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(vma->vm_mm);
 	return error;
 }
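All four call sites now pass the vm_flags the region will end up with: at vma_merge() time the existing vma's flags may not yet include a VM_HUGEPAGE or VM_NOHUGEPAGE bit that madvise() is in the middle of applying, so deciding hugepage eligibility off vma->vm_flags could miss the collapse. A hedged reconstruction of the shape of the updated check (the real helper lives in include/linux/khugepaged.h; the body below is a simplified sketch, not quoted from the patch):

static inline int khugepaged_enter_sketch(struct vm_area_struct *vma,
					  unsigned long vm_flags)
{
	/* Decide off the caller-supplied flags, not vma->vm_flags,
	 * which may not reflect the madvise() being merged in yet. */
	if ((vm_flags & VM_HUGEPAGE) && !(vm_flags & VM_NOHUGEPAGE))
		return __khugepaged_enter(vma->vm_mm);
	return 0;
}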
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ff24c9d83112..19ceae87522d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2116,23 +2116,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 EXPORT_SYMBOL(account_page_dirtied);
 
 /*
- * Helper function for set_page_writeback family.
- *
- * The caller must hold mem_cgroup_begin/end_update_page_stat() lock
- * while calling this function.
- * See test_set_page_writeback for example.
- *
- * NOTE: Unlike account_page_dirtied this does not rely on being atomic
- * wrt interrupts.
- */
-void account_page_writeback(struct page *page)
-{
-	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-	inc_zone_page_state(page, NR_WRITEBACK);
-}
-EXPORT_SYMBOL(account_page_writeback);
-
-/*
  * For address_spaces which do not use buffers. Just tag the page as dirty in
  * its radix tree.
  *
@@ -2344,11 +2327,12 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
 int test_clear_page_writeback(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
-	int ret;
-	bool locked;
 	unsigned long memcg_flags;
+	struct mem_cgroup *memcg;
+	bool locked;
+	int ret;
 
-	mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
 	if (mapping) {
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
@@ -2369,22 +2353,23 @@ int test_clear_page_writeback(struct page *page)
 		ret = TestClearPageWriteback(page);
 	}
 	if (ret) {
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
+		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
 		dec_zone_page_state(page, NR_WRITEBACK);
 		inc_zone_page_state(page, NR_WRITTEN);
 	}
-	mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
+	mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
 	return ret;
 }
 
 int __test_set_page_writeback(struct page *page, bool keep_write)
 {
 	struct address_space *mapping = page_mapping(page);
-	int ret;
-	bool locked;
 	unsigned long memcg_flags;
+	struct mem_cgroup *memcg;
+	bool locked;
+	int ret;
 
-	mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
 	if (mapping) {
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
@@ -2410,9 +2395,11 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 	} else {
 		ret = TestSetPageWriteback(page);
 	}
-	if (!ret)
-		account_page_writeback(page);
-	mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
+	if (!ret) {
+		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+		inc_zone_page_state(page, NR_WRITEBACK);
+	}
+	mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
 	return ret;
 
 }
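Folding account_page_writeback() into its only caller is what lets the memcg counter update sit inside the page-stat transaction; the comment added to memcontrol.c above spells out the window this closes. As an illustration (the interleaving is a sketch, not a trace):

/*
 * CPU 0: test_clear_page_writeback()    CPU 1: migration/uncharge
 *
 * TestClearPageWriteback(page)
 *                                       page no longer pinned by the
 *                                       writeback state; uncharge runs,
 *                                       page_cgroup points elsewhere
 * mem_cgroup_dec_page_stat(...)         <- without the transaction this
 *                                          lands on a stale memcg
 *
 * With mem_cgroup_begin_page_stat() taken before the flag is cleared,
 * RCU keeps the memcg alive until mem_cgroup_end_page_stat(), so the
 * WRITEBACK counter is decremented on the cgroup that was charged.
 */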
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3708264d2833..5331c2bd85a2 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -171,6 +171,7 @@ static void free_page_cgroup(void *addr)
 			sizeof(struct page_cgroup) * PAGES_PER_SECTION;
 
 		BUG_ON(PageReserved(page));
+		kmemleak_free(addr);
 		free_pages_exact(addr, table_size);
 	}
 }
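The one-liner matters because the page_cgroup tables are registered with kmemleak on the allocation side (assuming alloc_page_cgroup() calls kmemleak_alloc(), which this fix implies): any object registered that way must be unregistered before its backing pages are released, or kmemleak keeps scanning freed memory. A sketch of the required pairing, with example_* names and sizes being illustrative:

#include <linux/kmemleak.h>
#include <linux/mm.h>

static void *example_table_alloc(int nid, size_t size)
{
	void *addr = alloc_pages_exact_nid(nid, size,
					   GFP_KERNEL | __GFP_ZERO);
	if (addr)
		/* min_count = 1: report a leak if the last reference goes */
		kmemleak_alloc(addr, size, 1, GFP_KERNEL);
	return addr;
}

static void example_table_free(void *addr, size_t size)
{
	kmemleak_free(addr);		/* unregister first ... */
	free_pages_exact(addr, size);	/* ... then release the pages */
}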
diff --git a/mm/rmap.c b/mm/rmap.c
index 116a5053415b..19886fb2f13a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1042,15 +1042,46 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-	bool locked;
+	struct mem_cgroup *memcg;
 	unsigned long flags;
+	bool locked;
 
-	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 	}
-	mem_cgroup_end_update_page_stat(page, &locked, &flags);
+	mem_cgroup_end_page_stat(memcg, locked, flags);
+}
+
+static void page_remove_file_rmap(struct page *page)
+{
+	struct mem_cgroup *memcg;
+	unsigned long flags;
+	bool locked;
+
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		goto out;
+
+	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
+	if (unlikely(PageHuge(page)))
+		goto out;
+
+	/*
+	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * these counters are not modified in interrupt context, and
+	 * pte lock(a spinlock) is held, which implies preemption disabled.
+	 */
+	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+
+	if (unlikely(PageMlocked(page)))
+		clear_page_mlock(page);
+out:
+	mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /**
@@ -1061,46 +1092,33 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
-	bool anon = PageAnon(page);
-	bool locked;
-	unsigned long flags;
-
-	/*
-	 * The anon case has no mem_cgroup page_stat to update; but may
-	 * uncharge_page() below, where the lock ordering can deadlock if
-	 * we hold the lock against page_stat move: so avoid it on anon.
-	 */
-	if (!anon)
-		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	if (!PageAnon(page)) {
+		page_remove_file_rmap(page);
+		return;
+	}
 
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+		return;
+
+	/* Hugepages are not counted in NR_ANON_PAGES for now. */
+	if (unlikely(PageHuge(page)))
+		return;
 
 	/*
-	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
-	 * and not charged by memcg for now.
-	 *
 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 	 * these counters are not modified in interrupt context, and
-	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	if (unlikely(PageHuge(page)))
-		goto out;
-	if (anon) {
-		if (PageTransHuge(page))
-			__dec_zone_page_state(page,
-					      NR_ANON_TRANSPARENT_HUGEPAGES);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-				      -hpage_nr_pages(page));
-	} else {
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
-		mem_cgroup_end_update_page_stat(page, &locked, &flags);
-	}
+	if (PageTransHuge(page))
+		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+			      -hpage_nr_pages(page));
+
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
+
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
@@ -1110,10 +1128,6 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-	return;
-out:
-	if (!anon)
-		mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /*
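Both removal paths hinge on atomic_add_negative(-1, &page->_mapcount): _mapcount is biased so that -1 means "unmapped", which makes "the last mapping just went away" a single atomic test. A standalone illustration of the biased counter (the example_* names are ours, not kernel API):

#include <linux/atomic.h>

/* Mirrors page->_mapcount: starts at -1, meaning "no mappings". */
static atomic_t example_mapcount = ATOMIC_INIT(-1);

static bool example_add_mapping(void)
{
	/* true on -1 -> 0, i.e. the first mapping was just added
	 * (the atomic_inc_and_test() in page_add_file_rmap above) */
	return atomic_inc_and_test(&example_mapcount);
}

static bool example_remove_mapping(void)
{
	/* true on 0 -> -1, i.e. the last mapping just went away */
	return atomic_add_negative(-1, &example_mapcount);
}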
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3a6e0cfdf03a..406944207b61 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -93,16 +93,6 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
 			       s->object_size);
 			continue;
 		}
-
-#if !defined(CONFIG_SLUB)
-		if (!strcmp(s->name, name)) {
-			pr_err("%s (%s): Cache name already exists.\n",
-			       __func__, name);
-			dump_stack();
-			s = NULL;
-			return -EINVAL;
-		}
-#endif
 	}
 
 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
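With the #if !defined(CONFIG_SLUB) block gone, kmem_cache_sanity_check() no longer rejects duplicate cache names on any allocator; SLUB has long tolerated them, since it may alias compatible caches anyway, and the scan only runs under CONFIG_DEBUG_VM to begin with. A module sketch of what now succeeds everywhere (names and sizes illustrative, error handling trimmed):

#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *cache_a, *cache_b;

static int __init dup_name_init(void)
{
	cache_a = kmem_cache_create("example_cache", 64, 0, 0, NULL);
	/* Same name, different size: previously -EINVAL under SLAB
	 * with CONFIG_DEBUG_VM; now merely a duplicate name. */
	cache_b = kmem_cache_create("example_cache", 128, 0, 0, NULL);
	return (cache_a && cache_b) ? 0 : -ENOMEM;
}
module_init(dup_name_init);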