-rw-r--r--  Documentation/filesystems/vfat.txt      |  11
-rw-r--r--  Documentation/vm/hugetlbpage.txt        |  10
-rw-r--r--  arch/x86/mm/hugetlbpage.c               |  21
-rw-r--r--  drivers/block/cciss_scsi.c              |  11
-rw-r--r--  drivers/clocksource/cs5535-clockevt.c   |   4
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c            |  84
-rw-r--r--  drivers/rapidio/devices/tsi721.c        |  12
-rw-r--r--  drivers/rtc/rtc-pcf2123.c               |   2
-rw-r--r--  drivers/rtc/rtc-rs5c348.c               |   7
-rw-r--r--  include/linux/compaction.h              |   4
-rw-r--r--  include/linux/string.h                  |   2
-rw-r--r--  mm/compaction.c                         | 156
-rw-r--r--  mm/internal.h                           |   1
-rw-r--r--  mm/mmap.c                               |   2
-rw-r--r--  mm/page_alloc.c                         |  38
-rwxr-xr-x  scripts/checkpatch.pl                   |   3
16 files changed, 258 insertions(+), 110 deletions(-)
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index ead764b2728..de1e6c4dccf 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -137,6 +137,17 @@ errors=panic|continue|remount-ro
 		 without doing anything or remount the partition in
 		 read-only mode (default behavior).
 
+discard       -- If set, issues discard/TRIM commands to the block
+		 device when blocks are freed. This is useful for SSD devices
+		 and sparse/thinly-provisoned LUNs.
+
+nfs           -- This option maintains an index (cache) of directory
+		 inodes by i_logstart which is used by the nfs-related code to
+		 improve look-ups.
+
+		 Enable this only if you want to export the FAT filesystem
+		 over NFS
+
 <bool>: 0,1,yes,no,true,false
 
 TODO
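
The two vfat options documented in the hunk above are ordinary mount options, so they can be exercised directly from userspace. Below is a minimal, hedged sketch using the mount(2) system call; the device node and mount point are hypothetical, and only the "discard,nfs" option string comes from the documentation above.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/*
	 * Hypothetical device and mount point. "discard" asks the driver to
	 * issue discard/TRIM when clusters are freed; "nfs" keeps the
	 * i_logstart directory-inode cache so the filesystem can be exported
	 * over NFS, as described in the documentation hunk above.
	 */
	if (mount("/dev/sdb1", "/mnt/flash", "vfat", 0, "discard,nfs") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}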
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index f8551b3879f..4ac359b7aa1 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -299,11 +299,17 @@ map_hugetlb.c.
 *******************************************************************
 
 /*
- * hugepage-shm: see Documentation/vm/hugepage-shm.c
+ * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c
  */
 
 *******************************************************************
 
 /*
- * hugepage-mmap: see Documentation/vm/hugepage-mmap.c
+ * hugepage-shm: see tools/testing/selftests/vm/hugepage-shm.c
+ */
+
+*******************************************************************
+
+/*
+ * hugepage-mmap: see tools/testing/selftests/vm/hugepage-mmap.c
  */
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f6679a7fb8c..b91e4851242 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
 }
 
 /*
- * search for a shareable pmd page for hugetlb.
+ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
  */
-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+static pte_t *
+huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
 	struct vm_area_struct *vma = find_vma(mm, addr);
 	struct address_space *mapping = vma->vm_file->f_mapping;
@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	struct vm_area_struct *svma;
 	unsigned long saddr;
 	pte_t *spte = NULL;
+	pte_t *pte;
 
 	if (!vma_shareable(vma, addr))
-		return;
+		return (pte_t *)pmd_alloc(mm, pud, addr);
 
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 		put_page(virt_to_page(spte));
 	spin_unlock(&mm->page_table_lock);
 out:
+	pte = (pte_t *)pmd_alloc(mm, pud, addr);
 	mutex_unlock(&mapping->i_mmap_mutex);
+	return pte;
 }
 
 /*
@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 		} else {
 			BUG_ON(sz != PMD_SIZE);
 			if (pud_none(*pud))
-				huge_pmd_share(mm, addr, pud);
-			pte = (pte_t *) pmd_alloc(mm, pud, addr);
+				pte = huge_pmd_share(mm, addr, pud);
+			else
+				pte = (pte_t *)pmd_alloc(mm, pud, addr);
 		}
 	}
 	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index acda773b372..38aa6dda6b8 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
 	{
 	  case CMD_TARGET_STATUS:
 		/* Pass it up to the upper layers... */
-		if( ei->ScsiStatus)
-		{
-#if 0
-			printk(KERN_WARNING "cciss: cmd %p "
-				"has SCSI Status = %x\n",
-				c, ei->ScsiStatus);
-#endif
-			cmd->result |= (ei->ScsiStatus << 1);
-		}
-		else { /* scsi status is zero??? How??? */
+		if (!ei->ScsiStatus) {
 
 			/* Ordinarily, this case should never happen, but there is a bug
 			   in some released firmware revisions that allows it to happen
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 540795cd076..d9279385304 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -53,7 +53,7 @@ static struct cs5535_mfgpt_timer *cs5535_event_clock;
 #define MFGPT_PERIODIC (MFGPT_HZ / HZ)
 
 /*
- * The MFPGT timers on the CS5536 provide us with suitable timers to use
+ * The MFGPT timers on the CS5536 provide us with suitable timers to use
  * as clock event sources - not as good as a HPET or APIC, but certainly
  * better than the PIT. This isn't a general purpose MFGPT driver, but
  * a simplified one designed specifically to act as a clock event source.
@@ -144,7 +144,7 @@ static int __init cs5535_mfgpt_init(void)
 
 	timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
 	if (!timer) {
-		printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n");
+		printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n");
 		return -ENODEV;
 	}
 	cs5535_event_clock = timer;
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 87b251ab6ec..b9e2000969f 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,6 +18,8 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <asm/uv/uv_hub.h>
@@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
 				 XPC_NOTIFY_MSG_SIZE_UV)
 #define XPC_NOTIFY_IRQ_NAME	"xpc_notify"
 
+static int xpc_mq_node = -1;
+
 static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
 static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
 
@@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
 #if defined CONFIG_X86_64
 	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
 			UV_AFFINITY_CPU);
-	if (mq->irq < 0) {
-		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
-			-mq->irq);
+	if (mq->irq < 0)
 		return mq->irq;
-	}
 
 	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
 
@@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
 	mq->mmr_blade = uv_cpu_to_blade_id(cpu);
 
 	nid = cpu_to_node(cpu);
-	page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
-				pg_order);
+	page = alloc_pages_exact_node(nid,
+				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				      pg_order);
 	if (page == NULL) {
 		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
 			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
@@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
 	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
 };
 
+static int
+xpc_init_mq_node(int nid)
+{
+	int cpu;
+
+	get_online_cpus();
+
+	for_each_cpu(cpu, cpumask_of_node(nid)) {
+		xpc_activate_mq_uv =
+			xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
+					     XPC_ACTIVATE_IRQ_NAME,
+					     xpc_handle_activate_IRQ_uv);
+		if (!IS_ERR(xpc_activate_mq_uv))
+			break;
+	}
+	if (IS_ERR(xpc_activate_mq_uv)) {
+		put_online_cpus();
+		return PTR_ERR(xpc_activate_mq_uv);
+	}
+
+	for_each_cpu(cpu, cpumask_of_node(nid)) {
+		xpc_notify_mq_uv =
+			xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
+					     XPC_NOTIFY_IRQ_NAME,
+					     xpc_handle_notify_IRQ_uv);
+		if (!IS_ERR(xpc_notify_mq_uv))
+			break;
+	}
+	if (IS_ERR(xpc_notify_mq_uv)) {
+		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+		put_online_cpus();
+		return PTR_ERR(xpc_notify_mq_uv);
+	}
+
+	put_online_cpus();
+	return 0;
+}
+
 int
 xpc_init_uv(void)
 {
+	int nid;
+	int ret = 0;
+
 	xpc_arch_ops = xpc_arch_ops_uv;
 
 	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
@@ -1742,21 +1785,21 @@ xpc_init_uv(void)
 		return -E2BIG;
 	}
 
-	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
-						  XPC_ACTIVATE_IRQ_NAME,
-						  xpc_handle_activate_IRQ_uv);
-	if (IS_ERR(xpc_activate_mq_uv))
-		return PTR_ERR(xpc_activate_mq_uv);
+	if (xpc_mq_node < 0)
+		for_each_online_node(nid) {
+			ret = xpc_init_mq_node(nid);
 
-	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
-						XPC_NOTIFY_IRQ_NAME,
-						xpc_handle_notify_IRQ_uv);
-	if (IS_ERR(xpc_notify_mq_uv)) {
-		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
-		return PTR_ERR(xpc_notify_mq_uv);
-	}
+			if (!ret)
+				break;
+		}
+	else
+		ret = xpc_init_mq_node(xpc_mq_node);
 
-	return 0;
+	if (ret < 0)
+		dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
+			-ret);
+
+	return ret;
 }
 
 void
@@ -1765,3 +1808,6 @@ xpc_exit_uv(void)
 	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
 	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
 }
+
+module_param(xpc_mq_node, int, 0);
+MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 722246cf20a..5d44252b734 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -435,6 +435,9 @@ static void tsi721_db_dpc(struct work_struct *work)
435 " info %4.4x\n", DBELL_SID(idb.bytes), 435 " info %4.4x\n", DBELL_SID(idb.bytes),
436 DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); 436 DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
437 } 437 }
438
439 wr_ptr = ioread32(priv->regs +
440 TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
438 } 441 }
439 442
440 iowrite32(rd_ptr & (IDB_QSIZE - 1), 443 iowrite32(rd_ptr & (IDB_QSIZE - 1),
@@ -445,6 +448,10 @@ static void tsi721_db_dpc(struct work_struct *work)
 		regval |= TSI721_SR_CHINT_IDBQRCV;
 		iowrite32(regval,
 			priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+
+	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+	if (wr_ptr != rd_ptr)
+		schedule_work(&priv->idb_work);
 }
 
 /**
@@ -2212,7 +2219,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
 				  const struct pci_device_id *id)
 {
 	struct tsi721_device *priv;
-	int i, cap;
+	int cap;
 	int err;
 	u32 regval;
 
@@ -2232,12 +2239,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
 	priv->pdev = pdev;
 
 #ifdef DEBUG
+	{
+	int i;
 	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
 		dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
 			i, (unsigned long long)pci_resource_start(pdev, i),
 			(unsigned long)pci_resource_len(pdev, i),
 			pci_resource_flags(pdev, i));
 	}
+	}
 #endif
 	/*
 	 * Verify BAR configuration
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 836118795c0..13e4df63974 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -43,6 +43,7 @@
 #include <linux/rtc.h>
 #include <linux/spi/spi.h>
 #include <linux/module.h>
+#include <linux/sysfs.h>
 
 #define DRV_VERSION "0.6"
 
@@ -292,6 +293,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi)
 	pdata->rtc = rtc;
 
 	for (i = 0; i < 16; i++) {
+		sysfs_attr_init(&pdata->regs[i].attr.attr);
 		sprintf(pdata->regs[i].name, "%1x", i);
 		pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
 		pdata->regs[i].attr.attr.name = pdata->regs[i].name;
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 77074ccd285..fd5c7af04ae 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
 	tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
 	tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
 	if (!pdata->rtc_24h) {
-		tm->tm_hour %= 12;
-		if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
+		if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
+			tm->tm_hour -= 20;
+			tm->tm_hour %= 12;
 			tm->tm_hour += 12;
+		} else
+			tm->tm_hour %= 12;
 	}
 	tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
 	tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
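
The rtc-rs5c348 hunk above reorders the 12-hour conversion so the hour is only folded after the PM offset is removed. A minimal standalone sketch of that arithmetic follows; it assumes (as the "-= 20" in the patch suggests) that in 12-hour mode the decoded hour reads back as hour + 20 when the PM flag is set, which the hunk itself does not spell out.

#include <stdio.h>

/* Hypothetical helper mirroring the patched conversion; not driver code. */
static int rs5c348_style_hour(int raw_hour, int pm)
{
	int hour = raw_hour;

	if (pm) {
		hour -= 20;	/* assumed PM offset in the decoded value */
		hour %= 12;
		hour += 12;	/* 1-11 PM -> 13-23, 12 PM stays 12 */
	} else {
		hour %= 12;	/* 12 AM -> 0 */
	}
	return hour;
}

int main(void)
{
	printf("raw 29, PM -> %d\n", rs5c348_style_hour(29, 1));	/* 21 */
	printf("raw 32, PM -> %d\n", rs5c348_style_hour(32, 1));	/* 12 */
	printf("raw 12, AM -> %d\n", rs5c348_style_hour(12, 0));	/* 0 */
	return 0;
}

Under those assumptions the old order of operations (fold to 0-11 first, add 12 afterwards) would have turned 9 PM into 17 instead of 21, which is the bug the hunk fixes.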
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 133ddcf8339..ef658147e4e 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *mask,
-			bool sync);
+			bool sync, bool *contended);
 extern int compact_pgdat(pg_data_t *pgdat, int order);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
 
@@ -64,7 +64,7 @@ static inline bool compaction_deferred(struct zone *zone, int order)
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
-			bool sync)
+			bool sync, bool *contended)
 {
 	return COMPACT_CONTINUE;
 }
diff --git a/include/linux/string.h b/include/linux/string.h
index ffe0442e18d..b9178812d9d 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -144,8 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
 {
 	return strncmp(str, prefix, strlen(prefix)) == 0;
 }
-#endif
 
 extern size_t memweight(const void *ptr, size_t bytes);
 
+#endif /* __KERNEL__ */
 #endif /* _LINUX_STRING_H_ */
diff --git a/mm/compaction.c b/mm/compaction.c
index e78cb968842..7fcd3a52e68 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -51,6 +51,47 @@ static inline bool migrate_async_suitable(int migratetype)
 }
 
 /*
+ * Compaction requires the taking of some coarse locks that are potentially
+ * very heavily contended. Check if the process needs to be scheduled or
+ * if the lock is contended. For async compaction, back out in the event
+ * if contention is severe. For sync compaction, schedule.
+ *
+ * Returns true if the lock is held.
+ * Returns false if the lock is released and compaction should abort
+ */
+static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
+				      bool locked, struct compact_control *cc)
+{
+	if (need_resched() || spin_is_contended(lock)) {
+		if (locked) {
+			spin_unlock_irqrestore(lock, *flags);
+			locked = false;
+		}
+
+		/* async aborts if taking too long or contended */
+		if (!cc->sync) {
+			if (cc->contended)
+				*cc->contended = true;
+			return false;
+		}
+
+		cond_resched();
+		if (fatal_signal_pending(current))
+			return false;
+	}
+
+	if (!locked)
+		spin_lock_irqsave(lock, *flags);
+	return true;
+}
+
+static inline bool compact_trylock_irqsave(spinlock_t *lock,
+			unsigned long *flags, struct compact_control *cc)
+{
+	return compact_checklock_irqsave(lock, flags, false, cc);
+}
+
+/*
  * Isolate free pages onto a private freelist. Caller must hold zone->lock.
  * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
  * pages inside of the pageblock (even though it may still end up isolating
@@ -173,7 +214,7 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 }
 
 /* Update the number of anon and file isolated pages in the zone */
-static void acct_isolated(struct zone *zone, struct compact_control *cc)
+static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
 {
 	struct page *page;
 	unsigned int count[2] = { 0, };
@@ -181,8 +222,14 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
 	list_for_each_entry(page, &cc->migratepages, lru)
 		count[!!page_is_file_cache(page)]++;
 
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+	/* If locked we can use the interrupt unsafe versions */
+	if (locked) {
+		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
+		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+	} else {
+		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
+		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+	}
 }
 
 /* Similar to reclaim, but different enough that they don't share logic */
@@ -228,6 +275,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	struct list_head *migratelist = &cc->migratepages;
 	isolate_mode_t mode = 0;
 	struct lruvec *lruvec;
+	unsigned long flags;
+	bool locked;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -247,25 +296,22 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 	/* Time to isolate some pages for migration */
 	cond_resched();
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irqsave(&zone->lru_lock, flags);
+	locked = true;
 	for (; low_pfn < end_pfn; low_pfn++) {
 		struct page *page;
-		bool locked = true;
 
 		/* give a chance to irqs before checking need_resched() */
 		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
-			spin_unlock_irq(&zone->lru_lock);
+			spin_unlock_irqrestore(&zone->lru_lock, flags);
 			locked = false;
 		}
-		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
-			if (locked)
-				spin_unlock_irq(&zone->lru_lock);
-			cond_resched();
-			spin_lock_irq(&zone->lru_lock);
-			if (fatal_signal_pending(current))
-				break;
-		} else if (!locked)
-			spin_lock_irq(&zone->lru_lock);
+
+		/* Check if it is ok to still hold the lock */
+		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
+								locked, cc);
+		if (!locked)
+			break;
 
 		/*
 		 * migrate_pfn does not necessarily start aligned to a
@@ -349,9 +395,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		}
 	}
 
-	acct_isolated(zone, cc);
+	acct_isolated(zone, locked, cc);
 
-	spin_unlock_irq(&zone->lru_lock);
+	if (locked)
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
@@ -384,6 +431,20 @@ static bool suitable_migration_target(struct page *page)
 }
 
 /*
+ * Returns the start pfn of the last page block in a zone. This is the starting
+ * point for full compaction of a zone. Compaction searches for free pages from
+ * the end of each zone, while isolate_freepages_block scans forward inside each
+ * page block.
+ */
+static unsigned long start_free_pfn(struct zone *zone)
+{
+	unsigned long free_pfn;
+	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	free_pfn &= ~(pageblock_nr_pages-1);
+	return free_pfn;
+}
+
+/*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
  */
@@ -422,17 +483,6 @@ static void isolate_freepages(struct zone *zone,
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
 
-		/*
-		 * Skip ahead if another thread is compacting in the area
-		 * simultaneously. If we wrapped around, we can only skip
-		 * ahead if zone->compact_cached_free_pfn also wrapped to
-		 * above our starting point.
-		 */
-		if (cc->order > 0 && (!cc->wrapped ||
-		    zone->compact_cached_free_pfn >
-		    cc->start_free_pfn))
-			pfn = min(pfn, zone->compact_cached_free_pfn);
-
 		if (!pfn_valid(pfn))
 			continue;
 
@@ -458,7 +508,16 @@ static void isolate_freepages(struct zone *zone,
 		 * are disabled
 		 */
 		isolated = 0;
-		spin_lock_irqsave(&zone->lock, flags);
+
+		/*
+		 * The zone lock must be held to isolate freepages. This
+		 * unfortunately this is a very coarse lock and can be
+		 * heavily contended if there are parallel allocations
+		 * or parallel compactions. For async compaction do not
+		 * spin on the lock
+		 */
+		if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
+			break;
 		if (suitable_migration_target(page)) {
 			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
 			isolated = isolate_freepages_block(pfn, end_pfn,
@@ -474,7 +533,15 @@ static void isolate_freepages(struct zone *zone,
 		 */
 		if (isolated) {
 			high_pfn = max(high_pfn, pfn);
-			if (cc->order > 0)
+
+			/*
+			 * If the free scanner has wrapped, update
+			 * compact_cached_free_pfn to point to the highest
+			 * pageblock with free pages. This reduces excessive
+			 * scanning of full pageblocks near the end of the
+			 * zone
+			 */
+			if (cc->order > 0 && cc->wrapped)
 				zone->compact_cached_free_pfn = high_pfn;
 		}
 	}
@@ -484,6 +551,11 @@ static void isolate_freepages(struct zone *zone,
 
 	cc->free_pfn = high_pfn;
 	cc->nr_freepages = nr_freepages;
+
+	/* If compact_cached_free_pfn is reset then set it now */
+	if (cc->order > 0 && !cc->wrapped &&
+	    zone->compact_cached_free_pfn == start_free_pfn(zone))
+		zone->compact_cached_free_pfn = high_pfn;
 }
 
 /*
@@ -570,20 +642,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	return ISOLATE_SUCCESS;
 }
 
-/*
- * Returns the start pfn of the last page block in a zone. This is the starting
- * point for full compaction of a zone. Compaction searches for free pages from
- * the end of each zone, while isolate_freepages_block scans forward inside each
- * page block.
- */
-static unsigned long start_free_pfn(struct zone *zone)
-{
-	unsigned long free_pfn;
-	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	free_pfn &= ~(pageblock_nr_pages-1);
-	return free_pfn;
-}
-
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
@@ -771,7 +829,7 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
 				 int order, gfp_t gfp_mask,
-				 bool sync)
+				 bool sync, bool *contended)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -780,6 +838,7 @@ static unsigned long compact_zone_order(struct zone *zone,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
 		.sync = sync,
+		.contended = contended,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -801,7 +860,7 @@ int sysctl_extfrag_threshold = 500;
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
-			bool sync)
+			bool sync, bool *contended)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
@@ -825,7 +884,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 								nodemask) {
 		int status;
 
-		status = compact_zone_order(zone, order, gfp_mask, sync);
+		status = compact_zone_order(zone, order, gfp_mask, sync,
+						contended);
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
@@ -861,7 +921,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
 		if (cc->order > 0) {
 			int ok = zone_watermark_ok(zone, cc->order,
 						low_wmark_pages(zone), 0, 0);
-			if (ok && cc->order > zone->compact_order_failed)
+			if (ok && cc->order >= zone->compact_order_failed)
 				zone->compact_order_failed = cc->order + 1;
 			/* Currently async compaction is never deferred. */
 			else if (!ok && cc->sync)
diff --git a/mm/internal.h b/mm/internal.h
index 3314f79d775..b8c91b342e2 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -130,6 +130,7 @@ struct compact_control {
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
+	bool *contended;		/* True if a lock was contended */
 };
 
 unsigned long
diff --git a/mm/mmap.c b/mm/mmap.c
index e3e86914f11..9adee9fc0d8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2309,7 +2309,7 @@ void exit_mmap(struct mm_struct *mm)
 	}
 	vm_unacct_memory(nr_accounted);
 
-	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+	WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 
 /* Insert vm structure into process list sorted by address
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 009ac285fea..c66fb875104 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1928,6 +1928,17 @@ this_zone_full:
 		zlc_active = 0;
 		goto zonelist_scan;
 	}
+
+	if (page)
+		/*
+		 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
+		 * necessary to allocate the page. The expectation is
+		 * that the caller is taking steps that will free more
+		 * memory. The caller should avoid the page being used
+		 * for !PFMEMALLOC purposes.
+		 */
+		page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+
 	return page;
 }
 
@@ -2091,7 +2102,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 	int migratetype, bool sync_migration,
-	bool *deferred_compaction,
+	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
 	struct page *page;
@@ -2106,7 +2117,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-						nodemask, sync_migration);
+						nodemask, sync_migration,
+						contended_compaction);
 	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
@@ -2152,7 +2164,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 	int migratetype, bool sync_migration,
-	bool *deferred_compaction,
+	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
 	return NULL;
@@ -2325,6 +2337,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	bool sync_migration = false;
 	bool deferred_compaction = false;
+	bool contended_compaction = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2389,14 +2402,6 @@ rebalance:
 			zonelist, high_zoneidx, nodemask,
 			preferred_zone, migratetype);
 		if (page) {
-			/*
-			 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
-			 * necessary to allocate the page. The expectation is
-			 * that the caller is taking steps that will free more
-			 * memory. The caller should avoid the page being used
-			 * for !PFMEMALLOC purposes.
-			 */
-			page->pfmemalloc = true;
 			goto got_pg;
 		}
 	}
@@ -2422,6 +2427,7 @@ rebalance:
 					nodemask,
 					alloc_flags, preferred_zone,
 					migratetype, sync_migration,
+					&contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 	if (page)
@@ -2431,10 +2437,11 @@ rebalance:
 	/*
 	 * If compaction is deferred for high-order allocations, it is because
 	 * sync compaction recently failed. In this is the case and the caller
-	 * has requested the system not be heavily disrupted, fail the
-	 * allocation now instead of entering direct reclaim
+	 * requested a movable allocation that does not heavily disrupt the
+	 * system then fail the allocation instead of entering direct reclaim.
 	 */
-	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+	if ((deferred_compaction || contended_compaction) &&
+						(gfp_mask & __GFP_NO_KSWAPD))
 		goto nopage;
 
 	/* Try direct reclaim and then allocating */
@@ -2505,6 +2512,7 @@ rebalance:
 					nodemask,
 					alloc_flags, preferred_zone,
 					migratetype, sync_migration,
+					&contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 	if (page)
@@ -2569,8 +2577,6 @@ retry_cpuset:
 		page = __alloc_pages_slowpath(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
-	else
-		page->pfmemalloc = false;
 
 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
 
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 913d6bdfdda..ca05ba217f5 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3016,7 +3016,8 @@ sub process {
 			$herectx .= raw_line($linenr, $n) . "\n";
 		}
 
-		if (($stmts =~ tr/;/;/) == 1) {
+		if (($stmts =~ tr/;/;/) == 1 &&
+		    $stmts !~ /^\s*(if|while|for|switch)\b/) {
 			WARN("SINGLE_STATEMENT_DO_WHILE_MACRO",
 			     "Single statement macros should not use a do {} while (0) loop\n" . "$herectx");
 		}
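
To make the checkpatch change above concrete, here is a hedged example of the kind of macro the new pattern now leaves alone: the body is a single statement, but because that statement is an "if", dropping the do { } while (0) wrapper would let it pair with an unrelated "else" at the call site. The macro name and messages are illustrative only.

#include <stdio.h>

/*
 * Hypothetical macro: one statement (one semicolon), but the statement is
 * an "if", so the wrapper is still required and checkpatch no longer warns.
 */
#define LOG_IF_ERR(err)							\
	do {								\
		if (err)						\
			fprintf(stderr, "operation failed: %d\n", err);	\
	} while (0)

int main(void)
{
	int ready = 1, ret = -1;

	/*
	 * Without the wrapper the macro would expand to a bare "if", and
	 * this "else" would silently bind to it instead of to "if (ready)".
	 */
	if (ready)
		LOG_IF_ERR(ret);
	else
		fprintf(stderr, "not ready\n");

	return 0;
}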