about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-07-30 20:16:36 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-07-30 20:16:36 -0400
commit4d0bd657d77d599307649aa03991b05e3ddef059 (patch)
treea1b028afe393695ea8ccbe4a2ef7742b94d82ef7
parent26bcd8b72563b4c54892c4c2a409f6656fb8ae8b (diff)
parente0198b290dcd8313bdf313a0d083033d5c01d761 (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge fixes from Andrew Morton: "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  Josh has moved
  kexec: export free_huge_page to VMCOREINFO
  mm: fix filemap.c pagecache_get_page() kernel-doc warnings
  mm: debugfs: move rounddown_pow_of_two() out from do_fault path
  memcg: oom_notify use-after-free fix
  hwpoison: call action_result() in failure path of hwpoison_user_mappings()
  hwpoison: fix hugetlbfs/thp precheck in hwpoison_user_mappings()
  rapidio/tsi721_dma: fix failure to obtain transaction descriptor
  mm, thp: do not allow thp faults to avoid cpuset restrictions
  mm/page-writeback.c: fix divide by zero in bdi_dirty_limits()
-rw-r--r--.mailmap5
-rw-r--r--CREDITS7
-rw-r--r--MAINTAINERS2
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c8
-rw-r--r--include/linux/hugetlb.h1
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/rcu/rcutorture.c4
-rw-r--r--mm/filemap.c13
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/memory-failure.c14
-rw-r--r--mm/memory.c21
-rw-r--r--mm/page-writeback.c6
-rw-r--r--mm/page_alloc.c16
14 files changed, 69 insertions, 36 deletions
diff --git a/.mailmap b/.mailmap
index df1baba43a64..1ad68731fb47 100644
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,11 @@ Jeff Garzik <jgarzik@pretzel.yyz.us>
62Jens Axboe <axboe@suse.de> 62Jens Axboe <axboe@suse.de>
63Jens Osterkamp <Jens.Osterkamp@de.ibm.com> 63Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
64John Stultz <johnstul@us.ibm.com> 64John Stultz <johnstul@us.ibm.com>
65<josh@joshtriplett.org> <josh@freedesktop.org>
66<josh@joshtriplett.org> <josh@kernel.org>
67<josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
68<josh@joshtriplett.org> <josht@us.ibm.com>
69<josh@joshtriplett.org> <josht@vnet.ibm.com>
65Juha Yrjola <at solidboot.com> 70Juha Yrjola <at solidboot.com>
66Juha Yrjola <juha.yrjola@nokia.com> 71Juha Yrjola <juha.yrjola@nokia.com>
67Juha Yrjola <juha.yrjola@solidboot.com> 72Juha Yrjola <juha.yrjola@solidboot.com>
diff --git a/CREDITS b/CREDITS
index 28ee1514b9de..a80b66718f66 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3511,10 +3511,11 @@ S: MacGregor A.C.T 2615
3511S: Australia 3511S: Australia
3512 3512
3513N: Josh Triplett 3513N: Josh Triplett
3514E: josh@freedesktop.org 3514E: josh@joshtriplett.org
3515P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87 CA26 189B 9946 D0FE 7AFB 3515P: 4096R/8AFF873D 758E 5042 E397 4BA3 3A9C 1E67 0ED9 A3DF 8AFF 873D
3516D: rcutorture maintainer 3516D: RCU and rcutorture
3517D: lock annotations, finding and fixing lock bugs 3517D: lock annotations, finding and fixing lock bugs
3518D: kernel tinification
3518 3519
3519N: Winfried Trümper 3520N: Winfried Trümper
3520E: winni@xpilot.org 3521E: winni@xpilot.org
diff --git a/MAINTAINERS b/MAINTAINERS
index 86efa7e213c2..95990dd2678c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7424,7 +7424,7 @@ S: Orphan
7424F: drivers/net/wireless/ray* 7424F: drivers/net/wireless/ray*
7425 7425
7426RCUTORTURE MODULE 7426RCUTORTURE MODULE
7427M: Josh Triplett <josh@freedesktop.org> 7427M: Josh Triplett <josh@joshtriplett.org>
7428M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> 7428M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
7429L: linux-kernel@vger.kernel.org 7429L: linux-kernel@vger.kernel.org
7430S: Supported 7430S: Supported
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 9b60b1f3261c..44341dc5b148 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
287 "desc %p not ACKed\n", tx_desc); 287 "desc %p not ACKed\n", tx_desc);
288 } 288 }
289 289
290 if (ret == NULL) {
291 dev_dbg(bdma_chan->dchan.device->dev,
292 "%s: unable to obtain tx descriptor\n", __func__);
293 goto err_out;
294 }
295
290 i = bdma_chan->wr_count_next % bdma_chan->bd_num; 296 i = bdma_chan->wr_count_next % bdma_chan->bd_num;
291 if (i == bdma_chan->bd_num - 1) { 297 if (i == bdma_chan->bd_num - 1) {
292 i = 0; 298 i = 0;
@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
297 tx_desc->txd.phys = bdma_chan->bd_phys + 303 tx_desc->txd.phys = bdma_chan->bd_phys +
298 i * sizeof(struct tsi721_dma_desc); 304 i * sizeof(struct tsi721_dma_desc);
299 tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i]; 305 tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
300 306err_out:
301 spin_unlock_bh(&bdma_chan->lock); 307 spin_unlock_bh(&bdma_chan->lock);
302 308
303 return ret; 309 return ret;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 255cd5cc0754..a23c096b3080 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
80bool isolate_huge_page(struct page *page, struct list_head *list); 80bool isolate_huge_page(struct page *page, struct list_head *list);
81void putback_active_hugepage(struct page *page); 81void putback_active_hugepage(struct page *page);
82bool is_hugepage_active(struct page *page); 82bool is_hugepage_active(struct page *page);
83void free_huge_page(struct page *page);
83 84
84#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 85#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
85pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); 86pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 369f41a94124..23a088fec3c0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
33#include <linux/swap.h> 33#include <linux/swap.h>
34#include <linux/syscore_ops.h> 34#include <linux/syscore_ops.h>
35#include <linux/compiler.h> 35#include <linux/compiler.h>
36#include <linux/hugetlb.h>
36 37
37#include <asm/page.h> 38#include <asm/page.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
@@ -1619,6 +1620,7 @@ static int __init crash_save_vmcoreinfo_init(void)
1619#endif 1620#endif
1620 VMCOREINFO_NUMBER(PG_head_mask); 1621 VMCOREINFO_NUMBER(PG_head_mask);
1621 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); 1622 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1623 VMCOREINFO_SYMBOL(free_huge_page);
1622 1624
1623 arch_crash_save_vmcoreinfo(); 1625 arch_crash_save_vmcoreinfo();
1624 update_vmcoreinfo_note(); 1626 update_vmcoreinfo_note();
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7fa34f86e5ba..948a7693748e 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -18,7 +18,7 @@
18 * Copyright (C) IBM Corporation, 2005, 2006 18 * Copyright (C) IBM Corporation, 2005, 2006
19 * 19 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21 * Josh Triplett <josh@freedesktop.org> 21 * Josh Triplett <josh@joshtriplett.org>
22 * 22 *
23 * See also: Documentation/RCU/torture.txt 23 * See also: Documentation/RCU/torture.txt
24 */ 24 */
@@ -51,7 +51,7 @@
51#include <linux/torture.h> 51#include <linux/torture.h>
52 52
53MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
54MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); 54MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
55 55
56 56
57torture_param(int, fqs_duration, 0, 57torture_param(int, fqs_duration, 0,
diff --git a/mm/filemap.c b/mm/filemap.c
index dafb06f70a09..900edfaf6df5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1031,18 +1031,21 @@ EXPORT_SYMBOL(find_lock_entry);
1031 * @mapping: the address_space to search 1031 * @mapping: the address_space to search
1032 * @offset: the page index 1032 * @offset: the page index
1033 * @fgp_flags: PCG flags 1033 * @fgp_flags: PCG flags
1034 * @gfp_mask: gfp mask to use if a page is to be allocated 1034 * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
1035 * @radix_gfp_mask: gfp mask to use for radix tree node allocation
1035 * 1036 *
1036 * Looks up the page cache slot at @mapping & @offset. 1037 * Looks up the page cache slot at @mapping & @offset.
1037 * 1038 *
1038 * PCG flags modify how the page is returned 1039 * PCG flags modify how the page is returned.
1039 * 1040 *
1040 * FGP_ACCESSED: the page will be marked accessed 1041 * FGP_ACCESSED: the page will be marked accessed
1041 * FGP_LOCK: Page is return locked 1042 * FGP_LOCK: Page is return locked
1042 * FGP_CREAT: If page is not present then a new page is allocated using 1043 * FGP_CREAT: If page is not present then a new page is allocated using
1043 * @gfp_mask and added to the page cache and the VM's LRU 1044 * @cache_gfp_mask and added to the page cache and the VM's LRU
1044 * list. The page is returned locked and with an increased 1045 * list. If radix tree nodes are allocated during page cache
1045 * refcount. Otherwise, %NULL is returned. 1046 * insertion then @radix_gfp_mask is used. The page is returned
1047 * locked and with an increased refcount. Otherwise, %NULL is
1048 * returned.
1046 * 1049 *
1047 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even 1050 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
1048 * if the GFP flags specified for FGP_CREAT are atomic. 1051 * if the GFP flags specified for FGP_CREAT are atomic.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9221c02ed9e2..7a0a73d2fcff 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -856,7 +856,7 @@ struct hstate *size_to_hstate(unsigned long size)
856 return NULL; 856 return NULL;
857} 857}
858 858
859static void free_huge_page(struct page *page) 859void free_huge_page(struct page *page)
860{ 860{
861 /* 861 /*
862 * Can't pass hstate in here because it is called from the 862 * Can't pass hstate in here because it is called from the
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a2c7bcb0e6eb..1f14a430c656 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5415,8 +5415,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
5415{ 5415{
5416 struct mem_cgroup_eventfd_list *ev; 5416 struct mem_cgroup_eventfd_list *ev;
5417 5417
5418 spin_lock(&memcg_oom_lock);
5419
5418 list_for_each_entry(ev, &memcg->oom_notify, list) 5420 list_for_each_entry(ev, &memcg->oom_notify, list)
5419 eventfd_signal(ev->eventfd, 1); 5421 eventfd_signal(ev->eventfd, 1);
5422
5423 spin_unlock(&memcg_oom_lock);
5420 return 0; 5424 return 0;
5421} 5425}
5422 5426
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7211a73ba14d..a013bc94ebbe 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -895,7 +895,13 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
895 struct page *hpage = *hpagep; 895 struct page *hpage = *hpagep;
896 struct page *ppage; 896 struct page *ppage;
897 897
898 if (PageReserved(p) || PageSlab(p) || !PageLRU(p)) 898 /*
899 * Here we are interested only in user-mapped pages, so skip any
900 * other types of pages.
901 */
902 if (PageReserved(p) || PageSlab(p))
903 return SWAP_SUCCESS;
904 if (!(PageLRU(hpage) || PageHuge(p)))
899 return SWAP_SUCCESS; 905 return SWAP_SUCCESS;
900 906
901 /* 907 /*
@@ -905,8 +911,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
905 if (!page_mapped(hpage)) 911 if (!page_mapped(hpage))
906 return SWAP_SUCCESS; 912 return SWAP_SUCCESS;
907 913
908 if (PageKsm(p)) 914 if (PageKsm(p)) {
915 pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
909 return SWAP_FAIL; 916 return SWAP_FAIL;
917 }
910 918
911 if (PageSwapCache(p)) { 919 if (PageSwapCache(p)) {
912 printk(KERN_ERR 920 printk(KERN_ERR
@@ -1229,7 +1237,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1229 */ 1237 */
1230 if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage) 1238 if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
1231 != SWAP_SUCCESS) { 1239 != SWAP_SUCCESS) {
1232 printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); 1240 action_result(pfn, "unmapping failed", IGNORED);
1233 res = -EBUSY; 1241 res = -EBUSY;
1234 goto out; 1242 goto out;
1235 } 1243 }
diff --git a/mm/memory.c b/mm/memory.c
index 7e8d8205b610..8b44f765b645 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2758,23 +2758,18 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
2758 update_mmu_cache(vma, address, pte); 2758 update_mmu_cache(vma, address, pte);
2759} 2759}
2760 2760
2761static unsigned long fault_around_bytes = 65536; 2761static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
2762 2762
2763/*
2764 * fault_around_pages() and fault_around_mask() round down fault_around_bytes
2765 * to nearest page order. It's what do_fault_around() expects to see.
2766 */
2767static inline unsigned long fault_around_pages(void) 2763static inline unsigned long fault_around_pages(void)
2768{ 2764{
2769 return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE; 2765 return fault_around_bytes >> PAGE_SHIFT;
2770} 2766}
2771 2767
2772static inline unsigned long fault_around_mask(void) 2768static inline unsigned long fault_around_mask(void)
2773{ 2769{
2774 return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK; 2770 return ~(fault_around_bytes - 1) & PAGE_MASK;
2775} 2771}
2776 2772
2777
2778#ifdef CONFIG_DEBUG_FS 2773#ifdef CONFIG_DEBUG_FS
2779static int fault_around_bytes_get(void *data, u64 *val) 2774static int fault_around_bytes_get(void *data, u64 *val)
2780{ 2775{
@@ -2782,11 +2777,19 @@ static int fault_around_bytes_get(void *data, u64 *val)
2782 return 0; 2777 return 0;
2783} 2778}
2784 2779
2780/*
2781 * fault_around_pages() and fault_around_mask() expects fault_around_bytes
2782 * rounded down to nearest page order. It's what do_fault_around() expects to
2783 * see.
2784 */
2785static int fault_around_bytes_set(void *data, u64 val) 2785static int fault_around_bytes_set(void *data, u64 val)
2786{ 2786{
2787 if (val / PAGE_SIZE > PTRS_PER_PTE) 2787 if (val / PAGE_SIZE > PTRS_PER_PTE)
2788 return -EINVAL; 2788 return -EINVAL;
2789 fault_around_bytes = val; 2789 if (val > PAGE_SIZE)
2790 fault_around_bytes = rounddown_pow_of_two(val);
2791 else
2792 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
2790 return 0; 2793 return 0;
2791} 2794}
2792DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops, 2795DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 518e2c3f4c75..e0c943014eb7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1306,9 +1306,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
1306 *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); 1306 *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
1307 1307
1308 if (bdi_bg_thresh) 1308 if (bdi_bg_thresh)
1309 *bdi_bg_thresh = div_u64((u64)*bdi_thresh * 1309 *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
1310 background_thresh, 1310 background_thresh,
1311 dirty_thresh); 1311 dirty_thresh) : 0;
1312 1312
1313 /* 1313 /*
1314 * In order to avoid the stacked BDI deadlock we need 1314 * In order to avoid the stacked BDI deadlock we need
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8bcfe3ae20cb..ef44ad736ca1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2447,7 +2447,7 @@ static inline int
2447gfp_to_alloc_flags(gfp_t gfp_mask) 2447gfp_to_alloc_flags(gfp_t gfp_mask)
2448{ 2448{
2449 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 2449 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2450 const gfp_t wait = gfp_mask & __GFP_WAIT; 2450 const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
2451 2451
2452 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 2452 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2453 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 2453 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2456,20 +2456,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
2456 * The caller may dip into page reserves a bit more if the caller 2456 * The caller may dip into page reserves a bit more if the caller
2457 * cannot run direct reclaim, or if the caller has realtime scheduling 2457 * cannot run direct reclaim, or if the caller has realtime scheduling
2458 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 2458 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
2459 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 2459 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
2460 */ 2460 */
2461 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 2461 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2462 2462
2463 if (!wait) { 2463 if (atomic) {
2464 /* 2464 /*
2465 * Not worth trying to allocate harder for 2465 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
2466 * __GFP_NOMEMALLOC even if it can't schedule. 2466 * if it can't schedule.
2467 */ 2467 */
2468 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2468 if (!(gfp_mask & __GFP_NOMEMALLOC))
2469 alloc_flags |= ALLOC_HARDER; 2469 alloc_flags |= ALLOC_HARDER;
2470 /* 2470 /*
2471 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 2471 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
2472 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 2472 * comment for __cpuset_node_allowed_softwall().
2473 */ 2473 */
2474 alloc_flags &= ~ALLOC_CPUSET; 2474 alloc_flags &= ~ALLOC_CPUSET;
2475 } else if (unlikely(rt_task(current)) && !in_interrupt()) 2475 } else if (unlikely(rt_task(current)) && !in_interrupt())