author		Jens Axboe <axboe@fb.com>	2014-09-11 11:31:18 -0400
committer	Jens Axboe <axboe@fb.com>	2014-09-11 11:31:18 -0400
commit		b207892b061da7608878e273ae22ba9bf9be264b (patch)
tree		51daa46b89b83cad422941f52110b19571b85b79 /mm
parent		018a17bdc8658ad448497c84d4ba21b6985820ec (diff)
parent		a516440542afcb9647f88d12c35640baf02d07ea (diff)
Merge branch 'for-linus' into for-3.18/core
A bit of churn on the for-linus side that would be nice to have
in the core bits for 3.18, so pull it in to catch us up and make
forward progress easier.
Signed-off-by: Jens Axboe <axboe@fb.com>
Conflicts:
block/scsi_ioctl.c
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb_cgroup.c	2
-rw-r--r--	mm/memblock.c		3
-rw-r--r--	mm/memory.c		7
-rw-r--r--	mm/pgtable-generic.c	2
-rw-r--r--	mm/zbud.c		1
-rw-r--r--	mm/zpool.c		2
-rw-r--r--	mm/zsmalloc.c		1
7 files changed, 9 insertions, 9 deletions
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 9eebfadeeee1..a67c26e0f360 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -217,7 +217,7 @@ void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 
 	if (hugetlb_cgroup_disabled())
 		return;
-	VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
+	lockdep_assert_held(&hugetlb_lock);
 	h_cg = hugetlb_cgroup_from_page(page);
 	if (unlikely(!h_cg))
 		return;
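The hunk above swaps a spin_is_locked() assertion for lockdep_assert_held(): the latter checks that the current context actually holds the lock and compiles away when CONFIG_LOCKDEP is off, whereas spin_is_locked() only reports that somebody holds it. A minimal sketch of the pattern, with a hypothetical my_lock/my_count pair that is not part of this commit:

/* Hypothetical illustration, not from this commit. */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_count;

/* Caller must hold my_lock; lockdep verifies that when CONFIG_LOCKDEP=y. */
static void my_count_add(unsigned long delta)
{
	lockdep_assert_held(&my_lock);
	my_count += delta;
}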
diff --git a/mm/memblock.c b/mm/memblock.c
index 6d2f219a48b0..70fad0c0dafb 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -192,8 +192,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
 					phys_addr_t end, int nid)
 {
-	int ret;
-	phys_addr_t kernel_end;
+	phys_addr_t kernel_end, ret;
 
 	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
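The memblock hunk folds @ret into the phys_addr_t declaration; presumably the point is that storing a physical address in an int truncates anything above 4GB, which matters on 32-bit kernels where phys_addr_t is 64-bit. A minimal userspace sketch of the hazard, using made-up values and a stand-in typedef:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* stand-in for the kernel type */

int main(void)
{
	phys_addr_t found = 0x100000000ULL;	/* hypothetical address just above 4GB */
	int as_int = (int)found;		/* old declaration: value is truncated */
	phys_addr_t as_phys = found;		/* new declaration: value is preserved */

	printf("as int: %d, as phys_addr_t: 0x%llx\n",
	       as_int, (unsigned long long)as_phys);
	return 0;
}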
diff --git a/mm/memory.c b/mm/memory.c
index ab3537bcfed2..adeac306610f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -751,7 +751,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 	unsigned long pfn = pte_pfn(pte);
 
 	if (HAVE_PTE_SPECIAL) {
-		if (likely(!pte_special(pte) || pte_numa(pte)))
+		if (likely(!pte_special(pte)))
 			goto check_pfn;
 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			return NULL;
@@ -777,15 +777,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		}
 	}
 
+	if (is_zero_pfn(pfn))
+		return NULL;
 check_pfn:
 	if (unlikely(pfn > highest_memmap_pfn)) {
 		print_bad_pte(vma, addr, pte, NULL);
 		return NULL;
 	}
 
-	if (is_zero_pfn(pfn))
-		return NULL;
-
 	/*
 	 * NOTE! We still have PageReserved() pages in the page tables.
 	 * eg. VDSO mappings can cause them to exist.
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index a8b919925934..dfb79e028ecb 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -195,7 +195,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 	pmd_t entry = *pmdp;
 	if (pmd_numa(entry))
 		entry = pmd_mknonnuma(entry);
-	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
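The pmdp_invalidate() fix writes back pmd_mknotpresent(entry) instead of re-reading *pmdp, so the pmd_mknonnuma() normalization applied two lines earlier is not silently thrown away. A stand-alone sketch of that general pattern, using hypothetical flag names rather than real pmd bits:

#include <stdio.h>

#define FLAG_NUMA	0x1	/* hypothetical stand-ins for pmd bits */
#define FLAG_PRESENT	0x2

static unsigned int shared = FLAG_NUMA | FLAG_PRESENT;

int main(void)
{
	unsigned int entry = shared;	/* local copy, like 'pmd_t entry = *pmdp' */

	if (entry & FLAG_NUMA)
		entry &= ~FLAG_NUMA;	/* normalize the local copy */

	/* Buggy variant: shared = shared & ~FLAG_PRESENT; re-reads the
	 * shared value and loses the normalization above. */
	shared = entry & ~FLAG_PRESENT;	/* fixed: reuse the local copy */

	printf("shared = %#x\n", shared);	/* the NUMA bit stays cleared */
	return 0;
}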
diff --git a/mm/zbud.c b/mm/zbud.c
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -195,6 +195,7 @@ static struct zpool_driver zbud_zpool_driver = {
 	.total_size = zbud_zpool_total_size,
 };
 
+MODULE_ALIAS("zpool-zbud");
 #endif /* CONFIG_ZPOOL */
 
 /*****************
diff --git a/mm/zpool.c b/mm/zpool.c
index e40612a1df00..739cdf0d183a 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -150,7 +150,7 @@ struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops)
 	driver = zpool_get_driver(type);
 
 	if (!driver) {
-		request_module(type);
+		request_module("zpool-%s", type);
 		driver = zpool_get_driver(type);
 	}
 
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 4e2fc83cb394..94f38fac5e81 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -315,6 +315,7 @@ static struct zpool_driver zs_zpool_driver = {
 	.total_size = zs_zpool_total_size,
 };
 
+MODULE_ALIAS("zpool-zsmalloc");
 #endif /* CONFIG_ZPOOL */
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
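The zpool-related hunks above belong together: request_module("zpool-%s", type) can only auto-load a backend if the backend module advertises the prefixed name, which is exactly what the new MODULE_ALIAS() lines in zbud and zsmalloc provide. A rough sketch of the registration side for a hypothetical backend called "mybackend" (the callback list is elided; zpool_register_driver() and struct zpool_driver are the ones used by the drivers above):

/* Hypothetical zpool backend skeleton, not part of this commit. */
#include <linux/module.h>
#include <linux/zpool.h>

static struct zpool_driver mybackend_zpool_driver = {
	.type	= "mybackend",
	.owner	= THIS_MODULE,
	/* .create, .destroy, .malloc, .free, ... callbacks go here */
};

/* Lets request_module("zpool-mybackend") find and load this module. */
MODULE_ALIAS("zpool-mybackend");

static int __init mybackend_init(void)
{
	zpool_register_driver(&mybackend_zpool_driver);
	return 0;
}
module_init(mybackend_init);

static void __exit mybackend_exit(void)
{
	zpool_unregister_driver(&mybackend_zpool_driver);
}
module_exit(mybackend_exit);

MODULE_LICENSE("GPL");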