author     Linus Torvalds <torvalds@linux-foundation.org>   2012-10-09 03:23:15 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-09 03:23:15 -0400
commit     9e2d8656f5e8aa214e66b462680cf86b210b74a8 (patch)
tree       f67d62e896cedf75599ea45f9ecf9999c6ad24cd /arch/x86
parent     1ea4f4f8405cc1ceec23f2d261bc3775785e6712 (diff)
parent     9e695d2ecc8451cc2c1603d60b5c8e7f5581923a (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge patches from Andrew Morton:
"A few misc things and very nearly all of the MM tree. A tremendous
amount of stuff (again), including a significant rbtree library
rework."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (160 commits)
sparc64: Support transparent huge pages.
mm: thp: Use more portable PMD clearing sequence in zap_huge_pmd().
mm: Add and use update_mmu_cache_pmd() in transparent huge page code.
sparc64: Document PGD and PMD layout.
sparc64: Eliminate PTE table memory wastage.
sparc64: Halve the size of PTE tables
sparc64: Only support 4MB huge pages and 8KB base pages.
memory-hotplug: suppress "Trying to free nonexistent resource <XXXXXXXXXXXXXXXX-YYYYYYYYYYYYYYYY>" warning
mm: memcg: clean up mm_match_cgroup() signature
mm: document PageHuge somewhat
mm: use %pK for /proc/vmallocinfo
mm, thp: fix mlock statistics
mm, thp: fix mapped pages avoiding unevictable list on mlock
memory-hotplug: update memory block's state and notify userspace
memory-hotplug: preparation to notify memory block's state at memory hot remove
mm: avoid section mismatch warning for memblock_type_name
make GFP_NOTRACK definition unconditional
cma: decrease cc.nr_migratepages after reclaiming pagelist
CMA: migrate mlocked pages
kpageflags: fix wrong KPF_THP on non-huge compound pages
...
Diffstat (limited to 'arch/x86')
 -rw-r--r--  arch/x86/Kconfig                  |  5
 -rw-r--r--  arch/x86/include/asm/atomic.h     | 24
 -rw-r--r--  arch/x86/include/asm/hugetlb.h    |  4
 -rw-r--r--  arch/x86/include/asm/pgtable.h    | 11
 -rw-r--r--  arch/x86/include/asm/pgtable_32.h |  1
 -rw-r--r--  arch/x86/include/asm/pgtable_64.h |  1
 -rw-r--r--  arch/x86/mm/fault.c               |  1
 -rw-r--r--  arch/x86/mm/hugetlbpage.c         |  3
 -rw-r--r--  arch/x86/mm/pat.c                 | 87
 -rw-r--r--  arch/x86/mm/pat_rbtree.c          | 34
 -rw-r--r--  arch/x86/xen/mmu.c                |  3
 11 files changed, 98 insertions(+), 76 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b72777ff32a9..1ae94bcae5d9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -10,6 +10,7 @@ config X86_32
 	def_bool y
 	depends on !64BIT
 	select CLKSRC_I8253
+	select HAVE_UID16
 
 config X86_64
 	def_bool y
@@ -46,6 +47,7 @@ config X86
 	select HAVE_FUNCTION_GRAPH_FP_TEST
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_SYSCALL_TRACEPOINTS
+	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_KVM
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
@@ -65,6 +67,7 @@ config X86
 	select HAVE_PERF_EVENTS_NMI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_DEBUG_KMEMLEAK
 	select ANON_INODES
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB && !M386
 	select HAVE_CMPXCHG_LOCAL if !M386
@@ -85,6 +88,7 @@ config X86
 	select IRQ_FORCED_THREADING
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_BPF_JIT if X86_64
+	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select CLKEVT_I8253
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
@@ -2168,6 +2172,7 @@ config IA32_EMULATION
 	bool "IA32 Emulation"
 	depends on X86_64
 	select COMPAT_BINFMT_ELF
+	select HAVE_UID16
 	---help---
 	  Include code to run legacy 32-bit programs under a
 	  64-bit kernel. You should likely turn this on, unless you're
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 250b8774c158..b6c3b821acf6 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -240,30 +240,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
-	int c, old, dec;
-	c = atomic_read(v);
-	for (;;) {
-		dec = c - 1;
-		if (unlikely(dec < 0))
-			break;
-		old = atomic_cmpxchg((v), c, dec);
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return dec;
-}
-
 /**
  * atomic_inc_short - increment of a short integer
  * @v: pointer to type int
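The helper removed above is a textbook compare-and-swap retry loop: read the value, refuse the decrement if it would go negative, and retry when another CPU won the race. It is dropped from x86 in favour of a generic version added elsewhere in this series. A minimal user-space sketch of the same pattern, using GCC/Clang __atomic builtins in place of the kernel's atomic_cmpxchg(), purely for illustration:

#include <stdio.h>

static int atomic_dec_if_positive(int *v)
{
	int c = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		int dec = c - 1;

		if (dec < 0)		/* would go negative: don't decrement */
			break;
		/* try to swing *v from c to dec; on failure c is refreshed */
		if (__atomic_compare_exchange_n(v, &c, dec, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			break;
	}
	return c - 1;	/* old value minus 1, even when not decremented */
}

int main(void)
{
	int v = 1;

	printf("%d\n", atomic_dec_if_positive(&v));	/* 0: decremented  */
	printf("%d\n", atomic_dec_if_positive(&v));	/* -1: refused     */
	return 0;
}

As in the kernel version, the return value is the old value minus one even when nothing was decremented, so callers can simply test for a negative result.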
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 439a9acc132d..bdd35dbd0605 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -90,4 +90,8 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
 #endif /* _ASM_X86_HUGETLB_H */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index fc9948465293..a1f780d45f76 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
 
 static inline int pmd_large(pmd_t pte)
 {
-	return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
-		(_PAGE_PSE | _PAGE_PRESENT);
+	return pmd_flags(pte) & _PAGE_PSE;
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte)
 
 static inline int pmd_present(pmd_t pmd)
 {
-	return pmd_flags(pmd) & _PAGE_PRESENT;
+	/*
+	 * Checking for _PAGE_PSE is needed too because
+	 * split_huge_page will temporarily clear the present bit (but
+	 * the _PAGE_PSE flag will remain set at all times while the
+	 * _PAGE_PRESENT bit is clear).
+	 */
+	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
 }
 
 static inline int pmd_none(pmd_t pmd)
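The comment added to pmd_present() above is the subtle point of this hunk: split_huge_page() temporarily clears _PAGE_PRESENT on a huge PMD, but _PAGE_PSE stays set for the whole window, so such a PMD must still report as present. A toy sketch of the predicate; the bit values are illustrative stand-ins for the real ones in asm/pgtable_types.h:

#include <stdio.h>

/* Illustrative bit values; the real ones live in asm/pgtable_types.h. */
#define _PAGE_PRESENT  0x001UL
#define _PAGE_PSE      0x080UL
#define _PAGE_PROTNONE 0x100UL

static int pmd_present(unsigned long pmd_flags)
{
	return !!(pmd_flags & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE));
}

int main(void)
{
	unsigned long huge = _PAGE_PRESENT | _PAGE_PSE;
	/* split_huge_page() window: present cleared, PSE still set */
	unsigned long splitting = huge & ~_PAGE_PRESENT;

	printf("huge=%d splitting=%d\n",
	       pmd_present(huge), pmd_present(splitting));	/* 1 1 */
	return 0;
}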
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 0c92113c4cb6..8faa215a503e 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -71,6 +71,7 @@ do { \
  * tables contain all the necessary information.
  */
 #define update_mmu_cache(vma, address, ptep) do { } while (0)
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 8251be02301e..47356f9df82e 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -143,6 +143,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 #define pte_unmap(pte) ((void)(pte))/* NOP */
 
 #define update_mmu_cache(vma, address, ptep) do { } while (0)
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
 /* Encode and de-code a swap entry */
 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a530b230e7d7..8e13ecb41bee 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1220,6 +1220,7 @@ good_area:
 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
 			 * of starvation. */
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			goto retry;
 		}
 	}
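The one-line change above slots into the fault-retry protocol: on the first VM_FAULT_RETRY the handler clears FAULT_FLAG_ALLOW_RETRY so a retry cannot loop forever, and it now also records that one attempt already happened via FAULT_FLAG_TRIED. A toy model of that flag dance; the flag values and the stub handler are invented for this demo:

#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY 0x01
#define FAULT_FLAG_TRIED       0x02
#define VM_FAULT_RETRY         0x04

/* Hypothetical handler: asks for a retry whenever a retry is allowed. */
static unsigned int handle_mm_fault_stub(unsigned int flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) ? VM_FAULT_RETRY : 0;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
	unsigned int fault;

retry:
	fault = handle_mm_fault_stub(flags);
	if ((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		/* Only one retry: drop ALLOW_RETRY to avoid starvation... */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		/* ...and remember that we already tried once (this patch). */
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	printf("final flags: %#x\n", flags);	/* 0x2: TRIED only */
	return 0;
}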
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index b91e48512425..937bff5cdaa7 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -71,7 +71,6 @@ huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
-	struct prio_tree_iter iter;
 	struct vm_area_struct *svma;
 	unsigned long saddr;
 	pte_t *spte = NULL;
@@ -81,7 +80,7 @@ huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 		return (pte_t *)pmd_alloc(mm, pud, addr);
 
 	mutex_lock(&mapping->i_mmap_mutex);
-	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
+	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
 		if (svma == vma)
 			continue;
 
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 3d68ef6d2266..0eb572eda406 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -664,20 +664,20 @@ static void free_pfn_range(u64 paddr, unsigned long size)
 }
 
 /*
- * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * track_pfn_copy is called when vma that is covering the pfnmap gets
  * copied through copy_page_range().
  *
  * If the vma has a linear pfn mapping for the entire range, we get the prot
  * from pte and reserve the entire vma range with single reserve_pfn_range call.
  */
-int track_pfn_vma_copy(struct vm_area_struct *vma)
+int track_pfn_copy(struct vm_area_struct *vma)
 {
 	resource_size_t paddr;
 	unsigned long prot;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
-	if (is_linear_pfn_mapping(vma)) {
+	if (vma->vm_flags & VM_PAT) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
 		 * starting address and protection from pte.
@@ -694,31 +694,59 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 }
 
 /*
- * track_pfn_vma_new is called when a _new_ pfn mapping is being established
- * for physical range indicated by pfn and size.
- *
  * prot is passed in as a parameter for the new mapping. If the vma has a
  * linear pfn mapping for the entire range reserve the entire vma range with
  * single reserve_pfn_range call.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
-			unsigned long pfn, unsigned long size)
+int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+		    unsigned long pfn, unsigned long addr, unsigned long size)
 {
+	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	unsigned long flags;
-	resource_size_t paddr;
-	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (is_linear_pfn_mapping(vma)) {
-		/* reserve the whole chunk starting from vm_pgoff */
-		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot, 0);
+	/* reserve the whole chunk starting from paddr */
+	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
+		int ret;
+
+		ret = reserve_pfn_range(paddr, size, prot, 0);
+		if (!ret)
+			vma->vm_flags |= VM_PAT;
+		return ret;
 	}
 
 	if (!pat_enabled)
 		return 0;
 
-	/* for vm_insert_pfn and friends, we set prot based on lookup */
-	flags = lookup_memtype(pfn << PAGE_SHIFT);
+	/*
+	 * For anything smaller than the vma size we set prot based on the
+	 * lookup.
+	 */
+	flags = lookup_memtype(paddr);
+
+	/* Check memtype for the remaining pages */
+	while (size > PAGE_SIZE) {
+		size -= PAGE_SIZE;
+		paddr += PAGE_SIZE;
+		if (flags != lookup_memtype(paddr))
+			return -EINVAL;
+	}
+
+	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
+			 flags);
+
+	return 0;
+}
+
+int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+		     unsigned long pfn)
+{
+	unsigned long flags;
+
+	if (!pat_enabled)
+		return 0;
+
+	/* Set prot based on lookup */
+	flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
 	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
 			 flags);
 
@@ -726,22 +754,31 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 }
 
 /*
- * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack_pfn is called while unmapping a pfnmap for a region.
  * untrack can be called for a specific region indicated by pfn and size or
- * can be for the entire vma (in which case size can be zero).
+ * can be for the entire vma (in which case pfn, size are zero).
  */
-void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 		 unsigned long size)
 {
 	resource_size_t paddr;
-	unsigned long vma_size = vma->vm_end - vma->vm_start;
+	unsigned long prot;
 
-	if (is_linear_pfn_mapping(vma)) {
-		/* free the whole chunk starting from vm_pgoff */
-		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		free_pfn_range(paddr, vma_size);
+	if (!(vma->vm_flags & VM_PAT))
 		return;
+
+	/* free the chunk starting from pfn or the whole chunk */
+	paddr = (resource_size_t)pfn << PAGE_SHIFT;
+	if (!paddr && !size) {
+		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+			WARN_ON_ONCE(1);
+			return;
+		}
+
+		size = vma->vm_end - vma->vm_start;
 	}
+	free_pfn_range(paddr, size);
+	vma->vm_flags &= ~VM_PAT;
 }
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
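The new per-page walk in track_pfn_remap() is the heart of this rework: a remap that covers only part of the VMA is allowed only when every page in the range resolves to the same memtype. A standalone sketch of that walk, with lookup_memtype() replaced by an invented stub (the 1 MiB uncached boundary exists only for this demo):

#include <stdio.h>

#define PAGE_SIZE 4096UL

enum memtype { MT_WB, MT_UC, MT_WC };

/* Stub: pretend everything below 1 MiB is uncached, the rest write-back. */
static enum memtype lookup_memtype(unsigned long long paddr)
{
	return paddr < (1ULL << 20) ? MT_UC : MT_WB;
}

/* Same shape as the walk in track_pfn_remap(): the first page picks the
 * memtype, and every following page must agree or the remap is refused. */
static int check_range_memtype(unsigned long long paddr, unsigned long size)
{
	enum memtype flags = lookup_memtype(paddr);

	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (lookup_memtype(paddr) != flags)
			return -1;	/* mixed memtypes: reject */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", check_range_memtype(0x0, 8 * PAGE_SIZE));	/* 0  */
	printf("%d\n", check_range_memtype(0xFF000, 8 * PAGE_SIZE));	/* -1 */
	return 0;
}

The full-VMA case skips this walk entirely: it reserves the whole physical range once and marks the VMA with VM_PAT so untrack_pfn() knows there is something to free.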
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index 8acaddd0fb21..415f6c4ced36 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -12,7 +12,7 @@
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/rbtree.h>
+#include <linux/rbtree_augmented.h>
 #include <linux/sched.h>
 #include <linux/gfp.h>
 
@@ -54,29 +54,24 @@ static u64 get_subtree_max_end(struct rb_node *node)
 	return ret;
 }
 
-/* Update 'subtree_max_end' for a node, based on node and its children */
-static void memtype_rb_augment_cb(struct rb_node *node, void *__unused)
+static u64 compute_subtree_max_end(struct memtype *data)
 {
-	struct memtype *data;
-	u64 max_end, child_max_end;
-
-	if (!node)
-		return;
+	u64 max_end = data->end, child_max_end;
 
-	data = container_of(node, struct memtype, rb);
-	max_end = data->end;
-
-	child_max_end = get_subtree_max_end(node->rb_right);
+	child_max_end = get_subtree_max_end(data->rb.rb_right);
 	if (child_max_end > max_end)
 		max_end = child_max_end;
 
-	child_max_end = get_subtree_max_end(node->rb_left);
+	child_max_end = get_subtree_max_end(data->rb.rb_left);
 	if (child_max_end > max_end)
 		max_end = child_max_end;
 
-	data->subtree_max_end = max_end;
+	return max_end;
 }
 
+RB_DECLARE_CALLBACKS(static, memtype_rb_augment_cb, struct memtype, rb,
+		     u64, subtree_max_end, compute_subtree_max_end)
+
 /* Find the first (lowest start addr) overlapping range from rb tree */
 static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
 				u64 start, u64 end)
@@ -179,15 +174,17 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
 		struct memtype *data = container_of(*node, struct memtype, rb);
 
 		parent = *node;
+		if (data->subtree_max_end < newdata->end)
+			data->subtree_max_end = newdata->end;
 		if (newdata->start <= data->start)
 			node = &((*node)->rb_left);
 		else if (newdata->start > data->start)
 			node = &((*node)->rb_right);
 	}
 
+	newdata->subtree_max_end = newdata->end;
 	rb_link_node(&newdata->rb, parent, node);
-	rb_insert_color(&newdata->rb, root);
-	rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL);
+	rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb);
 }
 
 int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
@@ -209,16 +206,13 @@ int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
 
 struct memtype *rbt_memtype_erase(u64 start, u64 end)
 {
-	struct rb_node *deepest;
 	struct memtype *data;
 
 	data = memtype_rb_exact_match(&memtype_rbroot, start, end);
 	if (!data)
 		goto out;
 
-	deepest = rb_augment_erase_begin(&data->rb);
-	rb_erase(&data->rb, &memtype_rbroot);
-	rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL);
+	rb_erase_augmented(&data->rb, &memtype_rbroot, &memtype_rb_augment_cb);
 out:
 	return data;
 }
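The conversion above swaps the old manual augment callback for RB_DECLARE_CALLBACKS(): every memtype node caches subtree_max_end, the largest interval end anywhere in its subtree, so an overlap search can prune whole branches. A self-contained sketch of the idea on a plain, unbalanced binary tree rather than the kernel's rbtree API:

#include <stdio.h>

struct memtype {
	unsigned long long start, end;	/* interval [start, end) */
	unsigned long long subtree_max_end;
	struct memtype *left, *right;
};

static unsigned long long get_subtree_max_end(const struct memtype *n)
{
	return n ? n->subtree_max_end : 0;
}

/* Recompute a node's cached value from itself and its children; the
 * kernel regenerates this logic via RB_DECLARE_CALLBACKS() so it also
 * runs on every rotation. */
static unsigned long long compute_subtree_max_end(const struct memtype *n)
{
	unsigned long long max_end = n->end, child;

	child = get_subtree_max_end(n->right);
	if (child > max_end)
		max_end = child;
	child = get_subtree_max_end(n->left);
	if (child > max_end)
		max_end = child;
	return max_end;
}

/* Lowest-start overlap search, pruning on subtree_max_end: if nothing in
 * the left subtree ends after 'start', the left subtree cannot overlap. */
static struct memtype *lowest_overlap(struct memtype *n,
				      unsigned long long start,
				      unsigned long long end)
{
	while (n) {
		if (get_subtree_max_end(n->left) > start)
			n = n->left;		/* lowest match must be left */
		else if (n->start < end && n->end > start)
			return n;		/* n itself overlaps */
		else if (n->start >= end)
			return NULL;		/* right side starts too late */
		else
			n = n->right;
	}
	return NULL;
}

int main(void)
{
	struct memtype a = { 0x1000, 0x2000, 0x2000, NULL, NULL };
	struct memtype c = { 0x5000, 0x6000, 0x6000, NULL, NULL };
	struct memtype b = { 0x3000, 0x4000, 0, &a, &c };
	struct memtype *hit;

	b.subtree_max_end = compute_subtree_max_end(&b);	/* 0x6000 */

	hit = lowest_overlap(&b, 0x1800, 0x1900);
	printf("overlap starts at %#llx\n", hit ? hit->start : 0); /* 0x1000 */
	return 0;
}

This is the same augmentation an interval tree uses; the fix in memtype_rb_insert() keeps the cached values correct on the way down so the later rb_insert_augmented() only has to repair rotations.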
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5a16824cc2b3..fd28d86fe3d2 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2451,8 +2451,7 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 
 	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
 
-	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
-				(VM_PFNMAP | VM_RESERVED | VM_IO)));
+	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
 	rmd.mfn = mfn;
 	rmd.prot = prot;