Diffstat (limited to 'arch')
-rw-r--r-- arch/arm/mm/fault-armv.c   | 3 +--
-rw-r--r-- arch/arm/mm/flush.c        | 3 +--
-rw-r--r-- arch/parisc/kernel/cache.c | 3 +--
-rw-r--r-- arch/x86/mm/hugetlbpage.c  | 3 +--
4 files changed, 4 insertions(+), 8 deletions(-)
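The hunks below are the same mechanical conversion in four places: the prio-tree walk over a file's i_mmap is replaced by the interval-tree walk, and the now-unused on-stack struct prio_tree_iter is dropped. As a rough, non-buildable sketch of the caller-side pattern (it assumes the kernel's definitions of these macros and of struct address_space; flush_shared_aliases is a made-up name for illustration):

static void flush_shared_aliases(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *mpnt;

	/*
	 * Old form: the prio-tree iterator had to be declared by the caller.
	 *
	 *	struct prio_tree_iter iter;
	 *	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff)
	 *		...;
	 *
	 * New form: vma_interval_tree_foreach() uses mpnt itself as the
	 * cursor, so the extra local variable goes away.
	 */
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/* each mpnt maps a file range that covers pgoff */
	}
}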
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 7599e2625c7d..2a5907b5c8d2 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -134,7 +134,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *mpnt;
-	struct prio_tree_iter iter;
 	unsigned long offset;
 	pgoff_t pgoff;
 	int aliases = 0;
@@ -147,7 +146,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
 	 * cache coherency.
 	 */
 	flush_dcache_mmap_lock(mapping);
-	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
 		/*
 		 * If this VMA is not in our MM, we can ignore it.
 		 * Note that we intentionally mask out the VMA
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 40ca11ed6e5f..1c8f7f564175 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -196,7 +196,6 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 {
 	struct mm_struct *mm = current->active_mm;
 	struct vm_area_struct *mpnt;
-	struct prio_tree_iter iter;
 	pgoff_t pgoff;
 
 	/*
@@ -208,7 +207,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
 	flush_dcache_mmap_lock(mapping);
-	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long offset;
 
 		/*
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 9d181890a7e3..48e16dc20102 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -276,7 +276,6 @@ void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 	struct vm_area_struct *mpnt;
-	struct prio_tree_iter iter;
 	unsigned long offset;
 	unsigned long addr, old_addr = 0;
 	pgoff_t pgoff;
@@ -299,7 +298,7 @@ void flush_dcache_page(struct page *page)
 	 * to flush one address here for them all to become coherent */
 
 	flush_dcache_mmap_lock(mapping);
-	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
 
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index b91e48512425..937bff5cdaa7 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -71,7 +71,6 @@ huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
-	struct prio_tree_iter iter;
 	struct vm_area_struct *svma;
 	unsigned long saddr;
 	pte_t *spte = NULL;
@@ -81,7 +80,7 @@ huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 		return (pte_t *)pmd_alloc(mm, pud, addr);
 
 	mutex_lock(&mapping->i_mmap_mutex);
-	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
+	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
 		if (svma == vma)
 			continue;
 