diff options
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 13 |
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 95d0cce63583..b68812d682b6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -82,7 +82,6 @@ EXPORT_SYMBOL(max_mapnr);
 EXPORT_SYMBOL(mem_map);
 #endif
 
-unsigned long num_physpages;
 /*
  * A number of key systems in x86 including ioremap() rely on the assumption
  * that high_memory defines the upper bound on direct map memory, then end
@@ -92,7 +91,6 @@ unsigned long num_physpages;
  */
 void * high_memory;
 
-EXPORT_SYMBOL(num_physpages);
 EXPORT_SYMBOL(high_memory);
 
 /*
@@ -1101,6 +1099,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
+	unsigned long range_start = addr;
 
 again:
 	init_rss_vec(rss);
@@ -1206,12 +1205,14 @@ again:
 		force_flush = 0;
 
 #ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = addr;
-		tlb->end = end;
+		tlb->start = range_start;
+		tlb->end = addr;
 #endif
 		tlb_flush_mmu(tlb);
-		if (addr != end)
+		if (addr != end) {
+			range_start = addr;
 			goto again;
+		}
 	}
 
 	return addr;
@@ -2904,7 +2905,7 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 			details->first_index, details->last_index) {
 
 		vba = vma->vm_pgoff;
-		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
+		vea = vba + vma_pages(vma) - 1;
 		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
 		zba = details->first_index;
 		if (zba < vba)