Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  32
1 file changed, 5 insertions, 27 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 6953d3926e01..40b7531ee8ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1112,11 +1112,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	int force_flush = 0;
 	int rss[NR_MM_COUNTERS];
 	spinlock_t *ptl;
+	pte_t *start_pte;
 	pte_t *pte;
 
 again:
 	init_rss_vec(rss);
-	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	pte = start_pte;
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = *pte;
@@ -1196,7 +1198,7 @@ again:
 
 	add_mm_rss_vec(mm, rss);
 	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(pte - 1, ptl);
+	pte_unmap_unlock(start_pte, ptl);
 
 	/*
 	 * mmu_gather ran out of room to batch pages, we break out of
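
The two hunks above are the substance of the change: pte_unmap_unlock() must be handed the address that pte_offset_map_lock() returned. Passing pte - 1 was safe while the loop always ran to the end of the page table, but the force_flush path (see the trailing comment) breaks out of the do-while before the pte++ in its controlling expression, so after a first-iteration break pte - 1 points before the mapped page-table page; with CONFIG_HIGHPTE the underlying kunmap_atomic() would then operate on the wrong page. A compilable toy model of the hazard, with hypothetical map_page()/unmap_page() helpers standing in for the kernel API:

	#include <stdio.h>

	#define NENTRIES 4

	static int entries[NENTRIES];

	/* map_page()/unmap_page() stand in for pte_offset_map_lock()/
	 * pte_unmap_unlock(): with CONFIG_HIGHPTE the unmap is a
	 * kunmap_atomic() and must be given an address inside the page
	 * that the map call returned. */
	static int *map_page(void)
	{
		return entries;
	}

	static void unmap_page(int *addr)
	{
		if (addr < entries || addr >= entries + NENTRIES)
			puts("BUG: address outside the mapped page");
		else
			puts("ok: inside the mapped page");
	}

	int main(void)
	{
		int *start, *pte;
		int budget = 0;	/* batch already full: break at once */

		start = pte = map_page();
		do {
			if (budget-- == 0)
				break;	/* early break skips the pte++ below */
		} while (pte++, pte != entries + NENTRIES);

		unmap_page(pte - 1);	/* old code: out of range after the break */
		unmap_page(start);	/* fixed code: always the mapped address */
		return 0;
	}

Run as written, the first call reports the out-of-range address and the second succeeds, which is exactly why the patch keeps the original pointer in start_pte instead of deriving it from the loop cursor.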
@@ -1296,7 +1298,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
 
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
- * @tlbp: address of the caller's struct mmu_gather
+ * @tlb: address of the caller's struct mmu_gather
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
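
The hunk above is purely a kernel-doc fix: each @-tag must match the C parameter name, and @tlbp presumably went stale when the mmu_gather rework changed the parameter from struct mmu_gather **tlbp to struct mmu_gather *tlb. scripts/kernel-doc reports such drift as an excess or missing parameter description. The convention, sketched on a hypothetical function rather than unmap_vmas() itself:

	/**
	 * widget_resize - change the backing size of a widget
	 * @w: the widget to resize (tag must match the parameter name below)
	 * @nbytes: new size in bytes
	 *
	 * Return: 0 on success or a negative errno on failure.
	 */
	int widget_resize(struct widget *w, size_t nbytes);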
@@ -2796,30 +2798,6 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
-{
-	struct address_space *mapping = inode->i_mapping;
-
-	/*
-	 * If the underlying filesystem is not going to provide
-	 * a way to truncate a range of blocks (punch a hole) -
-	 * we should return failure right now.
-	 */
-	if (!inode->i_op->truncate_range)
-		return -ENOSYS;
-
-	mutex_lock(&inode->i_mutex);
-	down_write(&inode->i_alloc_sem);
-	unmap_mapping_range(mapping, offset, (end - offset), 1);
-	truncate_inode_pages_range(mapping, offset, end);
-	unmap_mapping_range(mapping, offset, (end - offset), 1);
-	inode->i_op->truncate_range(inode, offset, end);
-	up_write(&inode->i_alloc_sem);
-	mutex_unlock(&inode->i_mutex);
-
-	return 0;
-}
-
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
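
For context on the removal: vmtruncate_range() was the generic entry point for punching a hole in a file's pagecache and blocks, with madvise(MADV_REMOVE) as its main caller. Under i_mutex and i_alloc_sem it unmapped the range, truncated the pagecache, unmapped again, and finally called the filesystem's ->truncate_range() method, failing with -ENOSYS when the filesystem provided none. The double unmap_mapping_range() follows the same pattern as truncate_pagecache(): the first pass is mostly an efficiency measure so the truncation meets fewer mapped pages, while the second is needed for correctness, since private pages can be COWed into the range between the first unmap and the pagecache truncation. Assuming this removal belongs to the work that routed hole punching through the filesystems' fallocate() implementations, the userspace-visible replacement is fallocate(2) with FALLOC_FL_PUNCH_HOLE, which must be combined with FALLOC_FL_KEEP_SIZE. A minimal sketch (the file name and offsets are made up):

	#define _GNU_SOURCE		/* for fallocate() in glibc */
	#include <fcntl.h>
	#include <linux/falloc.h>	/* FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE */
	#include <stdio.h>

	int main(void)
	{
		int fd = open("file.bin", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Deallocate bytes [4096, 8192) without changing the file
		 * size; reads of the hole return zeroes afterwards. */
		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      4096, 4096) != 0) {
			perror("fallocate");
			return 1;
		}
		return 0;
	}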