Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   60
1 file changed, 35 insertions(+), 25 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 09e4b1be7b67..aecb7451965c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -974,10 +974,13 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
 	return addr;
 }
 
-#ifdef CONFIG_PREEMPT
+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT)
 # define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
 #else
-/* No preempt: go for improved straight-line efficiency */
+/*
+ * No preempt: go for improved straight-line efficiency
+ * on PREEMPT_RT this is not a critical latency-path.
+ */
 # define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
 #endif
 
@@ -1007,17 +1010,14 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-unsigned long unmap_vmas(struct mmu_gather **tlbp,
+unsigned long unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
 {
 	long zap_work = ZAP_BLOCK_SIZE;
-	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
-	int tlb_start_valid = 0;
 	unsigned long start = start_addr;
 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
-	int fullmm = (*tlbp)->fullmm;
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
@@ -1038,11 +1038,6 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 	untrack_pfn_vma(vma, 0, 0);
 
 	while (start != end) {
-		if (!tlb_start_valid) {
-			tlb_start = start;
-			tlb_start_valid = 1;
-		}
-
 		if (unlikely(is_vm_hugetlb_page(vma))) {
 			/*
 			 * It is undesirable to test vma->vm_file as it
@@ -1063,7 +1058,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 
 			start = end;
 		} else
-			start = unmap_page_range(*tlbp, vma,
+			start = unmap_page_range(tlb, vma,
 					start, end, &zap_work, details);
 
 		if (zap_work > 0) {
@@ -1071,19 +1066,13 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 			break;
 		}
 
-		tlb_finish_mmu(*tlbp, tlb_start, start);
-
 		if (need_resched() ||
 			(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
-			if (i_mmap_lock) {
-				*tlbp = NULL;
+			if (i_mmap_lock)
 				goto out;
-			}
 			cond_resched();
 		}
 
-		*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
-		tlb_start_valid = 0;
 		zap_work = ZAP_BLOCK_SIZE;
 	}
 }
@@ -1103,16 +1092,15 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct mmu_gather *tlb;
+	struct mmu_gather tlb;
 	unsigned long end = address + size;
 	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
-	tlb = tlb_gather_mmu(mm, 0);
+	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-	if (tlb)
-		tlb_finish_mmu(tlb, address, end);
+	tlb_finish_mmu(&tlb, address, end);
 	return end;
 }
 
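The zap_page_range() hunk above is the model for the new calling convention: the mmu_gather is now a caller-owned, on-stack structure that is initialised in place with tlb_gather_mmu(&tlb, ...), passed to unmap_vmas() by address, and torn down with a single tlb_finish_mmu() at the end, rather than being handed out by tlb_gather_mmu() and dropped/re-acquired around scheduling points. A minimal sketch of a caller following that convention; the helper name example_zap() is invented for illustration, only the gather/unmap/finish sequence is taken from the hunk:

static unsigned long example_zap(struct vm_area_struct *vma,
                                 unsigned long address, unsigned long size)
{
        struct mmu_gather tlb;                  /* on-stack, caller-owned */
        unsigned long end = address + size;
        unsigned long nr_accounted = 0;

        tlb_gather_mmu(&tlb, vma->vm_mm, 0);    /* initialise the batch in place */
        end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, NULL);
        tlb_finish_mmu(&tlb, address, end);     /* one flush at the very end */
        return end;
}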
@@ -2491,12 +2479,12 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 		return -ENOSYS;
 
 	mutex_lock(&inode->i_mutex);
-	down_write(&inode->i_alloc_sem);
+	anon_down_write(&inode->i_alloc_sem);
 	unmap_mapping_range(mapping, offset, (end - offset), 1);
 	truncate_inode_pages_range(mapping, offset, end);
 	unmap_mapping_range(mapping, offset, (end - offset), 1);
 	inode->i_op->truncate_range(inode, offset, end);
-	up_write(&inode->i_alloc_sem);
+	anon_up_write(&inode->i_alloc_sem);
 	mutex_unlock(&inode->i_mutex);
 
 	return 0;
@@ -3008,6 +2996,28 @@ unlock:
 	return 0;
 }
 
+void pagefault_disable(void)
+{
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+}
+EXPORT_SYMBOL(pagefault_enable);
+
 /*
  * By the time we get here, we already hold the mm semaphore
  */
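For reference, the pagefault_disable()/pagefault_enable() pair added above brackets code that touches user space but must not sleep in the fault handler; with the per-task counter set, a fault in such a section fails fast instead of blocking, which is how futex-style code reads user values while holding a lock. A minimal usage sketch, assuming the kernel's __copy_from_user_inatomic() helper; the wrapper name get_user_nofault() is invented for the example:

/* Illustrative only: non-sleeping read of a user-space u32. */
static int get_user_nofault(u32 *dst, u32 __user *src)
{
        unsigned long ret;

        pagefault_disable();            /* faults in here must not sleep */
        ret = __copy_from_user_inatomic(dst, src, sizeof(u32));
        pagefault_enable();

        return ret ? -EFAULT : 0;       /* non-zero means bytes were left uncopied */
}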