Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 30
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index cfce5f1f30f2..ece04963158e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -334,7 +334,7 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
 
 /*
  * This function is called to print an error when a pte in a
- * !VM_RESERVED region is found pointing to an invalid pfn (which
+ * !VM_UNPAGED region is found pointing to an invalid pfn (which
  * is an error.
  *
  * The calling function must still handle the error.
@@ -381,15 +381,15 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		goto out_set_pte;
 	}
 
-	/* If the region is VM_RESERVED, the mapping is not
+	/* If the region is VM_UNPAGED, the mapping is not
 	 * mapped via rmap - duplicate the pte as is.
 	 */
-	if (vm_flags & VM_RESERVED)
+	if (vm_flags & VM_UNPAGED)
 		goto out_set_pte;
 
 	pfn = pte_pfn(pte);
 	/* If the pte points outside of valid memory but
-	 * the region is not VM_RESERVED, we have a problem.
+	 * the region is not VM_UNPAGED, we have a problem.
 	 */
 	if (unlikely(!pfn_valid(pfn))) {
 		print_bad_pte(vma, pte, addr);
@@ -528,7 +528,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_UNPAGED))) {
 		if (!vma->anon_vma)
 			return 0;
 	}
@@ -572,7 +572,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 
 		(*zap_work) -= PAGE_SIZE;
 
-		if (!(vma->vm_flags & VM_RESERVED)) {
+		if (!(vma->vm_flags & VM_UNPAGED)) {
 			unsigned long pfn = pte_pfn(ptent);
 			if (unlikely(!pfn_valid(pfn)))
 				print_bad_pte(vma, ptent, addr);
@@ -1191,10 +1191,16 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * rest of the world about it:
 	 *   VM_IO tells people not to look at these pages
 	 *	(accesses can have side effects).
-	 *   VM_RESERVED tells the core MM not to "manage" these pages
-	 *	(e.g. refcount, mapcount, try to swap them out).
+	 *   VM_RESERVED is specified all over the place, because
+	 *	in 2.4 it kept swapout's vma scan off this vma; but
+	 *	in 2.6 the LRU scan won't even find its pages, so this
+	 *	flag means no more than count its pages in reserved_vm,
+	 *	and omit it from core dump, even when VM_IO turned off.
+	 *   VM_UNPAGED tells the core MM not to "manage" these pages
+	 *	(e.g. refcount, mapcount, try to swap them out): in
+	 *	particular, zap_pte_range does not try to free them.
 	 */
-	vma->vm_flags |= VM_IO | VM_RESERVED;
+	vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED;
 
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
@@ -1276,7 +1282,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t entry;
 	int ret = VM_FAULT_MINOR;
 
-	BUG_ON(vma->vm_flags & VM_RESERVED);
+	BUG_ON(vma->vm_flags & VM_UNPAGED);
 
 	if (unlikely(!pfn_valid(pfn))) {
 		/*
@@ -1924,7 +1930,7 @@ retry:
 		inc_mm_counter(mm, anon_rss);
 		lru_cache_add_active(new_page);
 		page_add_anon_rmap(new_page, vma, address);
-	} else if (!(vma->vm_flags & VM_RESERVED)) {
+	} else if (!(vma->vm_flags & VM_UNPAGED)) {
 		inc_mm_counter(mm, file_rss);
 		page_add_file_rmap(new_page);
 	}
@@ -2203,7 +2209,7 @@ static int __init gate_vma_init(void)
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_page_prot = PAGE_READONLY;
-	gate_vma.vm_flags = VM_RESERVED;
+	gate_vma.vm_flags = 0;
 	return 0;
 }
 __initcall(gate_vma_init);
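
Illustrative note, not part of the patch: the comment block added to remap_pfn_range() above describes the flag semantics a driver mapping inherits. The C sketch below is a minimal, hypothetical mmap handler showing where those flags come from; the names example_mmap and example_phys_base are made up for illustration, and only remap_pfn_range() with its standard signature is taken from the kernel.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical physical base of a one-page device region, set at probe time. */
static unsigned long example_phys_base;

/*
 * Minimal mmap handler sketch: map one page of device memory into userspace.
 * remap_pfn_range() itself marks the vma VM_IO | VM_RESERVED | VM_UNPAGED,
 * so the core MM will not refcount, rmap or swap these pages, and
 * zap_pte_range() will not try to free them when the mapping is torn down.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > PAGE_SIZE)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start,
			       example_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}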