author     Jeff Garzik <jgarzik@pobox.com>  2005-11-29 03:51:15 -0500
committer  Jeff Garzik <jgarzik@pobox.com>  2005-11-29 03:51:15 -0500
commit     4ab432677957e0a064bba3690004d3244de8ad6c
tree       4ef5785f20be6f62bfc7fbe4f124e1751c9cabf5  /mm/memory.c
parent     30765528d156e58d41ed07cae8726c9105111b9d
parent     624f54be206adf970cd8eece16446b027913e533

Merge branch 'master'
Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   213
1 file changed, 142 insertions(+), 71 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2998cfc12f5b..9ab206b829a2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -333,9 +333,9 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
 }
 
 /*
- * This function is called to print an error when a pte in a
- * !VM_RESERVED region is found pointing to an invalid pfn (which
- * is an error.
+ * This function is called to print an error when a bad pte
+ * is found. For example, we might have a PFN-mapped pte in
+ * a region that doesn't allow it.
  *
  * The calling function must still handle the error.
  */
@@ -350,6 +350,59 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
 }
 
 /*
+ * This function gets the "struct page" associated with a pte.
+ *
+ * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
+ * will have each page table entry just pointing to a raw page frame
+ * number, and as far as the VM layer is concerned, those do not have
+ * pages associated with them - even if the PFN might point to memory
+ * that otherwise is perfectly fine and has a "struct page".
+ *
+ * The way we recognize those mappings is through the rules set up
+ * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
+ * and the vm_pgoff will point to the first PFN mapped: thus every
+ * page that is a raw mapping will always honor the rule
+ *
+ *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
+ *
+ * and if that isn't true, the page has been COW'ed (in which case it
+ * _does_ have a "struct page" associated with it even if it is in a
+ * VM_PFNMAP range).
+ */
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+        unsigned long pfn = pte_pfn(pte);
+
+        if (vma->vm_flags & VM_PFNMAP) {
+                unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
+                if (pfn == vma->vm_pgoff + off)
+                        return NULL;
+        }
+
+        /*
+         * Add some anal sanity checks for now. Eventually,
+         * we should just do "return pfn_to_page(pfn)", but
+         * in the meantime we check that we get a valid pfn,
+         * and that the resulting page looks ok.
+         *
+         * Remove this test eventually!
+         */
+        if (unlikely(!pfn_valid(pfn))) {
+                print_bad_pte(vma, pte, addr);
+                return NULL;
+        }
+
+        /*
+         * NOTE! We still have PageReserved() pages in the page
+         * tables.
+         *
+         * The PAGE_ZERO() pages and various VDSO mappings can
+         * cause them to exist.
+         */
+        return pfn_to_page(pfn);
+}
+
+/*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
  * covered by this vma.
@@ -363,7 +416,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         unsigned long vm_flags = vma->vm_flags;
         pte_t pte = *src_pte;
         struct page *page;
-        unsigned long pfn;
 
         /* pte contains position in swap or file, so copy. */
         if (unlikely(!pte_present(pte))) {
@@ -381,23 +433,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                 goto out_set_pte;
         }
 
-        /* If the region is VM_RESERVED, the mapping is not
-         * mapped via rmap - duplicate the pte as is.
-         */
-        if (vm_flags & VM_RESERVED)
-                goto out_set_pte;
-
-        pfn = pte_pfn(pte);
-        /* If the pte points outside of valid memory but
-         * the region is not VM_RESERVED, we have a problem.
-         */
-        if (unlikely(!pfn_valid(pfn))) {
-                print_bad_pte(vma, pte, addr);
-                goto out_set_pte; /* try to do something sane */
-        }
-
-        page = pfn_to_page(pfn);
-
         /*
          * If it's a COW mapping, write protect it both
          * in the parent and the child
@@ -414,9 +449,13 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         if (vm_flags & VM_SHARED)
                 pte = pte_mkclean(pte);
         pte = pte_mkold(pte);
-        get_page(page);
-        page_dup_rmap(page);
-        rss[!!PageAnon(page)]++;
+
+        page = vm_normal_page(vma, addr, pte);
+        if (page) {
+                get_page(page);
+                page_dup_rmap(page);
+                rss[!!PageAnon(page)]++;
+        }
 
 out_set_pte:
         set_pte_at(dst_mm, addr, dst_pte, pte);
@@ -528,7 +567,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
          * readonly mappings. The tradeoff is that copy_page_range is more
          * efficient than faulting.
          */
-        if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
+        if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
                 if (!vma->anon_vma)
                         return 0;
         }
@@ -568,17 +607,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                         continue;
                 }
                 if (pte_present(ptent)) {
-                        struct page *page = NULL;
+                        struct page *page;
 
                         (*zap_work) -= PAGE_SIZE;
 
-                        if (!(vma->vm_flags & VM_RESERVED)) {
-                                unsigned long pfn = pte_pfn(ptent);
-                                if (unlikely(!pfn_valid(pfn)))
-                                        print_bad_pte(vma, ptent, addr);
-                                else
-                                        page = pfn_to_page(pfn);
-                        }
+                        page = vm_normal_page(vma, addr, ptent);
                         if (unlikely(details) && page) {
                                 /*
                                  * unmap_shared_mapping_pages() wants to
@@ -834,7 +867,7 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 /*
  * Do a quick page-table lookup for a single page.
  */
-struct page *follow_page(struct mm_struct *mm, unsigned long address,
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                         unsigned int flags)
 {
         pgd_t *pgd;
@@ -842,8 +875,8 @@ struct page *follow_page(struct mm_struct *mm, unsigned long address,
         pmd_t *pmd;
         pte_t *ptep, pte;
         spinlock_t *ptl;
-        unsigned long pfn;
         struct page *page;
+        struct mm_struct *mm = vma->vm_mm;
 
         page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
         if (!IS_ERR(page)) {
@@ -879,11 +912,10 @@ struct page *follow_page(struct mm_struct *mm, unsigned long address,
                 goto unlock;
         if ((flags & FOLL_WRITE) && !pte_write(pte))
                 goto unlock;
-        pfn = pte_pfn(pte);
-        if (!pfn_valid(pfn))
+        page = vm_normal_page(vma, address, pte);
+        if (unlikely(!page))
                 goto unlock;
 
-        page = pfn_to_page(pfn);
         if (flags & FOLL_GET)
                 get_page(page);
         if (flags & FOLL_TOUCH) {
@@ -956,8 +988,10 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 return i ? : -EFAULT;
                         }
                         if (pages) {
-                                pages[i] = pte_page(*pte);
-                                get_page(pages[i]);
+                                struct page *page = vm_normal_page(vma, start, *pte);
+                                pages[i] = page;
+                                if (page)
+                                        get_page(page);
                         }
                         pte_unmap(pte);
                         if (vmas)
@@ -968,7 +1002,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                         continue;
                 }
 
-                if (!vma || (vma->vm_flags & (VM_IO | VM_RESERVED))
+                if (!vma || (vma->vm_flags & VM_IO)
                                 || !(vm_flags & vma->vm_flags))
                         return i ? : -EFAULT;
 
@@ -992,7 +1026,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                         foll_flags |= FOLL_WRITE;
 
                 cond_resched();
-                while (!(page = follow_page(mm, start, foll_flags))) {
+                while (!(page = follow_page(vma, start, foll_flags))) {
                         int ret;
                         ret = __handle_mm_fault(mm, vma, start,
                                         foll_flags & FOLL_WRITE);
@@ -1191,10 +1225,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
          * rest of the world about it:
          *   VM_IO tells people not to look at these pages
          *      (accesses can have side effects).
-         *   VM_RESERVED tells the core MM not to "manage" these pages
-         *      (e.g. refcount, mapcount, try to swap them out).
+         *   VM_RESERVED is specified all over the place, because
+         *      in 2.4 it kept swapout's vma scan off this vma; but
+         *      in 2.6 the LRU scan won't even find its pages, so this
+         *      flag means no more than count its pages in reserved_vm,
+         *      and omit it from core dump, even when VM_IO turned off.
+         *   VM_PFNMAP tells the core MM that the base pages are just
+         *      raw PFN mappings, and do not have a "struct page" associated
+         *      with them.
          */
-        vma->vm_flags |= VM_IO | VM_RESERVED;
+        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+        vma->vm_pgoff = pfn;
 
         BUG_ON(addr >= end);
         pfn -= addr >> PAGE_SHIFT;
@@ -1249,6 +1290,26 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
         return pte;
 }
 
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+{
+        /*
+         * If the source page was a PFN mapping, we don't have
+         * a "struct page" for it. We do a best-effort copy by
+         * just copying from the original user address. If that
+         * fails, we just zero-fill it. Live with it.
+         */
+        if (unlikely(!src)) {
+                void *kaddr = kmap_atomic(dst, KM_USER0);
+                unsigned long left = __copy_from_user_inatomic(kaddr, (void __user *)va, PAGE_SIZE);
+                if (left)
+                        memset(kaddr, 0, PAGE_SIZE);
+                kunmap_atomic(kaddr, KM_USER0);
+                return;
+
+        }
+        copy_user_highpage(dst, src, va);
+}
+
 /*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
@@ -1271,22 +1332,14 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 unsigned long address, pte_t *page_table, pmd_t *pmd,
                 spinlock_t *ptl, pte_t orig_pte)
 {
-        struct page *old_page, *new_page;
-        unsigned long pfn = pte_pfn(orig_pte);
+        struct page *old_page, *src_page, *new_page;
         pte_t entry;
         int ret = VM_FAULT_MINOR;
 
-        BUG_ON(vma->vm_flags & VM_RESERVED);
-
-        if (unlikely(!pfn_valid(pfn))) {
-                /*
-                 * Page table corrupted: show pte and kill process.
-                 */
-                print_bad_pte(vma, orig_pte, address);
-                ret = VM_FAULT_OOM;
-                goto unlock;
-        }
-        old_page = pfn_to_page(pfn);
+        old_page = vm_normal_page(vma, address, orig_pte);
+        src_page = old_page;
+        if (!old_page)
+                goto gotten;
 
         if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
                 int reuse = can_share_swap_page(old_page);
@@ -1307,11 +1360,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
          * Ok, we need to copy. Oh, well..
          */
         page_cache_get(old_page);
+gotten:
         pte_unmap_unlock(page_table, ptl);
 
         if (unlikely(anon_vma_prepare(vma)))
                 goto oom;
-        if (old_page == ZERO_PAGE(address)) {
+        if (src_page == ZERO_PAGE(address)) {
                 new_page = alloc_zeroed_user_highpage(vma, address);
                 if (!new_page)
                         goto oom;
@@ -1319,7 +1373,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
                 if (!new_page)
                         goto oom;
-                copy_user_highpage(new_page, old_page, address);
+                cow_user_page(new_page, src_page, address);
         }
 
         /*
@@ -1327,11 +1381,14 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
          */
         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
         if (likely(pte_same(*page_table, orig_pte))) {
-                page_remove_rmap(old_page);
-                if (!PageAnon(old_page)) {
+                if (old_page) {
+                        page_remove_rmap(old_page);
+                        if (!PageAnon(old_page)) {
+                                dec_mm_counter(mm, file_rss);
+                                inc_mm_counter(mm, anon_rss);
+                        }
+                } else
                         inc_mm_counter(mm, anon_rss);
-                        dec_mm_counter(mm, file_rss);
-                }
                 flush_cache_page(vma, address, pfn);
                 entry = mk_pte(new_page, vma->vm_page_prot);
                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -1345,13 +1402,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 new_page = old_page;
                 ret |= VM_FAULT_WRITE;
         }
-        page_cache_release(new_page);
-        page_cache_release(old_page);
+        if (new_page)
+                page_cache_release(new_page);
+        if (old_page)
+                page_cache_release(old_page);
 unlock:
         pte_unmap_unlock(page_table, ptl);
         return ret;
 oom:
-        page_cache_release(old_page);
+        if (old_page)
+                page_cache_release(old_page);
         return VM_FAULT_OOM;
 }
 
@@ -1849,7 +1909,6 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
         int anon = 0;
 
         pte_unmap(page_table);
-
         if (vma->vm_file) {
                 mapping = vma->vm_file->f_mapping;
                 sequence = mapping->truncate_count;
@@ -1882,7 +1941,7 @@ retry:
                 page = alloc_page_vma(GFP_HIGHUSER, vma, address);
                 if (!page)
                         goto oom;
-                copy_user_highpage(page, new_page, address);
+                cow_user_page(page, new_page, address);
                 page_cache_release(new_page);
                 new_page = page;
                 anon = 1;
@@ -1924,7 +1983,7 @@ retry:
                 inc_mm_counter(mm, anon_rss);
                 lru_cache_add_active(new_page);
                 page_add_anon_rmap(new_page, vma, address);
-        } else if (!(vma->vm_flags & VM_RESERVED)) {
+        } else {
                 inc_mm_counter(mm, file_rss);
                 page_add_file_rmap(new_page);
         }
@@ -2101,6 +2160,12 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
         spin_unlock(&mm->page_table_lock);
         return 0;
 }
+#else
+/* Workaround for gcc 2.96 */
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+        return 0;
+}
 #endif /* __PAGETABLE_PUD_FOLDED */
 
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -2129,6 +2194,12 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
         spin_unlock(&mm->page_table_lock);
         return 0;
 }
+#else
+/* Workaround for gcc 2.96 */
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+        return 0;
+}
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 int make_pages_present(unsigned long addr, unsigned long end)
@@ -2203,7 +2274,7 @@ static int __init gate_vma_init(void)
         gate_vma.vm_start = FIXADDR_USER_START;
         gate_vma.vm_end = FIXADDR_USER_END;
         gate_vma.vm_page_prot = PAGE_READONLY;
-        gate_vma.vm_flags = VM_RESERVED;
+        gate_vma.vm_flags = 0;
         return 0;
 }
 __initcall(gate_vma_init);
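
The heart of the vm_normal_page() logic introduced above is one arithmetic rule: remap_pfn_range() now records the first mapped PFN in vma->vm_pgoff, so inside a VM_PFNMAP vma a pte whose pfn equals vm_pgoff plus the page offset of the address within the vma is a raw PFN mapping with no "struct page", while any mismatch means the page has been COW'ed. The stand-alone sketch below simply restates that rule outside the kernel as an illustration; the names (fake_vma, pte_is_raw_pfn_mapping) and the sample values are invented for the example and are not part of the patch.

/* Illustration only -- not kernel code. Mirrors the linear-mapping rule
 * documented in the vm_normal_page() comment in the diff above. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct fake_vma {                 /* stand-in for the two vma fields the rule uses */
        unsigned long vm_start;   /* start address of the mapping */
        unsigned long vm_pgoff;   /* first PFN mapped, as remap_pfn_range() now sets it */
};

/* True if a pte with this pfn, at this address, is a raw PFN mapping;
 * false means the page was COW'ed and does have a struct page. */
static bool pte_is_raw_pfn_mapping(const struct fake_vma *vma,
                                   unsigned long addr, unsigned long pfn)
{
        unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
        return pfn == vma->vm_pgoff + off;
}

int main(void)
{
        struct fake_vma vma = { .vm_start = 0x40000000UL, .vm_pgoff = 0x9000UL };
        unsigned long addr = vma.vm_start + 3 * (1UL << PAGE_SHIFT);

        /* The linear rule predicts pfn 0x9003 for this address. */
        printf("pfn 0x9003 at vm_start + 3 pages: raw mapping? %d\n",
               pte_is_raw_pfn_mapping(&vma, addr, 0x9003UL));
        /* Any other pfn at the same address must be a COW'ed page. */
        printf("pfn 0x12345 at vm_start + 3 pages: raw mapping? %d\n",
               pte_is_raw_pfn_mapping(&vma, addr, 0x12345UL));
        return 0;
}

This is the same test that makes vm_normal_page() return NULL for raw PFN ptes, which in turn is what sends do_wp_page() down the new "gotten" path and lets cow_user_page() fall back to copying from the user address when there is no source struct page.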