Diffstat (limited to 'mm/filemap.c')
-rw-r--r--   mm/filemap.c | 70
1 files changed, 70 insertions, 0 deletions
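
The hunk below adds replace_page_cache_page(), which atomically swaps one locked pagecache page for another: the new page takes over the radix tree slot and the pagecache reference, the old page's reference is dropped, and putting the new page on the LRU is left to the caller. As a rough, hypothetical sketch of that calling convention (the caller function and variable names are purely illustrative and not part of this patch; it assumes the new helper is declared in <linux/pagemap.h>):

#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/gfp.h>

/*
 * Illustrative caller only (not part of this patch): substitute a new
 * page for @old, which the caller has already locked and looked up in
 * the pagecache.  Error handling is kept minimal.
 */
static int example_swap_page(struct page *old)
{
	struct page *new;
	int err;

	new = alloc_page(GFP_KERNEL);		/* not yet in the pagecache */
	if (!new)
		return -ENOMEM;

	copy_highpage(new, old);		/* carry over the old contents */

	lock_page(new);				/* both pages must be locked */
	err = replace_page_cache_page(old, new, GFP_KERNEL);
	if (!err)
		lru_cache_add_file(new);	/* LRU placement is up to the caller */
	unlock_page(new);

	page_cache_release(new);		/* drop the allocation reference */
	return err;
}

On success the old page has been removed from the radix tree under mapping->tree_lock and its pagecache reference dropped, after the address_space's ->freepage callback (if any) has run; the memcg charge is carried over via the existing migration hooks, as the in-code comment in the patch notes.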
diff --git a/mm/filemap.c b/mm/filemap.c
index 312b6eb7843..c1459f2cdb5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -387,6 +387,76 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 EXPORT_SYMBOL(filemap_write_and_wait_range);
 
 /**
+ * replace_page_cache_page - replace a pagecache page with a new one
+ * @old:	page to be replaced
+ * @new:	page to replace with
+ * @gfp_mask:	allocation mode
+ *
+ * This function replaces a page in the pagecache with a new one.  On
+ * success it acquires the pagecache reference for the new page and
+ * drops it for the old page.  Both the old and new pages must be
+ * locked.  This function does not add the new page to the LRU, the
+ * caller must do that.
+ *
+ * The remove + add is atomic.  The only way this function can fail is
+ * memory allocation failure.
+ */
+int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+{
+	int error;
+	struct mem_cgroup *memcg = NULL;
+
+	VM_BUG_ON(!PageLocked(old));
+	VM_BUG_ON(!PageLocked(new));
+	VM_BUG_ON(new->mapping);
+
+	/*
+	 * This is not page migration, but prepare_migration and
+	 * end_migration does enough work for charge replacement.
+	 *
+	 * In the longer term we probably want a specialized function
+	 * for moving the charge from old to new in a more efficient
+	 * manner.
+	 */
+	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
+	if (error)
+		return error;
+
+	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	if (!error) {
+		struct address_space *mapping = old->mapping;
+		void (*freepage)(struct page *);
+
+		pgoff_t offset = old->index;
+		freepage = mapping->a_ops->freepage;
+
+		page_cache_get(new);
+		new->mapping = mapping;
+		new->index = offset;
+
+		spin_lock_irq(&mapping->tree_lock);
+		__remove_from_page_cache(old);
+		error = radix_tree_insert(&mapping->page_tree, offset, new);
+		BUG_ON(error);
+		mapping->nrpages++;
+		__inc_zone_page_state(new, NR_FILE_PAGES);
+		if (PageSwapBacked(new))
+			__inc_zone_page_state(new, NR_SHMEM);
+		spin_unlock_irq(&mapping->tree_lock);
+		radix_tree_preload_end();
+		if (freepage)
+			freepage(old);
+		page_cache_release(old);
+		mem_cgroup_end_migration(memcg, old, new, true);
+	} else {
+		mem_cgroup_end_migration(memcg, old, new, false);
+	}
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(replace_page_cache_page);
+
+/**
  * add_to_page_cache_locked - add a locked page to the pagecache
  * @page:	page to add
  * @mapping:	the page's address_space