Diffstat (limited to 'mm/filemap.c')
 mm/filemap.c | 90 +++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 74 insertions(+), 16 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index a603c4d7d3c9..d6df3bacb0fb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -110,11 +110,17 @@
 static void page_cache_tree_delete(struct address_space *mapping,
 				   struct page *page, void *shadow)
 {
-	if (shadow) {
-		void **slot;
+	struct radix_tree_node *node;
+	unsigned long index;
+	unsigned int offset;
+	unsigned int tag;
+	void **slot;
 
-		slot = radix_tree_lookup_slot(&mapping->page_tree, page->index);
-		radix_tree_replace_slot(slot, shadow);
+	VM_BUG_ON(!PageLocked(page));
+
+	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
+
+	if (shadow) {
 		mapping->nrshadows++;
 		/*
 		 * Make sure the nrshadows update is committed before
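
The lookup above switches from radix_tree_lookup_slot(), which returns only a
slot pointer, to __radix_tree_lookup(), which additionally reports the leaf
node containing that slot so the function can maintain per-node counters. As a
sketch of the interface assumed here (introduced by the radix-tree patches
earlier in this series; treat the exact prototypes as reference, not part of
this hunk):

	void *__radix_tree_lookup(struct radix_tree_root *root,
				  unsigned long index,
				  struct radix_tree_node **nodep,
				  void ***slotp);
	int __radix_tree_create(struct radix_tree_root *root,
				unsigned long index,
				struct radix_tree_node **nodep,
				void ***slotp);

When the entry lives in the root's single direct slot, *nodep is set to NULL;
that is the case the `if (!node)` branch in the next hunk handles.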
@@ -123,9 +129,45 @@ static void page_cache_tree_delete(struct address_space *mapping,
 		 * same time and miss a shadow entry.
 		 */
 		smp_wmb();
-	} else
-		radix_tree_delete(&mapping->page_tree, page->index);
+	}
 	mapping->nrpages--;
+
+	if (!node) {
+		/* Clear direct pointer tags in root node */
+		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
+		radix_tree_replace_slot(slot, shadow);
+		return;
+	}
+
+	/* Clear tree tags for the removed page */
+	index = page->index;
+	offset = index & RADIX_TREE_MAP_MASK;
+	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
+		if (test_bit(offset, node->tags[tag]))
+			radix_tree_tag_clear(&mapping->page_tree, index, tag);
+	}
+
+	/* Delete page, swap shadow entry */
+	radix_tree_replace_slot(slot, shadow);
+	workingset_node_pages_dec(node);
+	if (shadow)
+		workingset_node_shadows_inc(node);
+	else
+		if (__radix_tree_delete_node(&mapping->page_tree, node))
+			return;
+
+	/*
+	 * Track node that only contains shadow entries.
+	 *
+	 * Avoid acquiring the list_lru lock if already tracked. The
+	 * list_empty() test is safe as node->private_list is
+	 * protected by mapping->tree_lock.
+	 */
+	if (!workingset_node_pages(node) &&
+	    list_empty(&node->private_list)) {
+		node->private_data = mapping;
+		list_lru_add(&workingset_shadow_nodes, &node->private_list);
+	}
 }
 
 /*
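
The tag-clearing loop works because, for a leaf node, the low
RADIX_TREE_MAP_SHIFT bits of the page index select the slot, and the same
offset indexes the node's per-tag bitmaps. A minimal standalone sketch of that
arithmetic, assuming the default RADIX_TREE_MAP_SHIFT of 6 (a
CONFIG_BASE_SMALL build uses 4):

	#include <stdio.h>

	#define RADIX_TREE_MAP_SHIFT	6
	#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
	#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE - 1)

	int main(void)
	{
		unsigned long index = 1234;	/* hypothetical page->index */
		unsigned int offset = index & RADIX_TREE_MAP_MASK;

		/* 1234 = 19 * 64 + 18: the page sits in slot 18 of its node */
		printf("index %lu -> slot %u of %lu\n",
		       index, offset, RADIX_TREE_MAP_SIZE);
		return 0;
	}

Clearing through radix_tree_tag_clear() rather than poking the bitmap directly
keeps the corresponding tag bits in the parent nodes consistent.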
@@ -471,27 +513,43 @@ EXPORT_SYMBOL_GPL(replace_page_cache_page);
 static int page_cache_tree_insert(struct address_space *mapping,
 				  struct page *page, void **shadowp)
 {
+	struct radix_tree_node *node;
 	void **slot;
 	int error;
 
-	slot = radix_tree_lookup_slot(&mapping->page_tree, page->index);
-	if (slot) {
+	error = __radix_tree_create(&mapping->page_tree, page->index,
+				    &node, &slot);
+	if (error)
+		return error;
+	if (*slot) {
 		void *p;
 
 		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
 		if (!radix_tree_exceptional_entry(p))
 			return -EEXIST;
-		radix_tree_replace_slot(slot, page);
-		mapping->nrshadows--;
-		mapping->nrpages++;
 		if (shadowp)
 			*shadowp = p;
-		return 0;
+		mapping->nrshadows--;
+		if (node)
+			workingset_node_shadows_dec(node);
 	}
-	error = radix_tree_insert(&mapping->page_tree, page->index, page);
-	if (!error)
-		mapping->nrpages++;
-	return error;
+	radix_tree_replace_slot(slot, page);
+	mapping->nrpages++;
+	if (node) {
+		workingset_node_pages_inc(node);
+		/*
+		 * Don't track node that contains actual pages.
+		 *
+		 * Avoid acquiring the list_lru lock if already
+		 * untracked. The list_empty() test is safe as
+		 * node->private_list is protected by
+		 * mapping->tree_lock.
+		 */
+		if (!list_empty(&node->private_list))
+			list_lru_del(&workingset_shadow_nodes,
+				     &node->private_list);
+	}
+	return 0;
 }
 
 static int __add_to_page_cache_locked(struct page *page,
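
Taken together, the two functions keep per-node counts of pages and shadow
entries so that nodes containing only shadows can be put on a list_lru for the
workingset shrinker, and taken back off as soon as they hold a real page
again. A toy userspace model of that bookkeeping, with all names (toy_node,
toy_delete, toy_insert) hypothetical and the radix tree itself elided:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for struct radix_tree_node's counters and private_list. */
	struct toy_node {
		unsigned int pages;	/* actual page pointers in the node */
		unsigned int shadows;	/* shadow (eviction) entries */
		bool tracked;		/* on the shadow-node reclaim list? */
	};

	/* Mirror of page_cache_tree_delete()'s accounting. */
	static void toy_delete(struct toy_node *node, bool leave_shadow)
	{
		node->pages--;
		if (leave_shadow) {
			node->shadows++;
		} else if (!node->pages && !node->shadows) {
			printf("node empty: freed outright\n");
			return;
		}
		/* Track nodes that now hold nothing but shadow entries. */
		if (!node->pages && !node->tracked) {
			node->tracked = true;
			printf("only shadows left: node added to reclaim list\n");
		}
	}

	/* Mirror of page_cache_tree_insert()'s accounting. */
	static void toy_insert(struct toy_node *node, bool replaces_shadow)
	{
		if (replaces_shadow)
			node->shadows--;
		node->pages++;
		/* A node holding a real page must not stay on the list. */
		if (node->tracked) {
			node->tracked = false;
			printf("page refaulted: node removed from reclaim list\n");
		}
	}

	int main(void)
	{
		struct toy_node node = { .pages = 1 };

		toy_delete(&node, true);	/* evict: page becomes a shadow */
		toy_insert(&node, true);	/* refault: shadow becomes a page */
		return 0;
	}

In the real code these transitions all happen under mapping->tree_lock, which
is what makes the unlocked list_empty() checks in both functions safe.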