author		Johannes Weiner <hannes@cmpxchg.org>	2014-04-03 17:47:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-03 19:21:00 -0400
commit		0cd6144aadd2afd19d1aca880153530c52957604 (patch)
tree		529df1dc75d6a58eff057dde5feb07cecf6ba527 /mm/truncate.c
parent		e7b563bb2a6f4d974208da46200784b9c5b5a47e (diff)

mm + fs: prepare for non-page entries in page cache radix trees

shmem mappings already contain exceptional entries where swap slot
information is remembered.

To be able to store eviction information for regular page cache, prepare
every site dealing with the radix trees directly to handle entries other
than pages.

The common lookup functions will filter out non-page entries and return
NULL for page cache holes, just as before.  But provide a raw version of
the API which returns non-page entries as well, and switch shmem over to
use it.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Bob Liu <bob.liu@oracle.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Luigi Semenzato <semenzato@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Metin Doslu <metin@citusdata.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Ozgun Erdogan <ozgun@citusdata.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin <klamm@yandex-team.ru>
Cc: Ryan Mallon <rmallon@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/truncate.c')
-rw-r--r--	mm/truncate.c	74
1 file changed, 61 insertions(+), 13 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c
index 353b683afd6e..2e84fe59190b 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -22,6 +22,22 @@
 #include <linux/cleancache.h>
 #include "internal.h"
 
+static void clear_exceptional_entry(struct address_space *mapping,
+				    pgoff_t index, void *entry)
+{
+	/* Handled by shmem itself */
+	if (shmem_mapping(mapping))
+		return;
+
+	spin_lock_irq(&mapping->tree_lock);
+	/*
+	 * Regular page slots are stabilized by the page lock even
+	 * without the tree itself locked. These unlocked entries
+	 * need verification under the tree lock.
+	 */
+	radix_tree_delete_item(&mapping->page_tree, index, entry);
+	spin_unlock_irq(&mapping->tree_lock);
+}
 
 /**
  * do_invalidatepage - invalidate part or all of a page
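A note on the radix_tree_delete_item() call in the hunk above: the entry was found by a lookup that ran without the tree lock, so by the time the lock is taken the slot may already hold a new page. radix_tree_delete_item(), added earlier in this series, therefore deletes the slot only if it still contains the expected item. A behavioral sketch under the tree lock follows; this is not the lib/radix-tree.c implementation, and in the real code the relationship is inverted, with radix_tree_delete() being the item == NULL wrapper.

void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	void *entry = radix_tree_lookup(root, index);

	/* Slot empty, or already reused for a different entry: leave it. */
	if (!entry || (item && entry != item))
		return NULL;

	return radix_tree_delete(root, index);	/* returns the deleted entry */
}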
@@ -208,6 +224,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	unsigned int partial_start; /* inclusive */
 	unsigned int partial_end; /* exclusive */
 	struct pagevec pvec;
+	pgoff_t indices[PAGEVEC_SIZE];
 	pgoff_t index;
 	int i;
 
@@ -238,17 +255,23 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
 	pagevec_init(&pvec, 0);
 	index = start;
-	while (index < end && pagevec_lookup(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
+	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE),
+			indices)) {
 		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 
 			/* We rely upon deletion not changing page->index */
-			index = page->index;
+			index = indices[i];
 			if (index >= end)
 				break;
 
+			if (radix_tree_exceptional_entry(page)) {
+				clear_exceptional_entry(mapping, index, page);
+				continue;
+			}
+
 			if (!trylock_page(page))
 				continue;
 			WARN_ON(page->index != index);
@@ -259,6 +282,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			truncate_inode_page(mapping, page);
 			unlock_page(page);
 		}
+		pagevec_remove_exceptionals(&pvec);
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
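The new pagevec_remove_exceptionals() call must run before pagevec_release(): the release path drops a page reference on every element of the vector, which would treat the tagged non-page values as struct page pointers. The helper itself lives in mm/swap.c (added by the same series, not shown in this diff); a sketch of what it has to do, assuming the pagevec layout of this era:

void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	/* Compact the vector in place, keeping only real page pointers. */
	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}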
@@ -307,14 +331,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	index = start;
 	for ( ; ; ) {
 		cond_resched();
-		if (!pagevec_lookup(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
+		if (!pagevec_lookup_entries(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE),
+			indices)) {
 			if (index == start)
 				break;
 			index = start;
 			continue;
 		}
-		if (index == start && pvec.pages[0]->index >= end) {
+		if (index == start && indices[0] >= end) {
+			pagevec_remove_exceptionals(&pvec);
 			pagevec_release(&pvec);
 			break;
 		}
@@ -323,16 +349,22 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			struct page *page = pvec.pages[i];
 
 			/* We rely upon deletion not changing page->index */
-			index = page->index;
+			index = indices[i];
 			if (index >= end)
 				break;
 
+			if (radix_tree_exceptional_entry(page)) {
+				clear_exceptional_entry(mapping, index, page);
+				continue;
+			}
+
 			lock_page(page);
 			WARN_ON(page->index != index);
 			wait_on_page_writeback(page);
 			truncate_inode_page(mapping, page);
 			unlock_page(page);
 		}
+		pagevec_remove_exceptionals(&pvec);
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		index++;
@@ -375,6 +407,7 @@ EXPORT_SYMBOL(truncate_inode_pages);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t end)
 {
+	pgoff_t indices[PAGEVEC_SIZE];
 	struct pagevec pvec;
 	pgoff_t index = start;
 	unsigned long ret;
@@ -390,17 +423,23 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 	 */
 
 	pagevec_init(&pvec, 0);
-	while (index <= end && pagevec_lookup(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+			indices)) {
 		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 
 			/* We rely upon deletion not changing page->index */
-			index = page->index;
+			index = indices[i];
 			if (index > end)
 				break;
 
+			if (radix_tree_exceptional_entry(page)) {
+				clear_exceptional_entry(mapping, index, page);
+				continue;
+			}
+
 			if (!trylock_page(page))
 				continue;
 			WARN_ON(page->index != index);
@@ -414,6 +453,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 				deactivate_page(page);
 			count += ret;
 		}
+		pagevec_remove_exceptionals(&pvec);
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
@@ -481,6 +521,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
 int invalidate_inode_pages2_range(struct address_space *mapping,
 		pgoff_t start, pgoff_t end)
 {
+	pgoff_t indices[PAGEVEC_SIZE];
 	struct pagevec pvec;
 	pgoff_t index;
 	int i;
@@ -491,17 +532,23 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	cleancache_invalidate_inode(mapping);
 	pagevec_init(&pvec, 0);
 	index = start;
-	while (index <= end && pagevec_lookup(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+			indices)) {
 		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 
 			/* We rely upon deletion not changing page->index */
-			index = page->index;
+			index = indices[i];
 			if (index > end)
 				break;
 
+			if (radix_tree_exceptional_entry(page)) {
+				clear_exceptional_entry(mapping, index, page);
+				continue;
+			}
+
 			lock_page(page);
 			WARN_ON(page->index != index);
 			if (page->mapping != mapping) {
@@ -539,6 +586,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				ret = ret2;
 			unlock_page(page);
 		}
+		pagevec_remove_exceptionals(&pvec);
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();