diff options
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r-- | mm/swap_state.c | 68 |
1 file changed, 56 insertions(+), 12 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c index 35d7e0ee1c77..3863acd6189c 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/blkdev.h> | 17 | #include <linux/blkdev.h> |
18 | #include <linux/pagevec.h> | 18 | #include <linux/pagevec.h> |
19 | #include <linux/migrate.h> | 19 | #include <linux/migrate.h> |
20 | #include <linux/vmalloc.h> | ||
20 | 21 | ||
21 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
22 | 23 | ||
@@ -32,15 +33,8 @@ static const struct address_space_operations swap_aops = { | |||
32 | #endif | 33 | #endif |
33 | }; | 34 | }; |
34 | 35 | ||
35 | struct address_space swapper_spaces[MAX_SWAPFILES] = { | 36 | struct address_space *swapper_spaces[MAX_SWAPFILES]; |
36 | [0 ... MAX_SWAPFILES - 1] = { | 37 | static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; |
37 | .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), | ||
38 | .i_mmap_writable = ATOMIC_INIT(0), | ||
39 | .a_ops = &swap_aops, | ||
40 | /* swap cache doesn't use writeback related tags */ | ||
41 | .flags = 1 << AS_NO_WRITEBACK_TAGS, | ||
42 | } | ||
43 | }; | ||
44 | 38 | ||
45 | #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0) | 39 | #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0) |
46 | 40 | ||
@@ -53,11 +47,26 @@ static struct { | |||
53 | 47 | ||
54 | unsigned long total_swapcache_pages(void) | 48 | unsigned long total_swapcache_pages(void) |
55 | { | 49 | { |
56 | int i; | 50 | unsigned int i, j, nr; |
57 | unsigned long ret = 0; | 51 | unsigned long ret = 0; |
52 | struct address_space *spaces; | ||
58 | 53 | ||
59 | for (i = 0; i < MAX_SWAPFILES; i++) | 54 | rcu_read_lock(); |
60 | ret += swapper_spaces[i].nrpages; | 55 | for (i = 0; i < MAX_SWAPFILES; i++) { |
56 | /* | ||
57 | * The corresponding entries in nr_swapper_spaces and | ||
58 | * swapper_spaces will be reused only after at least | ||
59 | * one grace period. So it is impossible for them | ||
60 | * belongs to different usage. | ||
61 | */ | ||
62 | nr = nr_swapper_spaces[i]; | ||
63 | spaces = rcu_dereference(swapper_spaces[i]); | ||
64 | if (!nr || !spaces) | ||
65 | continue; | ||
66 | for (j = 0; j < nr; j++) | ||
67 | ret += spaces[j].nrpages; | ||
68 | } | ||
69 | rcu_read_unlock(); | ||
61 | return ret; | 70 | return ret; |
62 | } | 71 | } |
63 | 72 | ||
@@ -505,3 +514,38 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, | |||
505 | skip: | 514 | skip: |
506 | return read_swap_cache_async(entry, gfp_mask, vma, addr); | 515 | return read_swap_cache_async(entry, gfp_mask, vma, addr); |
507 | } | 516 | } |
517 | |||
518 | int init_swap_address_space(unsigned int type, unsigned long nr_pages) | ||
519 | { | ||
520 | struct address_space *spaces, *space; | ||
521 | unsigned int i, nr; | ||
522 | |||
523 | nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES); | ||
524 | spaces = vzalloc(sizeof(struct address_space) * nr); | ||
525 | if (!spaces) | ||
526 | return -ENOMEM; | ||
527 | for (i = 0; i < nr; i++) { | ||
528 | space = spaces + i; | ||
529 | INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN); | ||
530 | atomic_set(&space->i_mmap_writable, 0); | ||
531 | space->a_ops = &swap_aops; | ||
532 | /* swap cache doesn't use writeback related tags */ | ||
533 | mapping_set_no_writeback_tags(space); | ||
534 | spin_lock_init(&space->tree_lock); | ||
535 | } | ||
536 | nr_swapper_spaces[type] = nr; | ||
537 | rcu_assign_pointer(swapper_spaces[type], spaces); | ||
538 | |||
539 | return 0; | ||
540 | } | ||
541 | |||
542 | void exit_swap_address_space(unsigned int type) | ||
543 | { | ||
544 | struct address_space *spaces; | ||
545 | |||
546 | spaces = swapper_spaces[type]; | ||
547 | nr_swapper_spaces[type] = 0; | ||
548 | rcu_assign_pointer(swapper_spaces[type], NULL); | ||
549 | synchronize_rcu(); | ||
550 | kvfree(spaces); | ||
551 | } | ||