author	Hugh Dickins <hugh.dickins@tiscali.co.uk>	2009-12-14 20:58:44 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 11:53:15 -0500
commit	253d553ba75ab26b3e9e2f70cbf6fbf0813f7e86 (patch)
tree	57c448fad45820c0eb984297fbef120b689381a0
parent	73c34b6accc8427584f5d7db4d5acb230ed8c912 (diff)
swap_info: SWAP_HAS_CACHE cleanups
Though swap_count() is useful, I'm finding that swap_has_cache() and
encode_swapmap() obscure what happens in the swap_map entry, just at
those points where I need to understand it.  Remove them, and pass
more usable "usage" values to scan_swap_map(), swap_entry_free() and
__swap_duplicate(), instead of the SWAP_MAP and SWAP_CACHE enum.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
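For illustration, here is a minimal user-space sketch (not kernel code) of the swap_map encoding the message describes: each entry packs a reference count in the low bits with the SWAP_HAS_CACHE flag in the top bit, and the new "usage" argument is either 1 (a swap-user reference) or SWAP_HAS_CACHE (the swap-cache reference).  The constants and swap_count() mirror the patched code; the scenario walked through in main() is hypothetical.

/*
 * Illustrative user-space sketch of the swap_map encoding; not kernel
 * code.  Constants mirror include/linux/swap.h after this patch.
 */
#include <assert.h>
#include <stdio.h>

#define SWAP_MAP_MAX	0x7ffe		/* highest encodable reference count */
#define SWAP_MAP_BAD	0x7fff		/* marks a bad slot */
#define SWAP_HAS_CACHE	0x8000		/* entry has a page in swap cache */

/* Reference count held in an entry, with the cache flag masked off. */
static unsigned short swap_count(unsigned short ent)
{
	return ent & ~SWAP_HAS_CACHE;
}

int main(void)
{
	unsigned short ent, count, has_cache;

	/* Allocation for the swap cache passes usage == SWAP_HAS_CACHE:
	 * the new slot starts with no swap-user references. */
	ent = SWAP_HAS_CACHE;
	assert(swap_count(ent) == 0 && (ent & SWAP_HAS_CACHE));

	/* swap_duplicate() passes usage == 1: add one user reference,
	 * preserving the cache flag, as the patched code does with
	 * "count | has_cache". */
	count = swap_count(ent);
	has_cache = ent & SWAP_HAS_CACHE;
	if (count < SWAP_MAP_MAX - 1)
		count++;
	ent = count | has_cache;

	/* prints: count=1 has_cache=1 */
	printf("count=%u has_cache=%d\n", (unsigned)swap_count(ent),
	       !!(ent & SWAP_HAS_CACHE));
	return 0;
}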
-rw-r--r--	include/linux/swap.h	2
-rw-r--r--	mm/swapfile.c	155
2 files changed, 65 insertions, 92 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 109dfe794237..c9d8870892b8 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -154,7 +154,7 @@ enum {
 #define SWAP_MAP_MAX	0x7ffe
 #define SWAP_MAP_BAD	0x7fff
 #define SWAP_HAS_CACHE	0x8000		/* There is a swap cache of entry. */
-#define SWAP_COUNT_MASK	(~SWAP_HAS_CACHE)
+
 /*
  * The in-memory structure used to track swap areas.
  */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index fa5f10b9c28b..52497490a7ca 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -53,30 +53,9 @@ static struct swap_info_struct *swap_info[MAX_SWAPFILES];
 
 static DEFINE_MUTEX(swapon_mutex);
 
-/* For reference count accounting in swap_map */
-/* enum for swap_map[] handling. internal use only */
-enum {
-	SWAP_MAP = 0,	/* ops for reference from swap users */
-	SWAP_CACHE,	/* ops for reference from swap cache */
-};
-
 static inline int swap_count(unsigned short ent)
 {
-	return ent & SWAP_COUNT_MASK;
-}
-
-static inline bool swap_has_cache(unsigned short ent)
-{
-	return !!(ent & SWAP_HAS_CACHE);
-}
-
-static inline unsigned short encode_swapmap(int count, bool has_cache)
-{
-	unsigned short ret = count;
-
-	if (has_cache)
-		return SWAP_HAS_CACHE | ret;
-	return ret;
+	return ent & ~SWAP_HAS_CACHE;
 }
 
 /* returns 1 if swap entry is freed */
@@ -224,7 +203,7 @@ static int wait_for_discard(void *word)
 #define LATENCY_LIMIT	256
 
 static inline unsigned long scan_swap_map(struct swap_info_struct *si,
-					  int cache)
+					  unsigned short usage)
 {
 	unsigned long offset;
 	unsigned long scan_base;
@@ -355,10 +334,7 @@ checks:
 		si->lowest_bit = si->max;
 		si->highest_bit = 0;
 	}
-	if (cache == SWAP_CACHE) /* at usual swap-out via vmscan.c */
-		si->swap_map[offset] = encode_swapmap(0, true);
-	else /* at suspend */
-		si->swap_map[offset] = encode_swapmap(1, false);
+	si->swap_map[offset] = usage;
 	si->cluster_next = offset + 1;
 	si->flags -= SWP_SCANNING;
 
@@ -483,7 +459,7 @@ swp_entry_t get_swap_page(void)
 
 		swap_list.next = next;
 		/* This is called for allocating swap entry for cache */
-		offset = scan_swap_map(si, SWAP_CACHE);
+		offset = scan_swap_map(si, SWAP_HAS_CACHE);
 		if (offset) {
 			spin_unlock(&swap_lock);
 			return swp_entry(type, offset);
@@ -508,7 +484,7 @@ swp_entry_t get_swap_page_of_type(int type)
 	if (si && (si->flags & SWP_WRITEOK)) {
 		nr_swap_pages--;
 		/* This is called for allocating swap entry, not cache */
-		offset = scan_swap_map(si, SWAP_MAP);
+		offset = scan_swap_map(si, 1);
 		if (offset) {
 			spin_unlock(&swap_lock);
 			return swp_entry(type, offset);
@@ -555,29 +531,31 @@ out:
 	return NULL;
 }
 
-static int swap_entry_free(struct swap_info_struct *p,
-			   swp_entry_t ent, int cache)
+static unsigned short swap_entry_free(struct swap_info_struct *p,
+				      swp_entry_t entry, unsigned short usage)
 {
-	unsigned long offset = swp_offset(ent);
-	int count = swap_count(p->swap_map[offset]);
-	bool has_cache;
+	unsigned long offset = swp_offset(entry);
+	unsigned short count;
+	unsigned short has_cache;
 
-	has_cache = swap_has_cache(p->swap_map[offset]);
+	count = p->swap_map[offset];
+	has_cache = count & SWAP_HAS_CACHE;
+	count &= ~SWAP_HAS_CACHE;
 
-	if (cache == SWAP_MAP) {	/* dropping usage count of swap */
-		if (count < SWAP_MAP_MAX) {
-			count--;
-			p->swap_map[offset] = encode_swapmap(count, has_cache);
-		}
-	} else { /* dropping swap cache flag */
+	if (usage == SWAP_HAS_CACHE) {
 		VM_BUG_ON(!has_cache);
-		p->swap_map[offset] = encode_swapmap(count, false);
+		has_cache = 0;
+	} else if (count < SWAP_MAP_MAX)
+		count--;
+
+	if (!count)
+		mem_cgroup_uncharge_swap(entry);
+
+	usage = count | has_cache;
+	p->swap_map[offset] = usage;
 
-	}
-	/* return code. */
-	count = p->swap_map[offset];
 	/* free if no reference */
-	if (!count) {
+	if (!usage) {
 		if (offset < p->lowest_bit)
 			p->lowest_bit = offset;
 		if (offset > p->highest_bit)
@@ -588,9 +566,8 @@ static int swap_entry_free(struct swap_info_struct *p,
 		nr_swap_pages++;
 		p->inuse_pages--;
 	}
-	if (!swap_count(count))
-		mem_cgroup_uncharge_swap(ent);
-	return count;
+
+	return usage;
 }
 
 /*
@@ -603,7 +580,7 @@ void swap_free(swp_entry_t entry)
 
 	p = swap_info_get(entry);
 	if (p) {
-		swap_entry_free(p, entry, SWAP_MAP);
+		swap_entry_free(p, entry, 1);
 		spin_unlock(&swap_lock);
 	}
 }
@@ -614,19 +591,13 @@ void swap_free(swp_entry_t entry)
 void swapcache_free(swp_entry_t entry, struct page *page)
 {
 	struct swap_info_struct *p;
-	int ret;
+	unsigned short count;
 
 	p = swap_info_get(entry);
 	if (p) {
-		ret = swap_entry_free(p, entry, SWAP_CACHE);
-		if (page) {
-			bool swapout;
-			if (ret)
-				swapout = true; /* the end of swap out */
-			else
-				swapout = false; /* no more swap users! */
-			mem_cgroup_uncharge_swapcache(page, entry, swapout);
-		}
+		count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
+		if (page)
+			mem_cgroup_uncharge_swapcache(page, entry, count != 0);
 		spin_unlock(&swap_lock);
 	}
 }
@@ -705,7 +676,7 @@ int free_swap_and_cache(swp_entry_t entry)
 
 	p = swap_info_get(entry);
 	if (p) {
-		if (swap_entry_free(p, entry, SWAP_MAP) == SWAP_HAS_CACHE) {
+		if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
 			page = find_get_page(&swapper_space, entry.val);
 			if (page && !trylock_page(page)) {
 				page_cache_release(page);
@@ -1212,7 +1183,7 @@ static int try_to_unuse(unsigned int type)
 
 		if (swap_count(*swap_map) == SWAP_MAP_MAX) {
 			spin_lock(&swap_lock);
-			*swap_map = encode_swapmap(0, true);
+			*swap_map = SWAP_HAS_CACHE;
 			spin_unlock(&swap_lock);
 			reset_overflow = 1;
 		}
@@ -2111,16 +2082,16 @@ void si_swapinfo(struct sysinfo *val)
  * - swap-cache reference is requested but there is already one. -> EEXIST
  * - swap-cache reference is requested but the entry is not used. -> ENOENT
  */
-static int __swap_duplicate(swp_entry_t entry, bool cache)
+static int __swap_duplicate(swp_entry_t entry, unsigned short usage)
 {
 	struct swap_info_struct *p;
 	unsigned long offset, type;
-	int result = -EINVAL;
-	int count;
-	bool has_cache;
+	unsigned short count;
+	unsigned short has_cache;
+	int err = -EINVAL;
 
 	if (non_swap_entry(entry))
-		return -EINVAL;
+		goto out;
 
 	type = swp_type(entry);
 	if (type >= nr_swapfiles)
@@ -2129,54 +2100,56 @@ static int __swap_duplicate(swp_entry_t entry, bool cache)
 	offset = swp_offset(entry);
 
 	spin_lock(&swap_lock);
-
 	if (unlikely(offset >= p->max))
 		goto unlock_out;
 
-	count = swap_count(p->swap_map[offset]);
-	has_cache = swap_has_cache(p->swap_map[offset]);
+	count = p->swap_map[offset];
+	has_cache = count & SWAP_HAS_CACHE;
+	count &= ~SWAP_HAS_CACHE;
+	err = 0;
 
-	if (cache == SWAP_CACHE) { /* called for swapcache/swapin-readahead */
+	if (usage == SWAP_HAS_CACHE) {
 
 		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
-		if (!has_cache && count) {
-			p->swap_map[offset] = encode_swapmap(count, true);
-			result = 0;
-		} else if (has_cache) /* someone added cache */
-			result = -EEXIST;
-		else if (!count) /* no users */
-			result = -ENOENT;
+		if (!has_cache && count)
+			has_cache = SWAP_HAS_CACHE;
+		else if (has_cache)		/* someone else added cache */
+			err = -EEXIST;
+		else				/* no users remaining */
+			err = -ENOENT;
 
 	} else if (count || has_cache) {
-		if (count < SWAP_MAP_MAX - 1) {
-			p->swap_map[offset] = encode_swapmap(count + 1,
-							     has_cache);
-			result = 0;
-		} else if (count <= SWAP_MAP_MAX) {
+
+		if (count < SWAP_MAP_MAX - 1)
+			count++;
+		else if (count <= SWAP_MAP_MAX) {
 			if (swap_overflow++ < 5)
 				printk(KERN_WARNING
 				       "swap_dup: swap entry overflow\n");
-			p->swap_map[offset] = encode_swapmap(SWAP_MAP_MAX,
-							     has_cache);
-			result = 0;
-		}
+			count = SWAP_MAP_MAX;
+		} else
+			err = -EINVAL;
 	} else
-		result = -ENOENT; /* unused swap entry */
+		err = -ENOENT;			/* unused swap entry */
+
+	p->swap_map[offset] = count | has_cache;
+
 unlock_out:
 	spin_unlock(&swap_lock);
 out:
-	return result;
+	return err;
 
 bad_file:
 	printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
 	goto out;
 }
+
 /*
  * increase reference count of swap entry by 1.
  */
 void swap_duplicate(swp_entry_t entry)
 {
-	__swap_duplicate(entry, SWAP_MAP);
+	__swap_duplicate(entry, 1);
 }
 
 /*
@@ -2189,7 +2162,7 @@ void swap_duplicate(swp_entry_t entry)
  */
 int swapcache_prepare(swp_entry_t entry)
 {
-	return __swap_duplicate(entry, SWAP_CACHE);
+	return __swap_duplicate(entry, SWAP_HAS_CACHE);
 }
 
 /*
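As a further reading aid, here is a hedged user-space sketch of how a caller can interpret the unsigned short that swap_entry_free() now returns (the remaining count OR'ed with SWAP_HAS_CACHE), mirroring the checks made by free_swap_and_cache() and swapcache_free() in the diff above.  decode_usage() is an illustrative helper, not a kernel function.

#include <stdio.h>

#define SWAP_HAS_CACHE	0x8000

/* Illustrative helper (not kernel code): classify the value returned
 * by swap_entry_free() after this patch. */
static void decode_usage(unsigned short usage)
{
	if (!usage)
		printf("slot fully freed\n");
	else if (usage == SWAP_HAS_CACHE)
		printf("only the swap cache holds it; free_swap_and_cache() may reclaim the page\n");
	else
		printf("%u user reference(s) remain%s\n",
		       (unsigned)(usage & ~SWAP_HAS_CACHE),
		       (usage & SWAP_HAS_CACHE) ? ", plus the swap cache" : "");
}

int main(void)
{
	decode_usage(0);			/* e.g. after the last swap_free() */
	decode_usage(SWAP_HAS_CACHE);		/* e.g. free_swap_and_cache() case */
	decode_usage(2 | SWAP_HAS_CACHE);	/* entry still in use */
	return 0;
}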