author	Sasha Levin <levinsasha928@gmail.com>	2012-06-10 06:51:02 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-06-11 15:30:57 -0400
commit	f116695a500cdd84cbeac68bc373e98ae729c24b (patch)
tree	e0af3b569d8b50a1a03612a57fec02f9846e1deb /mm/frontswap.c
parent	96253444dbd90c6e9e9cfcb25315da5c412b058a (diff)
mm: frontswap: split out __frontswap_unuse_pages
An attempt at making frontswap_shrink shorter and more readable. This patch splits out the walk through the swap list that finds an entry with enough pages to unuse. Also, assert that the internal __frontswap_unuse_pages() is called under the swap lock, since that code previously ran directly inside the lock.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
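For context, the pattern the patch applies is "caller takes the lock, the split-out helper asserts it is already held" rather than having the helper lock again. Below is a minimal standalone userspace sketch of that pattern, not kernel code; the names list_lock, pick_entry_locked() and shrink() are illustrative only, and a bool flag stands in for assert_spin_locked().

/*
 * Hypothetical userspace illustration of the locking convention used by
 * this patch: the helper must be entered with the lock already held.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static bool list_locked;        /* tracks lock ownership for the assert */

/* Helper: must be called with list_lock held (analogue of assert_spin_locked). */
static int pick_entry_locked(unsigned long want, unsigned long *got)
{
        assert(list_locked);    /* caller forgot the lock -> loud failure */
        *got = want;            /* stand-in for walking the swap list */
        return 0;
}

static void shrink(unsigned long target)
{
        unsigned long got = 0;

        pthread_mutex_lock(&list_lock);
        list_locked = true;

        if (pick_entry_locked(target, &got) == 0)
                printf("would unuse %lu pages\n", got);

        list_locked = false;
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        shrink(128);
        return 0;
}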
Diffstat (limited to 'mm/frontswap.c')
-rw-r--r--	mm/frontswap.c	59
1 file changed, 39 insertions, 20 deletions
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 5faf840f8726..faa43b7eea6f 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -230,6 +230,41 @@ static unsigned long __frontswap_curr_pages(void)
 	return totalpages;
 }
 
+static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
+					int *swapid)
+{
+	int ret = -EINVAL;
+	struct swap_info_struct *si = NULL;
+	int si_frontswap_pages;
+	unsigned long total_pages_to_unuse = total;
+	unsigned long pages = 0, pages_to_unuse = 0;
+	int type;
+
+	assert_spin_locked(&swap_lock);
+	for (type = swap_list.head; type >= 0; type = si->next) {
+		si = swap_info[type];
+		si_frontswap_pages = atomic_read(&si->frontswap_pages);
+		if (total_pages_to_unuse < si_frontswap_pages) {
+			pages = pages_to_unuse = total_pages_to_unuse;
+		} else {
+			pages = si_frontswap_pages;
+			pages_to_unuse = 0; /* unuse all */
+		}
+		/* ensure there is enough RAM to fetch pages from frontswap */
+		if (security_vm_enough_memory_mm(current->mm, pages)) {
+			ret = -ENOMEM;
+			continue;
+		}
+		vm_unacct_memory(pages);
+		*unused = pages_to_unuse;
+		*swapid = type;
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
 /*
  * Frontswap, like a true swap device, may unnecessarily retain pages
  * under certain circumstances; "shrink" frontswap is essentially a
@@ -240,11 +275,9 @@ static unsigned long __frontswap_curr_pages(void)
  */
 void frontswap_shrink(unsigned long target_pages)
 {
-	struct swap_info_struct *si = NULL;
-	int si_frontswap_pages;
 	unsigned long total_pages = 0, total_pages_to_unuse;
-	unsigned long pages = 0, pages_to_unuse = 0;
-	int type;
+	unsigned long pages_to_unuse = 0;
+	int type, ret;
 	bool locked = false;
 
 	/*
@@ -258,22 +291,8 @@ void frontswap_shrink(unsigned long target_pages)
 	if (total_pages <= target_pages)
 		goto out;
 	total_pages_to_unuse = total_pages - target_pages;
-	for (type = swap_list.head; type >= 0; type = si->next) {
-		si = swap_info[type];
-		si_frontswap_pages = atomic_read(&si->frontswap_pages);
-		if (total_pages_to_unuse < si_frontswap_pages) {
-			pages = pages_to_unuse = total_pages_to_unuse;
-		} else {
-			pages = si_frontswap_pages;
-			pages_to_unuse = 0; /* unuse all */
-		}
-		/* ensure there is enough RAM to fetch pages from frontswap */
-		if (security_vm_enough_memory_mm(current->mm, pages))
-			continue;
-		vm_unacct_memory(pages);
-		break;
-	}
-	if (type < 0)
-		goto out;
+	ret = __frontswap_unuse_pages(total_pages_to_unuse, &pages_to_unuse, &type);
+	if (ret < 0)
+		goto out;
 	locked = false;
 	spin_unlock(&swap_lock);