author	Huang Ying <ying.huang@intel.com>	2016-10-07 19:58:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-07 21:46:27 -0400
commit	6b53491598a4d9694318e6e2b11d8c9988a483d4 (patch)
tree	ebd2d2b33b1a88c41976dcaee5de177cb379a5f7
parent	131ddc5c7d814d61f945b6322019e5148f6d39f0 (diff)
mm, swap: add swap_cluster_list
This is a code cleanup patch with no functional change. The swap_cluster_list data structure and its operations are introduced to provide better encapsulation for the free cluster and discard cluster list operations. This avoids some code duplication, improves code readability, and reduces the total line count.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/1472067356-16004-1-git-send-email-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Tim Chen <tim.c.chen@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Shaohua Li <shli@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
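To make the list discipline easier to follow, here is a standalone sketch of the same idea: a singly linked list threaded through an array of cluster entries, tracked by head/tail indexes, with add-to-tail and delete-from-head operations mirroring cluster_list_add_tail() and cluster_list_del_first() in the diff below. The struct layout, helper names, and the NIL sentinel are simplified stand-ins for illustration only, not the kernel's swap_cluster_info encoding (the kernel packs the links and the "null" marker into the existing flags/data bitfields rather than using a separate sentinel).

/* Standalone model (not kernel code): a head/tail list threaded
 * through an array, mirroring the swap_cluster_list operations. */
#include <stdbool.h>
#include <stdio.h>

#define NIL ((unsigned int)-1)                 /* stand-in for cluster_is_null() */

struct cluster { unsigned int next; };         /* simplified swap_cluster_info */
struct cluster_list { unsigned int head, tail; };  /* swap_cluster_list analogue */

static void list_init(struct cluster_list *l) { l->head = l->tail = NIL; }
static bool list_empty(struct cluster_list *l) { return l->head == NIL; }

static void list_add_tail(struct cluster_list *l, struct cluster *ci, unsigned int idx)
{
	if (list_empty(l)) {
		l->head = l->tail = idx;       /* first element: head == tail */
	} else {
		ci[l->tail].next = idx;        /* link old tail to new entry */
		l->tail = idx;
	}
	ci[idx].next = NIL;
}

static unsigned int list_del_first(struct cluster_list *l, struct cluster *ci)
{
	unsigned int idx = l->head;

	if (l->tail == idx)                    /* removing the only element */
		l->head = l->tail = NIL;
	else
		l->head = ci[idx].next;
	return idx;
}

int main(void)
{
	struct cluster ci[4];
	struct cluster_list free_clusters;
	unsigned int i;

	list_init(&free_clusters);
	for (i = 0; i < 4; i++)
		list_add_tail(&free_clusters, ci, i);
	while (!list_empty(&free_clusters))
		printf("freed cluster %u\n", list_del_first(&free_clusters, ci));
	return 0;
}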
-rw-r--r--	include/linux/swap.h	11
-rw-r--r--	mm/swapfile.c	133
2 files changed, 70 insertions(+), 74 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e1d761463243..a56523cefb9b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -191,6 +191,11 @@ struct percpu_cluster {
 	unsigned int next; /* Likely next allocation offset */
 };
 
+struct swap_cluster_list {
+	struct swap_cluster_info head;
+	struct swap_cluster_info tail;
+};
+
 /*
  * The in-memory structure used to track swap areas.
  */
@@ -203,8 +208,7 @@ struct swap_info_struct {
 	unsigned int max; /* extent of the swap_map */
 	unsigned char *swap_map; /* vmalloc'ed array of usage counts */
 	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
-	struct swap_cluster_info free_cluster_head; /* free cluster list head */
-	struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
+	struct swap_cluster_list free_clusters; /* free clusters list */
 	unsigned int lowest_bit; /* index of first free in swap_map */
 	unsigned int highest_bit; /* index of last free in swap_map */
 	unsigned int pages; /* total of usable pages of swap */
@@ -235,8 +239,7 @@ struct swap_info_struct {
 	 * first.
 	 */
 	struct work_struct discard_work; /* discard worker */
-	struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
-	struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
+	struct swap_cluster_list discard_clusters; /* discard clusters list */
 };
 
 /* linux/mm/workingset.c */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2657accc6e2b..134c085d0d7b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -257,6 +257,53 @@ static inline void cluster_set_null(struct swap_cluster_info *info)
 	info->data = 0;
 }
 
+static inline bool cluster_list_empty(struct swap_cluster_list *list)
+{
+	return cluster_is_null(&list->head);
+}
+
+static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
+{
+	return cluster_next(&list->head);
+}
+
+static void cluster_list_init(struct swap_cluster_list *list)
+{
+	cluster_set_null(&list->head);
+	cluster_set_null(&list->tail);
+}
+
+static void cluster_list_add_tail(struct swap_cluster_list *list,
+				  struct swap_cluster_info *ci,
+				  unsigned int idx)
+{
+	if (cluster_list_empty(list)) {
+		cluster_set_next_flag(&list->head, idx, 0);
+		cluster_set_next_flag(&list->tail, idx, 0);
+	} else {
+		unsigned int tail = cluster_next(&list->tail);
+
+		cluster_set_next(&ci[tail], idx);
+		cluster_set_next_flag(&list->tail, idx, 0);
+	}
+}
+
+static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
+					   struct swap_cluster_info *ci)
+{
+	unsigned int idx;
+
+	idx = cluster_next(&list->head);
+	if (cluster_next(&list->tail) == idx) {
+		cluster_set_null(&list->head);
+		cluster_set_null(&list->tail);
+	} else
+		cluster_set_next_flag(&list->head,
+				      cluster_next(&ci[idx]), 0);
+
+	return idx;
+}
+
 /* Add a cluster to discard list and schedule it to do discard */
 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 					  unsigned int idx)
@@ -270,17 +317,7 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 
-	if (cluster_is_null(&si->discard_cluster_head)) {
-		cluster_set_next_flag(&si->discard_cluster_head,
-						idx, 0);
-		cluster_set_next_flag(&si->discard_cluster_tail,
-						idx, 0);
-	} else {
-		unsigned int tail = cluster_next(&si->discard_cluster_tail);
-		cluster_set_next(&si->cluster_info[tail], idx);
-		cluster_set_next_flag(&si->discard_cluster_tail,
-						idx, 0);
-	}
+	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
 
 	schedule_work(&si->discard_work);
 }
@@ -296,15 +333,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
 
 	info = si->cluster_info;
 
-	while (!cluster_is_null(&si->discard_cluster_head)) {
-		idx = cluster_next(&si->discard_cluster_head);
-
-		cluster_set_next_flag(&si->discard_cluster_head,
-						cluster_next(&info[idx]), 0);
-		if (cluster_next(&si->discard_cluster_tail) == idx) {
-			cluster_set_null(&si->discard_cluster_head);
-			cluster_set_null(&si->discard_cluster_tail);
-		}
+	while (!cluster_list_empty(&si->discard_clusters)) {
+		idx = cluster_list_del_first(&si->discard_clusters, info);
 		spin_unlock(&si->lock);
 
 		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
@@ -312,19 +342,7 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
 
 		spin_lock(&si->lock);
 		cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE);
-		if (cluster_is_null(&si->free_cluster_head)) {
-			cluster_set_next_flag(&si->free_cluster_head,
-						idx, 0);
-			cluster_set_next_flag(&si->free_cluster_tail,
-						idx, 0);
-		} else {
-			unsigned int tail;
-
-			tail = cluster_next(&si->free_cluster_tail);
-			cluster_set_next(&info[tail], idx);
-			cluster_set_next_flag(&si->free_cluster_tail,
-						idx, 0);
-		}
+		cluster_list_add_tail(&si->free_clusters, info, idx);
 		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 				0, SWAPFILE_CLUSTER);
 	}
@@ -353,13 +371,8 @@ static void inc_cluster_info_page(struct swap_info_struct *p,
 	if (!cluster_info)
 		return;
 	if (cluster_is_free(&cluster_info[idx])) {
-		VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx);
-		cluster_set_next_flag(&p->free_cluster_head,
-			cluster_next(&cluster_info[idx]), 0);
-		if (cluster_next(&p->free_cluster_tail) == idx) {
-			cluster_set_null(&p->free_cluster_tail);
-			cluster_set_null(&p->free_cluster_head);
-		}
+		VM_BUG_ON(cluster_list_first(&p->free_clusters) != idx);
+		cluster_list_del_first(&p->free_clusters, cluster_info);
 		cluster_set_count_flag(&cluster_info[idx], 0, 0);
 	}
 
@@ -398,14 +411,7 @@ static void dec_cluster_info_page(struct swap_info_struct *p,
 		}
 
 		cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
-		if (cluster_is_null(&p->free_cluster_head)) {
-			cluster_set_next_flag(&p->free_cluster_head, idx, 0);
-			cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
-		} else {
-			unsigned int tail = cluster_next(&p->free_cluster_tail);
-			cluster_set_next(&cluster_info[tail], idx);
-			cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
-		}
+		cluster_list_add_tail(&p->free_clusters, cluster_info, idx);
 	}
 }
 
@@ -421,8 +427,8 @@ scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 	bool conflict;
 
 	offset /= SWAPFILE_CLUSTER;
-	conflict = !cluster_is_null(&si->free_cluster_head) &&
-		offset != cluster_next(&si->free_cluster_head) &&
+	conflict = !cluster_list_empty(&si->free_clusters) &&
+		offset != cluster_list_first(&si->free_clusters) &&
 		cluster_is_free(&si->cluster_info[offset]);
 
 	if (!conflict)
@@ -447,11 +453,11 @@ static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 new_cluster:
 	cluster = this_cpu_ptr(si->percpu_cluster);
 	if (cluster_is_null(&cluster->index)) {
-		if (!cluster_is_null(&si->free_cluster_head)) {
-			cluster->index = si->free_cluster_head;
+		if (!cluster_list_empty(&si->free_clusters)) {
+			cluster->index = si->free_clusters.head;
 			cluster->next = cluster_next(&cluster->index) *
 					SWAPFILE_CLUSTER;
-		} else if (!cluster_is_null(&si->discard_cluster_head)) {
+		} else if (!cluster_list_empty(&si->discard_clusters)) {
 			/*
 			 * we don't have free cluster but have some clusters in
 			 * discarding, do discard now and reclaim them
@@ -2292,10 +2298,8 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
 
 	nr_good_pages = maxpages - 1;	/* omit header page */
 
-	cluster_set_null(&p->free_cluster_head);
-	cluster_set_null(&p->free_cluster_tail);
-	cluster_set_null(&p->discard_cluster_head);
-	cluster_set_null(&p->discard_cluster_tail);
+	cluster_list_init(&p->free_clusters);
+	cluster_list_init(&p->discard_clusters);
 
 	for (i = 0; i < swap_header->info.nr_badpages; i++) {
 		unsigned int page_nr = swap_header->info.badpages[i];
@@ -2341,19 +2345,8 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
 	for (i = 0; i < nr_clusters; i++) {
 		if (!cluster_count(&cluster_info[idx])) {
 			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
-			if (cluster_is_null(&p->free_cluster_head)) {
-				cluster_set_next_flag(&p->free_cluster_head,
-								idx, 0);
-				cluster_set_next_flag(&p->free_cluster_tail,
-								idx, 0);
-			} else {
-				unsigned int tail;
-
-				tail = cluster_next(&p->free_cluster_tail);
-				cluster_set_next(&cluster_info[tail], idx);
-				cluster_set_next_flag(&p->free_cluster_tail,
-								idx, 0);
-			}
+			cluster_list_add_tail(&p->free_clusters, cluster_info,
+					      idx);
 		}
 		idx++;
 		if (idx == nr_clusters)