diff options
| author | Andrea Arcangeli <aarcange@redhat.com> | 2017-07-06 18:37:05 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-06 19:24:31 -0400 |
| commit | 8dc5ffcd5a74da39ed2c533d786a3f78671a38b8 (patch) | |
| tree | f766ea40745f1b564a8a49a35855c718d97d5985 /mm/ksm.c | |
| parent | 0ba1d0f7c41cdab306a3d30e036bc393c3ebba7e (diff) | |
ksm: swap the two output parameters of chain/chain_prune
Some static checker complains if chain/chain_prune returns a potentially
stale pointer.
There are two output parameters to chain/chain_prune: one is tree_page,
the other is stable_node_dup. Like in get_ksm_page, the caller has to
check whether tree_page is NULL before touching the stable_node. Similarly,
in chain/chain_prune the caller has to check tree_page before touching
the stable_node_dup returned or the original stable_node passed as a
parameter.
Because the tree_page is never returned as a stale pointer, it may be
more intuitive to return tree_page and to pass stable_node_dup by
reference instead of the reverse.
This patch purely swaps the two output parameters of chain/chain_prune
as a cleanup for the static checker and to mimic the get_ksm_page
behavior more closely. There's no change to the caller at all except
the swap; it's purely a cleanup, and it is a noop from the caller's
point of view.
Link: http://lkml.kernel.org/r/20170518173721.22316-3-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Tested-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Evgheni Dereveanchin <ederevea@redhat.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Gavin Guo <gavin.guo@canonical.com>
Cc: Jay Vosburgh <jay.vosburgh@canonical.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/ksm.c')
| -rw-r--r-- | mm/ksm.c | 78 |
1 files changed, 52 insertions, 26 deletions
| @@ -1313,14 +1313,14 @@ bool is_page_sharing_candidate(struct stable_node *stable_node) | |||
| 1313 | return __is_page_sharing_candidate(stable_node, 0); | 1313 | return __is_page_sharing_candidate(stable_node, 0); |
| 1314 | } | 1314 | } |
| 1315 | 1315 | ||
| 1316 | static struct stable_node *stable_node_dup(struct stable_node **_stable_node, | 1316 | struct page *stable_node_dup(struct stable_node **_stable_node_dup, |
| 1317 | struct page **tree_page, | 1317 | struct stable_node **_stable_node, |
| 1318 | struct rb_root *root, | 1318 | struct rb_root *root, |
| 1319 | bool prune_stale_stable_nodes) | 1319 | bool prune_stale_stable_nodes) |
| 1320 | { | 1320 | { |
| 1321 | struct stable_node *dup, *found = NULL, *stable_node = *_stable_node; | 1321 | struct stable_node *dup, *found = NULL, *stable_node = *_stable_node; |
| 1322 | struct hlist_node *hlist_safe; | 1322 | struct hlist_node *hlist_safe; |
| 1323 | struct page *_tree_page; | 1323 | struct page *_tree_page, *tree_page = NULL; |
| 1324 | int nr = 0; | 1324 | int nr = 0; |
| 1325 | int found_rmap_hlist_len; | 1325 | int found_rmap_hlist_len; |
| 1326 | 1326 | ||
| @@ -1353,14 +1353,14 @@ static struct stable_node *stable_node_dup(struct stable_node **_stable_node, | |||
| 1353 | if (!found || | 1353 | if (!found || |
| 1354 | dup->rmap_hlist_len > found_rmap_hlist_len) { | 1354 | dup->rmap_hlist_len > found_rmap_hlist_len) { |
| 1355 | if (found) | 1355 | if (found) |
| 1356 | put_page(*tree_page); | 1356 | put_page(tree_page); |
| 1357 | found = dup; | 1357 | found = dup; |
| 1358 | found_rmap_hlist_len = found->rmap_hlist_len; | 1358 | found_rmap_hlist_len = found->rmap_hlist_len; |
| 1359 | *tree_page = _tree_page; | 1359 | tree_page = _tree_page; |
| 1360 | 1360 | ||
| 1361 | /* skip put_page for found dup */ | ||
| 1361 | if (!prune_stale_stable_nodes) | 1362 | if (!prune_stale_stable_nodes) |
| 1362 | break; | 1363 | break; |
| 1363 | /* skip put_page */ | ||
| 1364 | continue; | 1364 | continue; |
| 1365 | } | 1365 | } |
| 1366 | } | 1366 | } |
| @@ -1417,7 +1417,8 @@ static struct stable_node *stable_node_dup(struct stable_node **_stable_node, | |||
| 1417 | } | 1417 | } |
| 1418 | } | 1418 | } |
| 1419 | 1419 | ||
| 1420 | return found; | 1420 | *_stable_node_dup = found; |
| 1421 | return tree_page; | ||
| 1421 | } | 1422 | } |
| 1422 | 1423 | ||
| 1423 | static struct stable_node *stable_node_dup_any(struct stable_node *stable_node, | 1424 | static struct stable_node *stable_node_dup_any(struct stable_node *stable_node, |
| @@ -1433,35 +1434,60 @@ static struct stable_node *stable_node_dup_any(struct stable_node *stable_node, | |||
| 1433 | typeof(*stable_node), hlist_dup); | 1434 | typeof(*stable_node), hlist_dup); |
| 1434 | } | 1435 | } |
| 1435 | 1436 | ||
| 1436 | static struct stable_node *__stable_node_chain(struct stable_node **_stable_node, | 1437 | /* |
| 1437 | struct page **tree_page, | 1438 | * Like for get_ksm_page, this function can free the *_stable_node and |
| 1438 | struct rb_root *root, | 1439 | * *_stable_node_dup if the returned tree_page is NULL. |
| 1439 | bool prune_stale_stable_nodes) | 1440 | * |
| 1441 | * It can also free and overwrite *_stable_node with the found | ||
| 1442 | * stable_node_dup if the chain is collapsed (in which case | ||
| 1443 | * *_stable_node will be equal to *_stable_node_dup like if the chain | ||
| 1444 | * never existed). It's up to the caller to verify tree_page is not | ||
| 1445 | * NULL before dereferencing *_stable_node or *_stable_node_dup. | ||
| 1446 | * | ||
| 1447 | * *_stable_node_dup is really a second output parameter of this | ||
| 1448 | * function and will be overwritten in all cases, the caller doesn't | ||
| 1449 | * need to initialize it. | ||
| 1450 | */ | ||
| 1451 | static struct page *__stable_node_chain(struct stable_node **_stable_node_dup, | ||
| 1452 | struct stable_node **_stable_node, | ||
| 1453 | struct rb_root *root, | ||
| 1454 | bool prune_stale_stable_nodes) | ||
| 1440 | { | 1455 | { |
| 1441 | struct stable_node *stable_node = *_stable_node; | 1456 | struct stable_node *stable_node = *_stable_node; |
| 1442 | if (!is_stable_node_chain(stable_node)) { | 1457 | if (!is_stable_node_chain(stable_node)) { |
| 1443 | if (is_page_sharing_candidate(stable_node)) { | 1458 | if (is_page_sharing_candidate(stable_node)) { |
| 1444 | *tree_page = get_ksm_page(stable_node, false); | 1459 | *_stable_node_dup = stable_node; |
| 1445 | return stable_node; | 1460 | return get_ksm_page(stable_node, false); |
| 1446 | } | 1461 | } |
| 1462 | /* | ||
| 1463 | * _stable_node_dup set to NULL means the stable_node | ||
| 1464 | * reached the ksm_max_page_sharing limit. | ||
| 1465 | */ | ||
| 1466 | *_stable_node_dup = NULL; | ||
| 1447 | return NULL; | 1467 | return NULL; |
| 1448 | } | 1468 | } |
| 1449 | return stable_node_dup(_stable_node, tree_page, root, | 1469 | return stable_node_dup(_stable_node_dup, _stable_node, root, |
| 1450 | prune_stale_stable_nodes); | 1470 | prune_stale_stable_nodes); |
| 1451 | } | 1471 | } |
| 1452 | 1472 | ||
| 1453 | static __always_inline struct stable_node *chain_prune(struct stable_node **s_n, | 1473 | static __always_inline struct page *chain_prune(struct stable_node **s_n_d, |
| 1454 | struct page **t_p, | 1474 | struct stable_node **s_n, |
| 1455 | struct rb_root *root) | 1475 | struct rb_root *root) |
| 1456 | { | 1476 | { |
| 1457 | return __stable_node_chain(s_n, t_p, root, true); | 1477 | return __stable_node_chain(s_n_d, s_n, root, true); |
| 1458 | } | 1478 | } |
| 1459 | 1479 | ||
| 1460 | static __always_inline struct stable_node *chain(struct stable_node *s_n, | 1480 | static __always_inline struct page *chain(struct stable_node **s_n_d, |
| 1461 | struct page **t_p, | 1481 | struct stable_node *s_n, |
| 1462 | struct rb_root *root) | 1482 | struct rb_root *root) |
| 1463 | { | 1483 | { |
| 1464 | return __stable_node_chain(&s_n, t_p, root, false); | 1484 | struct stable_node *old_stable_node = s_n; |
| 1485 | struct page *tree_page; | ||
| 1486 | |||
| 1487 | tree_page = __stable_node_chain(s_n_d, &s_n, root, false); | ||
| 1488 | /* not pruning dups so s_n cannot have changed */ | ||
| 1489 | VM_BUG_ON(s_n != old_stable_node); | ||
| 1490 | return tree_page; | ||
| 1465 | } | 1491 | } |
| 1466 | 1492 | ||
| 1467 | /* | 1493 | /* |
| @@ -1502,7 +1528,7 @@ again: | |||
| 1502 | cond_resched(); | 1528 | cond_resched(); |
| 1503 | stable_node = rb_entry(*new, struct stable_node, node); | 1529 | stable_node = rb_entry(*new, struct stable_node, node); |
| 1504 | stable_node_any = NULL; | 1530 | stable_node_any = NULL; |
| 1505 | stable_node_dup = chain_prune(&stable_node, &tree_page, root); | 1531 | tree_page = chain_prune(&stable_node_dup, &stable_node, root); |
| 1506 | /* | 1532 | /* |
| 1507 | * NOTE: stable_node may have been freed by | 1533 | * NOTE: stable_node may have been freed by |
| 1508 | * chain_prune() if the returned stable_node_dup is | 1534 | * chain_prune() if the returned stable_node_dup is |
| @@ -1743,7 +1769,7 @@ again: | |||
| 1743 | cond_resched(); | 1769 | cond_resched(); |
| 1744 | stable_node = rb_entry(*new, struct stable_node, node); | 1770 | stable_node = rb_entry(*new, struct stable_node, node); |
| 1745 | stable_node_any = NULL; | 1771 | stable_node_any = NULL; |
| 1746 | stable_node_dup = chain(stable_node, &tree_page, root); | 1772 | tree_page = chain(&stable_node_dup, stable_node, root); |
| 1747 | if (!stable_node_dup) { | 1773 | if (!stable_node_dup) { |
| 1748 | /* | 1774 | /* |
| 1749 | * Either all stable_node dups were full in | 1775 | * Either all stable_node dups were full in |
