author     Mel Gorman <mgorman@suse.de>                      2014-01-21 18:50:59 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-01-21 19:19:48 -0500
commit     1c5e9c27cbd966c7f0038698d5dcd5ada3574f47 (patch)
tree       a546cec0019aa3b726ba06e50ce38f960ee8f222 /mm
parent     1c30e0177e4f41a11cb88b0f1f056ccebfe0fff4 (diff)
mm: numa: limit scope of lock for NUMA migrate rate limiting
NUMA migrate rate limiting protects a migration counter and window using
a lock, but in some cases that lock can become contended. It is not
critical that the page count be perfectly accurate; lost updates are
acceptable. Reduce the scope of this lock.
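For illustration only, the following is a minimal userspace sketch of the
pattern this patch applies: the window reset stays serialised by a lock,
while the counter bump is done unlocked and occasional lost updates are
tolerated. It is not kernel code; a pthread mutex stands in for the
spinlock, wall-clock seconds stand in for jiffies, and the names
update_ratelimit, migrated_pages, RATELIMIT_PAGES and WINDOW_SECONDS are
hypothetical.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define RATELIMIT_PAGES  (128UL << 8)  /* hypothetical per-window budget */
#define WINDOW_SECONDS   1             /* hypothetical window length */

static pthread_mutex_t window_lock = PTHREAD_MUTEX_INITIALIZER;
static time_t next_window;             /* start of the next rate window */
static unsigned long migrated_pages;   /* pages migrated in this window */

/* Returns true if the caller should skip migration (rate limited). */
static bool update_ratelimit(unsigned long nr_pages)
{
	time_t now = time(NULL);

	/*
	 * Only the window reset takes the lock. Concurrent callers may
	 * both reset the window, which is harmless.
	 */
	if (now >= next_window) {
		pthread_mutex_lock(&window_lock);
		migrated_pages = 0;
		next_window = now + WINDOW_SECONDS;
		pthread_mutex_unlock(&window_lock);
	}

	if (migrated_pages > RATELIMIT_PAGES)
		return true;

	/*
	 * Unlocked, non-atomic update: racing callers may lose counts,
	 * making the limiter slightly too permissive for one window.
	 * That is the trade-off the patch accepts.
	 */
	migrated_pages += nr_pages;
	return false;
}

int main(void)
{
	printf("rate limited: %d\n", update_ratelimit(32));
	return 0;
}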
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/migrate.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 41eba21f10ba..4612bb2e3677 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1602,26 +1602,29 @@ bool migrate_ratelimited(int node)
 static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
 					unsigned long nr_pages)
 {
-	bool rate_limited = false;
-
 	/*
 	 * Rate-limit the amount of data that is being migrated to a node.
 	 * Optimal placement is no good if the memory bus is saturated and
 	 * all the time is being spent migrating!
 	 */
-	spin_lock(&pgdat->numabalancing_migrate_lock);
 	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
+		spin_lock(&pgdat->numabalancing_migrate_lock);
 		pgdat->numabalancing_migrate_nr_pages = 0;
 		pgdat->numabalancing_migrate_next_window = jiffies +
 			msecs_to_jiffies(migrate_interval_millisecs);
+		spin_unlock(&pgdat->numabalancing_migrate_lock);
 	}
 	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
-		rate_limited = true;
-	else
-		pgdat->numabalancing_migrate_nr_pages += nr_pages;
-	spin_unlock(&pgdat->numabalancing_migrate_lock);
-
-	return rate_limited;
+		return true;
+
+	/*
+	 * This is an unlocked non-atomic update so errors are possible.
+	 * The consequences are failing to migrate when we potentially should
+	 * have, which is not severe enough to warrant locking. If it is ever
+	 * a problem, it can be converted to a per-cpu counter.
+	 */
+	pgdat->numabalancing_migrate_nr_pages += nr_pages;
+	return false;
 }
 
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)