author     Hugh Dickins <hugh@veritas.com>        2005-09-03 18:54:40 -0400
committer  Linus Torvalds <torvalds@evo.osdl.org> 2005-09-05 03:05:41 -0400
commit     048c27fd72816b44e096997d1c6901c3abbfd45b (patch)
tree       159a00b71ce9db161a48e4fc07b212db455a2cf1
parent     52b7efdbe5f5696fc80338560a3fc51e0b0a993c (diff)
[PATCH] swap: scan_swap_map latency breaks
The get_swap_page/scan_swap_map latency can be so bad that even those without
preemption configured deserve relief: periodically cond_resched.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
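
The patch applies a simple "latency ration" pattern: decrement a counter on
every loop iteration and call cond_resched() whenever it runs out, so the
scan yields the CPU at a bounded interval even on !CONFIG_PREEMPT kernels.
A minimal standalone sketch of that pattern follows; scan_items() and
do_work() are hypothetical stand-ins for illustration, not code from the
patch, while cond_resched() and unlikely() are the real kernel primitives.

    #include <linux/sched.h>	/* cond_resched() */
    #include <linux/compiler.h>	/* unlikely() */

    #define LATENCY_LIMIT	256	/* iterations between reschedule points */

    /* Hypothetical per-item work, standing in for the swap-map probe. */
    static void do_work(unsigned long i)
    {
    	(void)i;
    }

    static void scan_items(unsigned long nr_items)
    {
    	int latency_ration = LATENCY_LIMIT;
    	unsigned long i;

    	for (i = 0; i < nr_items; i++) {
    		do_work(i);

    		/*
    		 * Every LATENCY_LIMIT iterations, offer to reschedule,
    		 * so kernels built without preemption do not suffer
    		 * long scheduling latencies while the scan proceeds.
    		 */
    		if (unlikely(--latency_ration < 0)) {
    			cond_resched();
    			latency_ration = LATENCY_LIMIT;
    		}
    	}
    }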
 mm/swapfile.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index fdee145afc6f..e675ae55f87d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -56,8 +56,6 @@ static DECLARE_MUTEX(swapon_sem);
  */
 static DECLARE_RWSEM(swap_unplug_sem);
 
-#define SWAPFILE_CLUSTER 256
-
 void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 {
 	swp_entry_t entry;
@@ -84,9 +82,13 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 	up_read(&swap_unplug_sem);
 }
 
+#define SWAPFILE_CLUSTER 256
+#define LATENCY_LIMIT 256
+
 static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 {
 	unsigned long offset, last_in_cluster;
+	int latency_ration = LATENCY_LIMIT;
 
 	/*
 	 * We try to cluster swap pages by allocating them sequentially
@@ -117,6 +119,10 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 				si->cluster_next = offset-SWAPFILE_CLUSTER-1;
 				goto cluster;
 			}
+			if (unlikely(--latency_ration < 0)) {
+				cond_resched();
+				latency_ration = LATENCY_LIMIT;
+			}
 		}
 		swap_device_lock(si);
 		goto lowest;
@@ -153,6 +159,10 @@ checks:	if (!(si->flags & SWP_WRITEOK))
 			swap_device_lock(si);
 			goto checks;
 		}
+		if (unlikely(--latency_ration < 0)) {
+			cond_resched();
+			latency_ration = LATENCY_LIMIT;
+		}
 	}
 	swap_device_lock(si);
 	goto lowest;