diff options
author | Lars Ellenberg <lars.ellenberg@linbit.com> | 2014-04-28 12:43:19 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2014-04-30 15:46:54 -0400 |
commit | e82998743385ca861b9ec919eb2ba8177ce72180 (patch) | |
tree | 4cf895d0a9633eff1f11363f98ebe60713a4bcd7 /drivers/block/drbd/drbd_receiver.c | |
parent | 0e49d7b014c5d591a053d08888a455bd74a88646 (diff) |
drbd: don't let application IO pre-empt resync too often
Before, application IO could pre-empt resync activity
for up to a hardcoded 20 seconds per resync request.
A very busy server could throttle the effective resync bandwidth
down to one request per 20 seconds.
Now, we only let application IO pre-empt resync traffic
while the current resync rate estimate is above c-min-rate.
If you disable the c-min-rate throttle feature (set c-min-rate = 0),
application IO will no longer pre-empt resync traffic at all.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block/drbd/drbd_receiver.c')
-rw-r--r-- | drivers/block/drbd/drbd_receiver.c | 47 |
1 file changed, 26 insertions, 21 deletions
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 6ffbc22eba0b..10d2dcb16bff 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -2323,39 +2323,45 @@ out_interrupted: | |||
2323 | * The current sync rate used here uses only the most recent two step marks, | 2323 | * The current sync rate used here uses only the most recent two step marks, |
2324 | * to have a short time average so we can react faster. | 2324 | * to have a short time average so we can react faster. |
2325 | */ | 2325 | */ |
2326 | int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) | 2326 | bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) |
2327 | { | 2327 | { |
2328 | struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; | ||
2329 | unsigned long db, dt, dbdt; | ||
2330 | struct lc_element *tmp; | 2328 | struct lc_element *tmp; |
2331 | int curr_events; | 2329 | bool throttle = true; |
2332 | int throttle = 0; | ||
2333 | unsigned int c_min_rate; | ||
2334 | 2330 | ||
2335 | rcu_read_lock(); | 2331 | if (!drbd_rs_c_min_rate_throttle(device)) |
2336 | c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate; | 2332 | return false; |
2337 | rcu_read_unlock(); | ||
2338 | |||
2339 | /* feature disabled? */ | ||
2340 | if (c_min_rate == 0) | ||
2341 | return 0; | ||
2342 | 2333 | ||
2343 | spin_lock_irq(&device->al_lock); | 2334 | spin_lock_irq(&device->al_lock); |
2344 | tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); | 2335 | tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); |
2345 | if (tmp) { | 2336 | if (tmp) { |
2346 | struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); | 2337 | struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); |
2347 | if (test_bit(BME_PRIORITY, &bm_ext->flags)) { | 2338 | if (test_bit(BME_PRIORITY, &bm_ext->flags)) |
2348 | spin_unlock_irq(&device->al_lock); | 2339 | throttle = false; |
2349 | return 0; | ||
2350 | } | ||
2351 | /* Do not slow down if app IO is already waiting for this extent */ | 2340 | /* Do not slow down if app IO is already waiting for this extent */ |
2352 | } | 2341 | } |
2353 | spin_unlock_irq(&device->al_lock); | 2342 | spin_unlock_irq(&device->al_lock); |
2354 | 2343 | ||
2344 | return throttle; | ||
2345 | } | ||
2346 | |||
2347 | bool drbd_rs_c_min_rate_throttle(struct drbd_device *device) | ||
2348 | { | ||
2349 | struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; | ||
2350 | unsigned long db, dt, dbdt; | ||
2351 | unsigned int c_min_rate; | ||
2352 | int curr_events; | ||
2353 | |||
2354 | rcu_read_lock(); | ||
2355 | c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate; | ||
2356 | rcu_read_unlock(); | ||
2357 | |||
2358 | /* feature disabled? */ | ||
2359 | if (c_min_rate == 0) | ||
2360 | return false; | ||
2361 | |||
2355 | curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + | 2362 | curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + |
2356 | (int)part_stat_read(&disk->part0, sectors[1]) - | 2363 | (int)part_stat_read(&disk->part0, sectors[1]) - |
2357 | atomic_read(&device->rs_sect_ev); | 2364 | atomic_read(&device->rs_sect_ev); |
2358 | |||
2359 | if (!device->rs_last_events || curr_events - device->rs_last_events > 64) { | 2365 | if (!device->rs_last_events || curr_events - device->rs_last_events > 64) { |
2360 | unsigned long rs_left; | 2366 | unsigned long rs_left; |
2361 | int i; | 2367 | int i; |
@@ -2378,12 +2384,11 @@ int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) | |||
2378 | dbdt = Bit2KB(db/dt); | 2384 | dbdt = Bit2KB(db/dt); |
2379 | 2385 | ||
2380 | if (dbdt > c_min_rate) | 2386 | if (dbdt > c_min_rate) |
2381 | throttle = 1; | 2387 | return true; |
2382 | } | 2388 | } |
2383 | return throttle; | 2389 | return false; |
2384 | } | 2390 | } |
2385 | 2391 | ||
2386 | |||
2387 | static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi) | 2392 | static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi) |
2388 | { | 2393 | { |
2389 | struct drbd_peer_device *peer_device; | 2394 | struct drbd_peer_device *peer_device; |