author      Lars Ellenberg <lars.ellenberg@linbit.com>      2010-09-06 06:30:25 -0400
committer   Philipp Reisner <philipp.reisner@linbit.com>    2010-10-14 12:38:33 -0400
commit      435f07402b3165b90592073bc0f8c6f8fa160ff9 (patch)
tree        cef4df9cde79857ad5590358c5f155c2f8bd84a2 /drivers/block/drbd/drbd_receiver.c
parent      76d2e7eca8e7675c6d7a6592f9e747b121cc8a87 (diff)
drbd: don't count sendpage()d pages only referenced by tcp as in use
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block/drbd/drbd_receiver.c')
 drivers/block/drbd/drbd_receiver.c | 26
 1 file changed, 16 insertions(+), 10 deletions(-)
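The patch splits DRBD's page accounting: pages that have been handed to sendpage() and are only still referenced by the TCP stack are tracked in a separate pp_in_use_by_net counter instead of pp_in_use, and the free paths (drbd_pp_free(), drbd_free_some_ee(), drbd_free_net_ee()) decrement whichever counter matches the page's origin. Below is a minimal userspace sketch of that accounting split using C11 atomics; the counter names and the format string mirror the kernel code, but pp_free() and npages are illustrative simplifications, and the page pool, locking, and epoch-entry handling are omitted entirely.

/* Userspace sketch (not DRBD code): two counters as in this patch,
 * pp_in_use for ordinary data pages and pp_in_use_by_net for pages
 * that are only still pinned by the network stack after sendpage().
 * Page pools, locking and epoch entries are left out. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pp_in_use;        /* pages in use for incoming data      */
static atomic_int pp_in_use_by_net; /* sendpage()d pages still held by tcp */

/* analogous to drbd_pp_free(mdev, page, is_net): pick the counter by origin */
static void pp_free(int npages, int is_net)
{
        atomic_int *a = is_net ? &pp_in_use_by_net : &pp_in_use;
        int i = atomic_fetch_sub(a, npages) - npages; /* new value, cf. atomic_sub_return() */

        if (i < 0)
                fprintf(stderr, "ASSERTION FAILED: %s: %d < 0\n",
                        is_net ? "pp_in_use_by_net" : "pp_in_use", i);
}

int main(void)
{
        atomic_fetch_add(&pp_in_use, 4);        /* pages allocated for receiving  */
        pp_free(4, 0);                          /* ...released on the normal path */

        atomic_fetch_add(&pp_in_use_by_net, 2); /* pages handed to sendpage()     */
        pp_free(2, 1);                          /* ...released once tcp lets go   */

        printf("pp_in_use=%d pp_in_use_by_net=%d\n",
               atomic_load(&pp_in_use), atomic_load(&pp_in_use_by_net));
        return 0;
}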
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index e96fbb04ea4d..2c3edf0ac5ca 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -241,7 +241,7 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 	spin_unlock_irq(&mdev->req_lock);
 
 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
-		drbd_free_ee(mdev, e);
+		drbd_free_net_ee(mdev, e);
 }
 
 /**
@@ -298,9 +298,11 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
  * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
  * Either links the page chain back to the global pool,
  * or returns all pages to the system. */
-static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
+static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
 {
+	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
 	int i;
+
 	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
 		i = page_chain_free(page);
 	else {
@@ -311,10 +313,10 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
 		drbd_pp_vacant += i;
 		spin_unlock(&drbd_pp_lock);
 	}
-	atomic_sub(i, &mdev->pp_in_use);
-	i = atomic_read(&mdev->pp_in_use);
+	i = atomic_sub_return(i, a);
 	if (i < 0)
-		dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
+		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
+			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
 	wake_up(&drbd_pp_wait);
 }
 
@@ -374,11 +376,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
 	return NULL;
 }
 
-void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
 {
 	if (e->flags & EE_HAS_DIGEST)
 		kfree(e->digest);
-	drbd_pp_free(mdev, e->pages);
+	drbd_pp_free(mdev, e->pages, is_net);
 	D_ASSERT(atomic_read(&e->pending_bios) == 0);
 	D_ASSERT(hlist_unhashed(&e->colision));
 	mempool_free(e, drbd_ee_mempool);
@@ -389,13 +391,14 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
 	LIST_HEAD(work_list);
 	struct drbd_epoch_entry *e, *t;
 	int count = 0;
+	int is_net = list == &mdev->net_ee;
 
 	spin_lock_irq(&mdev->req_lock);
 	list_splice_init(list, &work_list);
 	spin_unlock_irq(&mdev->req_lock);
 
 	list_for_each_entry_safe(e, t, &work_list, w.list) {
-		drbd_free_ee(mdev, e);
+		drbd_free_some_ee(mdev, e, is_net);
 		count++;
 	}
 	return count;
@@ -424,7 +427,7 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
 	spin_unlock_irq(&mdev->req_lock);
 
 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
-		drbd_free_ee(mdev, e);
+		drbd_free_net_ee(mdev, e);
 
 	/* possible callbacks here:
 	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
@@ -1460,7 +1463,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 		data_size -= rr;
 	}
 	kunmap(page);
-	drbd_pp_free(mdev, page);
+	drbd_pp_free(mdev, page, 0);
 	return rv;
 }
 
@@ -3879,6 +3882,9 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 	i = drbd_release_ee(mdev, &mdev->net_ee);
 	if (i)
 		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
+	i = atomic_read(&mdev->pp_in_use_by_net);
+	if (i)
+		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
 	i = atomic_read(&mdev->pp_in_use);
 	if (i)
 		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
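One further detail from the drbd_pp_free() hunk above: the underflow check used to decrement with atomic_sub() and then re-read the counter with a separate atomic_read(), so the value it warned about could already include other CPUs' updates; atomic_sub_return() instead reports the value produced by that one decrement. A hedged C11 analogue of the two patterns follows (the counter name and helper functions here are illustrative, not kernel APIs).

#include <stdatomic.h>
#include <stdio.h>

static atomic_int counter;

/* old pattern: subtract, then read again; another thread may have
 * changed the counter in between, so the warning can be misleading */
static void sub_then_read(int n)
{
        atomic_fetch_sub(&counter, n);
        int i = atomic_load(&counter);

        if (i < 0)
                fprintf(stderr, "underflow? %d\n", i);
}

/* new pattern: check exactly the value this subtraction produced,
 * which is what atomic_sub_return() yields in the patch */
static void sub_return(int n)
{
        int i = atomic_fetch_sub(&counter, n) - n;

        if (i < 0)
                fprintf(stderr, "underflow: %d\n", i);
}

int main(void)
{
        atomic_store(&counter, 3);
        sub_then_read(1);
        sub_return(2);
        printf("counter=%d\n", atomic_load(&counter));
        return 0;
}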