author    Lars Ellenberg <lars.ellenberg@linbit.com>    2010-05-25 10:26:16 -0400
committer Jens Axboe <jaxboe@fusionio.com>    2010-06-01 05:12:27 -0400
commit    ba11ad9a3b9dd2dbb9c6686ea9d41a9a77d94327 (patch)
tree      d1aa2d5a2673a063e7cc8b9a4bba5305c608f6a1 /drivers/block/drbd/drbd_main.c
parent    5dbf1673383f2f1554f0634fdfc390d59dc2c7d6 (diff)
drbd: improve usage of MSG_MORE
It seems to improve performance if we allow the "p_data" header in its own frame (no MSG_MORE), but sendpage all but the last page with MSG_MORE.

This is also in preparation for a later zero-copy receive implementation.

Suggested by Eduard.Guzovsky@stratus.com on drbd-dev.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
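The same MSG_MORE pattern is easy to demonstrate outside the kernel. The sketch below is a minimal userspace illustration only, not DRBD code: the helper send_in_chunks() and its parameters are invented. Every chunk except the last is sent with MSG_MORE, hinting the TCP stack that more data follows so it may coalesce the pieces; the final chunk is sent without it so the frame is flushed.

    #include <sys/types.h>
    #include <sys/socket.h>

    /* Illustrative only: stream a buffer in pieces over a connected TCP
     * socket, hinting all but the last chunk with MSG_MORE so the stack
     * may coalesce them into fewer frames. */
    static int send_in_chunks(int sock, const char *buf, size_t len, size_t chunk)
    {
            while (len) {
                    size_t l = len < chunk ? len : chunk;
                    /* more data follows => MSG_MORE, last chunk => 0 */
                    int flags = (len > l ? MSG_MORE : 0) | MSG_NOSIGNAL;
                    ssize_t sent = send(sock, buf, l, flags);

                    if (sent < 0)
                            return -1;
                    buf += sent;
                    len -= sent;
            }
            return 0;
    }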
Diffstat (limited to 'drivers/block/drbd/drbd_main.c')
-rw-r--r--  drivers/block/drbd/drbd_main.c | 32 +++++++++++++++++++-------------
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 1fcf2d1bcc39..c978557b4b80 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2272,9 +2272,9 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *
  * with page_count == 0 or PageSlab.
  */
 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
-                   int offset, size_t size)
+                   int offset, size_t size, unsigned msg_flags)
 {
-        int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, 0);
+        int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
         kunmap(page);
         if (sent == size)
                 mdev->send_cnt += size>>9;
@@ -2282,7 +2282,7 @@ static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
 }
 
 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
-                    int offset, size_t size)
+                    int offset, size_t size, unsigned msg_flags)
 {
         mm_segment_t oldfs = get_fs();
         int sent, ok;
@@ -2295,14 +2295,15 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
          * __page_cache_release a page that would actually still be referenced
          * by someone, leading to some obscure delayed Oops somewhere else. */
         if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
-                return _drbd_no_send_page(mdev, page, offset, size);
+                return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
 
+        msg_flags |= MSG_NOSIGNAL;
         drbd_update_congested(mdev);
         set_fs(KERNEL_DS);
         do {
                 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
                                                         offset, len,
-                                                        MSG_NOSIGNAL);
+                                                        msg_flags);
                 if (sent == -EAGAIN) {
                         if (we_should_drop_the_connection(mdev,
                                                           mdev->data.socket))
@@ -2331,9 +2332,11 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 {
         struct bio_vec *bvec;
         int i;
+        /* hint all but last page with MSG_MORE */
         __bio_for_each_segment(bvec, bio, i, 0) {
                 if (!_drbd_no_send_page(mdev, bvec->bv_page,
-                                     bvec->bv_offset, bvec->bv_len))
+                                     bvec->bv_offset, bvec->bv_len,
+                                     i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
                         return 0;
         }
         return 1;
@@ -2343,12 +2346,13 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
 {
         struct bio_vec *bvec;
         int i;
+        /* hint all but last page with MSG_MORE */
         __bio_for_each_segment(bvec, bio, i, 0) {
                 if (!_drbd_send_page(mdev, bvec->bv_page,
-                                  bvec->bv_offset, bvec->bv_len))
+                                  bvec->bv_offset, bvec->bv_len,
+                                  i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
                         return 0;
         }
-
         return 1;
 }
 
@@ -2356,9 +2360,11 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
 {
         struct page *page = e->pages;
         unsigned len = e->size;
+        /* hint all but last page with MSG_MORE */
         page_chain_for_each(page) {
                 unsigned l = min_t(unsigned, len, PAGE_SIZE);
-                if (!_drbd_send_page(mdev, page, 0, l))
+                if (!_drbd_send_page(mdev, page, 0, l,
+                                page_chain_next(page) ? MSG_MORE : 0))
                         return 0;
                 len -= l;
         }
@@ -2438,11 +2444,11 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
         p.dp_flags = cpu_to_be32(dp_flags);
         set_bit(UNPLUG_REMOTE, &mdev->flags);
         ok = (sizeof(p) ==
-                drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
+                drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
         if (ok && dgs) {
                 dgb = mdev->int_dig_out;
                 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
-                ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
+                ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
         }
         if (ok) {
                 if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
@@ -2491,11 +2497,11 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
                 return 0;
 
         ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
-                                        sizeof(p), MSG_MORE);
+                                        sizeof(p), dgs ? MSG_MORE : 0);
         if (ok && dgs) {
                 dgb = mdev->int_dig_out;
                 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
-                ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
+                ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
         }
         if (ok)
                 ok = _drbd_send_zc_ee(mdev, e);