author     Jonathan Brassow <jbrassow@redhat.com>    2011-01-13 14:59:51 -0500
committer  Alasdair G Kergon <agk@redhat.com>        2011-01-13 14:59:51 -0500
commit     085ae0651b2791f3a430ddb76da92925b9952e13 (patch)
tree       8d15e4fed61628e285f7c22956f77319e5e839d5 /drivers
parent     909cc4fb48dd9870f6ebe4bd32cfbe37c102df62 (diff)
dm log userspace: group clear and mark requests
Allow the device-mapper log's 'mark' and 'clear' requests to be
grouped and processed in a batch. This can significantly reduce the
amount of traffic going between the kernel and userspace (where the
processing daemon resides).
Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
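To illustrate what the grouping changes on the wire: a grouped mark or clear request now carries a packed array of region numbers (count * sizeof(uint64_t) bytes, at most MAX_FLUSH_GROUP_COUNT = 32 per request) instead of one region per kernel/userspace round trip. Below is a minimal, hedged sketch of how a userspace consumer might walk such a payload; struct log_state, process_region() and handle_grouped_request() are hypothetical names chosen for illustration, not the actual cmirrord code.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical per-log state; the real daemon keeps its own bitmaps. */
struct log_state {
	uint64_t last_region;
};

/* Hypothetical single-region handler (mark or clear one region). */
static void process_region(struct log_state *ls, uint32_t type,
			   uint64_t region)
{
	ls->last_region = region;
	(void)type;
}

/*
 * Sketch: consume one grouped request.  The payload is a packed array
 * of uint64_t region numbers, so data_size must be a whole multiple of
 * sizeof(uint64_t); all entries in a group share the same request type.
 */
static int handle_grouped_request(struct log_state *ls, uint32_t type,
				  const char *data, size_t data_size)
{
	const uint64_t *regions = (const uint64_t *)data;
	size_t i, count;

	if (data_size % sizeof(uint64_t))
		return -EINVAL;

	count = data_size / sizeof(uint64_t);
	for (i = 0; i < count; i++)
		process_region(ls, type, regions[i]);

	return 0;
}

With groups of up to 32 regions, flushing N pending mark (or clear) requests costs roughly N/32 round trips to the daemon instead of N, plus the final DM_ULOG_FLUSH request.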
Diffstat (limited to 'drivers')
 drivers/md/dm-log-userspace-base.c | 102
 1 file changed, 79 insertions(+), 23 deletions(-)
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 767adf300fa5..31e1687e7bf6 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -18,6 +18,14 @@ struct flush_entry {
 	struct list_head list;
 };
 
+/*
+ * This limit on the number of mark and clear request is, to a degree,
+ * arbitrary.  However, there is some basis for the choice in the limits
+ * imposed on the size of data payload by dm-log-userspace-transfer.c:
+ * dm_consult_userspace().
+ */
+#define MAX_FLUSH_GROUP_COUNT 32
+
 struct log_c {
 	struct dm_target *ti;
 	uint32_t region_size;
@@ -348,6 +356,71 @@ static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
 	return (r) ? 0 : (int)in_sync;
 }
 
+static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
+{
+	int r = 0;
+	struct flush_entry *fe;
+
+	list_for_each_entry(fe, flush_list, list) {
+		r = userspace_do_request(lc, lc->uuid, fe->type,
+					 (char *)&fe->region,
+					 sizeof(fe->region),
+					 NULL, NULL);
+		if (r)
+			break;
+	}
+
+	return r;
+}
+
+static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
+{
+	int r = 0;
+	int count;
+	uint32_t type = 0;
+	struct flush_entry *fe, *tmp_fe;
+	LIST_HEAD(tmp_list);
+	uint64_t group[MAX_FLUSH_GROUP_COUNT];
+
+	/*
+	 * Group process the requests
+	 */
+	while (!list_empty(flush_list)) {
+		count = 0;
+
+		list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
+			group[count] = fe->region;
+			count++;
+
+			list_del(&fe->list);
+			list_add(&fe->list, &tmp_list);
+
+			type = fe->type;
+			if (count >= MAX_FLUSH_GROUP_COUNT)
+				break;
+		}
+
+		r = userspace_do_request(lc, lc->uuid, type,
+					 (char *)(group),
+					 count * sizeof(uint64_t),
+					 NULL, NULL);
+		if (r) {
+			/* Group send failed.  Attempt one-by-one. */
+			list_splice_init(&tmp_list, flush_list);
+			r = flush_one_by_one(lc, flush_list);
+			break;
+		}
+	}
+
+	/*
+	 * Must collect flush_entrys that were successfully processed
+	 * as a group so that they will be free'd by the caller.
+	 */
+	list_splice_init(&tmp_list, flush_list);
+
+	return r;
+}
+
 /*
  * userspace_flush
  *
@@ -382,30 +455,13 @@ static int userspace_flush(struct dm_dirty_log *log)
 	if (list_empty(&mark_list) && list_empty(&clear_list))
 		return 0;
 
-	/*
-	 * FIXME: Count up requests, group request types,
-	 * allocate memory to stick all requests in and
-	 * send to server in one go.  Failing the allocation,
-	 * do it one by one.
-	 */
-
-	list_for_each_entry(fe, &mark_list, list) {
-		r = userspace_do_request(lc, lc->uuid, fe->type,
-					 (char *)&fe->region,
-					 sizeof(fe->region),
-					 NULL, NULL);
-		if (r)
-			goto fail;
-	}
+	r = flush_by_group(lc, &mark_list);
+	if (r)
+		goto fail;
 
-	list_for_each_entry(fe, &clear_list, list) {
-		r = userspace_do_request(lc, lc->uuid, fe->type,
-					 (char *)&fe->region,
-					 sizeof(fe->region),
-					 NULL, NULL);
-		if (r)
-			goto fail;
-	}
+	r = flush_by_group(lc, &clear_list);
+	if (r)
+		goto fail;
 
 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
 				 NULL, 0, NULL, NULL);