aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/dm-log-userspace-base.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md/dm-log-userspace-base.c')
-rw-r--r--drivers/md/dm-log-userspace-base.c139
1 files changed, 111 insertions, 28 deletions
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 1ed0094f064b..aa2e0c374ab3 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -12,12 +12,22 @@
12 12
13#include "dm-log-userspace-transfer.h" 13#include "dm-log-userspace-transfer.h"
14 14
15#define DM_LOG_USERSPACE_VSN "1.1.0"
16
15struct flush_entry { 17struct flush_entry {
16 int type; 18 int type;
17 region_t region; 19 region_t region;
18 struct list_head list; 20 struct list_head list;
19}; 21};
20 22
23/*
 24 * This limit on the number of mark and clear requests is, to a degree,
25 * arbitrary. However, there is some basis for the choice in the limits
26 * imposed on the size of data payload by dm-log-userspace-transfer.c:
27 * dm_consult_userspace().
28 */
29#define MAX_FLUSH_GROUP_COUNT 32
30
21struct log_c { 31struct log_c {
22 struct dm_target *ti; 32 struct dm_target *ti;
23 uint32_t region_size; 33 uint32_t region_size;
@@ -37,8 +47,15 @@ struct log_c {
37 */ 47 */
38 uint64_t in_sync_hint; 48 uint64_t in_sync_hint;
39 49
50 /*
51 * Mark and clear requests are held until a flush is issued
52 * so that we can group, and thereby limit, the amount of
53 * network traffic between kernel and userspace. The 'flush_lock'
54 * is used to protect these lists.
55 */
40 spinlock_t flush_lock; 56 spinlock_t flush_lock;
41 struct list_head flush_list; /* only for clear and mark requests */ 57 struct list_head mark_list;
58 struct list_head clear_list;
42}; 59};
43 60
44static mempool_t *flush_entry_pool; 61static mempool_t *flush_entry_pool;
@@ -169,7 +186,8 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
169 186
170 strncpy(lc->uuid, argv[0], DM_UUID_LEN); 187 strncpy(lc->uuid, argv[0], DM_UUID_LEN);
171 spin_lock_init(&lc->flush_lock); 188 spin_lock_init(&lc->flush_lock);
172 INIT_LIST_HEAD(&lc->flush_list); 189 INIT_LIST_HEAD(&lc->mark_list);
190 INIT_LIST_HEAD(&lc->clear_list);
173 191
174 str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str); 192 str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
175 if (str_size < 0) { 193 if (str_size < 0) {
@@ -181,8 +199,11 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
181 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR, 199 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
182 ctr_str, str_size, NULL, NULL); 200 ctr_str, str_size, NULL, NULL);
183 201
184 if (r == -ESRCH) { 202 if (r < 0) {
185 DMERR("Userspace log server not found"); 203 if (r == -ESRCH)
204 DMERR("Userspace log server not found");
205 else
206 DMERR("Userspace log server failed to create log");
186 goto out; 207 goto out;
187 } 208 }
188 209
@@ -214,10 +235,9 @@ out:
214 235
215static void userspace_dtr(struct dm_dirty_log *log) 236static void userspace_dtr(struct dm_dirty_log *log)
216{ 237{
217 int r;
218 struct log_c *lc = log->context; 238 struct log_c *lc = log->context;
219 239
220 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR, 240 (void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
221 NULL, 0, 241 NULL, 0,
222 NULL, NULL); 242 NULL, NULL);
223 243
@@ -338,6 +358,71 @@ static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
338 return (r) ? 0 : (int)in_sync; 358 return (r) ? 0 : (int)in_sync;
339} 359}
340 360
361static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
362{
363 int r = 0;
364 struct flush_entry *fe;
365
366 list_for_each_entry(fe, flush_list, list) {
367 r = userspace_do_request(lc, lc->uuid, fe->type,
368 (char *)&fe->region,
369 sizeof(fe->region),
370 NULL, NULL);
371 if (r)
372 break;
373 }
374
375 return r;
376}
377
378static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
379{
380 int r = 0;
381 int count;
382 uint32_t type = 0;
383 struct flush_entry *fe, *tmp_fe;
384 LIST_HEAD(tmp_list);
385 uint64_t group[MAX_FLUSH_GROUP_COUNT];
386
387 /*
388 * Group process the requests
389 */
390 while (!list_empty(flush_list)) {
391 count = 0;
392
393 list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
394 group[count] = fe->region;
395 count++;
396
397 list_del(&fe->list);
398 list_add(&fe->list, &tmp_list);
399
400 type = fe->type;
401 if (count >= MAX_FLUSH_GROUP_COUNT)
402 break;
403 }
404
405 r = userspace_do_request(lc, lc->uuid, type,
406 (char *)(group),
407 count * sizeof(uint64_t),
408 NULL, NULL);
409 if (r) {
410 /* Group send failed. Attempt one-by-one. */
411 list_splice_init(&tmp_list, flush_list);
412 r = flush_one_by_one(lc, flush_list);
413 break;
414 }
415 }
416
417 /*
 418 * Must collect flush_entries that were successfully processed
419 * as a group so that they will be free'd by the caller.
420 */
421 list_splice_init(&tmp_list, flush_list);
422
423 return r;
424}
425
341/* 426/*
342 * userspace_flush 427 * userspace_flush
343 * 428 *
@@ -360,31 +445,25 @@ static int userspace_flush(struct dm_dirty_log *log)
360 int r = 0; 445 int r = 0;
361 unsigned long flags; 446 unsigned long flags;
362 struct log_c *lc = log->context; 447 struct log_c *lc = log->context;
363 LIST_HEAD(flush_list); 448 LIST_HEAD(mark_list);
449 LIST_HEAD(clear_list);
364 struct flush_entry *fe, *tmp_fe; 450 struct flush_entry *fe, *tmp_fe;
365 451
366 spin_lock_irqsave(&lc->flush_lock, flags); 452 spin_lock_irqsave(&lc->flush_lock, flags);
367 list_splice_init(&lc->flush_list, &flush_list); 453 list_splice_init(&lc->mark_list, &mark_list);
454 list_splice_init(&lc->clear_list, &clear_list);
368 spin_unlock_irqrestore(&lc->flush_lock, flags); 455 spin_unlock_irqrestore(&lc->flush_lock, flags);
369 456
370 if (list_empty(&flush_list)) 457 if (list_empty(&mark_list) && list_empty(&clear_list))
371 return 0; 458 return 0;
372 459
373 /* 460 r = flush_by_group(lc, &mark_list);
374 * FIXME: Count up requests, group request types, 461 if (r)
375 * allocate memory to stick all requests in and 462 goto fail;
376 * send to server in one go. Failing the allocation,
377 * do it one by one.
378 */
379 463
380 list_for_each_entry(fe, &flush_list, list) { 464 r = flush_by_group(lc, &clear_list);
381 r = userspace_do_request(lc, lc->uuid, fe->type, 465 if (r)
382 (char *)&fe->region, 466 goto fail;
383 sizeof(fe->region),
384 NULL, NULL);
385 if (r)
386 goto fail;
387 }
388 467
389 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, 468 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
390 NULL, 0, NULL, NULL); 469 NULL, 0, NULL, NULL);
@@ -395,7 +474,11 @@ fail:
395 * Calling code will receive an error and will know that 474 * Calling code will receive an error and will know that
396 * the log facility has failed. 475 * the log facility has failed.
397 */ 476 */
398 list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) { 477 list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
478 list_del(&fe->list);
479 mempool_free(fe, flush_entry_pool);
480 }
481 list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
399 list_del(&fe->list); 482 list_del(&fe->list);
400 mempool_free(fe, flush_entry_pool); 483 mempool_free(fe, flush_entry_pool);
401 } 484 }
@@ -425,7 +508,7 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
425 spin_lock_irqsave(&lc->flush_lock, flags); 508 spin_lock_irqsave(&lc->flush_lock, flags);
426 fe->type = DM_ULOG_MARK_REGION; 509 fe->type = DM_ULOG_MARK_REGION;
427 fe->region = region; 510 fe->region = region;
428 list_add(&fe->list, &lc->flush_list); 511 list_add(&fe->list, &lc->mark_list);
429 spin_unlock_irqrestore(&lc->flush_lock, flags); 512 spin_unlock_irqrestore(&lc->flush_lock, flags);
430 513
431 return; 514 return;
@@ -462,7 +545,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
462 spin_lock_irqsave(&lc->flush_lock, flags); 545 spin_lock_irqsave(&lc->flush_lock, flags);
463 fe->type = DM_ULOG_CLEAR_REGION; 546 fe->type = DM_ULOG_CLEAR_REGION;
464 fe->region = region; 547 fe->region = region;
465 list_add(&fe->list, &lc->flush_list); 548 list_add(&fe->list, &lc->clear_list);
466 spin_unlock_irqrestore(&lc->flush_lock, flags); 549 spin_unlock_irqrestore(&lc->flush_lock, flags);
467 550
468 return; 551 return;
@@ -684,7 +767,7 @@ static int __init userspace_dirty_log_init(void)
684 return r; 767 return r;
685 } 768 }
686 769
687 DMINFO("version 1.0.0 loaded"); 770 DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
688 return 0; 771 return 0;
689} 772}
690 773
@@ -694,7 +777,7 @@ static void __exit userspace_dirty_log_exit(void)
694 dm_ulog_tfr_exit(); 777 dm_ulog_tfr_exit();
695 mempool_destroy(flush_entry_pool); 778 mempool_destroy(flush_entry_pool);
696 779
697 DMINFO("version 1.0.0 unloaded"); 780 DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
698 return; 781 return;
699} 782}
700 783