author     Jonathan Brassow <jbrassow@redhat.com>    2011-01-13 14:59:50 -0500
committer  Alasdair G Kergon <agk@redhat.com>        2011-01-13 14:59:50 -0500
commit     909cc4fb48dd9870f6ebe4bd32cfbe37c102df62 (patch)
tree       c88ef5cc536d49871a6b176d2742048c659bbdc8 /drivers/md/dm-log-userspace-base.c
parent     8d35d3e37eed884ba15229a146df846f399909b4 (diff)
dm log userspace: split flush queue
Split the 'flush_list', which contained a mix of both 'mark' and 'clear'
requests, into two distinct lists ('mark_list' and 'clear_list').

The device mapper log implementations (used by various DM targets) are
allowed to cache 'mark' and 'clear' requests until a 'flush' is received.
Until now, these cached requests were kept in the same list. They will now
be put into distinct lists to facilitate group processing of these requests
(in the next patch).

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
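As a companion to the patch below, here is a minimal userspace sketch of the
pattern the commit describes: requests are cached on per-type lists under a
single lock, then each list is spliced out and flushed as a group. It is not
the kernel code; log_ctx, queue_request(), do_request() and flush() are
simplified, hypothetical stand-ins for struct log_c,
userspace_mark_region()/userspace_clear_region(), userspace_do_request() and
userspace_flush(), and a pthread mutex plays the role of the 'flush_lock'
spinlock.

/*
 * Minimal userspace sketch (not the kernel code) of the split-list flush
 * pattern: 'mark' and 'clear' requests are cached on separate lists under
 * one lock, then each list is drained as a group at flush time.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum req_type { MARK_REGION, CLEAR_REGION };

struct flush_entry {
        enum req_type type;
        uint64_t region;
        struct flush_entry *next;       /* singly linked list for brevity */
};

struct log_ctx {
        pthread_mutex_t flush_lock;     /* protects both lists below */
        struct flush_entry *mark_list;  /* cached 'mark' requests */
        struct flush_entry *clear_list; /* cached 'clear' requests */
};

/* Stand-in for userspace_do_request(): pretend to ship one request. */
static int do_request(enum req_type type, uint64_t region)
{
        printf("%s region %llu\n", type == MARK_REGION ? "mark" : "clear",
               (unsigned long long)region);
        return 0;
}

/* Cache a request on the list that matches its type. */
static void queue_request(struct log_ctx *lc, enum req_type type,
                          uint64_t region)
{
        struct flush_entry *fe = malloc(sizeof(*fe));
        struct flush_entry **head;

        fe->type = type;
        fe->region = region;

        pthread_mutex_lock(&lc->flush_lock);
        head = (type == MARK_REGION) ? &lc->mark_list : &lc->clear_list;
        fe->next = *head;
        *head = fe;
        pthread_mutex_unlock(&lc->flush_lock);
}

/* Send every cached entry of one list, freeing each entry as we go. */
static int flush_one_list(struct flush_entry *head)
{
        struct flush_entry *fe;
        int r = 0;

        while ((fe = head) != NULL) {
                head = fe->next;
                if (!r)
                        r = do_request(fe->type, fe->region);
                free(fe);
        }
        return r;
}

/* Splice both lists out under the lock, then process them group by group. */
static int flush(struct log_ctx *lc)
{
        struct flush_entry *marks, *clears;
        int r1, r2;

        pthread_mutex_lock(&lc->flush_lock);
        marks = lc->mark_list;
        clears = lc->clear_list;
        lc->mark_list = NULL;
        lc->clear_list = NULL;
        pthread_mutex_unlock(&lc->flush_lock);

        /* Unlike the kernel code, drain both lists even on error so every
         * entry is freed; the first failure is still reported. */
        r1 = flush_one_list(marks);
        r2 = flush_one_list(clears);
        return r1 ? r1 : r2;
}

int main(void)
{
        struct log_ctx lc = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };

        queue_request(&lc, MARK_REGION, 1);
        queue_request(&lc, CLEAR_REGION, 2);
        queue_request(&lc, MARK_REGION, 3);
        return flush(&lc);
}

As in the kernel function, the lists are spliced out while the lock is held
and the requests are sent afterwards; keeping 'mark' and 'clear' entries on
separate lists is what allows each group to be processed (and, per the commit
message, later batched) on its own.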
Diffstat (limited to 'drivers/md/dm-log-userspace-base.c')
-rw-r--r--  drivers/md/dm-log-userspace-base.c  41
1 file changed, 32 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 1c25ad3d02a2..767adf300fa5 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -37,8 +37,15 @@ struct log_c {
 	 */
 	uint64_t in_sync_hint;
 
+	/*
+	 * Mark and clear requests are held until a flush is issued
+	 * so that we can group, and thereby limit, the amount of
+	 * network traffic between kernel and userspace.  The 'flush_lock'
+	 * is used to protect these lists.
+	 */
 	spinlock_t flush_lock;
-	struct list_head flush_list;  /* only for clear and mark requests */
+	struct list_head mark_list;
+	struct list_head clear_list;
 };
 
 static mempool_t *flush_entry_pool;
@@ -169,7 +176,8 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 
 	strncpy(lc->uuid, argv[0], DM_UUID_LEN);
 	spin_lock_init(&lc->flush_lock);
-	INIT_LIST_HEAD(&lc->flush_list);
+	INIT_LIST_HEAD(&lc->mark_list);
+	INIT_LIST_HEAD(&lc->clear_list);
 
 	str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
 	if (str_size < 0) {
@@ -362,14 +370,16 @@ static int userspace_flush(struct dm_dirty_log *log)
 	int r = 0;
 	unsigned long flags;
 	struct log_c *lc = log->context;
-	LIST_HEAD(flush_list);
+	LIST_HEAD(mark_list);
+	LIST_HEAD(clear_list);
 	struct flush_entry *fe, *tmp_fe;
 
 	spin_lock_irqsave(&lc->flush_lock, flags);
-	list_splice_init(&lc->flush_list, &flush_list);
+	list_splice_init(&lc->mark_list, &mark_list);
+	list_splice_init(&lc->clear_list, &clear_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
 
-	if (list_empty(&flush_list))
+	if (list_empty(&mark_list) && list_empty(&clear_list))
 		return 0;
 
 	/*
@@ -379,7 +389,16 @@ static int userspace_flush(struct dm_dirty_log *log)
 	 * do it one by one.
 	 */
 
-	list_for_each_entry(fe, &flush_list, list) {
+	list_for_each_entry(fe, &mark_list, list) {
+		r = userspace_do_request(lc, lc->uuid, fe->type,
+					 (char *)&fe->region,
+					 sizeof(fe->region),
+					 NULL, NULL);
+		if (r)
+			goto fail;
+	}
+
+	list_for_each_entry(fe, &clear_list, list) {
 		r = userspace_do_request(lc, lc->uuid, fe->type,
 					 (char *)&fe->region,
 					 sizeof(fe->region),
@@ -397,7 +416,11 @@ fail:
 	 * Calling code will receive an error and will know that
 	 * the log facility has failed.
 	 */
-	list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) {
+	list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
+		list_del(&fe->list);
+		mempool_free(fe, flush_entry_pool);
+	}
+	list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
 		list_del(&fe->list);
 		mempool_free(fe, flush_entry_pool);
 	}
@@ -427,7 +450,7 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
 	spin_lock_irqsave(&lc->flush_lock, flags);
 	fe->type = DM_ULOG_MARK_REGION;
 	fe->region = region;
-	list_add(&fe->list, &lc->flush_list);
+	list_add(&fe->list, &lc->mark_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
 
 	return;
@@ -464,7 +487,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
 	spin_lock_irqsave(&lc->flush_lock, flags);
 	fe->type = DM_ULOG_CLEAR_REGION;
 	fe->region = region;
-	list_add(&fe->list, &lc->flush_list);
+	list_add(&fe->list, &lc->clear_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
 
 	return;