diff options
author | Davidlohr Bueso <dave@stgolabs.net> | 2015-10-29 16:25:59 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2015-10-29 16:25:59 -0400 |
commit | cdea01b2bf98affb7e9c44530108a4a28535eee8 (patch) | |
tree | d76f70baf592052d4dff44e45bcde1ee29062570 /kernel/trace/blktrace.c | |
parent | cfd0c552a8272d691691f40073654d775836e23a (diff) |
blktrace: re-write setting q->blk_trace
This is really about simplifying the double xchg patterns into
a single cmpxchg, with the same logic. Other than the immediate
cleanup, there are some subtleties this change deals with:
(i) While the load of the old bt is fully ordered wrt everything,
ie:
old_bt = xchg(&q->blk_trace, bt); [barrier]
if (old_bt)
(void) xchg(&q->blk_trace, old_bt); [barrier]
blk_trace could still be changed between the xchg and the old_bt
load. Note that this race is merely theoretical and, afaict, the
window is very small, but doing everything in a single operation
with cmpxchg closes this potential race.
(ii) Ordering guarantees are obviously kept with cmpxchg.
(iii) Gets rid of the hacky-by-nature (void)xchg pattern.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'kernel/trace/blktrace.c')
-rw-r--r-- | kernel/trace/blktrace.c | 16 |
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 90e72a0c3047..e3a26188b95e 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -437,7 +437,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | |||
437 | struct block_device *bdev, | 437 | struct block_device *bdev, |
438 | struct blk_user_trace_setup *buts) | 438 | struct blk_user_trace_setup *buts) |
439 | { | 439 | { |
440 | struct blk_trace *old_bt, *bt = NULL; | 440 | struct blk_trace *bt = NULL; |
441 | struct dentry *dir = NULL; | 441 | struct dentry *dir = NULL; |
442 | int ret; | 442 | int ret; |
443 | 443 | ||
@@ -519,11 +519,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | |||
519 | bt->trace_state = Blktrace_setup; | 519 | bt->trace_state = Blktrace_setup; |
520 | 520 | ||
521 | ret = -EBUSY; | 521 | ret = -EBUSY; |
522 | old_bt = xchg(&q->blk_trace, bt); | 522 | if (cmpxchg(&q->blk_trace, NULL, bt)) |
523 | if (old_bt) { | ||
524 | (void) xchg(&q->blk_trace, old_bt); | ||
525 | goto err; | 523 | goto err; |
526 | } | ||
527 | 524 | ||
528 | if (atomic_inc_return(&blk_probes_ref) == 1) | 525 | if (atomic_inc_return(&blk_probes_ref) == 1) |
529 | blk_register_tracepoints(); | 526 | blk_register_tracepoints(); |
@@ -1481,7 +1478,7 @@ static int blk_trace_remove_queue(struct request_queue *q) | |||
1481 | static int blk_trace_setup_queue(struct request_queue *q, | 1478 | static int blk_trace_setup_queue(struct request_queue *q, |
1482 | struct block_device *bdev) | 1479 | struct block_device *bdev) |
1483 | { | 1480 | { |
1484 | struct blk_trace *old_bt, *bt = NULL; | 1481 | struct blk_trace *bt = NULL; |
1485 | int ret = -ENOMEM; | 1482 | int ret = -ENOMEM; |
1486 | 1483 | ||
1487 | bt = kzalloc(sizeof(*bt), GFP_KERNEL); | 1484 | bt = kzalloc(sizeof(*bt), GFP_KERNEL); |
@@ -1497,12 +1494,9 @@ static int blk_trace_setup_queue(struct request_queue *q, | |||
1497 | 1494 | ||
1498 | blk_trace_setup_lba(bt, bdev); | 1495 | blk_trace_setup_lba(bt, bdev); |
1499 | 1496 | ||
1500 | old_bt = xchg(&q->blk_trace, bt); | 1497 | ret = -EBUSY; |
1501 | if (old_bt != NULL) { | 1498 | if (cmpxchg(&q->blk_trace, NULL, bt)) |
1502 | (void)xchg(&q->blk_trace, old_bt); | ||
1503 | ret = -EBUSY; | ||
1504 | goto free_bt; | 1499 | goto free_bt; |
1505 | } | ||
1506 | 1500 | ||
1507 | if (atomic_inc_return(&blk_probes_ref) == 1) | 1501 | if (atomic_inc_return(&blk_probes_ref) == 1) |
1508 | blk_register_tracepoints(); | 1502 | blk_register_tracepoints(); |