author	Linus Torvalds <torvalds@linux-foundation.org>	2012-04-13 21:07:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-04-13 21:07:19 -0400
commit	d8dd0b6d4836bce81cece60509ef3b157a420776 (patch)
tree	7a28f327a15443d6c9d091f3d272abd107251ab7 /include
parent	2d59dcfb54ade45cacc59a6e7bd96b8c19088c3d (diff)
parent	1b2e19f17ed327af6add02978efdf354e4f8e4df (diff)
Merge branch 'for-3.4/core' of git://git.kernel.dk/linux-block
Pull block core bits from Jens Axboe:
 "It's a nice and quiet round this time, since most of the tricky stuff
  has been pushed to 3.5 to give it more time to mature.  After a few
  hectic block IO core changes for 3.3 and 3.2, I'm quite happy with a
  slow round.

  Really minor stuff in here, the only real functional change is making
  the auto-unplug threshold a per-queue entity.  The threshold is set
  so that it's low enough that we don't hold off IO for too long, but
  still big enough to get a nice benefit from the batched insert (and
  hence queue lock cost reduction).  For raid configurations, this
  currently breaks down."

* 'for-3.4/core' of git://git.kernel.dk/linux-block:
  block: make auto block plug flush threshold per-disk based
  Documentation: Add sysfs ABI change for cfq's target latency.
  block: Make cfq_target_latency tunable through sysfs.
  block: use lockdep_assert_held for queue locking
  block: blk_alloc_queue_node(): use caller's GFP flags instead of GFP_KERNEL
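The "batched insert" the pull message refers to comes from block plugging: a submitter opens an on-stack plug, queues bios behind it, and the requests are inserted into the request queue in one go when the plug is flushed, either explicitly or once the auto-unplug threshold (now tracked per queue/disk by this merge) is reached. Below is a minimal caller-side sketch using the existing blk_start_plug()/blk_finish_plug() API; the function name and its arguments are illustrative only and are not part of this merge.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical submitter: batch a set of already-prepared read bios. */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);			/* start batching on this task's stack */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* bios queue up behind the plug */
	blk_finish_plug(&plug);			/* flush: batched insert, fewer queue lock grabs */
}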
Diffstat (limited to 'include')
-rw-r--r--	include/linux/blkdev.h | 18 +++++-------------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 606cf339bb56..2aa24664a5b5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -426,14 +426,10 @@ struct request_queue {
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
-static inline int queue_is_locked(struct request_queue *q)
+static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
-#ifdef CONFIG_SMP
-	spinlock_t *lock = q->queue_lock;
-	return lock && spin_is_locked(lock);
-#else
-	return 1;
-#endif
+	if (q->queue_lock)
+		lockdep_assert_held(q->queue_lock);
 }
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
@@ -445,7 +441,7 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
 static inline int queue_flag_test_and_clear(unsigned int flag,
 					    struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (test_bit(flag, &q->queue_flags)) {
 		__clear_bit(flag, &q->queue_flags);
@@ -458,7 +454,7 @@ static inline int queue_flag_test_and_clear(unsigned int flag,
 static inline int queue_flag_test_and_set(unsigned int flag,
 					  struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (!test_bit(flag, &q->queue_flags)) {
 		__set_bit(flag, &q->queue_flags);
@@ -470,7 +466,7 @@ static inline int queue_flag_test_and_set(unsigned int flag,
 
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__set_bit(flag, &q->queue_flags);
 }
 
@@ -487,7 +483,7 @@ static inline int queue_in_flight(struct request_queue *q)
 
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__clear_bit(flag, &q->queue_flags);
 }
 
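The substance of this diff is the switch from a spin_is_locked()-based check to lockdep: spin_is_locked() only reports that someone holds the lock (and the old helper was hard-wired to 1 on !SMP), whereas lockdep_assert_held() warns unless the current context actually holds q->queue_lock and compiles away when lockdep is disabled. A hedged illustration of a caller that satisfies the new assertion follows; the function name and flag choice are examples only, not code from this merge.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical caller: toggle a queue flag with the queue lock held. */
static void example_toggle_nomerges(struct request_queue *q, bool on)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);	/* lockdep records ownership here */
	if (on)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);	/* assertion passes: lock held */
	else
		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}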