author     Jens Axboe <axboe@kernel.dk>   2012-05-01 08:29:55 -0400
committer  Jens Axboe <axboe@kernel.dk>   2012-05-01 08:29:55 -0400
commit     0b7877d4eea3f93e3dd941999522bbd8c538cb53
tree       ade6d4e411b9b9b569c802e3b2179826162c934c /include/linux/blkdev.h
parent     bd1a68b59c8e3bce45fb76632c64e1e063c3962d
parent     69964ea4c7b68c9399f7977aa5b9aa6539a6a98a
Merge tag 'v3.4-rc5' into for-3.5/core
The core branch is behind driver commits that we want to build
on for 3.5, hence I'm pulling in a later -rc.
Linux 3.4-rc5
Conflicts:
Documentation/feature-removal-schedule.txt
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--   include/linux/blkdev.h   18
1 file changed, 7 insertions, 11 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index af33fb1adfee..9e0edbfcbaea 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -443,14 +443,10 @@ struct request_queue {
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
-static inline int queue_is_locked(struct request_queue *q)
+static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
-#ifdef CONFIG_SMP
-	spinlock_t *lock = q->queue_lock;
-	return lock && spin_is_locked(lock);
-#else
-	return 1;
-#endif
+	if (q->queue_lock)
+		lockdep_assert_held(q->queue_lock);
 }
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
@@ -462,7 +458,7 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
 static inline int queue_flag_test_and_clear(unsigned int flag,
 					    struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (test_bit(flag, &q->queue_flags)) {
 		__clear_bit(flag, &q->queue_flags);
@@ -475,7 +471,7 @@ static inline int queue_flag_test_and_clear(unsigned int flag,
 static inline int queue_flag_test_and_set(unsigned int flag,
 					  struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (!test_bit(flag, &q->queue_flags)) {
 		__set_bit(flag, &q->queue_flags);
@@ -487,7 +483,7 @@ static inline int queue_flag_test_and_set(unsigned int flag,
 
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__set_bit(flag, &q->queue_flags);
 }
 
@@ -504,7 +500,7 @@ static inline int queue_in_flight(struct request_queue *q)
 
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__clear_bit(flag, &q->queue_flags);
 }
 
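For context, a minimal sketch (not part of this commit) of how the rewritten flag helpers are intended to be used: the caller is expected to hold q->queue_lock, and with CONFIG_LOCKDEP enabled the new queue_lockdep_assert_held() checks that the current context actually owns that lock, whereas the old spin_is_locked()-based check could only confirm that somebody held it. The helper name example_stop_queue() and the choice of QUEUE_FLAG_STOPPED below are illustrative assumptions, patterned after how drivers call the block layer with the queue lock held.

/* Illustrative sketch only -- not from this commit. */
#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical caller that manipulates a queue flag under queue_lock. */
static void example_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/*
	 * queue_flag_set() now calls queue_lockdep_assert_held(q):
	 * with CONFIG_LOCKDEP this verifies that *this* context holds
	 * q->queue_lock; without lockdep it compiles away entirely,
	 * unlike the old WARN_ON_ONCE(!queue_is_locked(q)) check.
	 */
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}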