author		Ingo Molnar <mingo@elte.hu>	2011-04-22 04:19:26 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-22 04:19:30 -0400
commit		eff430de53be6f3328c3eebe93755f1ecf499e37 (patch)
tree		c8e5ae958fe3e6656b4e96c83bbda17e649321a2 /include/linux/blkdev.h
parent		9cbdb702092a2d82f909312f4ec3eeded77bb82e (diff)
parent		91e8549bde9e5cc88c5a2e8c8114389279e240b5 (diff)
Merge branch 'linus' into perf/core
Merge reason: Pick up upstream fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	54
1 file changed, 36 insertions(+), 18 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 32176cc8e715..2ad95fa1d130 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -388,20 +388,19 @@ struct request_queue
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH	7	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI		8	/* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES	9	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP	10	/* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO	11	/* fake timeout */
-#define QUEUE_FLAG_STACKABLE	12	/* supports request stacking */
-#define QUEUE_FLAG_NONROT	13	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP	9	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
-#define QUEUE_FLAG_DISCARD	16	/* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES	17	/* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM	18	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
+#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
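
These QUEUE_FLAG_* constants are bit indices into q->queue_flags, so dropping QUEUE_FLAG_REENTER and the unused slot 14 just repacks the numbering; in-tree code goes through the accessor helpers rather than caching raw values, which is what makes the renumbering safe. A minimal sketch of that usage, assuming a driver probe path (the example_* functions are hypothetical; the helpers and macros are from this header):

#include <linux/blkdev.h>

/* e.g. at probe time, an SSD driver advertises its queue properties */
static void example_mark_ssd(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);	/* bit 12 after this patch */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}

static bool example_is_ssd(struct request_queue *q)
{
	/* blk_queue_nonrot() expands to test_bit(QUEUE_FLAG_NONROT, &q->queue_flags) */
	return blk_queue_nonrot(q);
}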
@@ -697,8 +696,9 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
+extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);
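
__blk_run_queue() loses its force_kblockd flag here; callers that passed true are expected to use the new blk_run_queue_async() instead, which defers the queue run to kblockd. A hedged sketch of what a converted call site might look like (example_kick_queue() is hypothetical; both helpers require q->queue_lock to be held):

#include <linux/blkdev.h>

static void example_kick_queue(struct request_queue *q, bool cannot_recurse)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (cannot_recurse)
		blk_run_queue_async(q);		/* was __blk_run_queue(q, true) */
	else
		__blk_run_queue(q);		/* was __blk_run_queue(q, false) */
	spin_unlock_irqrestore(q->queue_lock, flags);
}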
@@ -857,26 +857,39 @@ extern void blk_put_queue(struct request_queue *);
 struct blk_plug {
 	unsigned long magic;
 	struct list_head list;
+	struct list_head cb_list;
 	unsigned int should_sort;
 };
+struct blk_plug_cb {
+	struct list_head list;
+	void (*callback)(struct blk_plug_cb *);
+};
 
 extern void blk_start_plug(struct blk_plug *);
 extern void blk_finish_plug(struct blk_plug *);
-extern void __blk_flush_plug(struct task_struct *, struct blk_plug *);
+extern void blk_flush_plug_list(struct blk_plug *, bool);
 
 static inline void blk_flush_plug(struct task_struct *tsk)
 {
 	struct blk_plug *plug = tsk->plug;
 
-	if (unlikely(plug))
-		__blk_flush_plug(tsk, plug);
+	if (plug)
+		blk_flush_plug_list(plug, false);
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	if (plug)
+		blk_flush_plug_list(plug, true);
 }
 
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
 	struct blk_plug *plug = tsk->plug;
 
-	return plug && !list_empty(&plug->list);
+	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
 }
 
 /*
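
The new cb_list and struct blk_plug_cb give subsystems a hook into plug flushing: chain a callback onto the current task's plug, and ->callback fires when the plug is flushed. A rough sketch of the intended pattern, assuming one callback per plugged context (struct example_dev and all example_* names are hypothetical, and a real user would also avoid adding duplicate entries to cb_list):

#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct example_dev;					/* hypothetical driver state */
static void example_dev_kick(struct example_dev *dev);	/* hypothetical deferred work */

struct example_plug {
	struct blk_plug_cb cb;
	struct example_dev *dev;
};

/* invoked by blk_flush_plug_list() when the plug is flushed */
static void example_unplug(struct blk_plug_cb *cb)
{
	struct example_plug *ep = container_of(cb, struct example_plug, cb);

	example_dev_kick(ep->dev);
	kfree(ep);
}

static void example_check_plugged(struct example_dev *dev)
{
	struct blk_plug *plug = current->plug;
	struct example_plug *ep;

	if (!plug)
		return;
	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return;
	ep->cb.callback = example_unplug;
	ep->dev = dev;
	list_add(&ep->cb.list, &plug->cb_list);
}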
@@ -1314,6 +1327,11 @@ static inline void blk_flush_plug(struct task_struct *task)
 {
 }
 
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
+
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
 	return false;
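
This last hunk is in the !CONFIG_BLOCK stubs: blk_schedule_flush_plug() gains an empty counterpart so the scheduler can call it unconditionally when a plugged task goes to sleep. For context, a hedged sketch of the plugging pattern all of this supports (example_submit_reads() and its arguments are hypothetical):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

static void example_submit_reads(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);			/* installs current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* held back on plug->list */
	blk_finish_plug(&plug);			/* flushes plug->list and plug->cb_list */
}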