author		Suresh Jayaraman <sjayaraman@suse.de>	2011-09-21 04:00:16 -0400
committer	Jens Axboe <axboe@kernel.dk>	2011-09-21 04:00:16 -0400
commit		75df713627f28f88b901b329c8857747545fd4ab (patch)
tree		0986bc4d82595dae4a3fabb15fce4780c053f004 /include/linux
parent		27a84d54c02591e815d291ae0ee4bfb9cfd21065 (diff)
block: document blk-plug
Thus spake Andrew Morton:

"And I have the usual maintainability whine. If someone comes up to
vmscan.c and sees it calling blk_start_plug(), how are they supposed to
work out why that call is there? They go look at the blk_start_plug()
definition and it is undocumented. I think we can do better than this?"

Adapted from the LWN article - http://lwn.net/Articles/438256/ by Jens
Axboe and from an earlier attempt by Shaohua Li to document blk-plug.

[akpm@linux-foundation.org: grammatical and spelling tweaks]

Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@google.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
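To make such call sites concrete, here is a minimal usage sketch of the
plug API as a caller like vmscan.c would use it. blk_start_plug() and
blk_finish_plug() are the real entry points; the function name and the
bio array are hypothetical illustration, and submit_bio() takes (rw, bio)
in kernels of this vintage:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical caller: batch already-built bios behind a plug so the
 * block layer can merge adjacent ones before they reach the device. */
static void submit_bio_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests queue on the per-task list */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* merge candidates while plugged */
	blk_finish_plug(&plug);		/* flush the batch to the request_queue */
}

blk_finish_plug() hands the accumulated list to the device's
request_queue in one go, which is where the reduced lock contention
described in the new comment comes from.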
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/blkdev.h	24
1 file changed, 15 insertions, 9 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c712efdafc3f..1978655faa3b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -860,17 +860,23 @@ struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
 
 /*
- * Note: Code in between changing the blk_plug list/cb_list or element of such
- * lists is preemptable, but such code can't do sleep (or be very careful),
- * otherwise data is corrupted. For details, please check schedule() where
- * blk_schedule_flush_plug() is called.
+ * blk_plug permits building a queue of related requests by holding the I/O
+ * fragments for a short period. This allows merging of sequential requests
+ * into single larger request. As the requests are moved from a per-task list to
+ * the device's request_queue in a batch, this results in improved scalability
+ * as the lock contention for request_queue lock is reduced.
+ *
+ * It is ok not to disable preemption when adding the request to the plug list
+ * or when attempting a merge, because blk_schedule_flush_list() will only flush
+ * the plug list when the task sleeps by itself. For details, please see
+ * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	unsigned long magic;
-	struct list_head list;
-	struct list_head cb_list;
-	unsigned int should_sort;
-	unsigned int count;
+	unsigned long magic; /* detect uninitialized use-cases */
+	struct list_head list; /* requests */
+	struct list_head cb_list; /* md requires an unplug callback */
+	unsigned int should_sort; /* list to be sorted before flushing? */
+	unsigned int count; /* number of queued requests */
 };
 #define BLK_MAX_REQUEST_COUNT 16
 
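For context on the count field and BLK_MAX_REQUEST_COUNT above: the
submission path of this era flushes the plug once a task has queued too
many requests, so a single plugged task cannot build an unbounded
backlog. A paraphrased sketch follows; the helper name is hypothetical,
as in the kernel this logic sits inline in the request submission path:

#include <linux/blkdev.h>

/* Hypothetical helper: add a request to the per-task plug list,
 * flushing first if the batch has hit its cap. */
static void plug_add_request(struct blk_plug *plug, struct request *rq)
{
	if (plug->count >= BLK_MAX_REQUEST_COUNT)
		blk_flush_plug_list(plug, false);	/* false: not called from schedule() */
	list_add_tail(&rq->queuelist, &plug->list);
	plug->count++;
}

blk_flush_plug_list() resets count after moving the queued requests to
the device, so the next batch starts from zero.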