Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 42 +++++++++++++++++++++++-------------------
 1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4a99b76c5a33..0881b5cdee3d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -54,16 +54,23 @@ struct as_io_context {
 
 struct cfq_queue;
 struct cfq_io_context {
-	void (*dtor)(struct cfq_io_context *);
-	void (*exit)(struct cfq_io_context *);
-
-	struct io_context *ioc;
-
 	/*
 	 * circular list of cfq_io_contexts belonging to a process io context
 	 */
 	struct list_head list;
 	struct cfq_queue *cfqq;
+	void *key;
+
+	struct io_context *ioc;
+
+	unsigned long last_end_request;
+	unsigned long last_queue;
+	unsigned long ttime_total;
+	unsigned long ttime_samples;
+	unsigned long ttime_mean;
+
+	void (*dtor)(struct cfq_io_context *);
+	void (*exit)(struct cfq_io_context *);
 };
 
 /*
@@ -73,7 +80,9 @@ struct cfq_io_context {
  */
 struct io_context {
 	atomic_t refcount;
-	pid_t pid;
+	struct task_struct *task;
+
+	int (*set_ioprio)(struct io_context *, unsigned int);
 
 	/*
 	 * For request batching
@@ -81,14 +90,13 @@ struct io_context {
 	unsigned long last_waited; /* Time last woken after wait for request */
 	int nr_batch_requests;     /* Number of requests left in the batch */
 
-	spinlock_t lock;
-
 	struct as_io_context *aic;
 	struct cfq_io_context *cic;
 };
 
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
+struct io_context *current_io_context(int gfp_flags);
 struct io_context *get_io_context(int gfp_flags);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
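
Illustration only, not part of the patch: the declarations above pair get_io_context() with put_io_context() as a counted reference, and a new current_io_context() helper is added. A minimal usage sketch under that assumption; the idea that current_io_context() returns the current task's context without taking a reference is inferred from the naming, not shown in this header.

static void example_io_context_user(void)
{
	struct io_context *ioc;

	/* assumed: returns a counted reference, may allocate, can be NULL */
	ioc = get_io_context(GFP_KERNEL);
	if (!ioc)
		return;

	/* ... inspect or update per-process I/O state here ... */

	put_io_context(ioc);	/* drop the reference taken above */
}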
@@ -134,6 +142,8 @@ struct request {
 
 	void *elevator_private;
 
+	unsigned short ioprio;
+
 	int rq_status;	/* should split this into a few status bits */
 	struct gendisk *rq_disk;
 	int errors;
@@ -285,16 +295,12 @@ enum blk_queue_state {
 	Queue_up,
 };
 
-#define BLK_TAGS_PER_LONG	(sizeof(unsigned long) * 8)
-#define BLK_TAGS_MASK		(BLK_TAGS_PER_LONG - 1)
-
 struct blk_queue_tag {
 	struct request **tag_index;	/* map of busy tags */
 	unsigned long *tag_map;		/* bit map of free/busy tags */
 	struct list_head busy_list;	/* fifo list of busy tags */
 	int busy;			/* current depth */
 	int max_depth;			/* what we will send to device */
-	int real_max_depth;		/* what the array can hold */
 	atomic_t refcnt;		/* map can be shared */
 };
 
@@ -396,6 +402,7 @@ struct request_queue
 	 */
 	unsigned int		sg_timeout;
 	unsigned int		sg_reserved_size;
+	int			node;
 
 	struct list_head	drain_list;
 
@@ -542,15 +549,12 @@ extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void blk_end_sync_rq(struct request *rq);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
-extern void __blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, int);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
 extern void blk_plug_device(request_queue_t *);
 extern int blk_remove_plug(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
-extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
 extern void blk_start_queue(request_queue_t *q);
 extern void blk_stop_queue(request_queue_t *q);
@@ -615,6 +619,8 @@ static inline void blkdev_dequeue_request(struct request *req)
 /*
  * Access functions for manipulating queue properties
  */
+extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+					spinlock_t *lock, int node_id);
 extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
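
A hypothetical driver-side sketch, not part of the patch, showing how the new blk_init_queue_node() might be used alongside the existing blk_init_queue(); the mydrv_* names and the lock are invented for illustration.

static spinlock_t mydrv_lock = SPIN_LOCK_UNLOCKED;	/* assumed driver lock */

static void mydrv_request_fn(request_queue_t *q)
{
	/* dequeue and service requests; details omitted */
}

static request_queue_t *mydrv_init_queue(int node_id)
{
	/* allocate the queue on a specific NUMA node when one is known */
	if (node_id >= 0)
		return blk_init_queue_node(mydrv_request_fn, &mydrv_lock,
					   node_id);
	return blk_init_queue(mydrv_request_fn, &mydrv_lock);
}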
@@ -632,7 +638,6 @@ extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern void blk_queue_ordered(request_queue_t *, int);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
 extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
 extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
 extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
@@ -646,7 +651,8 @@ extern void blk_wait_queue_drained(request_queue_t *, int);
 extern void blk_finish_queue_drain(request_queue_t *);
 
 int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(int);
+request_queue_t *blk_alloc_queue(int gfp_mask);
+request_queue_t *blk_alloc_queue_node(int,int);
 #define blk_put_queue(q) blk_cleanup_queue((q))
 
 /*
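
Similarly, a hedged sketch, not part of the patch, of a make_request-style driver pairing the new blk_alloc_queue_node() with blk_queue_make_request(); the mydrv_* names and the trivial bio completion are placeholders.

static int mydrv_make_request(request_queue_t *q, struct bio *bio)
{
	/* a real driver would perform the transfer before completing */
	bio_endio(bio, bio->bi_size, 0);
	return 0;
}

static request_queue_t *mydrv_alloc_queue_on(int node_id)
{
	request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

	if (q)
		blk_queue_make_request(q, mydrv_make_request);
	return q;
}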
@@ -675,8 +681,6 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-extern void drive_stat_acct(struct request *, int, int);
-
 static inline int queue_hardsect_size(request_queue_t *q)
 {
 	int retval = 512;