diff options

Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4a99b76c5a33..19bd8e7e11bf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -54,16 +54,23 @@ struct as_io_context {
 
 struct cfq_queue;
 struct cfq_io_context {
-	void (*dtor)(struct cfq_io_context *);
-	void (*exit)(struct cfq_io_context *);
-
-	struct io_context *ioc;
-
 	/*
 	 * circular list of cfq_io_contexts belonging to a process io context
 	 */
 	struct list_head list;
 	struct cfq_queue *cfqq;
+	void *key;
+
+	struct io_context *ioc;
+
+	unsigned long last_end_request;
+	unsigned long last_queue;
+	unsigned long ttime_total;
+	unsigned long ttime_samples;
+	unsigned long ttime_mean;
+
+	void (*dtor)(struct cfq_io_context *);
+	void (*exit)(struct cfq_io_context *);
 };
 
 /*
@@ -73,7 +80,9 @@ struct cfq_io_context {
  */
 struct io_context {
 	atomic_t refcount;
-	pid_t pid;
+	struct task_struct *task;
+
+	int (*set_ioprio)(struct io_context *, unsigned int);
 
 	/*
 	 * For request batching
@@ -81,14 +90,13 @@ struct io_context {
 	unsigned long last_waited; /* Time last woken after wait for request */
 	int nr_batch_requests;     /* Number of requests left in the batch */
 
-	spinlock_t lock;
-
 	struct as_io_context *aic;
 	struct cfq_io_context *cic;
 };
 
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
+struct io_context *current_io_context(int gfp_flags);
 struct io_context *get_io_context(int gfp_flags);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
@@ -134,6 +142,8 @@ struct request {
 
 	void *elevator_private;
 
+	unsigned short ioprio;
+
 	int rq_status;	/* should split this into a few status bits */
 	struct gendisk *rq_disk;
 	int errors;
@@ -285,9 +295,6 @@ enum blk_queue_state {
 	Queue_up,
 };
 
-#define BLK_TAGS_PER_LONG	(sizeof(unsigned long) * 8)
-#define BLK_TAGS_MASK		(BLK_TAGS_PER_LONG - 1)
-
 struct blk_queue_tag {
 	struct request **tag_index;	/* map of busy tags */
 	unsigned long *tag_map;		/* bit map of free/busy tags */
@@ -396,6 +403,7 @@ struct request_queue
 	 */
 	unsigned int sg_timeout;
 	unsigned int sg_reserved_size;
+	int node;
 
 	struct list_head drain_list;
 
@@ -542,15 +550,12 @@ extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void blk_end_sync_rq(struct request *rq);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
-extern void __blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, int);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
 extern void blk_plug_device(request_queue_t *);
 extern int blk_remove_plug(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
-extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
 extern void blk_start_queue(request_queue_t *q);
 extern void blk_stop_queue(request_queue_t *q);
@@ -615,6 +620,8 @@ static inline void blkdev_dequeue_request(struct request *req)
 /*
  * Access functions for manipulating queue properties
  */
+extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+					spinlock_t *lock, int node_id);
 extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
@@ -632,7 +639,6 @@ extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern void blk_queue_ordered(request_queue_t *, int);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
 extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
 extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
 extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
@@ -646,7 +652,8 @@ extern void blk_wait_queue_drained(request_queue_t *, int);
 extern void blk_finish_queue_drain(request_queue_t *);
 
 int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(int);
+request_queue_t *blk_alloc_queue(int gfp_mask);
+request_queue_t *blk_alloc_queue_node(int,int);
 #define blk_put_queue(q) blk_cleanup_queue((q))
 
 /*
@@ -675,8 +682,6 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-extern void drive_stat_acct(struct request *, int, int);
-
 static inline int queue_hardsect_size(request_queue_t *q)
 {
 	int retval = 512;