Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	41
1 file changed, 23 insertions, 18 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a48dc12c6699..aefa26fbae8a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -54,16 +54,23 @@ struct as_io_context {
 
 struct cfq_queue;
 struct cfq_io_context {
-	void (*dtor)(struct cfq_io_context *);
-	void (*exit)(struct cfq_io_context *);
-
-	struct io_context *ioc;
-
 	/*
 	 * circular list of cfq_io_contexts belonging to a process io context
 	 */
 	struct list_head list;
 	struct cfq_queue *cfqq;
+	void *key;
+
+	struct io_context *ioc;
+
+	unsigned long last_end_request;
+	unsigned long last_queue;
+	unsigned long ttime_total;
+	unsigned long ttime_samples;
+	unsigned long ttime_mean;
+
+	void (*dtor)(struct cfq_io_context *);
+	void (*exit)(struct cfq_io_context *);
 };
 
 /*
@@ -73,7 +80,9 @@ struct cfq_io_context {
  */
 struct io_context {
 	atomic_t refcount;
-	pid_t pid;
+	struct task_struct *task;
+
+	int (*set_ioprio)(struct io_context *, unsigned int);
 
 	/*
 	 * For request batching
@@ -81,14 +90,13 @@ struct io_context {
 	unsigned long last_waited; /* Time last woken after wait for request */
 	int nr_batch_requests; /* Number of requests left in the batch */
 
-	spinlock_t lock;
-
 	struct as_io_context *aic;
 	struct cfq_io_context *cic;
 };
 
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
+struct io_context *current_io_context(int gfp_flags);
 struct io_context *get_io_context(int gfp_flags);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
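
As an aside, a hedged usage sketch (not part of this diff) of the io_context helpers declared above: get_io_context() returns the current task's io_context, allocating it on first use and taking a counted reference, while the newly declared current_io_context() appears to return it without bumping the refcount. The function wrapper and GFP_KERNEL below are illustrative assumptions only.

	#include <linux/blkdev.h>

	static void peek_at_io_context(void)
	{
		/* allocate-on-demand lookup; takes a reference we must drop */
		struct io_context *ioc = get_io_context(GFP_KERNEL);

		if (!ioc)
			return;			/* allocation failed */

		/* ioc->aic and ioc->cic hold the AS/CFQ per-process state */
		put_io_context(ioc);		/* release the reference */
	}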
@@ -134,6 +142,8 @@ struct request {
 
 	void *elevator_private;
 
+	unsigned short ioprio;
+
 	int rq_status;	/* should split this into a few status bits */
 	struct gendisk *rq_disk;
 	int errors;
@@ -285,9 +295,6 @@ enum blk_queue_state {
 	Queue_up,
 };
 
-#define BLK_TAGS_PER_LONG (sizeof(unsigned long) * 8)
-#define BLK_TAGS_MASK (BLK_TAGS_PER_LONG - 1)
-
 struct blk_queue_tag {
 	struct request **tag_index;	/* map of busy tags */
 	unsigned long *tag_map;	/* bit map of free/busy tags */
@@ -396,6 +403,7 @@ struct request_queue
 	 */
 	unsigned int sg_timeout;
 	unsigned int sg_reserved_size;
+	int node;
 
 	struct list_head drain_list;
 
@@ -542,15 +550,12 @@ extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void blk_end_sync_rq(struct request *rq);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
-extern void __blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, int);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
 extern void blk_plug_device(request_queue_t *);
 extern int blk_remove_plug(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
-extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
 extern void blk_start_queue(request_queue_t *q);
 extern void blk_stop_queue(request_queue_t *q);
@@ -617,6 +622,8 @@ static inline void blkdev_dequeue_request(struct request *req)
 /*
  * Access functions for manipulating queue properties
  */
+extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+					spinlock_t *lock, int node_id);
 extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
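
For illustration only (not from the patch): a minimal sketch of how a driver might pick the new NUMA-aware init variant, assuming placeholder names my_request_fn, my_lock, my_probe and nid. blk_init_queue_node() takes the same request_fn/lock pair as blk_init_queue() plus the node to allocate the queue on.

	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;	/* serialises the queue */

	static void my_request_fn(request_queue_t *q)
	{
		/* pull requests off q and service them */
	}

	static int my_probe(int nid)	/* nid: NUMA node the device sits on */
	{
		request_queue_t *q;

		q = blk_init_queue_node(my_request_fn, &my_lock, nid);
		if (!q)
			return -ENOMEM;
		/* ... attach a gendisk to q and add_disk() it ... */
		return 0;
	}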
@@ -634,7 +641,6 @@ extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern void blk_queue_ordered(request_queue_t *, int);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
 extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
 extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
 extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
@@ -648,7 +654,8 @@ extern void blk_wait_queue_drained(request_queue_t *, int);
 extern void blk_finish_queue_drain(request_queue_t *);
 
 int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(int);
+request_queue_t *blk_alloc_queue(int gfp_mask);
+request_queue_t *blk_alloc_queue_node(int,int);
 #define blk_put_queue(q) blk_cleanup_queue((q))
 
 /*
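
Similarly, a hedged sketch for make_request-based drivers (placeholder names my_make_request and nid; not part of the patch): blk_alloc_queue_node() mirrors blk_alloc_queue() but allocates the queue on the given node, after which the driver installs its make_request handler as usual.

	request_queue_t *q;

	q = blk_alloc_queue_node(GFP_KERNEL, nid);	/* gfp_mask, node_id */
	if (!q)
		return -ENOMEM;
	blk_queue_make_request(q, my_make_request);	/* bio-based submission path */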
@@ -677,8 +684,6 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-extern void drive_stat_acct(struct request *, int, int);
-
 static inline int queue_hardsect_size(request_queue_t *q)
 {
 	int retval = 512;