Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 337 +++++++++++++++++++++++++++-----------------------
 1 file changed, 178 insertions(+), 159 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c773ee545ebd..1d79b8d4ca6d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,6 +1,7 @@
 #ifndef _LINUX_BLKDEV_H
 #define _LINUX_BLKDEV_H
 
+#include <linux/sched.h>
 #include <linux/major.h>
 #include <linux/genhd.h>
 #include <linux/list.h>
@@ -16,6 +17,22 @@
 
 #include <asm/scatterlist.h>
 
+#ifdef CONFIG_LBD
+# include <asm/div64.h>
+# define sector_div(a, b) do_div(a, b)
+#else
+# define sector_div(n, b)( \
+{ \
+	int _res; \
+	_res = (n) % (b); \
+	(n) /= (b); \
+	_res; \
+} \
+)
+#endif
+
+#ifdef CONFIG_BLOCK
+
 struct scsi_ioctl_command;
 
 struct request_queue;
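[Editor's note: sector_div() divides a sector_t in place and returns the remainder; the CONFIG_LBD variant routes through do_div() because sector_t is then 64 bits wide and a plain `/` would pull in 64-bit division helpers. A minimal usage sketch; variable names are illustrative, not from this patch:

	/* Split an absolute sector into a chunk index and an offset.
	 * sector_div() modifies 'sector' in place and returns the
	 * remainder, so the remainder must be taken from the macro's value.
	 */
	sector_t sector = 1234567;		/* absolute device sector */
	unsigned int chunk_sectors = 128;	/* hypothetical chunk size */
	unsigned int offset;

	offset = sector_div(sector, chunk_sectors);
	/* 'sector' is now the chunk index, 'offset' the sector within it */
]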
@@ -90,7 +107,7 @@ struct io_context {
 	atomic_t refcount;
 	struct task_struct *task;
 
-	int (*set_ioprio)(struct io_context *, unsigned int);
+	unsigned int ioprio_changed;
 
 	/*
 	 * For request batching
@@ -104,8 +121,7 @@ struct io_context {
 
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
-struct io_context *current_io_context(gfp_t gfp_flags);
-struct io_context *get_io_context(gfp_t gfp_flags);
+struct io_context *get_io_context(gfp_t gfp_flags, int node);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
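[Editor's note: get_io_context() now takes a NUMA node so the io_context can be allocated near the hardware it will serve; callers with no preference pass -1. A hedged sketch of the reference pattern, not a specific call site in this patch:

	/* Take a reference on the current task's io_context, then drop it. */
	struct io_context *ioc;

	ioc = get_io_context(GFP_KERNEL, -1);	/* -1: no node preference */
	if (ioc) {
		/* e.g. an io scheduler may watch ioc->ioprio_changed */
		put_io_context(ioc);		/* drop the reference */
	}
]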
@@ -120,6 +136,90 @@ struct request_list {
 	wait_queue_head_t wait[2];
 };
 
+/*
+ * request command types
+ */
+enum rq_cmd_type_bits {
+	REQ_TYPE_FS		= 1,	/* fs request */
+	REQ_TYPE_BLOCK_PC,		/* scsi command */
+	REQ_TYPE_SENSE,			/* sense request */
+	REQ_TYPE_PM_SUSPEND,		/* suspend request */
+	REQ_TYPE_PM_RESUME,		/* resume request */
+	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
+	REQ_TYPE_FLUSH,			/* flush request */
+	REQ_TYPE_SPECIAL,		/* driver defined type */
+	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
+	/*
+	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
+	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
+	 * private REQ_LB opcodes to differentiate what type of request this is
+	 */
+	REQ_TYPE_ATA_CMD,
+	REQ_TYPE_ATA_TASK,
+	REQ_TYPE_ATA_TASKFILE,
+};
+
+/*
+ * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
+ * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
+ * SCSI cdb).
+ *
+ * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
+ * typically to differentiate REQ_TYPE_SPECIAL requests.
+ *
+ */
+enum {
+	/*
+	 * just examples for now
+	 */
+	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
+	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
+};
+
+/*
+ * request type modified bits. first three bits match BIO_RW* bits, important
+ */
+enum rq_flag_bits {
+	__REQ_RW,		/* not set, read. set, write */
+	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_SORTED,		/* elevator knows about this request */
+	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
+	__REQ_HARDBARRIER,	/* may not be passed by drive either */
+	__REQ_FUA,		/* forced unit access */
+	__REQ_NOMERGE,		/* don't touch this for merging */
+	__REQ_STARTED,		/* drive already may have started this one */
+	__REQ_DONTPREP,		/* don't call prep for this one */
+	__REQ_QUEUED,		/* uses queueing */
+	__REQ_ELVPRIV,		/* elevator private data attached */
+	__REQ_FAILED,		/* set if the request failed */
+	__REQ_QUIET,		/* don't worry about errors */
+	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
+	__REQ_ORDERED_COLOR,	/* is before or after barrier */
+	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_ALLOCED,		/* request came from our alloc pool */
+	__REQ_RW_META,		/* metadata io request */
+	__REQ_NR_BITS,		/* stops here */
+};
+
+#define REQ_RW		(1 << __REQ_RW)
+#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
+#define REQ_SORTED	(1 << __REQ_SORTED)
+#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
+#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
+#define REQ_FUA		(1 << __REQ_FUA)
+#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
+#define REQ_STARTED	(1 << __REQ_STARTED)
+#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
+#define REQ_QUEUED	(1 << __REQ_QUEUED)
+#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
+#define REQ_FAILED	(1 << __REQ_FAILED)
+#define REQ_QUIET	(1 << __REQ_QUIET)
+#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
+#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
+#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
+#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
+#define REQ_RW_META	(1 << __REQ_RW_META)
+
 #define BLK_MAX_CDB	16
 
 /*
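[Editor's note: the old per-request flags word is now split in two: cmd_type says what a request is (exactly one enum value), while cmd_flags says how to treat it (a bitmask). A hedged sketch of driver-side classification under the new scheme; the function name is illustrative:

	/* Classify a request after the cmd_type/cmd_flags split.  READ and
	 * WRITE are the usual kernel data-direction values; everything else
	 * is defined in this header.
	 */
	static int example_classify(struct request *rq)
	{
		if (rq->cmd_type == REQ_TYPE_FS)
			/* regular fs I/O: direction is cmd_flags bit 0 */
			return (rq->cmd_flags & REQ_RW) ? WRITE : READ;
		if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
			return -1;	/* rq->cmd[] holds a SCSI CDB */
		return -1;		/* REQ_TYPE_SPECIAL etc.: driver private */
	}
]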
@@ -129,30 +229,46 @@ struct request {
 	struct list_head queuelist;
 	struct list_head donelist;
 
-	unsigned long flags;		/* see REQ_ bits below */
+	request_queue_t *q;
+
+	unsigned int cmd_flags;
+	enum rq_cmd_type_bits cmd_type;
 
 	/* Maintain bio traversal state for part by part I/O submission.
 	 * hard_* are block layer internals, no driver should touch them!
 	 */
 
 	sector_t sector;		/* next sector to submit */
+	sector_t hard_sector;		/* next sector to complete */
 	unsigned long nr_sectors;	/* no. of sectors left to submit */
+	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
 	/* no. of sectors left to submit in the current segment */
 	unsigned int current_nr_sectors;
 
-	sector_t hard_sector;		/* next sector to complete */
-	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
 	/* no. of sectors left to complete in the current segment */
 	unsigned int hard_cur_sectors;
 
 	struct bio *bio;
 	struct bio *biotail;
 
+	struct hlist_node hash;		/* merge hash */
+	/*
+	 * The rb_node is only used inside the io scheduler, requests
+	 * are pruned when moved to the dispatch queue. So let the
+	 * completion_data share space with the rb_node.
+	 */
+	union {
+		struct rb_node rb_node;	/* sort/lookup */
+		void *completion_data;
+	};
+
+	/*
+	 * two pointers are available for the IO schedulers, if they need
+	 * more they have to dynamically allocate it.
+	 */
 	void *elevator_private;
-	void *completion_data;
+	void *elevator_private2;
 
-	int rq_status;	/* should split this into a few status bits */
-	int errors;
 	struct gendisk *rq_disk;
 	unsigned long start_time;
 
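[Editor's note: the hash/rb_node additions move the io schedulers' merge and sort bookkeeping into struct request itself, and the union is safe because the two members are live in disjoint phases of a request's life. A sketch of the implied rule; the accessor names are hypothetical:

	/* rb_node is live only while the io scheduler owns the request;
	 * once the request is pruned onto the dispatch queue, the same
	 * storage may carry completion_data instead.
	 */
	static inline void example_set_completion_data(struct request *rq,
						       void *data)
	{
		/* legal only after the request left the sort tree */
		rq->completion_data = data;
	}
]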
@@ -170,15 +286,13 @@ struct request {
 
 	unsigned short ioprio;
 
+	void *special;
+	char *buffer;
+
 	int tag;
+	int errors;
 
 	int ref_count;
-	request_queue_t *q;
-	struct request_list *rl;
-
-	struct completion *waiting;
-	void *special;
-	char *buffer;
 
 	/*
 	 * when request is used as a packet command carrier
@@ -195,80 +309,14 @@ struct request {
 	int retries;
 
 	/*
-	 * completion callback. end_io_data should be folded in with waiting
+	 * completion callback.
 	 */
 	rq_end_io_fn *end_io;
 	void *end_io_data;
 };
 
 /*
- * first three bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST,		/* no low level driver retries */
-	__REQ_SORTED,		/* elevator knows about this request */
-	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
-	__REQ_HARDBARRIER,	/* may not be passed by drive either */
-	__REQ_FUA,		/* forced unit access */
-	__REQ_CMD,		/* is a regular fs rw request */
-	__REQ_NOMERGE,		/* don't touch this for merging */
-	__REQ_STARTED,		/* drive already may have started this one */
-	__REQ_DONTPREP,		/* don't call prep for this one */
-	__REQ_QUEUED,		/* uses queueing */
-	__REQ_ELVPRIV,		/* elevator private data attached */
-	/*
-	 * for ATA/ATAPI devices
-	 */
-	__REQ_PC,		/* packet command (special) */
-	__REQ_BLOCK_PC,		/* queued down pc from block layer */
-	__REQ_SENSE,		/* sense retrival */
-
-	__REQ_FAILED,		/* set if the request failed */
-	__REQ_QUIET,		/* don't worry about errors */
-	__REQ_SPECIAL,		/* driver suplied command */
-	__REQ_DRIVE_CMD,
-	__REQ_DRIVE_TASK,
-	__REQ_DRIVE_TASKFILE,
-	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
-	__REQ_PM_SUSPEND,	/* suspend request */
-	__REQ_PM_RESUME,	/* resume request */
-	__REQ_PM_SHUTDOWN,	/* shutdown request */
-	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
-	__REQ_NR_BITS,		/* stops here */
-};
-
-#define REQ_RW		(1 << __REQ_RW)
-#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
-#define REQ_SORTED	(1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
-#define REQ_FUA		(1 << __REQ_FUA)
-#define REQ_CMD		(1 << __REQ_CMD)
-#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
-#define REQ_STARTED	(1 << __REQ_STARTED)
-#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
-#define REQ_QUEUED	(1 << __REQ_QUEUED)
-#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
-#define REQ_PC		(1 << __REQ_PC)
-#define REQ_BLOCK_PC	(1 << __REQ_BLOCK_PC)
-#define REQ_SENSE	(1 << __REQ_SENSE)
-#define REQ_FAILED	(1 << __REQ_FAILED)
-#define REQ_QUIET	(1 << __REQ_QUIET)
-#define REQ_SPECIAL	(1 << __REQ_SPECIAL)
-#define REQ_DRIVE_CMD	(1 << __REQ_DRIVE_CMD)
-#define REQ_DRIVE_TASK	(1 << __REQ_DRIVE_TASK)
-#define REQ_DRIVE_TASKFILE	(1 << __REQ_DRIVE_TASKFILE)
-#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
-#define REQ_PM_SUSPEND	(1 << __REQ_PM_SUSPEND)
-#define REQ_PM_RESUME	(1 << __REQ_PM_RESUME)
-#define REQ_PM_SHUTDOWN	(1 << __REQ_PM_SHUTDOWN)
-#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
-#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
-
-/*
- * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
+ * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
  * requests. Some step values could eventually be made generic.
  */
 struct request_pm_state
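[Editor's note: with ->waiting and rq_status gone, completion signalling runs purely through end_io/end_io_data. A hedged sketch of a callback matching the rq_end_io_fn typedef; the name and the use of a struct completion are illustrative:

	/* 'error' is 0 on success.  The submitter stashed a struct
	 * completion in end_io_data before issuing the request.
	 */
	static void example_end_io(struct request *rq, int error)
	{
		struct completion *waiting = rq->end_io_data;

		rq->end_io_data = NULL;
		complete(waiting);	/* wake the submitter */
	}
]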
@@ -417,9 +465,9 @@ struct request_queue
 	unsigned int sg_timeout;
 	unsigned int sg_reserved_size;
 	int node;
-
+#ifdef CONFIG_BLK_DEV_IO_TRACE
 	struct blk_trace *blk_trace;
-
+#endif
 	/*
 	 * reserved for flush operations
 	 */
@@ -432,9 +480,6 @@ struct request_queue
 	struct mutex sysfs_lock;
 };
 
-#define RQ_INACTIVE		(-1)
-#define RQ_ACTIVE		1
-
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
@@ -490,25 +535,34 @@ enum {
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 
-#define blk_fs_request(rq)	((rq)->flags & REQ_CMD)
-#define blk_pc_request(rq)	((rq)->flags & REQ_BLOCK_PC)
-#define blk_noretry_request(rq)	((rq)->flags & REQ_FAILFAST)
-#define blk_rq_started(rq)	((rq)->flags & REQ_STARTED)
+#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
+#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
+#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
+#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
+
+#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
+#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
 
 #define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))
 
-#define blk_pm_suspend_request(rq)	((rq)->flags & REQ_PM_SUSPEND)
-#define blk_pm_resume_request(rq)	((rq)->flags & REQ_PM_RESUME)
+#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
+#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
 #define blk_pm_request(rq)	\
-	((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
+	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
 
-#define blk_sorted_rq(rq)	((rq)->flags & REQ_SORTED)
-#define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
-#define blk_fua_rq(rq)		((rq)->flags & REQ_FUA)
+#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
+#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
+#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
-#define rq_data_dir(rq)		((rq)->flags & 1)
+#define rq_data_dir(rq)		((rq)->cmd_flags & 1)
+
+/*
+ * We regard a request as sync, if it's a READ or a SYNC write.
+ */
+#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
 
 static inline int blk_queue_full(struct request_queue *q, int rw)
 {
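[Editor's note: the predicates above now test cmd_type for identity and cmd_flags for behavior. A hedged sketch of how an io scheduler or driver might combine them; the function name is illustrative:

	/* Decide whether a request may be batched with others. */
	static int example_may_batch(struct request *rq)
	{
		if (!blk_fs_request(rq))
			return 0;	/* only regular fs I/O is batched */
		if (blk_barrier_rq(rq) || blk_fua_rq(rq))
			return 0;	/* ordering/durability constraints */
		return rq_is_sync(rq);	/* READs and O_DIRECT-style writes */
	}
]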
@@ -541,13 +595,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define RQ_NOMERGE_FLAGS	\
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
-	(!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
-
-/*
- * noop, requests are automagically marked as active/inactive by I/O
- * scheduler -- see elv_next_request
- */
-#define blk_queue_headactive(q, head_active)
+	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
 
 /*
  * q->prep_rq_fn return values
@@ -586,11 +634,6 @@ static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
 	if ((rq->bio))			\
 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
 
-struct sec_size {
-	unsigned block_size;
-	unsigned block_size_bits;
-};
-
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
@@ -612,6 +655,7 @@ extern void blk_stop_queue(request_queue_t *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
+extern void blk_start_queueing(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
 extern int blk_rq_unmap_user(struct bio *, unsigned int);
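[Editor's note: blk_start_queueing() is the new export added above. A hedged sketch of a caller kicking the queue from driver context, assuming the usual convention that the queue lock is held around it:

	/* Restart request processing after requeueing work. */
	static void example_kick_queue(request_queue_t *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queueing(q);	/* runs ->request_fn if possible */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
]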
@@ -655,16 +699,6 @@ extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *req, int uptodate);
 extern void blk_complete_request(struct request *);
 
-static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
-{
-	if (blk_fs_request(rq))
-		return (nr_bytes >= (rq->hard_nr_sectors << 9));
-	else if (blk_pc_request(rq))
-		return nr_bytes >= rq->data_len;
-
-	return 0;
-}
-
 /*
  * end_that_request_first/chunk() takes an uptodate argument. we account
  * any value <= as an io error. 0 means -EIO for compatability reasons,
@@ -679,21 +713,6 @@ static inline void blkdev_dequeue_request(struct request *req)
 }
 
 /*
- * This should be in elevator.h, but that requires pulling in rq and q
- */
-static inline void elv_dispatch_add_tail(struct request_queue *q,
-					 struct request *rq)
-{
-	if (q->last_merge == rq)
-		q->last_merge = NULL;
-	q->nr_sorted--;
-
-	q->end_sector = rq_end_sector(rq);
-	q->boundary_rq = rq;
-	list_add_tail(&rq->queuelist, &q->queue_head);
-}
-
-/*
  * Access functions for manipulating queue properties
  */
 extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
@@ -737,7 +756,7 @@ extern void blk_put_queue(request_queue_t *);
  */
 #define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
 #define blk_queue_tag_queue(q)	((q)->queue_tags->busy < (q)->queue_tags->max_depth)
-#define blk_rq_tagged(rq)		((rq)->flags & REQ_QUEUED)
+#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
 extern int blk_queue_start_tag(request_queue_t *, struct request *);
 extern struct request *blk_queue_find_tag(request_queue_t *, int);
 extern void blk_queue_end_tag(request_queue_t *, struct request *);
@@ -787,14 +806,6 @@ static inline int queue_dma_alignment(request_queue_t *q)
 	return retval;
 }
 
-static inline int bdev_dma_aligment(struct block_device *bdev)
-{
-	return queue_dma_alignment(bdev_get_queue(bdev));
-}
-
-#define blk_finished_io(nsects)	do { } while (0)
-#define blk_started_io(nsects)	do { } while (0)
-
 /* assumes size > 256 */
 static inline unsigned int blksize_bits(unsigned int size)
 {
@@ -824,24 +835,32 @@ struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
 void kblockd_flush(void);
 
-#ifdef CONFIG_LBD
-# include <asm/div64.h>
-# define sector_div(a, b) do_div(a, b)
-#else
-# define sector_div(n, b)( \
-{ \
-	int _res; \
-	_res = (n) % (b); \
-	(n) /= (b); \
-	_res; \
-} \
-)
-#endif
-
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
 
 
+#else /* CONFIG_BLOCK */
+/*
+ * stubs for when the block layer is configured out
+ */
+#define buffer_heads_over_limit 0
+
+static inline long blk_congestion_wait(int rw, long timeout)
+{
+	return io_schedule_timeout(timeout);
+}
+
+static inline long nr_blockdev_pages(void)
+{
+	return 0;
+}
+
+static inline void exit_io_context(void)
+{
+}
+
+#endif /* CONFIG_BLOCK */
+
 #endif
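[Editor's note: the sector_div block above is not dropped but moved before the new #ifdef CONFIG_BLOCK guard, so it stays available to !CONFIG_BLOCK builds. The new #else branch gives such builds just enough stubs that generic code can call block-layer helpers unconditionally. A hedged illustration of the pattern; the function name is made up:

	/* With CONFIG_BLOCK=n, blk_congestion_wait() degrades to a plain
	 * scheduled sleep, so shared callers need no #ifdefs of their own.
	 */
	static inline long example_throttle(int rw)
	{
		return blk_congestion_wait(rw, HZ / 10);
	}
]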