author    Tejun Heo <tj@kernel.org>  2010-08-03 07:14:58 -0400
committer Jens Axboe <jaxboe@fusionio.com>  2010-08-07 12:53:10 -0400
commit    7cc015811ef8992dfcce314d0ed9642bc18143d1 (patch)
tree      cae16766d233563bef102032eb954c05f1814f77 /include
parent    aca27ba9618276dd2f777bcd5a1419589ccf1ca8 (diff)
bio, fs: separate out bio_types.h and define READ/WRITE constants in terms of BIO_RW_* flags
linux/fs.h hard-coded READ/WRITE constants which should match the BIO_RW_* flags. This is fragile and caused breakage during the BIO_RW_* flag rearrangement. The hardcoding was there to avoid include dependency hell.

Create linux/bio_types.h, which contains the definitions for the bio data structures and flags, include it from bio.h and fs.h, and make fs.h define all READ/WRITE related constants in terms of the BIO_RW_* flags.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
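The effect of the change is easiest to see by following the macro chain: fs.h no longer hardcodes 1/16/17 but expands READ/WRITE/READA/SWRITE from the REQ_* bit definitions, so a later reshuffling of the flag bits propagates automatically. Below is a minimal userspace sketch (not kernel code; it merely mirrors the enum and macros introduced by this patch, abridged to the relevant flags) showing what the derived constants expand to:

#include <stdio.h>

/* Abridged mirror of enum rq_flag_bits from the new blk_types.h. */
enum rq_flag_bits {
	__REQ_WRITE,		/* bit 0: not set, read; set, write */
	__REQ_FAILFAST_DEV,
	__REQ_FAILFAST_TRANSPORT,
	__REQ_FAILFAST_DRIVER,
	__REQ_HARDBARRIER,
	__REQ_SYNC,
	__REQ_META,
	__REQ_DISCARD,
	__REQ_NOIDLE,
	__REQ_UNPLUG,
	__REQ_RAHEAD,
};

#define REQ_WRITE	(1 << __REQ_WRITE)
#define REQ_SYNC	(1 << __REQ_SYNC)
#define REQ_UNPLUG	(1 << __REQ_UNPLUG)
#define REQ_RAHEAD	(1 << __REQ_RAHEAD)

/* fs.h after this patch: everything derives from the REQ_* bits. */
#define RW_MASK		REQ_WRITE
#define RWA_MASK	REQ_RAHEAD

#define READ		0
#define WRITE		RW_MASK
#define READA		RWA_MASK
#define SWRITE		(WRITE | READA)

#define READ_SYNC	(READ | REQ_SYNC | REQ_UNPLUG)

int main(void)
{
	/*
	 * WRITE is still 1 (bit 0), but READA is now 1 << __REQ_RAHEAD
	 * rather than the old hardcoded 16, so it follows the flag
	 * layout instead of silently diverging from it.
	 */
	printf("WRITE     = %d\n", WRITE);
	printf("READA     = %d\n", READA);
	printf("SWRITE    = %d\n", SWRITE);
	printf("READ_SYNC = %d\n", READ_SYNC);
	return 0;
}

Compiled and run, this prints WRITE = 1 but READA = 1024 and SWRITE = 1025 rather than the old 16 and 17; the exact numbers only reflect this patch's flag layout and will track any future rearrangement automatically.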
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bio.h        183
-rw-r--r--  include/linux/blk_types.h  193
-rw-r--r--  include/linux/fs.h          15
3 files changed, 204 insertions(+), 187 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index f655b54c9ef3..5274103434ad 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -9,7 +9,7 @@
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 12 *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
@@ -28,6 +28,9 @@
28
29#include <asm/io.h>
30
31/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
32#include <linux/blk_types.h>
33
34#define BIO_DEBUG
35
36#ifdef BIO_DEBUG
@@ -41,184 +44,6 @@
41#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
42
43/*
44 * was unsigned short, but we might as well be ready for > 64kB I/O pages
45 */
46struct bio_vec {
47 struct page *bv_page;
48 unsigned int bv_len;
49 unsigned int bv_offset;
50};
51
52struct bio_set;
53struct bio;
54struct bio_integrity_payload;
55typedef void (bio_end_io_t) (struct bio *, int);
56typedef void (bio_destructor_t) (struct bio *);
57
58/*
59 * main unit of I/O for the block layer and lower layers (ie drivers and
60 * stacking drivers)
61 */
62struct bio {
63 sector_t bi_sector; /* device address in 512 byte
64 sectors */
65 struct bio *bi_next; /* request queue link */
66 struct block_device *bi_bdev;
67 unsigned long bi_flags; /* status, command, etc */
68 unsigned long bi_rw; /* bottom bits READ/WRITE,
69 * top bits priority
70 */
71
72 unsigned short bi_vcnt; /* how many bio_vec's */
73 unsigned short bi_idx; /* current index into bvl_vec */
74
75 /* Number of segments in this BIO after
76 * physical address coalescing is performed.
77 */
78 unsigned int bi_phys_segments;
79
80 unsigned int bi_size; /* residual I/O count */
81
82 /*
83 * To keep track of the max segment size, we account for the
84 * sizes of the first and last mergeable segments in this bio.
85 */
86 unsigned int bi_seg_front_size;
87 unsigned int bi_seg_back_size;
88
89 unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
90
91 unsigned int bi_comp_cpu; /* completion CPU */
92
93 atomic_t bi_cnt; /* pin count */
94
95 struct bio_vec *bi_io_vec; /* the actual vec list */
96
97 bio_end_io_t *bi_end_io;
98
99 void *bi_private;
100#if defined(CONFIG_BLK_DEV_INTEGRITY)
101 struct bio_integrity_payload *bi_integrity; /* data integrity */
102#endif
103
104 bio_destructor_t *bi_destructor; /* destructor */
105
106 /*
107 * We can inline a number of vecs at the end of the bio, to avoid
108 * double allocations for a small number of bio_vecs. This member
109 * MUST obviously be kept at the very end of the bio.
110 */
111 struct bio_vec bi_inline_vecs[0];
112};
113
114/*
115 * bio flags
116 */
117#define BIO_UPTODATE 0 /* ok after I/O completion */
118#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
119#define BIO_EOF 2 /* out-out-bounds error */
120#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
121#define BIO_CLONED 4 /* doesn't own data */
122#define BIO_BOUNCED 5 /* bio is a bounce bio */
123#define BIO_USER_MAPPED 6 /* contains user pages */
124#define BIO_EOPNOTSUPP 7 /* not supported */
125#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
126#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
127#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
128#define BIO_QUIET 11 /* Make BIO Quiet */
129#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
130
131/*
132 * top 4 bits of bio flags indicate the pool this bio came from
133 */
134#define BIO_POOL_BITS (4)
135#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
136#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
137#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
138#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
139
140/*
141 * Request flags. For use in the cmd_flags field of struct request, and in
142 * bi_rw of struct bio. Note that some flags are only valid in either one.
143 */
144enum rq_flag_bits {
145 /* common flags */
146 __REQ_WRITE, /* not set, read. set, write */
147 __REQ_FAILFAST_DEV, /* no driver retries of device errors */
148 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
149 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
150
151 __REQ_HARDBARRIER, /* may not be passed by drive either */
152 __REQ_SYNC, /* request is sync (sync write or read) */
153 __REQ_META, /* metadata io request */
154 __REQ_DISCARD, /* request to discard sectors */
155 __REQ_NOIDLE, /* don't anticipate more IO after this one */
156
157 /* bio only flags */
158 __REQ_UNPLUG, /* unplug the immediately after submission */
159 __REQ_RAHEAD, /* read ahead, can fail anytime */
160
161 /* request only flags */
162 __REQ_SORTED, /* elevator knows about this request */
163 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
164 __REQ_FUA, /* forced unit access */
165 __REQ_NOMERGE, /* don't touch this for merging */
166 __REQ_STARTED, /* drive already may have started this one */
167 __REQ_DONTPREP, /* don't call prep for this one */
168 __REQ_QUEUED, /* uses queueing */
169 __REQ_ELVPRIV, /* elevator private data attached */
170 __REQ_FAILED, /* set if the request failed */
171 __REQ_QUIET, /* don't worry about errors */
172 __REQ_PREEMPT, /* set for "ide_preempt" requests */
173 __REQ_ORDERED_COLOR, /* is before or after barrier */
174 __REQ_ALLOCED, /* request came from our alloc pool */
175 __REQ_COPY_USER, /* contains copies of user pages */
176 __REQ_INTEGRITY, /* integrity metadata has been remapped */
177 __REQ_FLUSH, /* request for cache flush */
178 __REQ_IO_STAT, /* account I/O stat */
179 __REQ_MIXED_MERGE, /* merge of different types, fail separately */
180 __REQ_NR_BITS, /* stops here */
181};
182
183#define REQ_WRITE (1 << __REQ_WRITE)
184#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
185#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
186#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
187#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
188#define REQ_SYNC (1 << __REQ_SYNC)
189#define REQ_META (1 << __REQ_META)
190#define REQ_DISCARD (1 << __REQ_DISCARD)
191#define REQ_NOIDLE (1 << __REQ_NOIDLE)
192
193#define REQ_FAILFAST_MASK \
194 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
195#define REQ_COMMON_MASK \
196 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
197 REQ_META| REQ_DISCARD | REQ_NOIDLE)
198
199#define REQ_UNPLUG (1 << __REQ_UNPLUG)
200#define REQ_RAHEAD (1 << __REQ_RAHEAD)
201
202#define REQ_SORTED (1 << __REQ_SORTED)
203#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
204#define REQ_FUA (1 << __REQ_FUA)
205#define REQ_NOMERGE (1 << __REQ_NOMERGE)
206#define REQ_STARTED (1 << __REQ_STARTED)
207#define REQ_DONTPREP (1 << __REQ_DONTPREP)
208#define REQ_QUEUED (1 << __REQ_QUEUED)
209#define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
210#define REQ_FAILED (1 << __REQ_FAILED)
211#define REQ_QUIET (1 << __REQ_QUIET)
212#define REQ_PREEMPT (1 << __REQ_PREEMPT)
213#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
214#define REQ_ALLOCED (1 << __REQ_ALLOCED)
215#define REQ_COPY_USER (1 << __REQ_COPY_USER)
216#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
217#define REQ_FLUSH (1 << __REQ_FLUSH)
218#define REQ_IO_STAT (1 << __REQ_IO_STAT)
219#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
220
221/*
222 * upper 16 bits of bi_rw define the io priority of this bio
223 */
224#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
new file mode 100644
index 000000000000..118523734af0
--- /dev/null
+++ b/include/linux/blk_types.h
@@ -0,0 +1,193 @@
1/*
2 * Block data types and constants. Directly include this file only to
3 * break include dependency loop.
4 */
5#ifndef __LINUX_BLK_TYPES_H
6#define __LINUX_BLK_TYPES_H
7
8#ifdef CONFIG_BLOCK
9
10#include <linux/types.h>
11
12struct bio_set;
13struct bio;
14struct bio_integrity_payload;
15struct page;
16struct block_device;
17typedef void (bio_end_io_t) (struct bio *, int);
18typedef void (bio_destructor_t) (struct bio *);
19
20/*
21 * was unsigned short, but we might as well be ready for > 64kB I/O pages
22 */
23struct bio_vec {
24 struct page *bv_page;
25 unsigned int bv_len;
26 unsigned int bv_offset;
27};
28
29/*
30 * main unit of I/O for the block layer and lower layers (ie drivers and
31 * stacking drivers)
32 */
33struct bio {
34 sector_t bi_sector; /* device address in 512 byte
35 sectors */
36 struct bio *bi_next; /* request queue link */
37 struct block_device *bi_bdev;
38 unsigned long bi_flags; /* status, command, etc */
39 unsigned long bi_rw; /* bottom bits READ/WRITE,
40 * top bits priority
41 */
42
43 unsigned short bi_vcnt; /* how many bio_vec's */
44 unsigned short bi_idx; /* current index into bvl_vec */
45
46 /* Number of segments in this BIO after
47 * physical address coalescing is performed.
48 */
49 unsigned int bi_phys_segments;
50
51 unsigned int bi_size; /* residual I/O count */
52
53 /*
54 * To keep track of the max segment size, we account for the
55 * sizes of the first and last mergeable segments in this bio.
56 */
57 unsigned int bi_seg_front_size;
58 unsigned int bi_seg_back_size;
59
60 unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
61
62 unsigned int bi_comp_cpu; /* completion CPU */
63
64 atomic_t bi_cnt; /* pin count */
65
66 struct bio_vec *bi_io_vec; /* the actual vec list */
67
68 bio_end_io_t *bi_end_io;
69
70 void *bi_private;
71#if defined(CONFIG_BLK_DEV_INTEGRITY)
72 struct bio_integrity_payload *bi_integrity; /* data integrity */
73#endif
74
75 bio_destructor_t *bi_destructor; /* destructor */
76
77 /*
78 * We can inline a number of vecs at the end of the bio, to avoid
79 * double allocations for a small number of bio_vecs. This member
80 * MUST obviously be kept at the very end of the bio.
81 */
82 struct bio_vec bi_inline_vecs[0];
83};
84
85/*
86 * bio flags
87 */
88#define BIO_UPTODATE 0 /* ok after I/O completion */
89#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
90#define BIO_EOF 2 /* out-out-bounds error */
91#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
92#define BIO_CLONED 4 /* doesn't own data */
93#define BIO_BOUNCED 5 /* bio is a bounce bio */
94#define BIO_USER_MAPPED 6 /* contains user pages */
95#define BIO_EOPNOTSUPP 7 /* not supported */
96#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
97#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
98#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
99#define BIO_QUIET 11 /* Make BIO Quiet */
100#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
101
102/*
103 * top 4 bits of bio flags indicate the pool this bio came from
104 */
105#define BIO_POOL_BITS (4)
106#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
107#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
108#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
109#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
110
111/*
112 * Request flags. For use in the cmd_flags field of struct request, and in
113 * bi_rw of struct bio. Note that some flags are only valid in either one.
114 */
115enum rq_flag_bits {
116 /* common flags */
117 __REQ_WRITE, /* not set, read. set, write */
118 __REQ_FAILFAST_DEV, /* no driver retries of device errors */
119 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
120 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
121
122 __REQ_HARDBARRIER, /* may not be passed by drive either */
123 __REQ_SYNC, /* request is sync (sync write or read) */
124 __REQ_META, /* metadata io request */
125 __REQ_DISCARD, /* request to discard sectors */
126 __REQ_NOIDLE, /* don't anticipate more IO after this one */
127
128 /* bio only flags */
129 __REQ_UNPLUG, /* unplug the immediately after submission */
130 __REQ_RAHEAD, /* read ahead, can fail anytime */
131
132 /* request only flags */
133 __REQ_SORTED, /* elevator knows about this request */
134 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
135 __REQ_FUA, /* forced unit access */
136 __REQ_NOMERGE, /* don't touch this for merging */
137 __REQ_STARTED, /* drive already may have started this one */
138 __REQ_DONTPREP, /* don't call prep for this one */
139 __REQ_QUEUED, /* uses queueing */
140 __REQ_ELVPRIV, /* elevator private data attached */
141 __REQ_FAILED, /* set if the request failed */
142 __REQ_QUIET, /* don't worry about errors */
143 __REQ_PREEMPT, /* set for "ide_preempt" requests */
144 __REQ_ORDERED_COLOR, /* is before or after barrier */
145 __REQ_ALLOCED, /* request came from our alloc pool */
146 __REQ_COPY_USER, /* contains copies of user pages */
147 __REQ_INTEGRITY, /* integrity metadata has been remapped */
148 __REQ_FLUSH, /* request for cache flush */
149 __REQ_IO_STAT, /* account I/O stat */
150 __REQ_MIXED_MERGE, /* merge of different types, fail separately */
151 __REQ_NR_BITS, /* stops here */
152};
153
154#define REQ_WRITE (1 << __REQ_WRITE)
155#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
156#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
157#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
158#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
159#define REQ_SYNC (1 << __REQ_SYNC)
160#define REQ_META (1 << __REQ_META)
161#define REQ_DISCARD (1 << __REQ_DISCARD)
162#define REQ_NOIDLE (1 << __REQ_NOIDLE)
163
164#define REQ_FAILFAST_MASK \
165 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
166#define REQ_COMMON_MASK \
167 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
168 REQ_META| REQ_DISCARD | REQ_NOIDLE)
169
170#define REQ_UNPLUG (1 << __REQ_UNPLUG)
171#define REQ_RAHEAD (1 << __REQ_RAHEAD)
172
173#define REQ_SORTED (1 << __REQ_SORTED)
174#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
175#define REQ_FUA (1 << __REQ_FUA)
176#define REQ_NOMERGE (1 << __REQ_NOMERGE)
177#define REQ_STARTED (1 << __REQ_STARTED)
178#define REQ_DONTPREP (1 << __REQ_DONTPREP)
179#define REQ_QUEUED (1 << __REQ_QUEUED)
180#define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
181#define REQ_FAILED (1 << __REQ_FAILED)
182#define REQ_QUIET (1 << __REQ_QUIET)
183#define REQ_PREEMPT (1 << __REQ_PREEMPT)
184#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
185#define REQ_ALLOCED (1 << __REQ_ALLOCED)
186#define REQ_COPY_USER (1 << __REQ_COPY_USER)
187#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
188#define REQ_FLUSH (1 << __REQ_FLUSH)
189#define REQ_IO_STAT (1 << __REQ_IO_STAT)
190#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
191
192#endif /* CONFIG_BLOCK */
193#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 55dad7bca25b..c53911277210 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -8,6 +8,7 @@
8
9#include <linux/limits.h>
10#include <linux/ioctl.h>
11#include <linux/blk_types.h>
12
13/*
14 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
@@ -117,7 +118,7 @@ struct inodes_stat_t {
117 * immediately wait on this read without caring about
118 * unplugging.
119 * READA Used for read-ahead operations. Lower priority, and the
120 * block layer could (in theory) choose to ignore this
121 * request if it runs into resource problems.
122 * WRITE A normal async write. Device will be plugged.
123 * SWRITE Like WRITE, but a special case for ll_rw_block() that
@@ -144,13 +145,13 @@ struct inodes_stat_t {
144 * of this IO.
145 *
146 */
147#define RW_MASK 1 148#define RW_MASK REQ_WRITE
148#define RWA_MASK 16 149#define RWA_MASK REQ_RAHEAD
149
150#define READ 0
151#define WRITE 1 152#define WRITE RW_MASK
152#define READA 16 /* readahead - don't block if no resources */ 153#define READA RWA_MASK
153#define SWRITE 17 /* for ll_rw_block(), wait for buffer lock */ 154#define SWRITE (WRITE | READA)
154
155#define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG)
156#define READ_META (READ | REQ_META)
@@ -2200,7 +2201,6 @@ static inline void insert_inode_hash(struct inode *inode) {
2200extern void file_move(struct file *f, struct list_head *list);
2201extern void file_kill(struct file *f);
2202#ifdef CONFIG_BLOCK
2203struct bio;
2204extern void submit_bio(int, struct bio *);
2205extern int bdev_read_only(struct block_device *);
2206#endif
@@ -2267,7 +2267,6 @@ static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
2267#endif
2268
2269#ifdef CONFIG_BLOCK
2270struct bio;
2271typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
2272 loff_t file_offset);
2273void dio_end_io(struct bio *bio, int error);