Diffstat (limited to 'include')
-rw-r--r--   include/linux/Kbuild               |   1
-rw-r--r--   include/linux/audit.h              |   2
-rw-r--r--   include/linux/backing-dev.h        |  23
-rw-r--r--   include/linux/bio.h                | 158
-rw-r--r--   include/linux/blk_types.h          | 194
-rw-r--r--   include/linux/blkdev.h             | 142
-rw-r--r--   include/linux/blktrace_api.h       |  18
-rw-r--r--   include/linux/coda_psdev.h         |   8
-rw-r--r--   include/linux/drbd.h               |   2
-rw-r--r--   include/linux/drbd_nl.h            |   9
-rw-r--r--   include/linux/fs.h                 |  45
-rw-r--r--   include/trace/events/block.h       |  15
-rw-r--r--   include/trace/events/writeback.h   | 159
13 files changed, 461 insertions(+), 315 deletions(-)
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 2547daf2aef2..9d65d4d0bd9c 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -39,6 +39,7 @@ header-y += ax25.h
 header-y += b1lli.h
 header-y += baycom.h
 header-y += bfs_fs.h
+header-y += blk_types.h
 header-y += blkpg.h
 header-y += bpqether.h
 header-y += bsg.h
diff --git a/include/linux/audit.h b/include/linux/audit.h
index f391d45c8aea..e24afabc548f 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -544,7 +544,7 @@ extern int audit_signals;
 #define audit_putname(n) do { ; } while (0)
 #define __audit_inode(n,d) do { ; } while (0)
 #define __audit_inode_child(i,p) do { ; } while (0)
-#define audit_inode(n,d) do { ; } while (0)
+#define audit_inode(n,d) do { (void)(d); } while (0)
 #define audit_inode_child(i,p) do { ; } while (0)
 #define audit_core_dumps(i) do { ; } while (0)
 #define auditsc_get_stamp(c,t,s) (0)
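
Aside: the audit_inode() stub change above makes the disabled-audit build still "use" its second argument. A minimal sketch of the warning it silences, with a hypothetical caller that is not part of this patch:

	/* With audit compiled out, audit_inode() expands to (void)(d),
	 * so "dentry" below counts as used and gcc's set-but-unused
	 * variable warning stays quiet. */
	struct dentry;
	#define audit_inode(n, d) do { (void)(d); } while (0)

	static void lookup_helper(const char *name, struct dentry *dentry)
	{
		audit_inode(name, dentry);	/* expands to (void)(dentry) */
	}
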
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e9aec0d099df..7628219e5386 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -45,22 +45,21 @@ enum bdi_stat_item {
 #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
 
 struct bdi_writeback {
-	struct list_head list;			/* hangs off the bdi */
-
-	struct backing_dev_info *bdi;		/* our parent bdi */
+	struct backing_dev_info *bdi;	/* our parent bdi */
 	unsigned int nr;
 
 	unsigned long last_old_flush;	/* last old data flush */
+	unsigned long last_active;	/* last time bdi thread was active */
 
-	struct task_struct *task;	/* writeback task */
-	struct list_head b_dirty;	/* dirty inodes */
-	struct list_head b_io;		/* parked for writeback */
-	struct list_head b_more_io;	/* parked for more writeback */
+	struct task_struct *task;	/* writeback thread */
+	struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
+	struct list_head b_dirty;	/* dirty inodes */
+	struct list_head b_io;		/* parked for writeback */
+	struct list_head b_more_io;	/* parked for more writeback */
 };
 
 struct backing_dev_info {
 	struct list_head bdi_list;
-	struct rcu_head rcu_head;
 	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
 	unsigned long state;	/* Always use atomic bitops on this */
 	unsigned int capabilities; /* Device capabilities */
@@ -80,8 +79,7 @@ struct backing_dev_info {
 	unsigned int max_ratio, max_prop_frac;
 
 	struct bdi_writeback wb;  /* default writeback info for this bdi */
-	spinlock_t wb_lock;	  /* protects update side of wb_list */
-	struct list_head wb_list; /* the flusher threads hanging off this bdi */
+	spinlock_t wb_lock;	  /* protects work_list */
 
 	struct list_head work_list;
 
@@ -105,9 +103,10 @@ void bdi_unregister(struct backing_dev_info *bdi);
 int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
-int bdi_writeback_task(struct bdi_writeback *wb);
+int bdi_writeback_thread(void *data);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_arm_supers_timer(void);
+void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
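
Note the signature change: bdi_writeback_task(struct bdi_writeback *) becomes bdi_writeback_thread(void *data), a kthread-style entry point. A hedged sketch of the new calling convention; the names follow the header, but the exact spawn site in mm/backing-dev.c may differ from this:

	#include <linux/kthread.h>

	static int spawn_default_bdi_thread(struct backing_dev_info *bdi)
	{
		struct task_struct *task;

		/* the bdi's default writeback struct is handed to the
		 * thread as its opaque kthread argument */
		task = kthread_run(bdi_writeback_thread, &bdi->wb,
				   "flush-%s", dev_name(bdi->dev));
		if (IS_ERR(task))
			return PTR_ERR(task);
		bdi->wb.task = task;
		return 0;
	}
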
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7fc5606e6ea5..5274103434ad 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -9,7 +9,7 @@
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
-
+ *
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
@@ -28,6 +28,9 @@
 
 #include <asm/io.h>
 
+/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
+#include <linux/blk_types.h>
+
 #define BIO_DEBUG
 
 #ifdef BIO_DEBUG
@@ -41,154 +44,6 @@
 #define BIO_MAX_SECTORS	(BIO_MAX_SIZE >> 9)
 
 /*
- * was unsigned short, but we might as well be ready for > 64kB I/O pages
- */
-struct bio_vec {
-	struct page	*bv_page;
-	unsigned int	bv_len;
-	unsigned int	bv_offset;
-};
-
-struct bio_set;
-struct bio;
-struct bio_integrity_payload;
-typedef void (bio_end_io_t) (struct bio *, int);
-typedef void (bio_destructor_t) (struct bio *);
-
-/*
- * main unit of I/O for the block layer and lower layers (ie drivers and
- * stacking drivers)
- */
-struct bio {
-	sector_t		bi_sector;	/* device address in 512 byte
-						   sectors */
-	struct bio		*bi_next;	/* request queue link */
-	struct block_device	*bi_bdev;
-	unsigned long		bi_flags;	/* status, command, etc */
-	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
-						 * top bits priority
-						 */
-
-	unsigned short		bi_vcnt;	/* how many bio_vec's */
-	unsigned short		bi_idx;		/* current index into bvl_vec */
-
-	/* Number of segments in this BIO after
-	 * physical address coalescing is performed.
-	 */
-	unsigned int		bi_phys_segments;
-
-	unsigned int		bi_size;	/* residual I/O count */
-
-	/*
-	 * To keep track of the max segment size, we account for the
-	 * sizes of the first and last mergeable segments in this bio.
-	 */
-	unsigned int		bi_seg_front_size;
-	unsigned int		bi_seg_back_size;
-
-	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
-
-	unsigned int		bi_comp_cpu;	/* completion CPU */
-
-	atomic_t		bi_cnt;		/* pin count */
-
-	struct bio_vec		*bi_io_vec;	/* the actual vec list */
-
-	bio_end_io_t		*bi_end_io;
-
-	void			*bi_private;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-	struct bio_integrity_payload *bi_integrity;	/* data integrity */
-#endif
-
-	bio_destructor_t	*bi_destructor;	/* destructor */
-
-	/*
-	 * We can inline a number of vecs at the end of the bio, to avoid
-	 * double allocations for a small number of bio_vecs. This member
-	 * MUST obviously be kept at the very end of the bio.
-	 */
-	struct bio_vec		bi_inline_vecs[0];
-};
-
-/*
- * bio flags
- */
-#define BIO_UPTODATE	0	/* ok after I/O completion */
-#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
-#define BIO_EOF		2	/* out-out-bounds error */
-#define BIO_SEG_VALID	3	/* bi_phys_segments valid */
-#define BIO_CLONED	4	/* doesn't own data */
-#define BIO_BOUNCED	5	/* bio is a bounce bio */
-#define BIO_USER_MAPPED 6	/* contains user pages */
-#define BIO_EOPNOTSUPP	7	/* not supported */
-#define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
-#define BIO_NULL_MAPPED 9	/* contains invalid user pages */
-#define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
-#define BIO_QUIET	11	/* Make BIO Quiet */
-#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
-
-/*
- * top 4 bits of bio flags indicate the pool this bio came from
- */
-#define BIO_POOL_BITS		(4)
-#define BIO_POOL_NONE		((1UL << BIO_POOL_BITS) - 1)
-#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
-#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
-#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
-
-/*
- * bio bi_rw flags
- *
- * bit 0 -- data direction
- *	If not set, bio is a read from device. If set, it's a write to device.
- * bit 1 -- fail fast device errors
- * bit 2 -- fail fast transport errors
- * bit 3 -- fail fast driver errors
- * bit 4 -- rw-ahead when set
- * bit 5 -- barrier
- *	Insert a serialization point in the IO queue, forcing previously
- *	submitted IO to be completed before this one is issued.
- * bit 6 -- synchronous I/O hint.
- * bit 7 -- Unplug the device immediately after submitting this bio.
- * bit 8 -- metadata request
- *	Used for tracing to differentiate metadata and data IO. May also
- *	get some preferential treatment in the IO scheduler
- * bit 9 -- discard sectors
- *	Informs the lower level device that this range of sectors is no longer
- *	used by the file system and may thus be freed by the device. Used
- *	for flash based storage.
- *	Don't want driver retries for any fast fail whatever the reason.
- * bit 10 -- Tell the IO scheduler not to wait for more requests after this
-	one has been submitted, even if it is a SYNC request.
- */
-enum bio_rw_flags {
-	BIO_RW,
-	BIO_RW_FAILFAST_DEV,
-	BIO_RW_FAILFAST_TRANSPORT,
-	BIO_RW_FAILFAST_DRIVER,
-	/* above flags must match REQ_* */
-	BIO_RW_AHEAD,
-	BIO_RW_BARRIER,
-	BIO_RW_SYNCIO,
-	BIO_RW_UNPLUG,
-	BIO_RW_META,
-	BIO_RW_DISCARD,
-	BIO_RW_NOIDLE,
-};
-
-/*
- * First four bits must match between bio->bi_rw and rq->cmd_flags, make
- * that explicit here.
- */
-#define BIO_RW_RQ_MASK		0xf
-
-static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
-{
-	return (bio->bi_rw & (1 << flag)) != 0;
-}
-
-/*
  * upper 16 bits of bi_rw define the io priority of this bio
  */
 #define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
@@ -211,7 +66,10 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio)	(bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD))
+#define bio_empty_barrier(bio) \
+	((bio->bi_rw & REQ_HARDBARRIER) && \
+	 !bio_has_data(bio) && \
+	 !(bio->bi_rw & REQ_DISCARD))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
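
With the BIO_RW_* enum and bio_rw_flagged() removed from bio.h, callers now test bio->bi_rw directly against the shared REQ_* flags, as the reworked bio_empty_barrier() above does. An illustrative helper in the same style, not part of this patch:

	static inline bool bio_is_sync_write(struct bio *bio)
	{
		/* REQ_WRITE and REQ_SYNC come from <linux/blk_types.h> */
		return (bio->bi_rw & REQ_WRITE) && (bio->bi_rw & REQ_SYNC);
	}
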
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
new file mode 100644
index 000000000000..53691774d34e
--- /dev/null
+++ b/include/linux/blk_types.h
@@ -0,0 +1,194 @@
+/*
+ * Block data types and constants. Directly include this file only to
+ * break include dependency loop.
+ */
+#ifndef __LINUX_BLK_TYPES_H
+#define __LINUX_BLK_TYPES_H
+
+#ifdef CONFIG_BLOCK
+
+#include <linux/types.h>
+
+struct bio_set;
+struct bio;
+struct bio_integrity_payload;
+struct page;
+struct block_device;
+typedef void (bio_end_io_t) (struct bio *, int);
+typedef void (bio_destructor_t) (struct bio *);
+
+/*
+ * was unsigned short, but we might as well be ready for > 64kB I/O pages
+ */
+struct bio_vec {
+	struct page	*bv_page;
+	unsigned int	bv_len;
+	unsigned int	bv_offset;
+};
+
+/*
+ * main unit of I/O for the block layer and lower layers (ie drivers and
+ * stacking drivers)
+ */
+struct bio {
+	sector_t		bi_sector;	/* device address in 512 byte
+						   sectors */
+	struct bio		*bi_next;	/* request queue link */
+	struct block_device	*bi_bdev;
+	unsigned long		bi_flags;	/* status, command, etc */
+	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
+						 * top bits priority
+						 */
+
+	unsigned short		bi_vcnt;	/* how many bio_vec's */
+	unsigned short		bi_idx;		/* current index into bvl_vec */
+
+	/* Number of segments in this BIO after
+	 * physical address coalescing is performed.
+	 */
+	unsigned int		bi_phys_segments;
+
+	unsigned int		bi_size;	/* residual I/O count */
+
+	/*
+	 * To keep track of the max segment size, we account for the
+	 * sizes of the first and last mergeable segments in this bio.
+	 */
+	unsigned int		bi_seg_front_size;
+	unsigned int		bi_seg_back_size;
+
+	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
+
+	unsigned int		bi_comp_cpu;	/* completion CPU */
+
+	atomic_t		bi_cnt;		/* pin count */
+
+	struct bio_vec		*bi_io_vec;	/* the actual vec list */
+
+	bio_end_io_t		*bi_end_io;
+
+	void			*bi_private;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	struct bio_integrity_payload *bi_integrity;	/* data integrity */
+#endif
+
+	bio_destructor_t	*bi_destructor;	/* destructor */
+
+	/*
+	 * We can inline a number of vecs at the end of the bio, to avoid
+	 * double allocations for a small number of bio_vecs. This member
+	 * MUST obviously be kept at the very end of the bio.
+	 */
+	struct bio_vec		bi_inline_vecs[0];
+};
+
+/*
+ * bio flags
+ */
+#define BIO_UPTODATE	0	/* ok after I/O completion */
+#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
+#define BIO_EOF		2	/* out-out-bounds error */
+#define BIO_SEG_VALID	3	/* bi_phys_segments valid */
+#define BIO_CLONED	4	/* doesn't own data */
+#define BIO_BOUNCED	5	/* bio is a bounce bio */
+#define BIO_USER_MAPPED 6	/* contains user pages */
+#define BIO_EOPNOTSUPP	7	/* not supported */
+#define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
+#define BIO_NULL_MAPPED 9	/* contains invalid user pages */
+#define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
+#define BIO_QUIET	11	/* Make BIO Quiet */
+#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
+
+/*
+ * top 4 bits of bio flags indicate the pool this bio came from
+ */
+#define BIO_POOL_BITS		(4)
+#define BIO_POOL_NONE		((1UL << BIO_POOL_BITS) - 1)
+#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
+#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
+#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
+
+#endif /* CONFIG_BLOCK */
+
+/*
+ * Request flags.  For use in the cmd_flags field of struct request, and in
+ * bi_rw of struct bio.  Note that some flags are only valid in either one.
+ */
+enum rq_flag_bits {
+	/* common flags */
+	__REQ_WRITE,		/* not set, read. set, write */
+	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
+	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
+
+	__REQ_HARDBARRIER,	/* may not be passed by drive either */
+	__REQ_SYNC,		/* request is sync (sync write or read) */
+	__REQ_META,		/* metadata io request */
+	__REQ_DISCARD,		/* request to discard sectors */
+	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
+
+	/* bio only flags */
+	__REQ_UNPLUG,		/* unplug the immediately after submission */
+	__REQ_RAHEAD,		/* read ahead, can fail anytime */
+
+	/* request only flags */
+	__REQ_SORTED,		/* elevator knows about this request */
+	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
+	__REQ_FUA,		/* forced unit access */
+	__REQ_NOMERGE,		/* don't touch this for merging */
+	__REQ_STARTED,		/* drive already may have started this one */
+	__REQ_DONTPREP,		/* don't call prep for this one */
+	__REQ_QUEUED,		/* uses queueing */
+	__REQ_ELVPRIV,		/* elevator private data attached */
+	__REQ_FAILED,		/* set if the request failed */
+	__REQ_QUIET,		/* don't worry about errors */
+	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
+	__REQ_ORDERED_COLOR,	/* is before or after barrier */
+	__REQ_ALLOCED,		/* request came from our alloc pool */
+	__REQ_COPY_USER,	/* contains copies of user pages */
+	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
+	__REQ_FLUSH,		/* request for cache flush */
+	__REQ_IO_STAT,		/* account I/O stat */
+	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
+	__REQ_NR_BITS,		/* stops here */
+};
+
+#define REQ_WRITE		(1 << __REQ_WRITE)
+#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
+#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
+#define REQ_SYNC		(1 << __REQ_SYNC)
+#define REQ_META		(1 << __REQ_META)
+#define REQ_DISCARD		(1 << __REQ_DISCARD)
+#define REQ_NOIDLE		(1 << __REQ_NOIDLE)
+
+#define REQ_FAILFAST_MASK \
+	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
+#define REQ_COMMON_MASK \
+	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
+	 REQ_META| REQ_DISCARD | REQ_NOIDLE)
+
+#define REQ_UNPLUG		(1 << __REQ_UNPLUG)
+#define REQ_RAHEAD		(1 << __REQ_RAHEAD)
+
+#define REQ_SORTED		(1 << __REQ_SORTED)
+#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
+#define REQ_FUA			(1 << __REQ_FUA)
+#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
+#define REQ_STARTED		(1 << __REQ_STARTED)
+#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
+#define REQ_QUEUED		(1 << __REQ_QUEUED)
+#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
+#define REQ_FAILED		(1 << __REQ_FAILED)
+#define REQ_QUIET		(1 << __REQ_QUIET)
+#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
+#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
+#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
+#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
+#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
+#define REQ_FLUSH		(1 << __REQ_FLUSH)
+#define REQ_IO_STAT		(1 << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
+
+#endif /* __LINUX_BLK_TYPES_H */
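
The practical payoff of the unified enum is REQ_COMMON_MASK: because bio->bi_rw and rq->cmd_flags now share one namespace, the flags valid in both can be propagated with a single mask. A sketch under that assumption; rq_inherit_bio_flags() is a hypothetical name, the real propagation lives in the block core:

	static inline void rq_inherit_bio_flags(struct request *rq,
						struct bio *bio)
	{
		/* copy only the flags meaningful in both bio and request */
		rq->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
	}
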
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 09a840264d6f..89c855c5655c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -60,7 +60,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_PM_RESUME,		/* resume request */
 	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
 	REQ_TYPE_SPECIAL,		/* driver defined type */
-	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
 	/*
 	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
 	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
@@ -70,84 +69,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
-/*
- * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
- * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
- * SCSI cdb.
- *
- * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
- * typically to differentiate REQ_TYPE_SPECIAL requests.
- *
- */
-enum {
-	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
-	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
-};
-
-/*
- * request type modified bits. first four bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
-	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
-	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
-	/* above flags must match BIO_RW_* */
-	__REQ_DISCARD,		/* request to discard sectors */
-	__REQ_SORTED,		/* elevator knows about this request */
-	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
-	__REQ_HARDBARRIER,	/* may not be passed by drive either */
-	__REQ_FUA,		/* forced unit access */
-	__REQ_NOMERGE,		/* don't touch this for merging */
-	__REQ_STARTED,		/* drive already may have started this one */
-	__REQ_DONTPREP,		/* don't call prep for this one */
-	__REQ_QUEUED,		/* uses queueing */
-	__REQ_ELVPRIV,		/* elevator private data attached */
-	__REQ_FAILED,		/* set if the request failed */
-	__REQ_QUIET,		/* don't worry about errors */
-	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
-	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
-	__REQ_ALLOCED,		/* request came from our alloc pool */
-	__REQ_RW_META,		/* metadata io request */
-	__REQ_COPY_USER,	/* contains copies of user pages */
-	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
-	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
-	__REQ_IO_STAT,		/* account I/O stat */
-	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
-	__REQ_NR_BITS,		/* stops here */
-};
-
-#define REQ_RW		(1 << __REQ_RW)
-#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
-#define REQ_DISCARD	(1 << __REQ_DISCARD)
-#define REQ_SORTED	(1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
-#define REQ_FUA		(1 << __REQ_FUA)
-#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
-#define REQ_STARTED	(1 << __REQ_STARTED)
-#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
-#define REQ_QUEUED	(1 << __REQ_QUEUED)
-#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
-#define REQ_FAILED	(1 << __REQ_FAILED)
-#define REQ_QUIET	(1 << __REQ_QUIET)
-#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
-#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
-#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
-#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
-#define REQ_RW_META	(1 << __REQ_RW_META)
-#define REQ_COPY_USER	(1 << __REQ_COPY_USER)
-#define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
-#define REQ_NOIDLE	(1 << __REQ_NOIDLE)
-#define REQ_IO_STAT	(1 << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE	(1 << __REQ_MIXED_MERGE)
-
-#define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
-				 REQ_FAILFAST_DRIVER)
-
 #define BLK_MAX_CDB	16
 
 /*
@@ -264,6 +185,7 @@ struct request_pm_state
 typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
@@ -275,7 +197,6 @@ struct bvec_merge_data {
 };
 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
 			     struct bio_vec *);
-typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 typedef int (lld_busy_fn) (struct request_queue *q);
@@ -346,9 +267,9 @@ struct request_queue
 	request_fn_proc		*request_fn;
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
+	unprep_rq_fn		*unprep_rq_fn;
 	unplug_fn		*unplug_fn;
 	merge_bvec_fn		*merge_bvec_fn;
-	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
@@ -467,11 +388,13 @@ struct request_queue
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
 #define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM  18	/* Contributes to random pool */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
-				 (1 << QUEUE_FLAG_STACKABLE)	|	\
-				 (1 << QUEUE_FLAG_SAME_COMP))
+				 (1 << QUEUE_FLAG_STACKABLE)	|	\
+				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
+				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -596,38 +519,26 @@ enum {
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
+#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 
-#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
-#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
-#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
-#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
-
-#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
-#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
-#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
-#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
-				 blk_failfast_transport(rq) ||	\
-				 blk_failfast_driver(rq))
-#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
-#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)
-#define blk_rq_quiet(rq)	((rq)->cmd_flags & REQ_QUIET)
-
-#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
-
-#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
-#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
+#define blk_noretry_request(rq) \
+	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
+			     REQ_FAILFAST_DRIVER))
+
+#define blk_account_rq(rq) \
+	(((rq)->cmd_flags & REQ_STARTED) && \
+	 ((rq)->cmd_type == REQ_TYPE_FS || \
+	  ((rq)->cmd_flags & REQ_DISCARD)))
+
 #define blk_pm_request(rq)	\
-	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
+	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
+	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)
 
 #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
-#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
-#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
-#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
-#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
@@ -641,7 +552,7 @@ enum {
  */
 static inline bool rw_is_sync(unsigned int rw_flags)
 {
-	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
@@ -649,9 +560,6 @@ static inline bool rq_is_sync(struct request *rq)
 	return rw_is_sync(rq->cmd_flags);
 }
 
-#define rq_is_meta(rq)	((rq)->cmd_flags & REQ_RW_META)
-#define rq_noidle(rq)	((rq)->cmd_flags & REQ_NOIDLE)
-
 static inline int blk_queue_full(struct request_queue *q, int sync)
 {
 	if (sync)
@@ -684,7 +592,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
-	 (blk_discard_rq(rq) || blk_fs_request((rq))))
+	 (((rq)->cmd_flags & REQ_DISCARD) || \
+	  (rq)->cmd_type == REQ_TYPE_FS))
 
 /*
  * q->prep_rq_fn return values
@@ -709,7 +618,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_HIGH		-1ULL
 #endif
 #define BLK_BOUNCE_ANY		(-1ULL)
-#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)
+#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))
 
 /*
  * default timeout for SG_IO if none specified
@@ -781,6 +690,8 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *,
 					gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_add_request_payload(struct request *rq, struct page *page,
+		unsigned int len);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
@@ -915,6 +826,7 @@ extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 extern void blk_abort_queue(struct request_queue *);
+extern void blk_unprep_request(struct request *);
 
 /*
  * Access functions for manipulating queue properties
@@ -959,6 +871,7 @@ extern int blk_queue_dma_drain(struct request_queue *q,
 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
@@ -966,7 +879,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern int blk_queue_ordered(struct request_queue *, unsigned);
 extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
@@ -1020,7 +933,7 @@ static inline int sb_issue_discard(struct super_block *sb,
 {
 	block <<= (sb->s_blocksize_bits - 9);
 	nr_blocks <<= (sb->s_blocksize_bits - 9);
-	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
+	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS,
 				   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 }
 
@@ -1333,7 +1246,6 @@ static inline int blk_integrity_rq(struct request *rq)
 struct block_device_operations {
 	int (*open) (struct block_device *, fmode_t);
 	int (*release) (struct gendisk *, fmode_t);
-	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*direct_access) (struct block_device *, sector_t,
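
The unprep_rq_fn hook added above pairs with prep_rq_fn: a driver that allocates a per-request resource in prep can release it in unprep, which the block layer invokes when a request is unprepped (see blk_unprep_request()). A hedged sketch of the pattern; my_prep(), my_unprep() and struct my_cmd are hypothetical:

	struct my_cmd { int tag; };	/* hypothetical per-request data */

	static int my_prep(struct request_queue *q, struct request *rq)
	{
		rq->special = kzalloc(sizeof(struct my_cmd), GFP_ATOMIC);
		if (!rq->special)
			return BLKPREP_DEFER;
		rq->cmd_flags |= REQ_DONTPREP;	/* prep only once */
		return BLKPREP_OK;
	}

	static void my_unprep(struct request_queue *q, struct request *rq)
	{
		kfree(rq->special);
		rq->special = NULL;
	}

	/* during queue setup:
	 *	blk_queue_prep_rq(q, my_prep);
	 *	blk_queue_unprep_rq(q, my_unprep);
	 */
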
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 416bf62d6d46..3395cf7130f5 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -5,6 +5,7 @@
 #ifdef __KERNEL__
 #include <linux/blkdev.h>
 #include <linux/relay.h>
+#include <linux/compat.h>
 #endif
 
 /*
@@ -220,11 +221,26 @@ static inline int blk_trace_init_sysfs(struct device *dev)
 
 #endif /* CONFIG_BLK_DEV_IO_TRACE */
 
+#ifdef CONFIG_COMPAT
+
+struct compat_blk_user_trace_setup {
+	char name[32];
+	u16 act_mask;
+	u32 buf_size;
+	u32 buf_nr;
+	compat_u64 start_lba;
+	compat_u64 end_lba;
+	u32 pid;
+};
+#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)
+
+#endif
+
 #if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK)
 
 static inline int blk_cmd_buf_len(struct request *rq)
 {
-	return blk_pc_request(rq) ? rq->cmd_len * 3 : 1;
+	return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1;
 }
 
 extern void blk_dump_cmd(char *buf, struct request *rq);
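
For context, compat_blk_user_trace_setup mirrors the native blk_user_trace_setup with fixed 32-bit field layout, so a 64-bit kernel servicing BLKTRACESETUP32 from 32-bit userspace can copy the packed struct in and widen it. A sketch under that assumption; compat_setup_to_native() is a hypothetical helper, not part of this patch:

	static int compat_setup_to_native(struct blk_user_trace_setup *buts,
			struct compat_blk_user_trace_setup __user *arg)
	{
		struct compat_blk_user_trace_setup cbuts;

		if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
			return -EFAULT;

		/* widen the 32-bit layout into the native struct */
		memcpy(buts->name, cbuts.name, sizeof(cbuts.name));
		buts->act_mask  = cbuts.act_mask;
		buts->buf_size  = cbuts.buf_size;
		buts->buf_nr    = cbuts.buf_nr;
		buts->start_lba = cbuts.start_lba;
		buts->end_lba   = cbuts.end_lba;
		buts->pid       = cbuts.pid;
		return 0;
	}
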
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 8859e2ede9fe..284b520934a0 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -86,9 +86,9 @@ struct upc_req {
 	wait_queue_head_t uc_sleep;	/* process' wait queue */
 };
 
-#define REQ_ASYNC	0x1
-#define REQ_READ	0x2
-#define REQ_WRITE	0x4
-#define REQ_ABORT	0x8
+#define CODA_REQ_ASYNC	0x1
+#define CODA_REQ_READ	0x2
+#define CODA_REQ_WRITE	0x4
+#define CODA_REQ_ABORT	0x8
 
 #endif
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index b8d2516668aa..479ee3a1d901 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.8"
+#define REL_VERSION "8.3.8.1"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 94
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index ce77a746fc9d..5f042810a56c 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -78,10 +78,11 @@ NL_PACKET(syncer_conf, 8,
 	NL_INTEGER(	30,	T_MAY_IGNORE,	rate)
 	NL_INTEGER(	31,	T_MAY_IGNORE,	after)
 	NL_INTEGER(	32,	T_MAY_IGNORE,	al_extents)
-	NL_INTEGER(	71,	T_MAY_IGNORE,	dp_volume)
-	NL_INTEGER(	72,	T_MAY_IGNORE,	dp_interval)
-	NL_INTEGER(	73,	T_MAY_IGNORE,	throttle_th)
-	NL_INTEGER(	74,	T_MAY_IGNORE,	hold_off_th)
+/*	NL_INTEGER(	71,	T_MAY_IGNORE,	dp_volume)
+ *	NL_INTEGER(	72,	T_MAY_IGNORE,	dp_interval)
+ *	NL_INTEGER(	73,	T_MAY_IGNORE,	throttle_th)
+ *	NL_INTEGER(	74,	T_MAY_IGNORE,	hold_off_th)
+ * feature will be reimplemented differently with 8.3.9 */
 	NL_STRING(	52,	T_MAY_IGNORE,	verify_alg,	SHARED_SECRET_MAX)
 	NL_STRING(	51,	T_MAY_IGNORE,	cpu_mask,	32)
 	NL_STRING(	64,	T_MAY_IGNORE,	csums_alg,	SHARED_SECRET_MAX)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a8ccf85b8691..1542e0e52b2e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -8,6 +8,7 @@
 
 #include <linux/limits.h>
 #include <linux/ioctl.h>
+#include <linux/blk_types.h>
 
 /*
  * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
@@ -121,7 +122,7 @@ struct inodes_stat_t {
  *			immediately wait on this read without caring about
  *			unplugging.
  * READA		Used for read-ahead operations. Lower priority, and the
- *			 block layer could (in theory) choose to ignore this
+ *			block layer could (in theory) choose to ignore this
  *			request if it runs into resource problems.
  * WRITE		A normal async write. Device will be plugged.
  * SWRITE		Like WRITE, but a special case for ll_rw_block() that
@@ -140,7 +141,7 @@ struct inodes_stat_t {
  * SWRITE_SYNC
  * SWRITE_SYNC_PLUG	Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
  *			See SWRITE.
- * WRITE_BARRIER	Like WRITE, but tells the block layer that all
+ * WRITE_BARRIER	Like WRITE_SYNC, but tells the block layer that all
  *			previously submitted writes must be safely on storage
  *			before this one is started. Also guarantees that when
  *			this write is complete, it itself is also safely on
@@ -148,29 +149,31 @@ struct inodes_stat_t {
  *			of this IO.
  *
  */
-#define RW_MASK		1
-#define RWA_MASK	2
-#define READ 0
-#define WRITE 1
-#define READA 2		/* read-ahead - don't block if no resources */
-#define SWRITE 3	/* for ll_rw_block() - wait for buffer lock */
-#define READ_SYNC	(READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
-#define READ_META	(READ | (1 << BIO_RW_META))
-#define WRITE_SYNC_PLUG	(WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
-#define WRITE_SYNC	(WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
-#define WRITE_ODIRECT_PLUG	(WRITE | (1 << BIO_RW_SYNCIO))
-#define WRITE_META	(WRITE | (1 << BIO_RW_META))
-#define SWRITE_SYNC_PLUG	\
-			(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
-#define SWRITE_SYNC	(SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
-#define WRITE_BARRIER	(WRITE | (1 << BIO_RW_BARRIER))
+#define RW_MASK			REQ_WRITE
+#define RWA_MASK		REQ_RAHEAD
+
+#define READ			0
+#define WRITE			RW_MASK
+#define READA			RWA_MASK
+#define SWRITE			(WRITE | READA)
+
+#define READ_SYNC		(READ | REQ_SYNC | REQ_UNPLUG)
+#define READ_META		(READ | REQ_META)
+#define WRITE_SYNC_PLUG		(WRITE | REQ_SYNC | REQ_NOIDLE)
+#define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
+#define WRITE_ODIRECT_PLUG	(WRITE | REQ_SYNC)
+#define WRITE_META		(WRITE | REQ_META)
+#define WRITE_BARRIER		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
+				 REQ_HARDBARRIER)
+#define SWRITE_SYNC_PLUG	(SWRITE | REQ_SYNC | REQ_NOIDLE)
+#define SWRITE_SYNC		(SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
 
 /*
  * These aren't really reads or writes, they pass down information about
  * parts of device that are now unused by the file system.
  */
-#define DISCARD_NOBARRIER (WRITE | (1 << BIO_RW_DISCARD))
-#define DISCARD_BARRIER (DISCARD_NOBARRIER | (1 << BIO_RW_BARRIER))
+#define DISCARD_NOBARRIER	(WRITE | REQ_DISCARD)
+#define DISCARD_BARRIER		(WRITE | REQ_DISCARD | REQ_HARDBARRIER)
 
 #define SEL_IN		1
 #define SEL_OUT		2
@@ -2196,7 +2199,6 @@ static inline void insert_inode_hash(struct inode *inode) {
 extern void file_move(struct file *f, struct list_head *list);
 extern void file_kill(struct file *f);
 #ifdef CONFIG_BLOCK
-struct bio;
 extern void submit_bio(int, struct bio *);
 extern int bdev_read_only(struct block_device *);
 #endif
@@ -2263,7 +2265,6 @@ static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
 #endif
 
 #ifdef CONFIG_BLOCK
-struct bio;
 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
 	loff_t file_offset);
 
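
Since READ/WRITE and their composites now expand to REQ_* bits, the rw argument filesystems pass to submit_bio() carries the very flag values the block layer stores in bi_rw. Illustrative only, assuming a bio already set up by the caller:

	static void submit_sync_write(struct bio *bio)
	{
		/* WRITE_SYNC == WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG */
		submit_bio(WRITE_SYNC, bio);
	}
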
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index d870a918559c..d8ce278515c3 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -25,8 +25,10 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
 
 	TP_fast_assign(
 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-		__entry->sector    = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
-		__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+					0 : blk_rq_pos(rq);
+		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+					0 : blk_rq_sectors(rq);
 		__entry->errors    = rq->errors;
 
 		blk_fill_rwbs_rq(__entry->rwbs, rq);
@@ -109,9 +111,12 @@ DECLARE_EVENT_CLASS(block_rq,
 
 	TP_fast_assign(
 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-		__entry->sector    = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
-		__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
-		__entry->bytes     = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
+		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+					0 : blk_rq_pos(rq);
+		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+					0 : blk_rq_sectors(rq);
+		__entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+					blk_rq_bytes(rq) : 0;
 
 		blk_fill_rwbs_rq(__entry->rwbs, rq);
 		blk_dump_cmd(__get_str(cmd), rq);
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
new file mode 100644
index 000000000000..f345f66ae9d1
--- /dev/null
+++ b/include/trace/events/writeback.h
@@ -0,0 +1,159 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM writeback
+
+#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WRITEBACK_H
+
+#include <linux/backing-dev.h>
+#include <linux/device.h>
+#include <linux/writeback.h>
+
+struct wb_writeback_work;
+
+DECLARE_EVENT_CLASS(writeback_work_class,
+	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
+	TP_ARGS(bdi, work),
+	TP_STRUCT__entry(
+		__array(char, name, 32)
+		__field(long, nr_pages)
+		__field(dev_t, sb_dev)
+		__field(int, sync_mode)
+		__field(int, for_kupdate)
+		__field(int, range_cyclic)
+		__field(int, for_background)
+	),
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(bdi->dev), 32);
+		__entry->nr_pages = work->nr_pages;
+		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
+		__entry->sync_mode = work->sync_mode;
+		__entry->for_kupdate = work->for_kupdate;
+		__entry->range_cyclic = work->range_cyclic;
+		__entry->for_background	= work->for_background;
+	),
+	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
+		  "kupdate=%d range_cyclic=%d background=%d",
+		  __entry->name,
+		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
+		  __entry->nr_pages,
+		  __entry->sync_mode,
+		  __entry->for_kupdate,
+		  __entry->range_cyclic,
+		  __entry->for_background
+	)
+);
+#define DEFINE_WRITEBACK_WORK_EVENT(name) \
+DEFINE_EVENT(writeback_work_class, name, \
+	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
+	TP_ARGS(bdi, work))
+DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
+
+TRACE_EVENT(writeback_pages_written,
+	TP_PROTO(long pages_written),
+	TP_ARGS(pages_written),
+	TP_STRUCT__entry(
+		__field(long, pages)
+	),
+	TP_fast_assign(
+		__entry->pages = pages_written;
+	),
+	TP_printk("%ld", __entry->pages)
+);
+
+DECLARE_EVENT_CLASS(writeback_class,
+	TP_PROTO(struct backing_dev_info *bdi),
+	TP_ARGS(bdi),
+	TP_STRUCT__entry(
+		__array(char, name, 32)
+	),
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(bdi->dev), 32);
+	),
+	TP_printk("bdi %s",
+		  __entry->name
+	)
+);
+#define DEFINE_WRITEBACK_EVENT(name) \
+DEFINE_EVENT(writeback_class, name, \
+	TP_PROTO(struct backing_dev_info *bdi), \
+	TP_ARGS(bdi))
+
+DEFINE_WRITEBACK_EVENT(writeback_nowork);
+DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
+DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
+DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
+DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
+DEFINE_WRITEBACK_EVENT(writeback_thread_start);
+DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
+
+DECLARE_EVENT_CLASS(wbc_class,
+	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
+	TP_ARGS(wbc, bdi),
+	TP_STRUCT__entry(
+		__array(char, name, 32)
+		__field(long, nr_to_write)
+		__field(long, pages_skipped)
+		__field(int, sync_mode)
+		__field(int, nonblocking)
+		__field(int, encountered_congestion)
+		__field(int, for_kupdate)
+		__field(int, for_background)
+		__field(int, for_reclaim)
+		__field(int, range_cyclic)
+		__field(int, more_io)
+		__field(unsigned long, older_than_this)
+		__field(long, range_start)
+		__field(long, range_end)
+	),
+
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(bdi->dev), 32);
+		__entry->nr_to_write	= wbc->nr_to_write;
+		__entry->pages_skipped	= wbc->pages_skipped;
+		__entry->sync_mode	= wbc->sync_mode;
+		__entry->for_kupdate	= wbc->for_kupdate;
+		__entry->for_background	= wbc->for_background;
+		__entry->for_reclaim	= wbc->for_reclaim;
+		__entry->range_cyclic	= wbc->range_cyclic;
+		__entry->more_io	= wbc->more_io;
+		__entry->older_than_this = wbc->older_than_this ?
+						*wbc->older_than_this : 0;
+		__entry->range_start	= (long)wbc->range_start;
+		__entry->range_end	= (long)wbc->range_end;
+	),
+
+	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
+		"bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx "
+		"start=0x%lx end=0x%lx",
+		__entry->name,
+		__entry->nr_to_write,
+		__entry->pages_skipped,
+		__entry->sync_mode,
+		__entry->for_kupdate,
+		__entry->for_background,
+		__entry->for_reclaim,
+		__entry->range_cyclic,
+		__entry->more_io,
+		__entry->older_than_this,
+		__entry->range_start,
+		__entry->range_end)
+)
+
+#define DEFINE_WBC_EVENT(name) \
+DEFINE_EVENT(wbc_class, name, \
+	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
+	TP_ARGS(wbc, bdi))
+DEFINE_WBC_EVENT(wbc_writeback_start);
+DEFINE_WBC_EVENT(wbc_writeback_written);
+DEFINE_WBC_EVENT(wbc_writeback_wait);
+DEFINE_WBC_EVENT(wbc_balance_dirty_start);
+DEFINE_WBC_EVENT(wbc_balance_dirty_written);
+DEFINE_WBC_EVENT(wbc_balance_dirty_wait);
+DEFINE_WBC_EVENT(wbc_writepage);
+
+#endif /* _TRACE_WRITEBACK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
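
For reference, each TRACE_EVENT()/DEFINE_EVENT() above generates a trace_<name>() hook that the writeback code calls at the matching point. Schematically (call sites paraphrased from the TRACE_EVENT framework's naming convention, not quoted from this patch):

	/* in the flusher-thread and balance_dirty_pages paths: */
	trace_writeback_queue(bdi, work);
	trace_writeback_exec(bdi, work);
	trace_writeback_pages_written(pages_written);
	trace_wbc_writepage(wbc, bdi);

Once compiled in, the events appear under the "writeback" system in the tracing filesystem and can be enabled individually or as a group.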