Diffstat (limited to 'drivers/block/xen-blkback/common.h')
-rw-r--r--  drivers/block/xen-blkback/common.h | 100
1 file changed, 82 insertions(+), 18 deletions(-)
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 9e40b283a46..e638457d9de 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -46,7 +46,7 @@
 
 #define DRV_PFX "xen-blkback:"
 #define DPRINTK(fmt, args...)                          \
         pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",         \
                  __func__, __LINE__, ##args)
 
 
@@ -63,13 +63,26 @@ struct blkif_common_response {
 
 /* i386 protocol version */
 #pragma pack(push, 4)
+
+struct blkif_x86_32_request_rw {
+        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct blkif_x86_32_request_discard {
+        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+        uint64_t nr_sectors;
+};
+
 struct blkif_x86_32_request {
         uint8_t operation; /* BLKIF_OP_??? */
         uint8_t nr_segments; /* number of segments */
         blkif_vdev_t handle; /* only for read/write requests */
         uint64_t id; /* private guest value, echoed in resp */
-        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+        union {
+                struct blkif_x86_32_request_rw rw;
+                struct blkif_x86_32_request_discard discard;
+        } u;
 };
 struct blkif_x86_32_response {
         uint64_t id; /* copied from request */
@@ -79,13 +92,26 @@ struct blkif_x86_32_response {
 #pragma pack(pop)
 
 /* x86_64 protocol version */
+
+struct blkif_x86_64_request_rw {
+        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct blkif_x86_64_request_discard {
+        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+        uint64_t nr_sectors;
+};
+
 struct blkif_x86_64_request {
         uint8_t operation; /* BLKIF_OP_??? */
         uint8_t nr_segments; /* number of segments */
         blkif_vdev_t handle; /* only for read/write requests */
         uint64_t __attribute__((__aligned__(8))) id;
-        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+        union {
+                struct blkif_x86_64_request_rw rw;
+                struct blkif_x86_64_request_discard discard;
+        } u;
 };
 struct blkif_x86_64_response {
         uint64_t __attribute__((__aligned__(8))) id;
@@ -113,6 +139,11 @@ enum blkif_protocol {
         BLKIF_PROTOCOL_X86_64 = 3,
 };
 
+enum blkif_backend_type {
+        BLKIF_BACKEND_PHY  = 1,
+        BLKIF_BACKEND_FILE = 2,
+};
+
 struct xen_vbd {
         /* What the domain refers to this vbd as. */
         blkif_vdev_t handle;
@@ -138,6 +169,7 @@ struct xen_blkif {
         unsigned int irq;
         /* Comms information. */
         enum blkif_protocol blk_protocol;
+        enum blkif_backend_type blk_backend_type;
         union blkif_back_rings blk_rings;
         struct vm_struct *blk_ring_area;
         /* The VBD attached to this interface. */
@@ -149,6 +181,9 @@ struct xen_blkif {
         atomic_t refcnt;
 
         wait_queue_head_t wq;
+        /* for barrier (drain) requests */
+        struct completion drain_complete;
+        atomic_t drain;
         /* One thread per one blkif. */
         struct task_struct *xenblkd;
         unsigned int waiting_reqs;
@@ -159,6 +194,7 @@ struct xen_blkif {
         int st_wr_req;
         int st_oo_req;
         int st_f_req;
+        int st_ds_req;
         int st_rd_sect;
         int st_wr_sect;
 
@@ -182,7 +218,7 @@ struct xen_blkif {
 
 struct phys_req {
         unsigned short dev;
-        unsigned short nr_sects;
+        blkif_sector_t nr_sects;
         struct block_device *bdev;
         blkif_sector_t sector_number;
 };
@@ -196,6 +232,8 @@ int xen_blkif_schedule(void *arg);
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                               struct backend_info *be, int state);
 
+int xen_blkbk_barrier(struct xenbus_transaction xbt,
+                      struct backend_info *be, int state);
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
 
 static inline void blkif_get_x86_32_req(struct blkif_request *dst,
@@ -206,12 +244,25 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
         dst->nr_segments = src->nr_segments;
         dst->handle = src->handle;
         dst->id = src->id;
-        dst->u.rw.sector_number = src->sector_number;
-        barrier();
-        if (n > dst->nr_segments)
-                n = dst->nr_segments;
-        for (i = 0; i < n; i++)
-                dst->u.rw.seg[i] = src->seg[i];
+        switch (src->operation) {
+        case BLKIF_OP_READ:
+        case BLKIF_OP_WRITE:
+        case BLKIF_OP_WRITE_BARRIER:
+        case BLKIF_OP_FLUSH_DISKCACHE:
+                dst->u.rw.sector_number = src->u.rw.sector_number;
+                barrier();
+                if (n > dst->nr_segments)
+                        n = dst->nr_segments;
+                for (i = 0; i < n; i++)
+                        dst->u.rw.seg[i] = src->u.rw.seg[i];
+                break;
+        case BLKIF_OP_DISCARD:
+                dst->u.discard.sector_number = src->u.discard.sector_number;
+                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+                break;
+        default:
+                break;
+        }
 }
 
 static inline void blkif_get_x86_64_req(struct blkif_request *dst,
@@ -222,12 +273,25 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
         dst->nr_segments = src->nr_segments;
         dst->handle = src->handle;
         dst->id = src->id;
-        dst->u.rw.sector_number = src->sector_number;
-        barrier();
-        if (n > dst->nr_segments)
-                n = dst->nr_segments;
-        for (i = 0; i < n; i++)
-                dst->u.rw.seg[i] = src->seg[i];
+        switch (src->operation) {
+        case BLKIF_OP_READ:
+        case BLKIF_OP_WRITE:
+        case BLKIF_OP_WRITE_BARRIER:
+        case BLKIF_OP_FLUSH_DISKCACHE:
+                dst->u.rw.sector_number = src->u.rw.sector_number;
+                barrier();
+                if (n > dst->nr_segments)
+                        n = dst->nr_segments;
+                for (i = 0; i < n; i++)
+                        dst->u.rw.seg[i] = src->u.rw.seg[i];
+                break;
+        case BLKIF_OP_DISCARD:
+                dst->u.discard.sector_number = src->u.discard.sector_number;
+                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+                break;
+        default:
+                break;
+        }
 }
 
 #endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
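
The structural effect of this change is that the per-operation payload of a guest request now lives in a union: read/write-style operations keep a sector number plus a segment list in u.rw, while BLKIF_OP_DISCARD carries only a start sector and a sector count in u.discard. A minimal, stand-alone sketch of filling in a discard request under that layout follows; the typedefs, the BLKIF_OP_DISCARD value and the simplified struct below are illustrative stand-ins for the real Xen interface headers, not the definitions from this patch.

/* Stand-ins for the Xen interface types; assumptions for illustration only. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t blkif_sector_t;   /* assumed 64-bit sector index */
typedef uint16_t blkif_vdev_t;     /* assumed virtual device handle */
#define BLKIF_OP_DISCARD 5         /* illustrative opcode value */

struct blkif_request_discard {
        blkif_sector_t sector_number;  /* first sector to discard */
        uint64_t nr_sectors;           /* number of sectors to discard */
};

struct blkif_request_rw {
        blkif_sector_t sector_number;  /* segment list omitted in this sketch */
};

struct blkif_request {
        uint8_t operation;             /* BLKIF_OP_??? */
        uint8_t nr_segments;           /* unused for discard */
        blkif_vdev_t handle;
        uint64_t id;                   /* echoed back in the response */
        union {
                struct blkif_request_rw rw;
                struct blkif_request_discard discard;
        } u;
};

int main(void)
{
        /* A frontend-style fill of a discard request under the union layout. */
        struct blkif_request req = {
                .operation   = BLKIF_OP_DISCARD,
                .nr_segments = 0,      /* discard carries no data segments */
                .handle      = 0,
                .id          = 1,
        };
        req.u.discard.sector_number = 2048;
        req.u.discard.nr_sectors    = 1024;

        printf("discard %llu sectors starting at %llu\n",
               (unsigned long long)req.u.discard.nr_sectors,
               (unsigned long long)req.u.discard.sector_number);
        return 0;
}

A backend consuming such a request would branch on operation exactly as the blkif_get_x86_32_req/blkif_get_x86_64_req helpers above do, reading u.discard for BLKIF_OP_DISCARD and u.rw for the read/write-style operations.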