author     Roger Pau Monne <roger.pau@citrix.com>            2013-04-18 10:06:54 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2013-04-18 14:16:00 -0400
commit     402b27f9f2c22309d5bb285628765bc27b82fcf5
tree       0177eb3c2073f1379674338ede8510e6d1e6d4bf /include/xen
parent     31552ee32df89f97a61766cee51b8dabb1ae3f4f
xen-block: implement indirect descriptors
Indirect descriptors introduce a new block operation
(BLKIF_OP_INDIRECT) that passes grant references instead of segments
in the request. These grant references point to pages filled with
arrays of blkif_request_segment_aligned, which lets us send more
segments in a single request.
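As a back-of-the-envelope illustration of what the new operation buys, the
following standalone sketch works through the capacity math; the 4 KiB page
size, the 8-byte segment entry size and the segment counts are example
assumptions, not values read from the driver.

#include <stdio.h>

#define PAGE_SIZE          4096u   /* assumed page size for the example              */
#define SEGMENT_ENTRY_SIZE 8u      /* assumed sizeof(blkif_request_segment_aligned)  */
#define SEGS_PER_IND_PAGE  (PAGE_SIZE / SEGMENT_ENTRY_SIZE)   /* 512 entries/page    */

int main(void)
{
	unsigned int direct_segs   = 11;  /* BLKIF_MAX_SEGMENTS_PER_REQUEST */
	unsigned int indirect_segs = 32;  /* example indirect segment count */

	/* ceil(indirect_segs / 512): indirect pages (grant refs) per request */
	unsigned int ind_pages =
		(indirect_segs + SEGS_PER_IND_PAGE - 1) / SEGS_PER_IND_PAGE;

	printf("direct request:   %u segments -> %u KiB of data\n",
	       direct_segs, direct_segs * PAGE_SIZE / 1024);
	printf("indirect request: %u segments -> %u KiB of data, %u indirect page(s)\n",
	       indirect_segs, indirect_segs * PAGE_SIZE / 1024, ind_pages);
	return 0;
}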
The proposed implementation sets the maximum number of indirect grefs
(frames filled with blkif_request_segment_aligned) to 256 in the
backend and 32 in the frontend. The value in the frontend has been
chosen experimentally, and the backend value has been set to a sane
value that allows expanding the maximum number of indirect descriptors
in the frontend if needed.
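A minimal sketch of how a frontend could clamp the backend's advertised limit
to its own, assuming the backend value has already been read from the
"feature-max-indirect-segments" xenbus node; the helper name and constants
below are illustrative, not the driver's actual code.

#include <stdio.h>

#define BACKEND_MAX_INDIRECT   256  /* limit advertised by the backend (see above) */
#define FRONTEND_MAX_INDIRECT   32  /* limit the frontend is willing to use        */

/* Hypothetical helper: pick the effective limit for this connection. */
static unsigned int effective_indirect_limit(unsigned int backend_max)
{
	if (backend_max == 0)           /* node absent: no indirect support */
		return 0;
	return backend_max < FRONTEND_MAX_INDIRECT ?
	       backend_max : FRONTEND_MAX_INDIRECT;
}

int main(void)
{
	printf("effective indirect limit: %u\n",
	       effective_indirect_limit(BACKEND_MAX_INDIRECT));
	return 0;
}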
The migration code has changed from the previous implementation, in
which we simply remapped the segments on the shared ring. Now the
maximum number of segments allowed in a request can change depending
on the backend, so we have to requeue all the requests on the ring and
in the queue, splitting the bios in them if they exceed the new
maximum number of segments.
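As a rough illustration of that requeue path, the sketch below only works out
how many smaller requests an oversized one has to be split into; the real
driver resubmits the bios through the block layer, and the type and helper
here are hypothetical.

#include <stdio.h>

/* Hypothetical stand-in for a request saved off the old ring. */
struct saved_req {
	unsigned int nr_segments;
};

/* How many requests are needed once the new per-request limit applies. */
static unsigned int requests_after_split(const struct saved_req *req,
                                         unsigned int new_max_segments)
{
	return (req->nr_segments + new_max_segments - 1) / new_max_segments;
}

int main(void)
{
	struct saved_req req = { .nr_segments = 64 };   /* example value */
	unsigned int new_max = 11;  /* e.g. a new backend without indirect support */

	printf("requeue as %u request(s)\n", requests_after_split(&req, new_max));
	return 0;
}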
[v2: Addressed minor comments from Konrad.]
[v1: Added padding to make the indirect request 64bit aligned.
Added some BUGs, comments; fixed number of indirect pages in
blkif_get_x86_{32/64}_req. Added description about the indirect operation
in blkif.h]
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
[v3: Fixed spaces and tabs mix ups]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'include/xen')
 include/xen/interface/io/blkif.h | 53 +++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+), 0 deletions(-)
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ffd4652de91c..65e12099ef89 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -103,12 +103,46 @@ typedef uint64_t blkif_sector_t;
 #define BLKIF_OP_DISCARD           5
 
 /*
+ * Recognized if "feature-max-indirect-segments" is present in the backend
+ * xenbus info. The "feature-max-indirect-segments" node contains the maximum
+ * number of segments allowed by the backend per request. If the node is
+ * present, the frontend might use blkif_request_indirect structs in order to
+ * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
+ * maximum number of indirect segments is fixed by the backend, but the
+ * frontend can issue requests with any number of indirect segments as long as
+ * it's less than the number provided by the backend. The indirect_grefs field
+ * in blkif_request_indirect should be filled by the frontend with the
+ * grant references of the pages that are holding the indirect segments.
+ * These pages are filled with an array of blkif_request_segment_aligned
+ * entries that hold the information about the segments. The number of
+ * indirect pages to use is determined by the maximum number of segments
+ * an indirect request contains. Every indirect page can contain a maximum
+ * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
+ * so to calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments/512).
+ *
+ * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
+ * create the "feature-max-indirect-segments" node!
+ */
+#define BLKIF_OP_INDIRECT          6
+
+/*
  * Maximum scatter/gather segments per request.
  * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
  * NB. This could be 12 if the ring indexes weren't stored in the same page.
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
+#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
+
+struct blkif_request_segment_aligned {
+	grant_ref_t gref;        /* reference to I/O buffer frame        */
+	/* @first_sect: first sector in frame to transfer (inclusive).   */
+	/* @last_sect: last sector in frame to transfer (inclusive).     */
+	uint8_t     first_sect, last_sect;
+	uint16_t    _pad; /* padding to make it 8 bytes, so it's cache-aligned */
+} __attribute__((__packed__));
+
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
 	blkif_vdev_t   handle;       /* only for read/write requests         */
@@ -147,12 +181,31 @@ struct blkif_request_other {
 	uint64_t     id;           /* private guest value, echoed in resp  */
 } __attribute__((__packed__));
 
+struct blkif_request_indirect {
+	uint8_t        indirect_op;
+	uint16_t       nr_segments;
+#ifdef CONFIG_X86_64
+	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
+#endif
+	uint64_t       id;
+	blkif_sector_t sector_number;
+	blkif_vdev_t   handle;
+	uint16_t       _pad2;
+	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+#ifdef CONFIG_X86_64
+	uint32_t       _pad3;        /* make it 64 byte aligned */
+#else
+	uint64_t       _pad3;        /* make it 64 byte aligned */
+#endif
+} __attribute__((__packed__));
+
 struct blkif_request {
 	uint8_t        operation;    /* BLKIF_OP_???                         */
 	union {
 		struct blkif_request_rw rw;
 		struct blkif_request_discard discard;
 		struct blkif_request_other other;
+		struct blkif_request_indirect indirect;
 	} u;
 } __attribute__((__packed__));
 
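To see how the padding in the new struct works out, the following standalone
check mirrors the x86_64 variant of the #ifdefs above; the widths of
grant_ref_t, blkif_vdev_t and blkif_sector_t are assumed from the Xen public
headers, and fake_request is a reduced stand-in for struct blkif_request,
kept only to exercise the offsetof/sizeof claims in the comments.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t grant_ref_t;      /* assumed widths, from the Xen public headers */
typedef uint16_t blkif_vdev_t;
typedef uint64_t blkif_sector_t;

#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8

struct blkif_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;        /* x86_64: pushes id to offset 8 in the request */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	uint32_t       _pad3;        /* x86_64: rounds the request up to 64 bytes */
} __attribute__((__packed__));

struct fake_request {                /* reduced stand-in for struct blkif_request */
	uint8_t operation;
	union {
		struct blkif_request_indirect indirect;
	} u;
} __attribute__((__packed__));

int main(void)
{
	/* The two properties the padding comments in the patch promise. */
	assert(offsetof(struct fake_request, u.indirect.id) == 8);
	assert(sizeof(struct fake_request) == 64);
	printf("id offset: %zu, request size: %zu\n",
	       offsetof(struct fake_request, u.indirect.id),
	       sizeof(struct fake_request));
	return 0;
}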