author     Rusty Russell <rusty@rustcorp.com.au>  2008-12-30 10:26:05 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>  2008-12-29 17:56:05 -0500
commit     0864b79a153342c1dfbebb12b2d099fec76c5e18
tree       8e6fa6ada6701ceeadbee5f96c78cdd59048688d /drivers/block
parent     4b7f7e2049956f6e946ad56c1ee093e7bab74da9
virtio: block: dynamic maximum segments
Enhance the driver to handle whatever maximum segment number the host tells us to handle. To do this, we need to allocate the scatterlist dynamically.

We set max_phys_segments and max_hw_segments to the same value (1 if the host doesn't tell us, since that's safest, and all known hosts do tell us).

Note that kmalloc'ing the structure for large sg_elems might be problematic: the fix for this is sg_table, but that requires more work.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
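For illustration only, the allocation pattern the patch relies on (a struct whose trailing scatterlist array is sized at runtime from the host-reported seg_max, with a fallback of 1 and two extra elements for the request header and status byte) can be sketched as standalone userspace C. The names fake_host_seg_max(), struct fake_sg and struct fake_vblk are hypothetical stand-ins for virtio_config_val() reading seg_max, struct scatterlist and struct virtio_blk; this is not the kernel code itself.

/* Standalone sketch of the dynamic trailing-array allocation pattern. */
#include <stdio.h>
#include <stdlib.h>

struct fake_sg { void *addr; unsigned int len; };

struct fake_vblk {
	unsigned int sg_elems;   /* host value, plus 2 for header & status */
	struct fake_sg sg[];     /* flexible array, sized at allocation time */
};

/* Hypothetical stand-in for virtio_config_val(..., seg_max, ...). */
static int fake_host_seg_max(unsigned int *out)
{
	*out = 126;              /* pretend the host advertises 126 segments */
	return 0;                /* 0 == success, like the kernel helper */
}

static struct fake_vblk *alloc_vblk(void)
{
	unsigned int seg_max;
	struct fake_vblk *vblk;

	/* Fall back to 1 segment if the host doesn't tell us, as the patch does. */
	if (fake_host_seg_max(&seg_max) != 0)
		seg_max = 1;

	/* +2: one element for the request header, one for the status byte. */
	vblk = malloc(sizeof(*vblk) + sizeof(vblk->sg[0]) * (seg_max + 2));
	if (!vblk)
		return NULL;
	vblk->sg_elems = seg_max + 2;
	return vblk;
}

int main(void)
{
	struct fake_vblk *vblk = alloc_vblk();

	if (!vblk)
		return 1;
	/* The block queue would then be limited to sg_elems - 2 data segments. */
	printf("sg_elems = %u, data segments = %u\n",
	       vblk->sg_elems, vblk->sg_elems - 2);
	free(vblk);
	return 0;
}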
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/virtio_blk.c  36
1 file changed, 23 insertions, 13 deletions
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 0e2ade648aff..7b9b38a12d25 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -6,7 +6,6 @@
 #include <linux/virtio_blk.h>
 #include <linux/scatterlist.h>
 
-#define VIRTIO_MAX_SG (3+MAX_PHYS_SEGMENTS)
 #define PART_BITS 4
 
 static int major, index;
@@ -26,8 +25,11 @@ struct virtio_blk
 
 	mempool_t *pool;
 
+	/* What host tells us, plus 2 for header & tailer. */
+	unsigned int sg_elems;
+
 	/* Scatterlist: can be too big for stack. */
-	struct scatterlist sg[VIRTIO_MAX_SG];
+	struct scatterlist sg[/*sg_elems*/];
 };
 
 struct virtblk_req
@@ -97,8 +99,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	if (blk_barrier_rq(vbr->req))
 		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
 
-	/* This init could be done at vblk creation time */
-	sg_init_table(vblk->sg, VIRTIO_MAX_SG);
 	sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
 	num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
 	sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
@@ -130,7 +130,7 @@ static void do_virtblk_request(struct request_queue *q)
 
 	while ((req = elv_next_request(q)) != NULL) {
 		vblk = req->rq_disk->private_data;
-		BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg));
+		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
 		/* If this request fails, stop queue and wait for something to
 		   finish to restart it. */
@@ -196,12 +196,22 @@ static int virtblk_probe(struct virtio_device *vdev)
 	int err;
 	u64 cap;
 	u32 v;
-	u32 blk_size;
+	u32 blk_size, sg_elems;
 
 	if (index_to_minor(index) >= 1 << MINORBITS)
 		return -ENOSPC;
 
-	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
+	/* We need to know how many segments before we allocate. */
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
+				offsetof(struct virtio_blk_config, seg_max),
+				&sg_elems);
+	if (err)
+		sg_elems = 1;
+
+	/* We need an extra sg elements at head and tail. */
+	sg_elems += 2;
+	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
+				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
 	if (!vblk) {
 		err = -ENOMEM;
 		goto out;
@@ -210,6 +220,8 @@ static int virtblk_probe(struct virtio_device *vdev)
 	INIT_LIST_HEAD(&vblk->reqs);
 	spin_lock_init(&vblk->lock);
 	vblk->vdev = vdev;
+	vblk->sg_elems = sg_elems;
+	sg_init_table(vblk->sg, vblk->sg_elems);
 
 	/* We expect one virtqueue, for output. */
 	vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
@@ -277,6 +289,10 @@ static int virtblk_probe(struct virtio_device *vdev)
 	}
 	set_capacity(vblk->disk, cap);
 
+	/* We can handle whatever the host told us to handle. */
+	blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
+	blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+
 	/* No real sector limit. */
 	blk_queue_max_sectors(vblk->disk->queue, -1U);
 
@@ -290,12 +306,6 @@ static int virtblk_probe(struct virtio_device *vdev)
 	else
 		blk_queue_max_segment_size(vblk->disk->queue, -1UL);
 
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
-				offsetof(struct virtio_blk_config, seg_max),
-				&v);
-	if (!err)
-		blk_queue_max_hw_segments(vblk->disk->queue, v);
-
 	/* Host can optionally specify the block size of the device */
 	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
 				offsetof(struct virtio_blk_config, blk_size),