author     Mark McLoughlin <markmc@redhat.com>      2009-05-11 13:11:46 -0400
committer  Rusty Russell <rusty@rustcorp.com.au>    2009-06-12 08:57:13 -0400
commit     d1f0132e76a11b05167313c606a853953f416081 (patch)
tree       abd06f91d578b1927249f6a467910da88b3ec910 /Documentation/lguest/lguest.c
parent     b60da13fc7bbf99d3c68578bd3fbcf66e1cb5f41 (diff)
lguest: add support for indirect ring entries
Support the VIRTIO_RING_F_INDIRECT_DESC feature.
This is a simple matter of changing the descriptor walking
code to operate on a struct vring_desc* and supplying it
with an indirect table if detected.
Signed-off-by: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
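An indirect entry is a single slot in the main ring whose buffer holds a separate array of struct vring_desc entries describing the real data buffers; the host-side change below simply re-points its descriptor walk at that table. As a rough guest-side illustration only (the publish_indirect helper is hypothetical and plain pointers stand in for guest-physical addresses; it is not part of this patch), a driver might hand over a two-buffer request through one ring slot like this:

#include <stdlib.h>
#include <linux/virtio_ring.h>  /* struct vring, struct vring_desc, VRING_DESC_F_* */

/* Hypothetical sketch: describe one output buffer and one input buffer
 * with a single indirect ring entry.  A real driver uses guest-physical
 * addresses and keeps the table alive until the host consumes it. */
static void publish_indirect(struct vring *vring, unsigned int head,
                             void *out, unsigned int out_len,
                             void *in, unsigned int in_len)
{
        /* The indirect table lives in ordinary memory, outside the ring. */
        struct vring_desc *table = calloc(2, sizeof(*table));

        table[0].addr  = (unsigned long)out;
        table[0].len   = out_len;
        table[0].flags = VRING_DESC_F_NEXT;     /* chains to table[1] */
        table[0].next  = 1;

        table[1].addr  = (unsigned long)in;
        table[1].len   = in_len;
        table[1].flags = VRING_DESC_F_WRITE;    /* host writes into this one */
        table[1].next  = 0;

        /* The ring slot carries VRING_DESC_F_INDIRECT and points at the
         * table; its len is what the host divides by sizeof(struct
         * vring_desc) to recover the number of entries. */
        vring->desc[head].addr  = (unsigned long)table;
        vring->desc[head].len   = 2 * sizeof(struct vring_desc);
        vring->desc[head].flags = VRING_DESC_F_INDIRECT;
        vring->desc[head].next  = 0;
}

The patch below teaches lguest's descriptor walk to follow exactly such a table: it validates that len is a multiple of sizeof(struct vring_desc), then treats the table as an ordinary chain bounded by max entries.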
Diffstat (limited to 'Documentation/lguest/lguest.c')
-rw-r--r--   Documentation/lguest/lguest.c   41
1 file changed, 29 insertions(+), 12 deletions(-)
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index bb5e3c28d9d6..9ebcd6ef361b 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -536,20 +536,21 @@ static void *_check_pointer(unsigned long addr, unsigned int size,
 /* Each buffer in the virtqueues is actually a chain of descriptors. This
  * function returns the next descriptor in the chain, or vq->vring.num if we're
  * at the end. */
-static unsigned next_desc(struct virtqueue *vq, unsigned int i)
+static unsigned next_desc(struct vring_desc *desc,
+                          unsigned int i, unsigned int max)
 {
         unsigned int next;
 
         /* If this descriptor says it doesn't chain, we're done. */
-        if (!(vq->vring.desc[i].flags & VRING_DESC_F_NEXT))
-                return vq->vring.num;
+        if (!(desc[i].flags & VRING_DESC_F_NEXT))
+                return max;
 
         /* Check they're not leading us off end of descriptors. */
-        next = vq->vring.desc[i].next;
+        next = desc[i].next;
         /* Make sure compiler knows to grab that: we don't want it changing! */
         wmb();
 
-        if (next >= vq->vring.num)
+        if (next >= max)
                 errx(1, "Desc next is %u", next);
 
         return next;
@@ -585,7 +586,8 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
                                  struct iovec iov[],
                                  unsigned int *out_num, unsigned int *in_num)
 {
-        unsigned int i, head;
+        unsigned int i, head, max;
+        struct vring_desc *desc;
         u16 last_avail = lg_last_avail(vq);
 
         while (last_avail == vq->vring.avail->idx) {
@@ -630,15 +632,28 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
         /* When we start there are none of either input nor output. */
         *out_num = *in_num = 0;
 
+        max = vq->vring.num;
+        desc = vq->vring.desc;
         i = head;
+
+        /* If this is an indirect entry, then this buffer contains a descriptor
+         * table which we handle as if it's any normal descriptor chain. */
+        if (desc[i].flags & VRING_DESC_F_INDIRECT) {
+                if (desc[i].len % sizeof(struct vring_desc))
+                        errx(1, "Invalid size for indirect buffer table");
+
+                max = desc[i].len / sizeof(struct vring_desc);
+                desc = check_pointer(desc[i].addr, desc[i].len);
+                i = 0;
+        }
+
         do {
                 /* Grab the first descriptor, and check it's OK. */
-                iov[*out_num + *in_num].iov_len = vq->vring.desc[i].len;
+                iov[*out_num + *in_num].iov_len = desc[i].len;
                 iov[*out_num + *in_num].iov_base
-                        = check_pointer(vq->vring.desc[i].addr,
-                                        vq->vring.desc[i].len);
+                        = check_pointer(desc[i].addr, desc[i].len);
                 /* If this is an input descriptor, increment that count. */
-                if (vq->vring.desc[i].flags & VRING_DESC_F_WRITE)
+                if (desc[i].flags & VRING_DESC_F_WRITE)
                         (*in_num)++;
                 else {
                         /* If it's an output descriptor, they're all supposed
@@ -649,9 +664,9 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
                 }
 
                 /* If we've got too many, that implies a descriptor loop. */
-                if (*out_num + *in_num > vq->vring.num)
+                if (*out_num + *in_num > max)
                         errx(1, "Looped descriptor");
-        } while ((i = next_desc(vq, i)) != vq->vring.num);
+        } while ((i = next_desc(desc, i, max)) != max);
 
         return head;
 }
@@ -1331,6 +1346,8 @@ static void setup_tun_net(char *arg)
         add_feature(dev, VIRTIO_NET_F_HOST_TSO4);
         add_feature(dev, VIRTIO_NET_F_HOST_TSO6);
         add_feature(dev, VIRTIO_NET_F_HOST_ECN);
+        /* We handle indirect ring entries */
+        add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
         set_config(dev, sizeof(conf), &conf);
 
         /* We don't need the socket any more; setup is done. */