author    Thomas Gleixner <tglx@linutronix.de>   2016-01-12 05:01:12 -0500
committer Thomas Gleixner <tglx@linutronix.de>   2016-01-12 05:01:12 -0500
commit    1f16f116b01c110db20ab808562c8b8bc3ee3d6e
tree      44db563f64cf5f8d62af8f99a61e2b248c44ea3a /drivers/virtio
parent    03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2
parent    f9eccf24615672896dc13251410c3f2f33a14f95
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:
- Fix the vt8500 timer, which could lock up the system when programmed
  with too small a delta (Roman Volkov)
- Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with COMPILE_TEST
  (Daniel Lezcano)
- Prevent timers that use the 'iomem' API from being compiled when the
  architecture does not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'drivers/virtio')
 drivers/virtio/virtio.c      |  1
 drivers/virtio/virtio_ring.c | 48
 2 files changed, 36 insertions(+), 13 deletions(-)
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b1877d73fa56..7062bb0975a5 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -412,6 +412,7 @@ static int virtio_init(void)
 static void __exit virtio_exit(void)
 {
 	bus_unregister(&virtio_bus);
+	ida_destroy(&virtio_index_ida);
 }
 core_initcall(virtio_init);
 module_exit(virtio_exit);
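The virtio.c hunk frees whatever memory the virtio_index_ida IDA is still caching when the module is unloaded. As a hedged illustration of that teardown pattern only (the module name, "example_ida", and the single allocation below are invented for this sketch and are not part of the patch), a module that hands out IDs from a static IDA would typically pair its allocations with an ida_destroy() in the exit path:

/* Sketch: minimal module showing the IDA cleanup-on-exit pattern. */
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

static int __init example_init(void)
{
	/* Allocate one ID, as a stand-in for per-device index allocation. */
	int id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);

	return id < 0 ? id : 0;
}

static void __exit example_exit(void)
{
	/*
	 * Release everything the IDA still holds; without this the IDA's
	 * internal caches would be leaked on module unload, which is the
	 * leak the ida_destroy(&virtio_index_ida) call above addresses.
	 */
	ida_destroy(&example_ida);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");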
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 096b857e7b75..ee663c458b20 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -80,6 +80,12 @@ struct vring_virtqueue {
 	/* Last used index we've seen. */
 	u16 last_used_idx;
 
+	/* Last written value to avail->flags */
+	u16 avail_flags_shadow;
+
+	/* Last written value to avail->idx in guest byte order */
+	u16 avail_idx_shadow;
+
 	/* How to notify other side. FIXME: commonalize hcalls! */
 	bool (*notify)(struct virtqueue *vq);
 
@@ -109,7 +115,7 @@ static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
 	 * otherwise virt_to_phys will give us bogus addresses in the
 	 * virtqueue.
 	 */
-	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
+	gfp &= ~__GFP_HIGHMEM;
 
 	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
 	if (!desc)
@@ -235,13 +241,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 
 	/* Put entry in available array (but don't update avail->idx until they
 	 * do sync). */
-	avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1);
+	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
 	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
 
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
 	virtio_wmb(vq->weak_barriers);
-	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
+	vq->avail_idx_shadow++;
+	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
 	vq->num_added++;
 
 	pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -354,8 +361,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
 	 * event. */
 	virtio_mb(vq->weak_barriers);
 
-	old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added;
-	new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx);
+	old = vq->avail_idx_shadow - vq->num_added;
+	new = vq->avail_idx_shadow;
 	vq->num_added = 0;
 
 #ifdef DEBUG
@@ -510,7 +517,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	/* If we expect an interrupt for the next entry, tell host
 	 * by writing event index and flush out the write before
 	 * the read in the next get_buf call. */
-	if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) {
+	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
 		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
 		virtio_mb(vq->weak_barriers);
 	}
@@ -537,7 +544,11 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
+	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
+		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+	}
+
 }
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
@@ -565,7 +576,10 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
-	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+	}
 	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
 	END_USE(vq);
 	return last_used_idx;
@@ -633,9 +647,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
-	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+	}
 	/* TODO: tune this threshold */
-	bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
+	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
 	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
 	virtio_mb(vq->weak_barriers);
 	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
@@ -670,7 +687,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 		/* detach_buf clears data, so grab it now. */
 		buf = vq->data[i];
 		detach_buf(vq, i);
-		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
+		vq->avail_idx_shadow--;
+		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
 		END_USE(vq);
 		return buf;
 	}
@@ -735,6 +753,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 	vq->weak_barriers = weak_barriers;
 	vq->broken = false;
 	vq->last_used_idx = 0;
+	vq->avail_flags_shadow = 0;
+	vq->avail_idx_shadow = 0;
 	vq->num_added = 0;
 	list_add_tail(&vq->vq.list, &vdev->vqs);
 #ifdef DEBUG
@@ -746,8 +766,10 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
 
 	/* No callback? Tell other side not to bother us. */
-	if (!callback)
-		vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);
+	if (!callback) {
+		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+	}
 
 	/* Put everything in free lists. */
 	vq->free_head = 0;
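Most of the virtio_ring.c hunks follow a single pattern: keep a shadow copy of avail->flags and avail->idx in guest byte order, update the shadow first, and write the converted value into the shared ring only when it actually changes, instead of reading the ring field back and converting it on every operation. The following is a minimal, self-contained sketch of that write-through shadow pattern; the struct, helper names, and byte-order stub are invented for illustration and are not the kernel's vring layout or API:

/* Illustration only: a simplified "ring" with a shadowed, device-visible
 * flags field, mirroring the avail_flags_shadow checks in the patch. */
#include <stdint.h>
#include <stdio.h>

#define RING_F_NO_INTERRUPT 1

struct toy_ring {
	volatile uint16_t dev_flags;   /* field shared with the "device" */
	uint16_t flags_shadow;         /* last value we wrote, CPU byte order */
};

/* Stand-in for cpu_to_virtio16(): identity here, assuming a little-endian host. */
static uint16_t to_dev16(uint16_t v) { return v; }

static void toy_disable_cb(struct toy_ring *r)
{
	/* Touch the shared field only when the flag actually changes. */
	if (!(r->flags_shadow & RING_F_NO_INTERRUPT)) {
		r->flags_shadow |= RING_F_NO_INTERRUPT;
		r->dev_flags = to_dev16(r->flags_shadow);
	}
}

static void toy_enable_cb(struct toy_ring *r)
{
	if (r->flags_shadow & RING_F_NO_INTERRUPT) {
		r->flags_shadow &= ~RING_F_NO_INTERRUPT;
		r->dev_flags = to_dev16(r->flags_shadow);
	}
}

int main(void)
{
	struct toy_ring r = { 0, 0 };

	toy_disable_cb(&r);   /* first call writes the shared field */
	toy_disable_cb(&r);   /* second call is a no-op: shadow already set */
	toy_enable_cb(&r);
	printf("dev_flags=%u shadow=%u\n",
	       (unsigned)r.dev_flags, (unsigned)r.flags_shadow);
	return 0;
}

The sketch only mirrors the control flow of the patch: the driver trusts its own last-written copy rather than re-reading fields that the other side of the ring also accesses, and it skips redundant writes to them. The real code additionally keeps the byte-order conversions (cpu_to_virtio16) and memory barriers shown in the diff above.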
