| author | Paolo Bonzini <pbonzini@redhat.com> | 2019-05-31 18:48:45 -0400 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2019-05-31 18:48:45 -0400 |
| commit | 24e8a2ca1f74574ad2ed1ac7af0260dd90fd911e | |
| tree | c863dd43c84579d853f3a2ae0ee6ead46c967703 /tools/io_uring/queue.c | |
| parent | 66f61c92889ff3ca365161fb29dd36d6354682ba | |
| parent | d724c9e54939a597592de3659541da11fc7aa112 | |
Merge tag 'kvm-ppc-fixes-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into kvm-master
PPC KVM fixes for 5.2
- Several bug fixes for the new XIVE-native code.
- Replace kvm->lock by other mutexes in several places where we hold a
vcpu mutex, to avoid lock order inversions.
- Fix a lockdep warning on guest entry for radix-mode guests.
- Fix a bug causing user-visible corruption of SPRG3 on the host.
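The second item in the list above is about lock ordering: if one code path takes a per-vcpu mutex and then kvm->lock while another path takes kvm->lock and then the vcpu mutex, the two orders can deadlock (and lockdep will warn). The fix the bullet describes is to give the data touched while a vcpu mutex is held its own dedicated mutex, so kvm->lock never has to be acquired in that path. The sketch below is a generic pthread illustration of that pattern, not the actual KVM PPC change; every name in it (vm, vcpu, big_lock, table_lock, both functions) is made up for illustration.

```c
#include <pthread.h>

struct vm {
	pthread_mutex_t big_lock;   /* coarse per-VM lock (stand-in for kvm->lock) */
	pthread_mutex_t table_lock; /* dedicated lock for one shared table */
	int table[64];
};

struct vcpu {
	pthread_mutex_t mutex;      /* per-vcpu mutex */
	struct vm *vm;
	int id;
};

/*
 * Path 1: runs with vcpu->mutex held and needs the table.  Taking
 * vm->big_lock here, while Path 2 takes big_lock before vcpu->mutex,
 * would invert the lock order; the dedicated table_lock avoids that.
 */
static void vcpu_update_table(struct vcpu *vcpu, int val)
{
	pthread_mutex_lock(&vcpu->mutex);
	pthread_mutex_lock(&vcpu->vm->table_lock);  /* was: big_lock */
	vcpu->vm->table[vcpu->id] = val;
	pthread_mutex_unlock(&vcpu->vm->table_lock);
	pthread_mutex_unlock(&vcpu->mutex);
}

/*
 * Path 2: VM-wide operation that legitimately takes big_lock first and
 * then each vcpu->mutex; it never needs table_lock under big_lock, so
 * the only ordering is big_lock -> vcpu->mutex and there is no cycle.
 */
static void vm_pause_vcpu(struct vm *vm, struct vcpu *vcpu)
{
	pthread_mutex_lock(&vm->big_lock);
	pthread_mutex_lock(&vcpu->mutex);
	/* ... stop the vcpu ... */
	pthread_mutex_unlock(&vcpu->mutex);
	pthread_mutex_unlock(&vm->big_lock);
}
```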
Diffstat (limited to 'tools/io_uring/queue.c')
| -rw-r--r-- | tools/io_uring/queue.c | 36 |
1 file changed, 14 insertions(+), 22 deletions(-)
```diff
diff --git a/tools/io_uring/queue.c b/tools/io_uring/queue.c
index 88505e873ad9..321819c132c7 100644
--- a/tools/io_uring/queue.c
+++ b/tools/io_uring/queue.c
@@ -8,8 +8,8 @@
 #include "liburing.h"
 #include "barrier.h"
 
-static int __io_uring_get_completion(struct io_uring *ring,
-				     struct io_uring_cqe **cqe_ptr, int wait)
+static int __io_uring_get_cqe(struct io_uring *ring,
+			      struct io_uring_cqe **cqe_ptr, int wait)
 {
 	struct io_uring_cq *cq = &ring->cq;
 	const unsigned mask = *cq->kring_mask;
@@ -39,34 +39,25 @@ static int __io_uring_get_completion(struct io_uring *ring,
 		return -errno;
 	} while (1);
 
-	if (*cqe_ptr) {
-		*cq->khead = head + 1;
-		/*
-		 * Ensure that the kernel sees our new head, the kernel has
-		 * the matching read barrier.
-		 */
-		write_barrier();
-	}
-
 	return 0;
 }
 
 /*
- * Return an IO completion, if one is readily available
+ * Return an IO completion, if one is readily available. Returns 0 with
+ * cqe_ptr filled in on success, -errno on failure.
  */
-int io_uring_get_completion(struct io_uring *ring,
-			    struct io_uring_cqe **cqe_ptr)
+int io_uring_peek_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
 {
-	return __io_uring_get_completion(ring, cqe_ptr, 0);
+	return __io_uring_get_cqe(ring, cqe_ptr, 0);
 }
 
 /*
- * Return an IO completion, waiting for it if necessary
+ * Return an IO completion, waiting for it if necessary. Returns 0 with
+ * cqe_ptr filled in on success, -errno on failure.
  */
-int io_uring_wait_completion(struct io_uring *ring,
-			     struct io_uring_cqe **cqe_ptr)
+int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
 {
-	return __io_uring_get_completion(ring, cqe_ptr, 1);
+	return __io_uring_get_cqe(ring, cqe_ptr, 1);
 }
 
 /*
@@ -78,7 +69,7 @@ int io_uring_submit(struct io_uring *ring)
 {
 	struct io_uring_sq *sq = &ring->sq;
 	const unsigned mask = *sq->kring_mask;
-	unsigned ktail, ktail_next, submitted;
+	unsigned ktail, ktail_next, submitted, to_submit;
 	int ret;
 
 	/*
@@ -100,7 +91,8 @@ int io_uring_submit(struct io_uring *ring)
 	 */
 	submitted = 0;
 	ktail = ktail_next = *sq->ktail;
-	while (sq->sqe_head < sq->sqe_tail) {
+	to_submit = sq->sqe_tail - sq->sqe_head;
+	while (to_submit--) {
 		ktail_next++;
 		read_barrier();
 
@@ -136,7 +128,7 @@ submit:
 	if (ret < 0)
 		return -errno;
 
-	return 0;
+	return ret;
 }
 
 /*
```
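Read as a whole, the hunks track the liburing rename of the completion helpers: the internal worker becomes __io_uring_get_cqe(), and the public entry points become io_uring_peek_cqe() (non-blocking) and io_uring_wait_cqe() (blocking). The helper no longer advances the CQ head itself (the removed *cq->khead = head + 1 block), the submit loop counts the pending entries into to_submit once instead of re-comparing sqe_head and sqe_tail on every pass, and io_uring_submit() now returns the result of the io_uring_enter() call, i.e. how many SQEs the kernel consumed, rather than a bare 0. The fragment below is a hypothetical caller sketch, not part of this commit: it assumes the companion tools/io_uring files provide io_uring_queue_init(), io_uring_queue_exit(), io_uring_get_sqe(), io_uring_prep_readv() and an io_uring_cqe_seen() helper that consumes a CQE now that the functions above no longer do; read_one_block() and its file/buffer setup are made up for illustration.

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>
#include "liburing.h"

int read_one_block(const char *path)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd, ret;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	if (io_uring_queue_init(4, &ring, 0) < 0)
		return -1;

	/* Queue one read of the first 4KiB of the file. */
	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return -1;
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);

	/* io_uring_submit() now reports how many SQEs were submitted. */
	ret = io_uring_submit(&ring);
	if (ret < 0)
		return ret;

	/* Try the non-blocking peek first, then fall back to waiting. */
	ret = io_uring_peek_cqe(&ring, &cqe);
	if (!ret && !cqe)
		ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret < 0)
		return ret;

	printf("read returned %d\n", cqe->res);
	/* The helpers no longer advance the CQ head; consume it explicitly. */
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}
```

The peek/wait split plus an explicit "seen" step matches how current liburing structures its completion API, which is presumably what this sync of the tools copy is tracking.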
