aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/virtio
diff options
context:
space:
mode:
authorTiwei Bie <tiwei.bie@intel.com>2018-11-21 05:03:25 -0500
committerDavid S. Miller <davem@davemloft.net>2018-11-27 01:17:39 -0500
commitd79dca75c79680f52a27a7ee1b6ae75066f36b3e (patch)
treee497ab48d572448fb62802ab5da602b1906f57eb /drivers/virtio
parentcbeedb72b97ad826e31e68e0717b763e2db0806d (diff)
virtio_ring: extract split ring handling from ring creation
Introduce a specific function to create the split ring. And also move the DMA allocation and size information to the .split sub-structure. Signed-off-by: Tiwei Bie <tiwei.bie@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/virtio')
-rw-r--r--drivers/virtio/virtio_ring.c220
1 file changed, 121 insertions, 99 deletions
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index acd851f3105c..d00a87909a7e 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -118,6 +118,10 @@ struct vring_virtqueue {
118 118
119 /* Per-descriptor state. */ 119 /* Per-descriptor state. */
120 struct vring_desc_state_split *desc_state; 120 struct vring_desc_state_split *desc_state;
121
122 /* DMA, allocation, and size information */
123 size_t queue_size_in_bytes;
124 dma_addr_t queue_dma_addr;
121 } split; 125 } split;
122 126
123 /* How to notify other side. FIXME: commonalize hcalls! */ 127 /* How to notify other side. FIXME: commonalize hcalls! */
@@ -125,8 +129,6 @@ struct vring_virtqueue {
125 129
126 /* DMA, allocation, and size information */ 130 /* DMA, allocation, and size information */
127 bool we_own_ring; 131 bool we_own_ring;
128 size_t queue_size_in_bytes;
129 dma_addr_t queue_dma_addr;
130 132
131#ifdef DEBUG 133#ifdef DEBUG
132 /* They're supposed to lock for us. */ 134 /* They're supposed to lock for us. */
@@ -203,6 +205,48 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
203 return false; 205 return false;
204} 206}
205 207
208static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
209 dma_addr_t *dma_handle, gfp_t flag)
210{
211 if (vring_use_dma_api(vdev)) {
212 return dma_alloc_coherent(vdev->dev.parent, size,
213 dma_handle, flag);
214 } else {
215 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
216
217 if (queue) {
218 phys_addr_t phys_addr = virt_to_phys(queue);
219 *dma_handle = (dma_addr_t)phys_addr;
220
221 /*
222 * Sanity check: make sure we didn't truncate
223 * the address. The only arches I can find that
224 * have 64-bit phys_addr_t but 32-bit dma_addr_t
225 * are certain non-highmem MIPS and x86
226 * configurations, but these configurations
227 * should never allocate physical pages above 32
228 * bits, so this is fine. Just in case, throw a
229 * warning and abort if we end up with an
230 * unrepresentable address.
231 */
232 if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
233 free_pages_exact(queue, PAGE_ALIGN(size));
234 return NULL;
235 }
236 }
237 return queue;
238 }
239}
240
241static void vring_free_queue(struct virtio_device *vdev, size_t size,
242 void *queue, dma_addr_t dma_handle)
243{
244 if (vring_use_dma_api(vdev))
245 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
246 else
247 free_pages_exact(queue, PAGE_ALIGN(size));
248}
249
206/* 250/*
207 * The DMA ops on various arches are rather gnarly right now, and 251 * The DMA ops on various arches are rather gnarly right now, and
208 * making all of the arch DMA ops work on the vring device itself 252 * making all of the arch DMA ops work on the vring device itself
@@ -730,6 +774,68 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
730 return NULL; 774 return NULL;
731} 775}
732 776
777static struct virtqueue *vring_create_virtqueue_split(
778 unsigned int index,
779 unsigned int num,
780 unsigned int vring_align,
781 struct virtio_device *vdev,
782 bool weak_barriers,
783 bool may_reduce_num,
784 bool context,
785 bool (*notify)(struct virtqueue *),
786 void (*callback)(struct virtqueue *),
787 const char *name)
788{
789 struct virtqueue *vq;
790 void *queue = NULL;
791 dma_addr_t dma_addr;
792 size_t queue_size_in_bytes;
793 struct vring vring;
794
795 /* We assume num is a power of 2. */
796 if (num & (num - 1)) {
797 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
798 return NULL;
799 }
800
801 /* TODO: allocate each queue chunk individually */
802 for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
803 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
804 &dma_addr,
805 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
806 if (queue)
807 break;
808 }
809
810 if (!num)
811 return NULL;
812
813 if (!queue) {
814 /* Try to get a single page. You are my only hope! */
815 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
816 &dma_addr, GFP_KERNEL|__GFP_ZERO);
817 }
818 if (!queue)
819 return NULL;
820
821 queue_size_in_bytes = vring_size(num, vring_align);
822 vring_init(&vring, num, queue, vring_align);
823
824 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
825 notify, callback, name);
826 if (!vq) {
827 vring_free_queue(vdev, queue_size_in_bytes, queue,
828 dma_addr);
829 return NULL;
830 }
831
832 to_vvq(vq)->split.queue_dma_addr = dma_addr;
833 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
834 to_vvq(vq)->we_own_ring = true;
835
836 return vq;
837}
838
733 839
734/* 840/*
735 * Generic functions and exported symbols. 841 * Generic functions and exported symbols.
@@ -1091,8 +1197,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
1091 vq->vq.num_free = vring.num; 1197 vq->vq.num_free = vring.num;
1092 vq->vq.index = index; 1198 vq->vq.index = index;
1093 vq->we_own_ring = false; 1199 vq->we_own_ring = false;
1094 vq->queue_dma_addr = 0;
1095 vq->queue_size_in_bytes = 0;
1096 vq->notify = notify; 1200 vq->notify = notify;
1097 vq->weak_barriers = weak_barriers; 1201 vq->weak_barriers = weak_barriers;
1098 vq->broken = false; 1202 vq->broken = false;
@@ -1108,6 +1212,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
1108 !context; 1212 !context;
1109 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 1213 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1110 1214
1215 vq->split.queue_dma_addr = 0;
1216 vq->split.queue_size_in_bytes = 0;
1217
1111 vq->split.vring = vring; 1218 vq->split.vring = vring;
1112 vq->split.avail_flags_shadow = 0; 1219 vq->split.avail_flags_shadow = 0;
1113 vq->split.avail_idx_shadow = 0; 1220 vq->split.avail_idx_shadow = 0;
@@ -1138,48 +1245,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
1138} 1245}
1139EXPORT_SYMBOL_GPL(__vring_new_virtqueue); 1246EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
1140 1247
1141static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
1142 dma_addr_t *dma_handle, gfp_t flag)
1143{
1144 if (vring_use_dma_api(vdev)) {
1145 return dma_alloc_coherent(vdev->dev.parent, size,
1146 dma_handle, flag);
1147 } else {
1148 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
1149 if (queue) {
1150 phys_addr_t phys_addr = virt_to_phys(queue);
1151 *dma_handle = (dma_addr_t)phys_addr;
1152
1153 /*
1154 * Sanity check: make sure we didn't truncate
1155 * the address. The only arches I can find that
1156 * have 64-bit phys_addr_t but 32-bit dma_addr_t
1157 * are certain non-highmem MIPS and x86
1158 * configurations, but these configurations
1159 * should never allocate physical pages above 32
1160 * bits, so this is fine. Just in case, throw a
1161 * warning and abort if we end up with an
1162 * unrepresentable address.
1163 */
1164 if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
1165 free_pages_exact(queue, PAGE_ALIGN(size));
1166 return NULL;
1167 }
1168 }
1169 return queue;
1170 }
1171}
1172
1173static void vring_free_queue(struct virtio_device *vdev, size_t size,
1174 void *queue, dma_addr_t dma_handle)
1175{
1176 if (vring_use_dma_api(vdev)) {
1177 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
1178 } else {
1179 free_pages_exact(queue, PAGE_ALIGN(size));
1180 }
1181}
1182
1183struct virtqueue *vring_create_virtqueue( 1248struct virtqueue *vring_create_virtqueue(
1184 unsigned int index, 1249 unsigned int index,
1185 unsigned int num, 1250 unsigned int num,
@@ -1192,54 +1257,9 @@ struct virtqueue *vring_create_virtqueue(
1192 void (*callback)(struct virtqueue *), 1257 void (*callback)(struct virtqueue *),
1193 const char *name) 1258 const char *name)
1194{ 1259{
1195 struct virtqueue *vq; 1260 return vring_create_virtqueue_split(index, num, vring_align,
1196 void *queue = NULL; 1261 vdev, weak_barriers, may_reduce_num,
1197 dma_addr_t dma_addr; 1262 context, notify, callback, name);
1198 size_t queue_size_in_bytes;
1199 struct vring vring;
1200
1201 /* We assume num is a power of 2. */
1202 if (num & (num - 1)) {
1203 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
1204 return NULL;
1205 }
1206
1207 /* TODO: allocate each queue chunk individually */
1208 for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
1209 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1210 &dma_addr,
1211 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1212 if (queue)
1213 break;
1214 }
1215
1216 if (!num)
1217 return NULL;
1218
1219 if (!queue) {
1220 /* Try to get a single page. You are my only hope! */
1221 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1222 &dma_addr, GFP_KERNEL|__GFP_ZERO);
1223 }
1224 if (!queue)
1225 return NULL;
1226
1227 queue_size_in_bytes = vring_size(num, vring_align);
1228 vring_init(&vring, num, queue, vring_align);
1229
1230 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
1231 notify, callback, name);
1232 if (!vq) {
1233 vring_free_queue(vdev, queue_size_in_bytes, queue,
1234 dma_addr);
1235 return NULL;
1236 }
1237
1238 to_vvq(vq)->queue_dma_addr = dma_addr;
1239 to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
1240 to_vvq(vq)->we_own_ring = true;
1241
1242 return vq;
1243} 1263}
1244EXPORT_SYMBOL_GPL(vring_create_virtqueue); 1264EXPORT_SYMBOL_GPL(vring_create_virtqueue);
1245 1265
@@ -1266,8 +1286,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
1266 struct vring_virtqueue *vq = to_vvq(_vq); 1286 struct vring_virtqueue *vq = to_vvq(_vq);
1267 1287
1268 if (vq->we_own_ring) { 1288 if (vq->we_own_ring) {
1269 vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes, 1289 vring_free_queue(vq->vq.vdev,
1270 vq->split.vring.desc, vq->queue_dma_addr); 1290 vq->split.queue_size_in_bytes,
1291 vq->split.vring.desc,
1292 vq->split.queue_dma_addr);
1271 kfree(vq->split.desc_state); 1293 kfree(vq->split.desc_state);
1272 } 1294 }
1273 list_del(&_vq->list); 1295 list_del(&_vq->list);
@@ -1343,7 +1365,7 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
1343 1365
1344 BUG_ON(!vq->we_own_ring); 1366 BUG_ON(!vq->we_own_ring);
1345 1367
1346 return vq->queue_dma_addr; 1368 return vq->split.queue_dma_addr;
1347} 1369}
1348EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr); 1370EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
1349 1371
@@ -1353,7 +1375,7 @@ dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
1353 1375
1354 BUG_ON(!vq->we_own_ring); 1376 BUG_ON(!vq->we_own_ring);
1355 1377
1356 return vq->queue_dma_addr + 1378 return vq->split.queue_dma_addr +
1357 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); 1379 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
1358} 1380}
1359EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr); 1381EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
@@ -1364,7 +1386,7 @@ dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
1364 1386
1365 BUG_ON(!vq->we_own_ring); 1387 BUG_ON(!vq->we_own_ring);
1366 1388
1367 return vq->queue_dma_addr + 1389 return vq->split.queue_dma_addr +
1368 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); 1390 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
1369} 1391}
1370EXPORT_SYMBOL_GPL(virtqueue_get_used_addr); 1392EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);