aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael S. Tsirkin <mst@redhat.com>2017-04-04 14:15:41 -0400
committerMichael S. Tsirkin <mst@redhat.com>2017-04-10 17:28:57 -0400
commit0b0f9dc52ed0333fa52a9314b53d0b2b248b821d (patch)
tree19dd421911f6c608ec0ffcacc9dd042a011155a1
parent2008c1544c73d5190f81ef1790fa5bd2fade5bd0 (diff)
Revert "virtio_pci: use shared interrupts for virtqueues"
This reverts commit 07ec51480b5eb1233f8c1b0f5d7a7c8d1247c507. Conflicts: drivers/virtio/virtio_pci_common.c Unfortunately the idea does not work with threadirqs, as more than 32 queues can then map to a single interrupt. Further, the cleanup seems to be one of the changes that broke hibernation for some users. We are still not sure why, but the revert helps. This reverts the cleanup changes but keeps the affinity support. Tested-by: Mike Galbraith <efault@gmx.de> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-rw-r--r--drivers/virtio/virtio_pci_common.c244
-rw-r--r--drivers/virtio/virtio_pci_common.h16
2 files changed, 148 insertions, 112 deletions
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 3921b0a2439e..d99029d3892e 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
33 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 33 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
34 int i; 34 int i;
35 35
36 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0)); 36 if (vp_dev->intx_enabled)
37 for (i = 1; i < vp_dev->msix_vectors; i++) 37 synchronize_irq(vp_dev->pci_dev->irq);
38
39 for (i = 0; i < vp_dev->msix_vectors; ++i)
38 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); 40 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
39} 41}
40 42
@@ -97,10 +99,79 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
97 return vp_vring_interrupt(irq, opaque); 99 return vp_vring_interrupt(irq, opaque);
98} 100}
99 101
100static void vp_remove_vqs(struct virtio_device *vdev) 102static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
103 bool per_vq_vectors, struct irq_affinity *desc)
104{
105 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
106 const char *name = dev_name(&vp_dev->vdev.dev);
107 unsigned i, v;
108 int err = -ENOMEM;
109
110 vp_dev->msix_vectors = nvectors;
111
112 vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
113 GFP_KERNEL);
114 if (!vp_dev->msix_names)
115 goto error;
116 vp_dev->msix_affinity_masks
117 = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
118 GFP_KERNEL);
119 if (!vp_dev->msix_affinity_masks)
120 goto error;
121 for (i = 0; i < nvectors; ++i)
122 if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
123 GFP_KERNEL))
124 goto error;
125
126 err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
127 nvectors, PCI_IRQ_MSIX |
128 (desc ? PCI_IRQ_AFFINITY : 0),
129 desc);
130 if (err < 0)
131 goto error;
132 vp_dev->msix_enabled = 1;
133
134 /* Set the vector used for configuration */
135 v = vp_dev->msix_used_vectors;
136 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
137 "%s-config", name);
138 err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
139 vp_config_changed, 0, vp_dev->msix_names[v],
140 vp_dev);
141 if (err)
142 goto error;
143 ++vp_dev->msix_used_vectors;
144
145 v = vp_dev->config_vector(vp_dev, v);
146 /* Verify we had enough resources to assign the vector */
147 if (v == VIRTIO_MSI_NO_VECTOR) {
148 err = -EBUSY;
149 goto error;
150 }
151
152 if (!per_vq_vectors) {
153 /* Shared vector for all VQs */
154 v = vp_dev->msix_used_vectors;
155 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
156 "%s-virtqueues", name);
157 err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
158 vp_vring_interrupt, 0, vp_dev->msix_names[v],
159 vp_dev);
160 if (err)
161 goto error;
162 ++vp_dev->msix_used_vectors;
163 }
164 return 0;
165error:
166 return err;
167}
168
169/* the config->del_vqs() implementation */
170void vp_del_vqs(struct virtio_device *vdev)
101{ 171{
102 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 172 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
103 struct virtqueue *vq, *n; 173 struct virtqueue *vq, *n;
174 int i;
104 175
105 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { 176 list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
106 if (vp_dev->msix_vector_map) { 177 if (vp_dev->msix_vector_map) {
@@ -112,33 +183,35 @@ static void vp_remove_vqs(struct virtio_device *vdev)
112 } 183 }
113 vp_dev->del_vq(vq); 184 vp_dev->del_vq(vq);
114 } 185 }
115}
116 186
117/* the config->del_vqs() implementation */ 187 if (vp_dev->intx_enabled) {
118void vp_del_vqs(struct virtio_device *vdev) 188 free_irq(vp_dev->pci_dev->irq, vp_dev);
119{ 189 vp_dev->intx_enabled = 0;
120 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 190 }
121 int i;
122
123 if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
124 return;
125 191
126 vp_remove_vqs(vdev); 192 for (i = 0; i < vp_dev->msix_used_vectors; ++i)
193 free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
127 194
128 if (vp_dev->msix_enabled) { 195 for (i = 0; i < vp_dev->msix_vectors; i++)
129 for (i = 0; i < vp_dev->msix_vectors; i++) 196 if (vp_dev->msix_affinity_masks[i])
130 free_cpumask_var(vp_dev->msix_affinity_masks[i]); 197 free_cpumask_var(vp_dev->msix_affinity_masks[i]);
131 198
199 if (vp_dev->msix_enabled) {
132 /* Disable the vector used for configuration */ 200 /* Disable the vector used for configuration */
133 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); 201 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
134 202
135 kfree(vp_dev->msix_affinity_masks); 203 pci_free_irq_vectors(vp_dev->pci_dev);
136 kfree(vp_dev->msix_names); 204 vp_dev->msix_enabled = 0;
137 kfree(vp_dev->msix_vector_map);
138 } 205 }
139 206
140 free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); 207 vp_dev->msix_vectors = 0;
141 pci_free_irq_vectors(vp_dev->pci_dev); 208 vp_dev->msix_used_vectors = 0;
209 kfree(vp_dev->msix_names);
210 vp_dev->msix_names = NULL;
211 kfree(vp_dev->msix_affinity_masks);
212 vp_dev->msix_affinity_masks = NULL;
213 kfree(vp_dev->msix_vector_map);
214 vp_dev->msix_vector_map = NULL;
142} 215}
143 216
144static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, 217static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
@@ -147,128 +220,80 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
147 struct irq_affinity *desc) 220 struct irq_affinity *desc)
148{ 221{
149 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 222 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
150 const char *name = dev_name(&vp_dev->vdev.dev);
151 int i, err = -ENOMEM, allocated_vectors, nvectors;
152 unsigned flags = PCI_IRQ_MSIX;
153 u16 msix_vec; 223 u16 msix_vec;
154 224 int i, err, nvectors, allocated_vectors;
155 if (desc) {
156 flags |= PCI_IRQ_AFFINITY;
157 desc->pre_vectors++; /* virtio config vector */
158 }
159
160 nvectors = 1;
161 for (i = 0; i < nvqs; i++)
162 if (callbacks[i])
163 nvectors++;
164 225
165 if (per_vq_vectors) { 226 if (per_vq_vectors) {
166 err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, 227 /* Best option: one for change interrupt, one per vq. */
167 nvectors, flags, desc); 228 nvectors = 1;
229 for (i = 0; i < nvqs; ++i)
230 if (callbacks[i])
231 ++nvectors;
168 } else { 232 } else {
169 err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2, 233 /* Second best: one for change, shared for all vqs. */
170 PCI_IRQ_MSIX); 234 nvectors = 2;
171 }
172 if (err < 0)
173 return err;
174
175 vp_dev->msix_vectors = nvectors;
176 vp_dev->msix_names = kmalloc_array(nvectors,
177 sizeof(*vp_dev->msix_names), GFP_KERNEL);
178 if (!vp_dev->msix_names)
179 goto out_free_irq_vectors;
180
181 vp_dev->msix_affinity_masks = kcalloc(nvectors,
182 sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
183 if (!vp_dev->msix_affinity_masks)
184 goto out_free_msix_names;
185
186 for (i = 0; i < nvectors; ++i) {
187 if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
188 GFP_KERNEL))
189 goto out_free_msix_affinity_masks;
190 } 235 }
191 236
192 /* Set the vector used for configuration */ 237 err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
193 snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names), 238 per_vq_vectors ? desc : NULL);
194 "%s-config", name);
195 err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
196 0, vp_dev->msix_names[0], vp_dev);
197 if (err) 239 if (err)
198 goto out_free_irq_vectors; 240 goto error_find;
199 241
200 /* Verify we had enough resources to assign the vector */ 242 if (per_vq_vectors) {
201 if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) { 243 vp_dev->msix_vector_map = kmalloc_array(nvqs,
202 err = -EBUSY; 244 sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
203 goto out_free_config_irq; 245 if (!vp_dev->msix_vector_map)
246 goto error_find;
204 } 247 }
205 248
206 vp_dev->msix_vector_map = kmalloc_array(nvqs, 249 allocated_vectors = vp_dev->msix_used_vectors;
207 sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
208 if (!vp_dev->msix_vector_map)
209 goto out_disable_config_irq;
210
211 allocated_vectors = 1; /* vector 0 is the config interrupt */
212 for (i = 0; i < nvqs; ++i) { 250 for (i = 0; i < nvqs; ++i) {
213 if (!names[i]) { 251 if (!names[i]) {
214 vqs[i] = NULL; 252 vqs[i] = NULL;
215 continue; 253 continue;
216 } 254 }
217 255
218 if (callbacks[i]) 256 if (!callbacks[i])
219 msix_vec = allocated_vectors;
220 else
221 msix_vec = VIRTIO_MSI_NO_VECTOR; 257 msix_vec = VIRTIO_MSI_NO_VECTOR;
222 258 else if (per_vq_vectors)
259 msix_vec = allocated_vectors++;
260 else
261 msix_vec = VP_MSIX_VQ_VECTOR;
223 vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], 262 vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
224 msix_vec); 263 msix_vec);
225 if (IS_ERR(vqs[i])) { 264 if (IS_ERR(vqs[i])) {
226 err = PTR_ERR(vqs[i]); 265 err = PTR_ERR(vqs[i]);
227 goto out_remove_vqs; 266 goto error_find;
228 } 267 }
229 268
269 if (!per_vq_vectors)
270 continue;
271
230 if (msix_vec == VIRTIO_MSI_NO_VECTOR) { 272 if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
231 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; 273 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
232 continue; 274 continue;
233 } 275 }
234 276
235 snprintf(vp_dev->msix_names[i + 1], 277 /* allocate per-vq irq if available and necessary */
236 sizeof(*vp_dev->msix_names), "%s-%s", 278 snprintf(vp_dev->msix_names[msix_vec],
279 sizeof *vp_dev->msix_names,
280 "%s-%s",
237 dev_name(&vp_dev->vdev.dev), names[i]); 281 dev_name(&vp_dev->vdev.dev), names[i]);
238 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), 282 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
239 vring_interrupt, IRQF_SHARED, 283 vring_interrupt, 0,
240 vp_dev->msix_names[i + 1], vqs[i]); 284 vp_dev->msix_names[msix_vec],
285 vqs[i]);
241 if (err) { 286 if (err) {
242 /* don't free this irq on error */ 287 /* don't free this irq on error */
243 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; 288 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
244 goto out_remove_vqs; 289 goto error_find;
245 } 290 }
246 vp_dev->msix_vector_map[i] = msix_vec; 291 vp_dev->msix_vector_map[i] = msix_vec;
247
248 if (per_vq_vectors)
249 allocated_vectors++;
250 } 292 }
251
252 vp_dev->msix_enabled = 1;
253 return 0; 293 return 0;
254 294
255out_remove_vqs: 295error_find:
256 vp_remove_vqs(vdev); 296 vp_del_vqs(vdev);
257 kfree(vp_dev->msix_vector_map);
258out_disable_config_irq:
259 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
260out_free_config_irq:
261 free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
262out_free_msix_affinity_masks:
263 for (i = 0; i < nvectors; i++) {
264 if (vp_dev->msix_affinity_masks[i])
265 free_cpumask_var(vp_dev->msix_affinity_masks[i]);
266 }
267 kfree(vp_dev->msix_affinity_masks);
268out_free_msix_names:
269 kfree(vp_dev->msix_names);
270out_free_irq_vectors:
271 pci_free_irq_vectors(vp_dev->pci_dev);
272 return err; 297 return err;
273} 298}
274 299
@@ -282,8 +307,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
282 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, 307 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
283 dev_name(&vdev->dev), vp_dev); 308 dev_name(&vdev->dev), vp_dev);
284 if (err) 309 if (err)
285 return err; 310 goto out_del_vqs;
286 311
312 vp_dev->intx_enabled = 1;
287 for (i = 0; i < nvqs; ++i) { 313 for (i = 0; i < nvqs; ++i) {
288 if (!names[i]) { 314 if (!names[i]) {
289 vqs[i] = NULL; 315 vqs[i] = NULL;
@@ -293,15 +319,13 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
293 VIRTIO_MSI_NO_VECTOR); 319 VIRTIO_MSI_NO_VECTOR);
294 if (IS_ERR(vqs[i])) { 320 if (IS_ERR(vqs[i])) {
295 err = PTR_ERR(vqs[i]); 321 err = PTR_ERR(vqs[i]);
296 goto out_remove_vqs; 322 goto out_del_vqs;
297 } 323 }
298 } 324 }
299 325
300 return 0; 326 return 0;
301 327out_del_vqs:
302out_remove_vqs: 328 vp_del_vqs(vdev);
303 vp_remove_vqs(vdev);
304 free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
305 return err; 329 return err;
306} 330}
307 331
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index c8074997fd28..3cdabba8415e 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -66,12 +66,16 @@ struct virtio_pci_device {
66 66
67 /* MSI-X support */ 67 /* MSI-X support */
68 int msix_enabled; 68 int msix_enabled;
69 int intx_enabled;
69 cpumask_var_t *msix_affinity_masks; 70 cpumask_var_t *msix_affinity_masks;
70 /* Name strings for interrupts. This size should be enough, 71 /* Name strings for interrupts. This size should be enough,
71 * and I'm too lazy to allocate each name separately. */ 72 * and I'm too lazy to allocate each name separately. */
72 char (*msix_names)[256]; 73 char (*msix_names)[256];
73 /* Total Number of MSI-X vectors (including per-VQ ones). */ 74 /* Number of available vectors */
74 int msix_vectors; 75 unsigned msix_vectors;
76 /* Vectors allocated, excluding per-vq vectors if any */
77 unsigned msix_used_vectors;
78
75 /* Map of per-VQ MSI-X vectors, may be NULL */ 79 /* Map of per-VQ MSI-X vectors, may be NULL */
76 unsigned *msix_vector_map; 80 unsigned *msix_vector_map;
77 81
@@ -85,6 +89,14 @@ struct virtio_pci_device {
85 u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); 89 u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
86}; 90};
87 91
92/* Constants for MSI-X */
93/* Use first vector for configuration changes, second and the rest for
94 * virtqueues Thus, we need at least 2 vectors for MSI. */
95enum {
96 VP_MSIX_CONFIG_VECTOR = 0,
97 VP_MSIX_VQ_VECTOR = 1,
98};
99
88/* Convert a generic virtio device to our structure */ 100/* Convert a generic virtio device to our structure */
89static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) 101static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
90{ 102{