author	Linus Torvalds <torvalds@linux-foundation.org>	2015-12-18 15:24:52 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-12-18 15:24:52 -0500
commit	3273cba1956437820ae25d98e3ae57d1c094205c (patch)
tree	8f559fa07cd581c0bd35beda3c4fb98ae98941ce /drivers
parent	83ad283f6bdf024513a8c5dc7de514dbaafc6ff0 (diff)
parent	584a561a6fee0d258f9ca644f58b73d9a41b8a46 (diff)
Merge tag 'for-linus-4.4-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen bug fixes from David Vrabel:

 - XSA-155 security fixes to backend drivers.

 - XSA-157 security fixes to pciback.

* tag 'for-linus-4.4-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-pciback: fix up cleanup path when alloc fails
  xen/pciback: Don't allow MSI-X ops if PCI_COMMAND_MEMORY is not set.
  xen/pciback: For XEN_PCI_OP_disable_msi[|x] only disable if device has MSI(X) enabled.
  xen/pciback: Do not install an IRQ handler for MSI interrupts.
  xen/pciback: Return error on XEN_PCI_OP_enable_msix when device has MSI or MSI-X enabled
  xen/pciback: Return error on XEN_PCI_OP_enable_msi when device has MSI or MSI-X enabled
  xen/pciback: Save xen_pci_op commands before processing it
  xen-scsiback: safely copy requests
  xen-blkback: read from indirect descriptors only once
  xen-blkback: only read request operation from shared ring once
  xen-netback: use RING_COPY_REQUEST() throughout
  xen-netback: don't use last request to determine minimum Tx credit
  xen: Add RING_COPY_REQUEST()
  xen/x86/pvh: Use HVM's flush_tlb_others op
  xen: Resume PMU from non-atomic context
  xen/events/fifo: Consume unprocessed events when a CPU dies
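Nearly all of the XSA-155 fixes below close one class of bug: a backend read a guest-controlled field from a shared ring (or a granted page) more than once, so a malicious frontend could change the value between the backend's validation and its use. The central new helper is RING_COPY_REQUEST(), which snapshots an entire request into backend-private memory before anything looks at it. As a reference, the macro added by "xen: Add RING_COPY_REQUEST()" amounts to the following (paraphrased from include/xen/interface/io/ring.h; see that commit for the authoritative text):

    /* An ill-behaved frontend can keep rewriting a request while the
     * backend parses it, so take a private copy rather than holding a
     * pointer into the shared ring; the volatile cast forces the copy. */
    #define RING_COPY_REQUEST(_r, _idx, _req) do {                         \
            *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx);  \
    } while (0)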
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/xen-blkback/blkback.c	15
-rw-r--r--	drivers/block/xen-blkback/common.h	8
-rw-r--r--	drivers/net/xen-netback/netback.c	34
-rw-r--r--	drivers/xen/events/events_fifo.c	23
-rw-r--r--	drivers/xen/xen-pciback/pciback.h	1
-rw-r--r--	drivers/xen/xen-pciback/pciback_ops.c	75
-rw-r--r--	drivers/xen/xen-pciback/xenbus.c	4
-rw-r--r--	drivers/xen/xen-scsiback.c	2
8 files changed, 112 insertions, 50 deletions
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index f9099940c272..41fb1a917b17 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 		goto unmap;
 
 	for (n = 0, i = 0; n < nseg; n++) {
+		uint8_t first_sect, last_sect;
+
 		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
 			/* Map indirect segments */
 			if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
 		}
 		i = n % SEGS_PER_INDIRECT_FRAME;
+
 		pending_req->segments[n]->gref = segments[i].gref;
-		seg[n].nsec = segments[i].last_sect -
-			segments[i].first_sect + 1;
-		seg[n].offset = (segments[i].first_sect << 9);
-		if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
-		    (segments[i].last_sect < segments[i].first_sect)) {
+
+		first_sect = READ_ONCE(segments[i].first_sect);
+		last_sect = READ_ONCE(segments[i].last_sect);
+		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
 			rc = -EINVAL;
 			goto unmap;
 		}
+
+		seg[n].nsec = last_sect - first_sect + 1;
+		seg[n].offset = first_sect << 9;
 		preq->nr_sects += seg[n].nsec;
 	}
 
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 68e87a037b99..c929ae22764c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
 					struct blkif_x86_32_request *src)
 {
 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = src->operation;
-	switch (src->operation) {
+	dst->operation = READ_ONCE(src->operation);
+	switch (dst->operation) {
 	case BLKIF_OP_READ:
 	case BLKIF_OP_WRITE:
 	case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
 					struct blkif_x86_64_request *src)
 {
 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = src->operation;
-	switch (src->operation) {
+	dst->operation = READ_ONCE(src->operation);
+	switch (dst->operation) {
 	case BLKIF_OP_READ:
 	case BLKIF_OP_WRITE:
 	case BLKIF_OP_WRITE_BARRIER:
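The two xen-blkback diffs above apply the same rule at field granularity: segments[] and src point into granted pages, so every dereference is a fresh fetch of guest-writable memory. A minimal kernel-flavoured sketch of the bug shape (illustrative struct and function names, not the driver code):

    struct seg {
            uint8_t first_sect;
            uint8_t last_sect;
    };

    /* Racy: validates one fetch of last_sect, then computes with a
     * second fetch that the guest may have changed in the meantime. */
    static int nsec_racy(volatile struct seg *shared)
    {
            if (shared->last_sect < shared->first_sect)
                    return -EINVAL;
            return shared->last_sect - shared->first_sect + 1;
    }

    /* Fixed shape, as in the hunks above (the driver uses READ_ONCE()):
     * snapshot each field once, then check and compute on the copies. */
    static int nsec_once(volatile struct seg *shared)
    {
            uint8_t first = shared->first_sect;     /* single fetch */
            uint8_t last = shared->last_sect;       /* single fetch */

            if (last < first)
                    return -EINVAL;
            return last - first + 1;
    }

The common.h change is the dispatch variant of the same bug: the operation code is fetched once with READ_ONCE() and the switch runs on the private dst->operation, never on the shared src->operation.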
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e481f3710bd3..1049c34e7d43 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 						 struct netrx_pending_operations *npo)
 {
 	struct xenvif_rx_meta *meta;
-	struct xen_netif_rx_request *req;
+	struct xen_netif_rx_request req;
 
-	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 
 	meta = npo->meta + npo->meta_prod++;
 	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 	meta->gso_size = 0;
 	meta->size = 0;
-	meta->id = req->id;
+	meta->id = req.id;
 
 	npo->copy_off = 0;
-	npo->copy_gref = req->gref;
+	npo->copy_gref = req.gref;
 
 	return meta;
 }
@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	struct xenvif *vif = netdev_priv(skb->dev);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int i;
-	struct xen_netif_rx_request *req;
+	struct xen_netif_rx_request req;
 	struct xenvif_rx_meta *meta;
 	unsigned char *data;
 	int head = 1;
@@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 
 	/* Set up a GSO prefix descriptor, if necessary */
 	if ((1 << gso_type) & vif->gso_prefix_mask) {
-		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+		RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 		meta = npo->meta + npo->meta_prod++;
 		meta->gso_type = gso_type;
 		meta->gso_size = skb_shinfo(skb)->gso_size;
 		meta->size = 0;
-		meta->id = req->id;
+		meta->id = req.id;
 	}
 
-	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 	meta = npo->meta + npo->meta_prod++;
 
 	if ((1 << gso_type) & vif->gso_mask) {
@@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	}
 
 	meta->size = 0;
-	meta->id = req->id;
+	meta->id = req.id;
 	npo->copy_off = 0;
-	npo->copy_gref = req->gref;
+	npo->copy_gref = req.gref;
 
 	data = skb->data;
 	while (data < skb_tail_pointer(skb)) {
@@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
 	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
 	 * Otherwise the interface can seize up due to insufficient credit.
 	 */
-	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
-	max_burst = min(max_burst, 131072UL);
-	max_burst = max(max_burst, queue->credit_bytes);
+	max_burst = max(131072UL, queue->credit_bytes);
 
 	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
 	max_credit = queue->remaining_credit + queue->credit_bytes;
@@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
 		spin_unlock_irqrestore(&queue->response_lock, flags);
 		if (cons == end)
 			break;
-		txp = RING_GET_REQUEST(&queue->tx, cons++);
+		RING_COPY_REQUEST(&queue->tx, cons++, txp);
 	} while (1);
 	queue->tx.req_cons = cons;
 }
@@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 		if (drop_err)
 			txp = &dropped_tx;
 
-		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
-		       sizeof(*txp));
+		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
 
 		/* If the guest submitted a frame >= 64 KiB then
 		 * first->size overflowed and following slots will
@@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
 			return -EBADR;
 		}
 
-		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
-		       sizeof(extra));
+		RING_COPY_REQUEST(&queue->tx, cons, &extra);
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			queue->tx.req_cons = ++cons;
@@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		idx = queue->tx.req_cons;
 		rmb(); /* Ensure that we see the request before we copy it. */
-		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
+		RING_COPY_REQUEST(&queue->tx, idx, &txreq);
 
 		/* Credit-based scheduling. */
 		if (txreq.size > queue->remaining_credit &&
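Every netback consumer of the shared Tx/Rx rings now follows the same shape; abridged from the xenvif_tx_build_gops() hunk above:

    struct xen_netif_tx_request txreq;      /* backend-private snapshot */

    idx = queue->tx.req_cons;
    rmb();  /* see the producer's writes before copying the request */
    RING_COPY_REQUEST(&queue->tx, idx, &txreq);
    /* From here on only txreq is consulted; later frontend writes to
     * slot idx can no longer affect validation or use. */

The tx_add_credit() hunk removes the last remaining guest influence on the burst size: the allowance is now the fixed max(131072UL, queue->credit_bytes) rather than being seeded from a request size read out of the ring.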
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index e3e9e3d46d1b..96a1b8da5371 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
 			      struct evtchn_fifo_control_block *control_block,
-			      unsigned priority, unsigned long *ready)
+			      unsigned priority, unsigned long *ready,
+			      bool drop)
 {
 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
 	uint32_t head;
@@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu,
 	if (head == 0)
 		clear_bit(priority, ready);
 
-	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
-		handle_irq_for_port(port);
+	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
+		if (unlikely(drop))
+			pr_warn("Dropping pending event for port %u\n", port);
+		else
+			handle_irq_for_port(port);
+	}
 
 	q->head[priority] = head;
 }
 
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
 {
 	struct evtchn_fifo_control_block *control_block;
 	unsigned long ready;
@@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
 
 	while (ready) {
 		q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
-		consume_one_event(cpu, control_block, q, &ready);
+		consume_one_event(cpu, control_block, q, &ready, drop);
 		ready |= xchg(&control_block->ready, 0);
 	}
 }
 
+static void evtchn_fifo_handle_events(unsigned cpu)
+{
+	__evtchn_fifo_handle_events(cpu, false);
+}
+
 static void evtchn_fifo_resume(void)
 {
 	unsigned cpu;
@@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
 		if (!per_cpu(cpu_control_block, cpu))
 			ret = evtchn_fifo_alloc_control_block(cpu);
 		break;
+	case CPU_DEAD:
+		__evtchn_fifo_handle_events(cpu, true);
+		break;
 	default:
 		break;
 	}
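For reference, the event-channel notifier ends up looking like this (abridged from the hunks above; the return-value mapping is assumed from the usual notifier convention and is unchanged by this patch):

    static int evtchn_fifo_cpu_notification(struct notifier_block *self,
                                            unsigned long action, void *hcpu)
    {
            int cpu = (long)hcpu;
            int ret = 0;

            switch (action) {
            case CPU_UP_PREPARE:
                    if (!per_cpu(cpu_control_block, cpu))
                            ret = evtchn_fifo_alloc_control_block(cpu);
                    break;
            case CPU_DEAD:
                    /* Drain the dead CPU's queues: events still linked
                     * there would otherwise never be consumed, and their
                     * ports could never be raised again. drop=true logs
                     * and discards them instead of handling them. */
                    __evtchn_fifo_handle_events(cpu, true);
                    break;
            default:
                    break;
            }
            return ret < 0 ? NOTIFY_BAD : NOTIFY_OK;
    }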
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 58e38d586f52..4d529f3e40df 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -37,6 +37,7 @@ struct xen_pcibk_device {
 	struct xen_pci_sharedinfo *sh_info;
 	unsigned long flags;
 	struct work_struct op_work;
+	struct xen_pci_op op;
 };
 
 struct xen_pcibk_dev_data {
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index c4a0666de6f5..73dafdc494aa 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -70,6 +70,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
 				enable ? "enable" : "disable");
 
 	if (enable) {
+		/*
+		 * The MSI or MSI-X should not have an IRQ handler. Otherwise
+		 * if the guest terminates we BUG_ON in free_msi_irqs.
+		 */
+		if (dev->msi_enabled || dev->msix_enabled)
+			goto out;
+
 		rc = request_irq(dev_data->irq,
 				 xen_pcibk_guest_interrupt, IRQF_SHARED,
 				 dev_data->irq_name, dev);
@@ -144,7 +151,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
 	if (unlikely(verbose_request))
 		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
 
-	status = pci_enable_msi(dev);
+	if (dev->msi_enabled)
+		status = -EALREADY;
+	else if (dev->msix_enabled)
+		status = -ENXIO;
+	else
+		status = pci_enable_msi(dev);
 
 	if (status) {
 		pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
@@ -173,20 +185,23 @@ static
 int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
 			  struct pci_dev *dev, struct xen_pci_op *op)
 {
-	struct xen_pcibk_dev_data *dev_data;
-
 	if (unlikely(verbose_request))
 		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
 		       pci_name(dev));
-	pci_disable_msi(dev);
 
+	if (dev->msi_enabled) {
+		struct xen_pcibk_dev_data *dev_data;
+
+		pci_disable_msi(dev);
+
+		dev_data = pci_get_drvdata(dev);
+		if (dev_data)
+			dev_data->ack_intr = 1;
+	}
 	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
 	if (unlikely(verbose_request))
 		printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
 		       op->value);
-	dev_data = pci_get_drvdata(dev);
-	if (dev_data)
-		dev_data->ack_intr = 1;
 	return 0;
 }
 
@@ -197,13 +212,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
 	struct xen_pcibk_dev_data *dev_data;
 	int i, result;
 	struct msix_entry *entries;
+	u16 cmd;
 
 	if (unlikely(verbose_request))
 		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
 		       pci_name(dev));
+
 	if (op->value > SH_INFO_MAX_VEC)
 		return -EINVAL;
 
+	if (dev->msix_enabled)
+		return -EALREADY;
+
+	/*
+	 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
+	 * to access the BARs where the MSI-X entries reside.
+	 */
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
+		return -ENXIO;
+
 	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
 	if (entries == NULL)
 		return -ENOMEM;
@@ -245,23 +273,27 @@ static
 int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
 			   struct pci_dev *dev, struct xen_pci_op *op)
 {
-	struct xen_pcibk_dev_data *dev_data;
 	if (unlikely(verbose_request))
 		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
 		       pci_name(dev));
-	pci_disable_msix(dev);
 
+	if (dev->msix_enabled) {
+		struct xen_pcibk_dev_data *dev_data;
+
+		pci_disable_msix(dev);
+
+		dev_data = pci_get_drvdata(dev);
+		if (dev_data)
+			dev_data->ack_intr = 1;
+	}
 	/*
 	 * SR-IOV devices (which don't have any legacy IRQ) have
 	 * an undefined IRQ value of zero.
 	 */
 	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
 	if (unlikely(verbose_request))
-		printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
-		       op->value);
-	dev_data = pci_get_drvdata(dev);
-	if (dev_data)
-		dev_data->ack_intr = 1;
+		printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n",
+		       pci_name(dev), op->value);
 	return 0;
 }
 #endif
@@ -298,9 +330,11 @@ void xen_pcibk_do_op(struct work_struct *data)
 		container_of(data, struct xen_pcibk_device, op_work);
 	struct pci_dev *dev;
 	struct xen_pcibk_dev_data *dev_data = NULL;
-	struct xen_pci_op *op = &pdev->sh_info->op;
+	struct xen_pci_op *op = &pdev->op;
 	int test_intx = 0;
 
+	*op = pdev->sh_info->op;
+	barrier();
 	dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
 
 	if (dev == NULL)
@@ -342,6 +376,17 @@ void xen_pcibk_do_op(struct work_struct *data)
 		if ((dev_data->enable_intx != test_intx))
 			xen_pcibk_control_isr(dev, 0 /* no reset */);
 	}
+	pdev->sh_info->op.err = op->err;
+	pdev->sh_info->op.value = op->value;
+#ifdef CONFIG_PCI_MSI
+	if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
+		unsigned int i;
+
+		for (i = 0; i < op->value; i++)
+			pdev->sh_info->op.msix_entries[i].vector =
+				op->msix_entries[i].vector;
+	}
+#endif
 	/* Tell the driver domain that we're done. */
 	wmb();
 	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
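Taken together, the pciback hunks enforce one discipline for command processing over shared memory: copy the whole request in, operate only on the private copy, and publish only the fields the frontend is owed as outputs. In outline (field names as in the diff above):

    struct xen_pci_op *op = &pdev->op;      /* backend-private storage */

    *op = pdev->sh_info->op;        /* snapshot the request */
    barrier();                      /* no refetch from sh_info below */

    /* ... validate and execute using only *op ... */

    /* Copy back just the response fields (err, value, and the MSI-X
     * vectors after a successful enable_msix); blindly copying the
     * whole struct back would republish request fields the frontend
     * may have modified in the meantime. */
    pdev->sh_info->op.err = op->err;
    pdev->sh_info->op.value = op->value;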
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 98bc345f296e..4843741e703a 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -44,7 +44,6 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
 	dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
 
 	pdev->xdev = xdev;
-	dev_set_drvdata(&xdev->dev, pdev);
 
 	mutex_init(&pdev->dev_lock);
 
@@ -58,6 +57,9 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
 		kfree(pdev);
 		pdev = NULL;
 	}
+
+	dev_set_drvdata(&xdev->dev, pdev);
+
 out:
 	return pdev;
 }
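The alloc_pdev() reorder encodes a small lifetime rule: publish the pointer through drvdata only after setup has fully succeeded, or publish NULL, so that teardown paths fetching it via dev_get_drvdata() can never see a half-initialized object. A generic sketch of the rule (hypothetical names, not the pciback code):

    static struct foo *foo_alloc(struct device *dev)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (f && foo_init(f)) {         /* hypothetical init step */
                    kfree(f);               /* undo on failure */
                    f = NULL;
            }
            dev_set_drvdata(dev, f);        /* valid pointer or NULL */
            return f;
    }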
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 43bcae852546..ad4eb1024d1f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -726,7 +726,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 		if (!pending_req)
 			return 1;
 
-		ring_req = *RING_GET_REQUEST(ring, rc);
+		RING_COPY_REQUEST(ring, rc, &ring_req);
 		ring->req_cons = ++rc;
 
 		err = prepare_pending_reqs(info, &ring_req, pending_req);