author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-03 01:09:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-03 01:09:10 -0400
commit		56d92aa5cf7c96c70f81d0350c94faf46a9fb76d (patch)
tree		2fb5d5b891903cada4dff9c581c70d33340a3769 /drivers
parent		33c2a174120b2c1baec9d1dac513f9d4b761b26a (diff)
parent		c341ca45ce56143804ef5a8f4db753e554e640b4 (diff)
Merge tag 'stable/for-linus-3.7-x86-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull Xen update from Konrad Rzeszutek Wilk:
 "Features:
   - When hotplugging PCI devices in a PV guest we can allocate
     Xen-SWIOTLB later.
   - Cleanup Xen SWIOTLB.
   - Support pages out grants from HVM domains in the backends.
   - Support wild cards in xen-pciback.hide=(BDF) arguments.
   - Update grant status updates with upstream hypervisor.
   - Boot PV guests with more than 128GB.
   - Cleanup Xen MMU code/add comments.
   - Obtain XENVERS using a preferred method.
   - Lay out generic changes to support Xen ARM.
   - Allow privcmd ioctl for HVM (used to do only PV).
   - Do v2 of mmap_batch for privcmd ioctls.
   - If hypervisor saves the LED keyboard light - we will now instruct
     the kernel about its state.

  Fixes:
   - More fixes to Xen PCI backend for various calls/FLR/etc.
   - With more than 4GB in a 64-bit PV guest disable native SWIOTLB.
   - Fix up smatch warnings.
   - Fix up various return values in privcmd and mm."

* tag 'stable/for-linus-3.7-x86-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen: (48 commits)
  xen/pciback: Restore the PCI config space after an FLR.
  xen-pciback: properly clean up after calling pcistub_device_find()
  xen/vga: add the xen EFI video mode support
  xen/x86: retrieve keyboard shift status flags from hypervisor.
  xen/gndev: Xen backend support for paged out grant targets V4.
  xen-pciback: support wild cards in slot specifications
  xen/swiotlb: Fix compile warnings when using plain integer instead of NULL pointer.
  xen/swiotlb: Remove functions not needed anymore.
  xen/pcifront: Use Xen-SWIOTLB when initting if required.
  xen/swiotlb: For early initialization, return zero on success.
  xen/swiotlb: Use the swiotlb_late_init_with_tbl to init Xen-SWIOTLB late when PV PCI is used.
  xen/swiotlb: Move the error strings to its own function.
  xen/swiotlb: Move the nr_tbl determination in its own function.
  xen/arm: compile and run xenbus
  xen: resynchronise grant table status codes with upstream
  xen/privcmd: return -EFAULT on error
  xen/privcmd: Fix mmap batch ioctl error status copy back.
  xen/privcmd: add PRIVCMD_MMAPBATCH_V2 ioctl
  xen/mm: return more precise error from xen_remap_domain_range()
  xen/mmu: If the revector fails, don't attempt to revector anything else.
  ...
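A note on the wild-card feature above: judging from the new str_to_slot()/pcistub_init() parsing in the xen-pciback hunks below, the hide parameter now accepts '*' in the slot and function positions. A usage sketch (the BDF values here are placeholders, not taken from this commit):

    xen-pciback.hide=(0000:03:00.0)            # one function, as before
    xen-pciback.hide=(0000:03:00.*)            # every function of one slot
    xen-pciback.hide=(0000:03:*.*)(04:0a.*)    # a whole bus; the domain may be omitted and defaults to 0

pcistub_device_id_add() expands a slot wild card over slots 0-31 and a function wild card over functions 0-7.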
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/xen-netback/netback.c	11
-rw-r--r--	drivers/pci/xen-pcifront.c	15
-rw-r--r--	drivers/tty/hvc/hvc_xen.c	2
-rw-r--r--	drivers/xen/events.c	18
-rw-r--r--	drivers/xen/gntdev.c	2
-rw-r--r--	drivers/xen/grant-table.c	67
-rw-r--r--	drivers/xen/privcmd.c	135
-rw-r--r--	drivers/xen/swiotlb-xen.c	119
-rw-r--r--	drivers/xen/sys-hypervisor.c	13
-rw-r--r--	drivers/xen/tmem.c	1
-rw-r--r--	drivers/xen/xen-pciback/pci_stub.c	136
-rw-r--r--	drivers/xen/xenbus/xenbus_client.c	6
-rw-r--r--	drivers/xen/xenbus/xenbus_comms.c	2
-rw-r--r--	drivers/xen/xenbus/xenbus_dev_backend.c	2
-rw-r--r--	drivers/xen/xenbus/xenbus_probe.c	56
-rw-r--r--	drivers/xen/xenbus/xenbus_probe_frontend.c	1
-rw-r--r--	drivers/xen/xenbus/xenbus_xs.c	3
17 files changed, 435 insertions, 154 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 682633bfe00f..05593d882023 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -635,9 +635,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
 		return;
 
 	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
-	ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
-					npo.copy_prod);
-	BUG_ON(ret != 0);
+	gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
 
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
 		sco = (struct skb_cb_overlay *)skb->cb;
@@ -1460,18 +1458,15 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 static void xen_netbk_tx_action(struct xen_netbk *netbk)
 {
 	unsigned nr_gops;
-	int ret;
 
 	nr_gops = xen_netbk_tx_build_gops(netbk);
 
 	if (nr_gops == 0)
 		return;
-	ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
-					netbk->tx_copy_ops, nr_gops);
-	BUG_ON(ret);
 
-	xen_netbk_tx_submit(netbk);
+	gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
 
+	xen_netbk_tx_submit(netbk);
 }
 
 static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index def8d0b5620c..0aab85a51559 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -21,6 +21,7 @@
 #include <linux/bitops.h>
 #include <linux/time.h>
 
+#include <asm/xen/swiotlb-xen.h>
 #define INVALID_GRANT_REF (0)
 #define INVALID_EVTCHN    (-1)
 
@@ -236,7 +237,7 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
 	return errno_to_pcibios_err(do_pci_op(pdev, &op));
 }
 
-struct pci_ops pcifront_bus_ops = {
+static struct pci_ops pcifront_bus_ops = {
 	.read = pcifront_bus_read,
 	.write = pcifront_bus_write,
 };
@@ -668,7 +669,7 @@ static irqreturn_t pcifront_handler_aer(int irq, void *dev)
 	schedule_pcifront_aer_op(pdev);
 	return IRQ_HANDLED;
 }
-static int pcifront_connect(struct pcifront_device *pdev)
+static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
 {
 	int err = 0;
 
@@ -681,9 +682,13 @@ static int pcifront_connect(struct pcifront_device *pdev)
 		dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
 		err = -EEXIST;
 	}
-
 	spin_unlock(&pcifront_dev_lock);
 
+	if (!err && !swiotlb_nr_tbl()) {
+		err = pci_xen_swiotlb_init_late();
+		if (err)
+			dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
+	}
 	return err;
 }
 
@@ -842,10 +847,10 @@ static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
 	    XenbusStateInitialised)
 		goto out;
 
-	err = pcifront_connect(pdev);
+	err = pcifront_connect_and_init_dma(pdev);
 	if (err) {
 		xenbus_dev_fatal(pdev->xdev, err,
-				 "Error connecting PCI Frontend");
+				 "Error setting up PCI Frontend");
 		goto out;
 	}
 
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 1e456dca4f60..2944ff88fdc0 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -21,6 +21,7 @@
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/irq.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/list.h>
@@ -35,6 +36,7 @@
 #include <xen/page.h>
 #include <xen/events.h>
 #include <xen/interface/io/console.h>
+#include <xen/interface/sched.h>
 #include <xen/hvc-console.h>
 #include <xen/xenbus.h>
 
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 7595581d032c..c60d1629c916 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -373,11 +373,22 @@ static void unmask_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
 	unsigned int cpu = get_cpu();
+	int do_hypercall = 0, evtchn_pending = 0;
 
 	BUG_ON(!irqs_disabled());
 
-	/* Slow path (hypercall) if this is a non-local port. */
-	if (unlikely(cpu != cpu_from_evtchn(port))) {
+	if (unlikely((cpu != cpu_from_evtchn(port))))
+		do_hypercall = 1;
+	else
+		evtchn_pending = sync_test_bit(port, &s->evtchn_pending[0]);
+
+	if (unlikely(evtchn_pending && xen_hvm_domain()))
+		do_hypercall = 1;
+
+	/* Slow path (hypercall) if this is a non-local port or if this is
+	 * an hvm domain and an event is pending (hvm domains don't have
+	 * their own implementation of irq_enable). */
+	if (do_hypercall) {
 		struct evtchn_unmask unmask = { .port = port };
 		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
 	} else {
@@ -390,7 +401,7 @@ static void unmask_evtchn(int port)
 		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
 		 * the interrupt edge' if the channel is masked.
 		 */
-		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
+		if (evtchn_pending &&
 		    !sync_test_and_set_bit(port / BITS_PER_LONG,
 					   &vcpu_info->evtchn_pending_sel))
 			vcpu_info->evtchn_upcall_pending = 1;
@@ -831,6 +842,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 		struct irq_info *info = info_for_irq(irq);
 		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
 	}
+	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 
 out:
 	mutex_unlock(&irq_mapping_update_lock);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 7f1241608489..5df9fd847b2e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -446,7 +446,7 @@ static void mn_release(struct mmu_notifier *mn,
 	spin_unlock(&priv->lock);
 }
 
-struct mmu_notifier_ops gntdev_mmu_ops = {
+static struct mmu_notifier_ops gntdev_mmu_ops = {
 	.release = mn_release,
 	.invalidate_page = mn_invl_page,
 	.invalidate_range_start = mn_invl_range_start,
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 006726688baf..b2b0a375b348 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -38,6 +38,7 @@
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 #include <linux/hardirq.h>
 
 #include <xen/xen.h>
@@ -47,6 +48,7 @@
 #include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
 #include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
 
 #include <asm/pgtable.h>
 #include <asm/sync_bitops.h>
@@ -285,10 +287,9 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 }
 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
 
-void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
-				    unsigned long frame, int flags,
-				    unsigned page_off,
-				    unsigned length)
+static void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
+					   unsigned long frame, int flags,
+					   unsigned page_off, unsigned length)
 {
 	gnttab_shared.v2[ref].sub_page.frame = frame;
 	gnttab_shared.v2[ref].sub_page.page_off = page_off;
@@ -345,9 +346,9 @@ bool gnttab_subpage_grants_available(void)
 }
 EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
 
-void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
-				  int flags, domid_t trans_domid,
-				  grant_ref_t trans_gref)
+static void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
+					 int flags, domid_t trans_domid,
+					 grant_ref_t trans_gref)
 {
 	gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
 	gnttab_shared.v2[ref].transitive.gref = trans_gref;
@@ -823,6 +824,52 @@ unsigned int gnttab_max_grant_frames(void)
 }
 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 
+/* Handling of paged out grant targets (GNTST_eagain) */
+#define MAX_DELAY 256
+static inline void
+gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
+						const char *func)
+{
+	unsigned delay = 1;
+
+	do {
+		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
+		if (*status == GNTST_eagain)
+			msleep(delay++);
+	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
+
+	if (delay >= MAX_DELAY) {
+		printk(KERN_ERR "%s: %s eagain grant\n", func, current->comm);
+		*status = GNTST_bad_page;
+	}
+}
+
+void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
+{
+	struct gnttab_map_grant_ref *op;
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
+		BUG();
+	for (op = batch; op < batch + count; op++)
+		if (op->status == GNTST_eagain)
+			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
+						&op->status, __func__);
+}
+EXPORT_SYMBOL_GPL(gnttab_batch_map);
+
+void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
+{
+	struct gnttab_copy *op;
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
+		BUG();
+	for (op = batch; op < batch + count; op++)
+		if (op->status == GNTST_eagain)
+			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
+						&op->status, __func__);
+}
+EXPORT_SYMBOL_GPL(gnttab_batch_copy);
+
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		    struct gnttab_map_grant_ref *kmap_ops,
 		    struct page **pages, unsigned int count)
@@ -836,6 +883,12 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 	if (ret)
 		return ret;
 
+	/* Retry eagain maps */
+	for (i = 0; i < count; i++)
+		if (map_ops[i].status == GNTST_eagain)
+			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
+						&map_ops[i].status, __func__);
+
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return ret;
 
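For callers, the new helpers wrap the raw hypercall and transparently handle paged-out grant targets: gnttab_batch_map()/gnttab_batch_copy() issue the batch once, re-issue any op whose status is GNTST_eagain with an increasing msleep() back-off, and downgrade a persistent GNTST_eagain to GNTST_bad_page. A minimal sketch of the caller's side (the wrapper function is illustrative, not part of this commit):

	#include <linux/kernel.h>
	#include <xen/grant_table.h>

	/* Copy one page described by a prepared gnttab_copy op. By the time
	 * gnttab_batch_copy() returns, any GNTST_eagain has already been
	 * retried, so only a real per-op failure remains visible here. */
	static int copy_one_grant(struct gnttab_copy *op)
	{
		gnttab_batch_copy(op, 1);
		return op->status == GNTST_okay ? 0 : -EIO;
	}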
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index ccee0f16bcf8..ef6389580b8c 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -76,7 +76,7 @@ static void free_page_list(struct list_head *pages)
  */
 static int gather_array(struct list_head *pagelist,
 			unsigned nelem, size_t size,
-			void __user *data)
+			const void __user *data)
 {
 	unsigned pageidx;
 	void *pagedata;
@@ -246,61 +246,117 @@ struct mmap_batch_state {
 	domid_t domain;
 	unsigned long va;
 	struct vm_area_struct *vma;
-	int err;
-
-	xen_pfn_t __user *user;
+	/* A tristate:
+	 *      0 for no errors
+	 *      1 if at least one error has happened (and no
+	 *          -ENOENT errors have happened)
+	 *      -ENOENT if at least 1 -ENOENT has happened.
+	 */
+	int global_error;
+	/* An array for individual errors */
+	int *err;
+
+	/* User-space mfn array to store errors in the second pass for V1. */
+	xen_pfn_t __user *user_mfn;
 };
 
 static int mmap_batch_fn(void *data, void *state)
 {
 	xen_pfn_t *mfnp = data;
 	struct mmap_batch_state *st = state;
+	int ret;
+
+	ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
+					 st->vma->vm_page_prot, st->domain);
 
-	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
-				       st->vma->vm_page_prot, st->domain) < 0) {
-		*mfnp |= 0xf0000000U;
-		st->err++;
+	/* Store error code for second pass. */
+	*(st->err++) = ret;
+
+	/* And see if it affects the global_error. */
+	if (ret < 0) {
+		if (ret == -ENOENT)
+			st->global_error = -ENOENT;
+		else {
+			/* Record that at least one error has happened. */
+			if (st->global_error == 0)
+				st->global_error = 1;
+		}
 	}
 	st->va += PAGE_SIZE;
 
 	return 0;
 }
 
-static int mmap_return_errors(void *data, void *state)
+static int mmap_return_errors_v1(void *data, void *state)
 {
 	xen_pfn_t *mfnp = data;
 	struct mmap_batch_state *st = state;
+	int err = *(st->err++);
 
-	return put_user(*mfnp, st->user++);
+	/*
+	 * V1 encodes the error codes in the 32bit top nibble of the
+	 * mfn (with its known limitations vis-a-vis 64 bit callers).
+	 */
+	*mfnp |= (err == -ENOENT) ?
+				PRIVCMD_MMAPBATCH_PAGED_ERROR :
+				PRIVCMD_MMAPBATCH_MFN_ERROR;
+	return __put_user(*mfnp, st->user_mfn++);
 }
 
 static struct vm_operations_struct privcmd_vm_ops;
 
-static long privcmd_ioctl_mmap_batch(void __user *udata)
+static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 {
 	int ret;
-	struct privcmd_mmapbatch m;
+	struct privcmd_mmapbatch_v2 m;
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long nr_pages;
 	LIST_HEAD(pagelist);
+	int *err_array = NULL;
 	struct mmap_batch_state state;
 
 	if (!xen_initial_domain())
 		return -EPERM;
 
-	if (copy_from_user(&m, udata, sizeof(m)))
-		return -EFAULT;
+	switch (version) {
+	case 1:
+		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
+			return -EFAULT;
+		/* Returns per-frame error in m.arr. */
+		m.err = NULL;
+		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
+			return -EFAULT;
+		break;
+	case 2:
+		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
+			return -EFAULT;
+		/* Returns per-frame error code in m.err. */
+		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
+			return -EFAULT;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	nr_pages = m.num;
 	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
 		return -EINVAL;
 
-	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
-			   m.arr);
+	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);
 
-	if (ret || list_empty(&pagelist))
+	if (ret)
 		goto out;
+	if (list_empty(&pagelist)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL);
+	if (err_array == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	down_write(&mm->mmap_sem);
 
@@ -315,24 +371,37 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
 		goto out;
 	}
 
 	state.domain = m.dom;
 	state.vma = vma;
 	state.va = m.addr;
-	state.err = 0;
+	state.global_error = 0;
+	state.err = err_array;
 
-	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-			     &pagelist, mmap_batch_fn, &state);
+	/* mmap_batch_fn guarantees ret == 0 */
+	BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
+			      &pagelist, mmap_batch_fn, &state));
 
 	up_write(&mm->mmap_sem);
 
-	if (state.err > 0) {
-		state.user = m.arr;
+	if (state.global_error && (version == 1)) {
+		/* Write back errors in second pass. */
+		state.user_mfn = (xen_pfn_t *)m.arr;
+		state.err = err_array;
 		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-				     &pagelist,
-				     mmap_return_errors, &state);
+				     &pagelist, mmap_return_errors_v1, &state);
+	} else if (version == 2) {
+		ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
+		if (ret)
+			ret = -EFAULT;
 	}
 
+	/* If we have not had any EFAULT-like global errors then set the global
+	 * error to -ENOENT if necessary. */
+	if ((ret == 0) && (state.global_error == -ENOENT))
+		ret = -ENOENT;
+
 out:
+	kfree(err_array);
 	free_page_list(&pagelist);
 
 	return ret;
@@ -354,7 +423,11 @@ static long privcmd_ioctl(struct file *file,
 		break;
 
 	case IOCTL_PRIVCMD_MMAPBATCH:
-		ret = privcmd_ioctl_mmap_batch(udata);
+		ret = privcmd_ioctl_mmap_batch(udata, 1);
+		break;
+
+	case IOCTL_PRIVCMD_MMAPBATCH_V2:
+		ret = privcmd_ioctl_mmap_batch(udata, 2);
 		break;
 
 	default:
@@ -380,10 +453,6 @@ static struct vm_operations_struct privcmd_vm_ops = {
 
 static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	/* Unsupported for auto-translate guests. */
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return -ENOSYS;
-
 	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
 	 * how to recreate these mappings */
 	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
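The V2 ioctl returns per-frame errors through a separate user array instead of encoding them into the mfn values in place. A hedged user-space sketch (field names follow the m.num/m.dom/m.addr/m.arr/m.err uses above; the header location and the domid_t/xen_pfn_t typedefs are assumptions, and real callers such as libxc carry their own copies of this structure):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <xen/privcmd.h>	/* assumed to provide the struct and ioctl number */

	/* Map 'num' foreign frames of domain 'dom' at 'addr' (a region already
	 * mmap()ed from /dev/xen/privcmd); 'mfns' lists the frames to map. */
	static int map_foreign_v2(int fd, domid_t dom, unsigned long addr,
				  const xen_pfn_t *mfns, unsigned int num)
	{
		struct privcmd_mmapbatch_v2 batch = {
			.num = num, .dom = dom, .addr = addr,
			.arr = mfns, .err = calloc(num, sizeof(int)),
		};
		unsigned int i;
		int rc;

		if (!batch.err)
			return -1;
		/* rc < 0 with errno ENOENT means at least one frame was paged out */
		rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch);
		for (i = 0; i < num; i++)
			if (batch.err[i])
				fprintf(stderr, "frame %u: error %d\n", i, batch.err[i]);
		free(batch.err);
		return rc;
	}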
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 4d519488d304..58db6df866ef 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -52,7 +52,7 @@ static unsigned long xen_io_tlb_nslabs;
  * Quick lookup value of the bus address of the IOTLB.
  */
 
-u64 start_dma_addr;
+static u64 start_dma_addr;
 
 static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
@@ -144,31 +144,72 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 	} while (i < nslabs);
 	return 0;
 }
-
-void __init xen_swiotlb_init(int verbose)
+static unsigned long xen_set_nslabs(unsigned long nr_tbl)
+{
+	if (!nr_tbl) {
+		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
+		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+	} else
+		xen_io_tlb_nslabs = nr_tbl;
+
+	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
+}
+
+enum xen_swiotlb_err {
+	XEN_SWIOTLB_UNKNOWN = 0,
+	XEN_SWIOTLB_ENOMEM,
+	XEN_SWIOTLB_EFIXUP
+};
+
+static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
+{
+	switch (err) {
+	case XEN_SWIOTLB_ENOMEM:
+		return "Cannot allocate Xen-SWIOTLB buffer\n";
+	case XEN_SWIOTLB_EFIXUP:
+		return "Failed to get contiguous memory for DMA from Xen!\n"\
+		    "You either: don't have the permissions, do not have"\
+		    " enough free memory under 4GB, or the hypervisor memory"\
+		    " is too fragmented!";
+	default:
+		break;
+	}
+	return "";
+}
+int __ref xen_swiotlb_init(int verbose, bool early)
 {
-	unsigned long bytes;
+	unsigned long bytes, order;
 	int rc = -ENOMEM;
-	unsigned long nr_tbl;
-	char *m = NULL;
+	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
 	unsigned int repeat = 3;
 
-	nr_tbl = swiotlb_nr_tbl();
-	if (nr_tbl)
-		xen_io_tlb_nslabs = nr_tbl;
-	else {
-		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
-		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
-	}
+	xen_io_tlb_nslabs = swiotlb_nr_tbl();
 retry:
-	bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
-
+	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
+	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
 	/*
 	 * Get IO TLB memory from any location.
 	 */
-	xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+	if (early)
+		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+	else {
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+			xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
+			if (xen_io_tlb_start)
+				break;
+			order--;
+		}
+		if (order != get_order(bytes)) {
+			pr_warn("Warning: only able to allocate %ld MB "
+				"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
+			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
+		}
+	}
 	if (!xen_io_tlb_start) {
-		m = "Cannot allocate Xen-SWIOTLB buffer!\n";
+		m_ret = XEN_SWIOTLB_ENOMEM;
 		goto error;
 	}
 	xen_io_tlb_end = xen_io_tlb_start + bytes;
@@ -179,17 +220,22 @@ retry:
 			       bytes,
 			       xen_io_tlb_nslabs);
 	if (rc) {
-		free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
-		m = "Failed to get contiguous memory for DMA from Xen!\n"\
-		    "You either: don't have the permissions, do not have"\
-		    " enough free memory under 4GB, or the hypervisor memory"\
-		    "is too fragmented!";
+		if (early)
+			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+		else {
+			free_pages((unsigned long)xen_io_tlb_start, order);
+			xen_io_tlb_start = NULL;
+		}
+		m_ret = XEN_SWIOTLB_EFIXUP;
 		goto error;
 	}
 	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
-	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
-
-	return;
+	if (early) {
+		swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
+		rc = 0;
+	} else
+		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+	return rc;
 error:
 	if (repeat--) {
 		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
@@ -198,10 +244,13 @@ error:
 			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
 		goto retry;
 	}
-	xen_raw_printk("%s (rc:%d)", m, rc);
-	panic("%s (rc:%d)", m, rc);
+	pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+	if (early)
+		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+	else
+		free_pages((unsigned long)xen_io_tlb_start, order);
+	return rc;
 }
-
 void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flags,
@@ -466,14 +515,6 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
 
-int
-xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		   enum dma_data_direction dir)
-{
-	return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg);
-
 /*
  * Unmap a set of streaming mode DMA translations. Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_page() above.
@@ -494,14 +535,6 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
 
-void
-xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		     enum dma_data_direction dir)
-{
-	return xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg);
-
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
  * after a transfer.
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index fdb6d229c9bb..5e5ad7e28858 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -114,7 +114,7 @@ static void xen_sysfs_version_destroy(void)
 
 /* UUID */
 
-static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
+static ssize_t uuid_show_fallback(struct hyp_sysfs_attr *attr, char *buffer)
 {
 	char *vm, *val;
 	int ret;
@@ -135,6 +135,17 @@ static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
 	return ret;
 }
 
+static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+	xen_domain_handle_t uuid;
+	int ret;
+	ret = HYPERVISOR_xen_version(XENVER_guest_handle, uuid);
+	if (ret)
+		return uuid_show_fallback(attr, buffer);
+	ret = sprintf(buffer, "%pU\n", uuid);
+	return ret;
+}
+
 HYPERVISOR_ATTR_RO(uuid);
 
 static int __init xen_sysfs_uuid_init(void)
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 89f264c67420..144564e5eb29 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -21,6 +21,7 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 #include <asm/xen/hypervisor.h>
+#include <xen/tmem.h>
 
 #define TMEM_CONTROL 0
 #define TMEM_NEW_POOL 1
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 92ff01dbeb10..961d664e2d2f 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -362,6 +362,7 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
 	else {
 		dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n");
 		__pci_reset_function_locked(dev);
+		pci_restore_state(dev);
 	}
 	/* Now disable the device (this also ensures some private device
 	 * data is setup before we export)
@@ -681,14 +682,14 @@ static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
 		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
 			" by HVM, kill it\n");
 		kill_domain_by_device(psdev);
-		goto release;
+		goto end;
 	}
 
 	if (!test_bit(_XEN_PCIB_AERHANDLER,
 		      (unsigned long *)&psdev->pdev->sh_info->flags)) {
 		dev_err(&dev->dev,
 			"guest with no AER driver should have been killed\n");
-		goto release;
+		goto end;
 	}
 	result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
 
@@ -698,9 +699,9 @@ static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
698 "No AER slot_reset service or disconnected!\n"); 699 "No AER slot_reset service or disconnected!\n");
699 kill_domain_by_device(psdev); 700 kill_domain_by_device(psdev);
700 } 701 }
701release:
702 pcistub_device_put(psdev);
703end: 702end:
703 if (psdev)
704 pcistub_device_put(psdev);
704 up_write(&pcistub_sem); 705 up_write(&pcistub_sem);
705 return result; 706 return result;
706 707
@@ -739,14 +740,14 @@ static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
 		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
 			" by HVM, kill it\n");
 		kill_domain_by_device(psdev);
-		goto release;
+		goto end;
 	}
 
 	if (!test_bit(_XEN_PCIB_AERHANDLER,
 		      (unsigned long *)&psdev->pdev->sh_info->flags)) {
 		dev_err(&dev->dev,
 			"guest with no AER driver should have been killed\n");
-		goto release;
+		goto end;
 	}
 	result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
 
@@ -756,9 +757,9 @@ static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
756 "No AER mmio_enabled service or disconnected!\n"); 757 "No AER mmio_enabled service or disconnected!\n");
757 kill_domain_by_device(psdev); 758 kill_domain_by_device(psdev);
758 } 759 }
759release:
760 pcistub_device_put(psdev);
761end: 760end:
761 if (psdev)
762 pcistub_device_put(psdev);
762 up_write(&pcistub_sem); 763 up_write(&pcistub_sem);
763 return result; 764 return result;
764} 765}
@@ -797,7 +798,7 @@ static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
 		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
 			" by HVM, kill it\n");
 		kill_domain_by_device(psdev);
-		goto release;
+		goto end;
 	}
 
 	/*Guest owns the device yet no aer handler regiested, kill guest*/
@@ -805,7 +806,7 @@ static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
 		      (unsigned long *)&psdev->pdev->sh_info->flags)) {
 		dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
 		kill_domain_by_device(psdev);
-		goto release;
+		goto end;
 	}
 	result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);
 
@@ -815,9 +816,9 @@ static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
815 "No AER error_detected service or disconnected!\n"); 816 "No AER error_detected service or disconnected!\n");
816 kill_domain_by_device(psdev); 817 kill_domain_by_device(psdev);
817 } 818 }
818release:
819 pcistub_device_put(psdev);
820end: 819end:
820 if (psdev)
821 pcistub_device_put(psdev);
821 up_write(&pcistub_sem); 822 up_write(&pcistub_sem);
822 return result; 823 return result;
823} 824}
@@ -851,7 +852,7 @@ static void xen_pcibk_error_resume(struct pci_dev *dev)
 		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
 			" by HVM, kill it\n");
 		kill_domain_by_device(psdev);
-		goto release;
+		goto end;
 	}
 
 	if (!test_bit(_XEN_PCIB_AERHANDLER,
@@ -859,13 +860,13 @@ static void xen_pcibk_error_resume(struct pci_dev *dev)
 		dev_err(&dev->dev,
 			"guest with no AER driver should have been killed\n");
 		kill_domain_by_device(psdev);
-		goto release;
+		goto end;
 	}
 	common_process(psdev, 1, XEN_PCI_OP_aer_resume,
 		       PCI_ERS_RESULT_RECOVERED);
-release:
-	pcistub_device_put(psdev);
 end:
+	if (psdev)
+		pcistub_device_put(psdev);
 	up_write(&pcistub_sem);
 	return;
 }
@@ -897,17 +898,41 @@ static inline int str_to_slot(const char *buf, int *domain, int *bus,
 			      int *slot, int *func)
 {
 	int err;
+	char wc = '*';
 
 	err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
-	if (err == 4)
+	switch (err) {
+	case 3:
+		*func = -1;
+		err = sscanf(buf, " %x:%x:%x.%c", domain, bus, slot, &wc);
+		break;
+	case 2:
+		*slot = *func = -1;
+		err = sscanf(buf, " %x:%x:*.%c", domain, bus, &wc);
+		if (err >= 2)
+			++err;
+		break;
+	}
+	if (err == 4 && wc == '*')
 		return 0;
 	else if (err < 0)
 		return -EINVAL;
 
 	/* try again without domain */
 	*domain = 0;
+	wc = '*';
 	err = sscanf(buf, " %x:%x.%x", bus, slot, func);
-	if (err == 3)
+	switch (err) {
+	case 2:
+		*func = -1;
+		err = sscanf(buf, " %x:%x.%c", bus, slot, &wc);
+		break;
+	case 1:
+		*slot = *func = -1;
+		err = sscanf(buf, " %x:*.%c", bus, &wc) + 1;
+		break;
+	}
+	if (err == 3 && wc == '*')
 		return 0;
 
 	return -EINVAL;
@@ -930,6 +955,19 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
 {
 	struct pcistub_device_id *pci_dev_id;
 	unsigned long flags;
+	int rc = 0;
+
+	if (slot < 0) {
+		for (slot = 0; !rc && slot < 32; ++slot)
+			rc = pcistub_device_id_add(domain, bus, slot, func);
+		return rc;
+	}
+
+	if (func < 0) {
+		for (func = 0; !rc && func < 8; ++func)
+			rc = pcistub_device_id_add(domain, bus, slot, func);
+		return rc;
+	}
 
 	pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
 	if (!pci_dev_id)
@@ -952,15 +990,15 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
 static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
 {
 	struct pcistub_device_id *pci_dev_id, *t;
-	int devfn = PCI_DEVFN(slot, func);
 	int err = -ENOENT;
 	unsigned long flags;
 
 	spin_lock_irqsave(&device_ids_lock, flags);
 	list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
 				 slot_list) {
-		if (pci_dev_id->domain == domain
-		    && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
+		if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
+		    && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
+		    && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
 			/* Don't break; here because it's possible the same
 			 * slot could be in the list more than once
 			 */
@@ -987,7 +1025,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
 	struct config_field *field;
 
 	psdev = pcistub_device_find(domain, bus, slot, func);
-	if (!psdev || !psdev->dev) {
+	if (!psdev) {
 		err = -ENODEV;
 		goto out;
 	}
@@ -1011,6 +1049,8 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
 	if (err)
 		kfree(field);
 out:
+	if (psdev)
+		pcistub_device_put(psdev);
 	return err;
 }
 
@@ -1115,10 +1155,9 @@ static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
 
 	err = str_to_slot(buf, &domain, &bus, &slot, &func);
 	if (err)
-		goto out;
+		return err;
 
 	psdev = pcistub_device_find(domain, bus, slot, func);
-
 	if (!psdev)
 		goto out;
 
@@ -1134,6 +1173,8 @@ static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
 	if (dev_data->isr_on)
 		dev_data->ack_intr = 1;
 out:
+	if (psdev)
+		pcistub_device_put(psdev);
 	if (!err)
 		err = count;
 	return err;
@@ -1216,15 +1257,16 @@ static ssize_t permissive_add(struct device_driver *drv, const char *buf,
 	err = str_to_slot(buf, &domain, &bus, &slot, &func);
 	if (err)
 		goto out;
+	if (slot < 0 || func < 0) {
+		err = -EINVAL;
+		goto out;
+	}
 	psdev = pcistub_device_find(domain, bus, slot, func);
 	if (!psdev) {
 		err = -ENODEV;
 		goto out;
 	}
-	if (!psdev->dev) {
-		err = -ENODEV;
-		goto release;
-	}
+
 	dev_data = pci_get_drvdata(psdev->dev);
 	/* the driver data for a device should never be null at this point */
 	if (!dev_data) {
@@ -1297,17 +1339,51 @@ static int __init pcistub_init(void)
 
 	if (pci_devs_to_hide && *pci_devs_to_hide) {
 		do {
+			char wc = '*';
+
 			parsed = 0;
 
 			err = sscanf(pci_devs_to_hide + pos,
 				     " (%x:%x:%x.%x) %n",
 				     &domain, &bus, &slot, &func, &parsed);
-			if (err != 4) {
+			switch (err) {
+			case 3:
+				func = -1;
+				err = sscanf(pci_devs_to_hide + pos,
+					     " (%x:%x:%x.%c) %n",
+					     &domain, &bus, &slot, &wc,
+					     &parsed);
+				break;
+			case 2:
+				slot = func = -1;
+				err = sscanf(pci_devs_to_hide + pos,
+					     " (%x:%x:*.%c) %n",
+					     &domain, &bus, &wc, &parsed) + 1;
+				break;
+			}
+
+			if (err != 4 || wc != '*') {
 				domain = 0;
+				wc = '*';
 				err = sscanf(pci_devs_to_hide + pos,
 					     " (%x:%x.%x) %n",
 					     &bus, &slot, &func, &parsed);
-				if (err != 3)
+				switch (err) {
+				case 2:
+					func = -1;
+					err = sscanf(pci_devs_to_hide + pos,
+						     " (%x:%x.%c) %n",
+						     &bus, &slot, &wc,
+						     &parsed);
+					break;
+				case 1:
+					slot = func = -1;
+					err = sscanf(pci_devs_to_hide + pos,
+						     " (%x:*.%c) %n",
+						     &bus, &wc, &parsed) + 1;
+					break;
+				}
+				if (err != 3 || wc != '*')
 					goto parse_error;
 			}
 
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index b3e146edb51d..bcf3ba4a6ec1 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -490,8 +490,7 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
 
 	op.host_addr = arbitrary_virt_to_machine(pte).maddr;
 
-	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-		BUG();
+	gnttab_batch_map(&op, 1);
 
 	if (op.status != GNTST_okay) {
 		free_vm_area(area);
@@ -572,8 +571,7 @@ int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
 	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
 			  dev->otherend_id);
 
-	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-		BUG();
+	gnttab_batch_map(&op, 1);
 
 	if (op.status != GNTST_okay) {
 		xenbus_dev_fatal(dev, op.status,
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 52fe7ad07666..c5aa55c5d371 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -224,7 +224,7 @@ int xb_init_comms(void)
 	int err;
 	err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
 					0, "xenbus", &xb_waitq);
-	if (err <= 0) {
+	if (err < 0) {
 		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
 		return err;
 	}
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index be738c43104b..d73000800762 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -107,7 +107,7 @@ static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
-const struct file_operations xenbus_backend_fops = {
+static const struct file_operations xenbus_backend_fops = {
 	.open = xenbus_backend_open,
 	.mmap = xenbus_backend_mmap,
 	.unlocked_ioctl = xenbus_backend_ioctl,
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index b793723e724d..038b71dbf03c 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -324,8 +324,8 @@ static int cmp_dev(struct device *dev, void *data)
 	return 0;
 }
 
-struct xenbus_device *xenbus_device_find(const char *nodename,
-					 struct bus_type *bus)
+static struct xenbus_device *xenbus_device_find(const char *nodename,
+						struct bus_type *bus)
 {
 	struct xb_find_info info = { .dev = NULL, .nodename = nodename };
 
@@ -719,17 +719,47 @@ static int __init xenstored_local_init(void)
 	return err;
 }
 
+enum xenstore_init {
+	UNKNOWN,
+	PV,
+	HVM,
+	LOCAL,
+};
 static int __init xenbus_init(void)
 {
 	int err = 0;
+	enum xenstore_init usage = UNKNOWN;
+	uint64_t v = 0;
 
 	if (!xen_domain())
 		return -ENODEV;
 
 	xenbus_ring_ops_init();
 
-	if (xen_hvm_domain()) {
-		uint64_t v = 0;
+	if (xen_pv_domain())
+		usage = PV;
+	if (xen_hvm_domain())
+		usage = HVM;
+	if (xen_hvm_domain() && xen_initial_domain())
+		usage = LOCAL;
+	if (xen_pv_domain() && !xen_start_info->store_evtchn)
+		usage = LOCAL;
+	if (xen_pv_domain() && xen_start_info->store_evtchn)
+		xenstored_ready = 1;
+
+	switch (usage) {
+	case LOCAL:
+		err = xenstored_local_init();
+		if (err)
+			goto out_error;
+		xen_store_interface = mfn_to_virt(xen_store_mfn);
+		break;
+	case PV:
+		xen_store_evtchn = xen_start_info->store_evtchn;
+		xen_store_mfn = xen_start_info->store_mfn;
+		xen_store_interface = mfn_to_virt(xen_store_mfn);
+		break;
+	case HVM:
 		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
 		if (err)
 			goto out_error;
@@ -738,18 +768,12 @@ static int __init xenbus_init(void)
 		if (err)
 			goto out_error;
 		xen_store_mfn = (unsigned long)v;
-		xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
-	} else {
-		xen_store_evtchn = xen_start_info->store_evtchn;
-		xen_store_mfn = xen_start_info->store_mfn;
-		if (xen_store_evtchn)
-			xenstored_ready = 1;
-		else {
-			err = xenstored_local_init();
-			if (err)
-				goto out_error;
-		}
-		xen_store_interface = mfn_to_virt(xen_store_mfn);
+		xen_store_interface =
+			ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+		break;
+	default:
+		pr_warn("Xenstore state unknown\n");
+		break;
 	}
 
 	/* Initialize the interface to xenstore. */
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index a31b54d48839..3159a37d966d 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -21,6 +21,7 @@
 #include <xen/xenbus.h>
 #include <xen/events.h>
 #include <xen/page.h>
+#include <xen/xen.h>
 
 #include <xen/platform_pci.h>
 
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index bce15cf4a8df..131dec04794e 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -44,6 +44,7 @@
 #include <linux/rwsem.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <asm/xen/hypervisor.h>
 #include <xen/xenbus.h>
 #include <xen/xen.h>
 #include "xenbus_comms.h"
@@ -622,7 +623,7 @@ static void xs_reset_watches(void)
 {
 	int err, supported = 0;
 
-	if (!xen_hvm_domain())
+	if (!xen_hvm_domain() || xen_initial_domain())
 		return;
 
 	err = xenbus_scanf(XBT_NIL, "control",