-rw-r--r--  arch/x86/include/asm/xen/page.h | 35
-rw-r--r--  arch/x86/xen/p2m.c              |  3
-rw-r--r--  arch/x86/xen/spinlock.c         | 14
-rw-r--r--  drivers/block/xen-blkfront.c    |  1
-rw-r--r--  drivers/xen/grant-table.c       |  2
-rw-r--r--  drivers/xen/privcmd-buf.c       | 22
-rw-r--r--  include/xen/xen-ops.h           | 12
7 files changed, 53 insertions(+), 36 deletions(-)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 123e669bf363..790ce08e41f2 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -9,7 +9,7 @@
 #include <linux/mm.h>
 #include <linux/device.h>
 
-#include <linux/uaccess.h>
+#include <asm/extable.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -93,12 +93,39 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
  */
 static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
 {
-	return __put_user(val, (unsigned long __user *)addr);
+	int ret = 0;
+
+	asm volatile("1: mov %[val], %[ptr]\n"
+		     "2:\n"
+		     ".section .fixup, \"ax\"\n"
+		     "3: sub $1, %[ret]\n"
+		     "   jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [ret] "+r" (ret), [ptr] "=m" (*addr)
+		     : [val] "r" (val));
+
+	return ret;
 }
 
-static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+static inline int xen_safe_read_ulong(const unsigned long *addr,
+				      unsigned long *val)
 {
-	return __get_user(*val, (unsigned long __user *)addr);
+	int ret = 0;
+	unsigned long rval = ~0ul;
+
+	asm volatile("1: mov %[ptr], %[rval]\n"
+		     "2:\n"
+		     ".section .fixup, \"ax\"\n"
+		     "3: sub $1, %[ret]\n"
+		     "   jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [ret] "+r" (ret), [rval] "+r" (rval)
+		     : [ptr] "m" (*addr));
+	*val = rval;
+
+	return ret;
 }
 
 #ifdef CONFIG_XEN_PV
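[Illustrative note, not part of the patch: both helpers above return 0 on success and a negative value when the access faults and is recovered through the exception table; on a faulting read the output is additionally left as ~0UL. A minimal caller sketch under that contract — the function name is hypothetical:]

/* Hypothetical caller sketch, assuming the xen_safe_*_ulong() contract
 * above: 0 on success, negative if the access faulted and was fixed up. */
static unsigned long p2m_lookup_sketch(const unsigned long *p2m, unsigned long pfn)
{
	unsigned long mfn;

	if (xen_safe_read_ulong(p2m + pfn, &mfn))
		return ~0UL;	/* faulted: treat the entry as invalid */

	return mfn;		/* value read atomically from the p2m array */
}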
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b06731705529..055e37e43541 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -656,8 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 
 	/*
 	 * The interface requires atomic updates on p2m elements.
-	 * xen_safe_write_ulong() is using __put_user which does an atomic
-	 * store via asm().
+	 * xen_safe_write_ulong() is using an atomic store via asm().
 	 */
 	if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
 		return true;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 441c88262169..1c8a8816a402 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -9,6 +9,7 @@
 #include <linux/log2.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
 static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
@@ -39,25 +41,25 @@ static void xen_qlock_kick(int cpu)
  */
 static void xen_qlock_wait(u8 *byte, u8 val)
 {
-	unsigned long flags;
 	int irq = __this_cpu_read(lock_kicker_irq);
+	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
 
 	/* If kicker interrupts not initialized yet, just spin */
 	if (irq == -1 || in_nmi())
 		return;
 
-	/* Guard against reentry. */
-	local_irq_save(flags);
+	/* Detect reentry. */
+	atomic_inc(nest_cnt);
 
-	/* If irq pending already clear it. */
-	if (xen_test_irq_pending(irq)) {
+	/* If irq pending already and no nested call clear it. */
+	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
 		xen_clear_irq_pending(irq);
 	} else if (READ_ONCE(*byte) == val) {
 		/* Block until irq becomes pending (or a spurious wakeup) */
 		xen_poll_irq(irq);
 	}
 
-	local_irq_restore(flags);
+	atomic_dec(nest_cnt);
 }
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
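[Illustrative note, not part of the patch: the hunk above replaces interrupt masking with a per-CPU nesting counter, so only the outermost invocation consumes a pending kicker event while nested invocations fall through to the poll path. A self-contained userspace analogue of that pattern, with all names made up for the sketch:]

/* Userspace analogue of the nesting-counter pattern (illustrative only,
 * not kernel code): the outermost entry eats the pending event, nested
 * entries go straight to the poll/spin path, no irq masking involved. */
#include <stdatomic.h>
#include <stdbool.h>

static _Thread_local atomic_int nest_cnt;	/* stands in for the per-CPU counter */

static void qlock_wait_sketch(const _Atomic unsigned char *byte, unsigned char val,
			      bool (*test_pending)(void),
			      void (*clear_pending)(void),
			      void (*poll_event)(void))
{
	atomic_fetch_add(&nest_cnt, 1);		/* record (possibly nested) entry */

	if (atomic_load(&nest_cnt) == 1 && test_pending())
		clear_pending();		/* outermost level clears the stale event */
	else if (atomic_load(byte) == val)
		poll_event();			/* block until the kick arrives */

	atomic_fetch_sub(&nest_cnt, 1);		/* leave this nesting level */
}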
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 56452cabce5b..0ed4b200fa58 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info)
 			       GFP_KERNEL);
 	if (!info->rinfo) {
 		xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+		info->nr_rings = 0;
 		return -ENOMEM;
 	}
 
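[Illustrative note, not part of the patch: the added line keeps info->nr_rings in sync with what was actually allocated, so later teardown that loops over nr_rings cannot walk a NULL rinfo array. A rough sketch of such a cleanup path — simplified, not taken verbatim from the driver:]

/* Simplified sketch of a teardown path that relies on nr_rings matching
 * the rinfo allocation; with nr_rings reset to 0 on allocation failure,
 * the loop below runs zero times instead of dereferencing NULL. */
static void blkfront_free_rings_sketch(struct blkfront_info *info)
{
	unsigned int i;

	for (i = 0; i < info->nr_rings; i++)
		blkif_free_ring(&info->rinfo[i]);	/* per-ring cleanup */

	kfree(info->rinfo);
	info->rinfo = NULL;
	info->nr_rings = 0;
}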
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f15f89df1f36..7ea6fb6a2e5d 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -914,7 +914,7 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
 
 	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
 	if (ret != args->nr_pages) {
-		pr_debug("Failed to decrease reservation for DMA buffer\n");
+		pr_debug("Failed to increase reservation for DMA buffer\n");
 		ret = -EFAULT;
 	} else {
 		ret = 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index df1ed37c3269..de01a6d0059d 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -21,15 +21,9 @@
 
 MODULE_LICENSE("GPL");
 
-static unsigned int limit = 64;
-module_param(limit, uint, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
-			"the privcmd-buf device per open file");
-
 struct privcmd_buf_private {
 	struct mutex lock;
 	struct list_head list;
-	unsigned int allocated;
 };
 
 struct privcmd_buf_vma_private {
@@ -60,13 +54,10 @@ static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
 {
 	unsigned int i;
 
-	vma_priv->file_priv->allocated -= vma_priv->n_pages;
-
 	list_del(&vma_priv->list);
 
 	for (i = 0; i < vma_priv->n_pages; i++)
-		if (vma_priv->pages[i])
-			__free_page(vma_priv->pages[i]);
+		__free_page(vma_priv->pages[i]);
 
 	kfree(vma_priv);
 }
@@ -146,8 +137,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
 	unsigned int i;
 	int ret = 0;
 
-	if (!(vma->vm_flags & VM_SHARED) || count > limit ||
-	    file_priv->allocated + count > limit)
+	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
 	vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
@@ -155,19 +145,15 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!vma_priv)
 		return -ENOMEM;
 
-	vma_priv->n_pages = count;
-	count = 0;
-	for (i = 0; i < vma_priv->n_pages; i++) {
+	for (i = 0; i < count; i++) {
 		vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
 		if (!vma_priv->pages[i])
 			break;
-		count++;
+		vma_priv->n_pages++;
 	}
 
 	mutex_lock(&file_priv->lock);
 
-	file_priv->allocated += count;
-
 	vma_priv->file_priv = file_priv;
 	vma_priv->users = 1;
 
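[Illustrative note, not part of the patch: with the per-file limit removed, the mmap path now just counts successful page allocations in n_pages, so the free path can release exactly that many pages without NULL checks. A self-contained userspace analogue of the same partial-allocation pattern:]

/* Userspace analogue (illustrative only): count successes during a partial
 * allocation so that cleanup frees exactly n_pages, none of them NULL. */
#include <stdlib.h>

struct buf_sketch {
	unsigned int n_pages;
	void *pages[];
};

static struct buf_sketch *buf_alloc_sketch(unsigned int count)
{
	struct buf_sketch *buf = calloc(1, sizeof(*buf) + count * sizeof(void *));
	unsigned int i;

	if (!buf)
		return NULL;

	for (i = 0; i < count; i++) {
		buf->pages[i] = calloc(1, 4096);	/* stands in for alloc_page() */
		if (!buf->pages[i])
			break;				/* stop on failure ... */
		buf->n_pages++;				/* ... counting only successes */
	}
	return buf;
}

static void buf_free_sketch(struct buf_sketch *buf)
{
	unsigned int i;

	for (i = 0; i < buf->n_pages; i++)		/* no NULL check needed */
		free(buf->pages[i]);
	free(buf);
}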
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 18803ff76e27..4969817124a8 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -42,16 +42,12 @@ int xen_setup_shutdown_event(void);
 
 extern unsigned long *xen_contiguous_bitmap;
 
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 				unsigned int address_bits,
 				dma_addr_t *dma_handle);
 
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-
-int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
-		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
-		  unsigned int domid, bool no_translate, struct page **pages);
 #else
 static inline int xen_create_contiguous_region(phys_addr_t pstart,
 					       unsigned int order,
@@ -63,7 +59,13 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
 
 static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
 						 unsigned int order) { }
+#endif
 
+#if defined(CONFIG_XEN_PV)
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+		  unsigned int domid, bool no_translate, struct page **pages);
+#else
 static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
 				xen_pfn_t *pfn, int nr, int *err_ptr,
 				pgprot_t prot, unsigned int domid,