Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/amd_iommu.c      | 48
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c |  8
-rw-r--r--  arch/x86/kernel/devicetree.c     | 11
-rw-r--r--  arch/x86/kernel/process_32.c     |  1
-rw-r--r--  arch/x86/kernel/process_64.c     |  1
-rw-r--r--  arch/x86/kernel/smpboot.c        | 13
-rw-r--r--  arch/x86/kvm/emulate.c           | 82
-rw-r--r--  arch/x86/xen/multicalls.c        | 12
8 files changed, 126 insertions(+), 50 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index cd8cbeb5fa34..7c3a95e54ec5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -30,6 +30,7 @@
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/dma.h>
 #include <asm/amd_iommu_proto.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
@@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
 	pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
 	if (pdev)
 		dev_data->alias = &pdev->dev;
+	else {
+		kfree(dev_data);
+		return -ENOTSUPP;
+	}
 
 	atomic_set(&dev_data->bind, 0);
 
@@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
 	return 0;
 }
 
+static void iommu_ignore_device(struct device *dev)
+{
+	u16 devid, alias;
+
+	devid = get_device_id(dev);
+	alias = amd_iommu_alias_table[devid];
+
+	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+
+	amd_iommu_rlookup_table[devid] = NULL;
+	amd_iommu_rlookup_table[alias] = NULL;
+}
+
 static void iommu_uninit_device(struct device *dev)
 {
 	kfree(dev->archdata.iommu);
@@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
 			continue;
 
 		ret = iommu_init_device(&pdev->dev);
-		if (ret)
+		if (ret == -ENOTSUPP)
+			iommu_ignore_device(&pdev->dev);
+		else if (ret)
 			goto out_free;
 	}
 
@@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
 	.dma_supported = amd_iommu_dma_supported,
 };
 
+static unsigned device_dma_ops_init(void)
+{
+	struct pci_dev *pdev = NULL;
+	unsigned unhandled = 0;
+
+	for_each_pci_dev(pdev) {
+		if (!check_device(&pdev->dev)) {
+			unhandled += 1;
+			continue;
+		}
+
+		pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+	}
+
+	return unhandled;
+}
+
 /*
  * The function which clues the AMD IOMMU driver into dma_ops.
  */
@@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
 int __init amd_iommu_init_dma_ops(void)
 {
 	struct amd_iommu *iommu;
-	int ret;
+	int ret, unhandled;
 
 	/*
 	 * first allocate a default protection domain for every IOMMU we
@@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
 	swiotlb = 0;
 
 	/* Make the driver finally visible to the drivers */
-	dma_ops = &amd_iommu_dma_ops;
+	unhandled = device_dma_ops_init();
+	if (unhandled && max_pfn > MAX_DMA32_PFN) {
+		/* There are unhandled devices - initialize swiotlb for them */
+		swiotlb = 1;
+	}
 
 	amd_iommu_stats_init();
 
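The amd_iommu.c change stops installing amd_iommu_dma_ops as the global dma_ops and instead attaches it per device, counting the devices the IOMMU cannot handle; if any remain and memory extends above the 32-bit DMA limit (max_pfn > MAX_DMA32_PFN), swiotlb is enabled as their fallback. A minimal user-space sketch of that dispatch pattern follows; every name in it is illustrative, not a kernel API:

	/* Sketch only: per-device ops with a global fallback. */
	#include <stdio.h>
	#include <stddef.h>

	struct dma_ops { const char *name; };

	static struct dma_ops iommu_ops = { "iommu" };
	static struct dma_ops swiotlb_ops = { "swiotlb" };

	struct device { struct dma_ops *ops; int behind_iommu; };

	/* Analogue of device_dma_ops_init(): tag handled devices, count the rest. */
	static unsigned assign_ops(struct device *devs, size_t n)
	{
		unsigned unhandled = 0;
		for (size_t i = 0; i < n; i++) {
			if (!devs[i].behind_iommu) {
				unhandled++;
				continue;	/* left for the fallback path */
			}
			devs[i].ops = &iommu_ops;
		}
		return unhandled;
	}

	int main(void)
	{
		struct device devs[3] = { { 0, 1 }, { 0, 0 }, { 0, 1 } };
		unsigned unhandled = assign_ops(devs, 3);

		/* As in the patch: only set up the fallback if someone needs it. */
		if (unhandled)
			for (size_t i = 0; i < 3; i++)
				if (!devs[i].ops)
					devs[i].ops = &swiotlb_ops;

		for (size_t i = 0; i < 3; i++)
			printf("dev%zu -> %s\n", i, devs[i].ops->name);
		return 0;
	}

The design point is that the fallback (swiotlb, which reserves bounce buffers) is only paid for when at least one device actually missed the IOMMU.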
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9179c21120a8..bfc8453bd98d 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 {
 	u8 *p = (u8 *)h;
 	u8 *end = p, flags = 0;
-	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
-	u32 ext_flags = 0;
+	u16 devid = 0, devid_start = 0, devid_to = 0;
+	u32 dev_i, ext_flags = 0;
 	bool alias = false;
 	struct ivhd_entry *e;
 
@@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 /* Initializes the device->iommu mapping for the driver */
 static int __init init_iommu_devices(struct amd_iommu *iommu)
 {
-	u16 i;
+	u32 i;
 
 	for (i = iommu->first_device; i <= iommu->last_device; ++i)
 		set_iommu_for_device(iommu, i);
@@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
  */
 static void init_device_table(void)
 {
-	u16 devid;
+	u32 devid;
 
 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
 		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
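Widening the loop counters from u16 to u32 matters because amd_iommu_last_bdf can legitimately be 0xffff, the largest 16-bit bus/device/function value. A u16 counter then satisfies devid <= amd_iommu_last_bdf forever: the increment past 0xffff wraps back to 0 instead of terminating the loop. A minimal demonstration, assuming the worst-case last_bdf:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t last_bdf = 0xffff;
		uint32_t devid;	/* a uint16_t here would wrap 0xffff -> 0 and spin forever */

		for (devid = 0; devid <= last_bdf; ++devid)
			;	/* visit each device table entry */

		printf("visited %u entries\n", devid);	/* prints 65536 */
		return 0;
	}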
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 690bc8461835..9aeb78a23de4 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/of_pci.h>
+#include <linux/initrd.h>
 
 #include <asm/hpet.h>
 #include <asm/irq_controller.h>
@@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 	return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+					    unsigned long end)
+{
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
+}
+#endif
+
 void __init add_dtb(u64 data)
 {
 	initial_dtb = data + offsetof(struct setup_data, data);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8d128783af47..a3d0dc59067b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
 	set_user_gs(regs, 0);
 	regs->fs = 0;
-	set_fs(USER_DS);
 	regs->ds = __USER_DS;
 	regs->es = __USER_DS;
 	regs->ss = __USER_DS;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6c9dd922ac0d..ca6f7ab8df33 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
 	regs->cs = _cs;
 	regs->ss = _ss;
 	regs->flags = X86_EFLAGS_IF;
-	set_fs(USER_DS);
 	/*
	 * Free the old FP and other extended state
	 */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33a0c11797de..9fd3137230d4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	x86_platform.nmi_init();
 
+	/*
+	 * Wait until the cpu which brought this one up marked it
+	 * online before enabling interrupts. If we don't do that then
+	 * we can end up waking up the softirq thread before this cpu
+	 * reached the active state, which makes the scheduler unhappy
+	 * and schedule the softirq thread on the wrong cpu. This is
+	 * only observable with forced threaded interrupts, but in
+	 * theory it could also happen w/o them. It's just way harder
+	 * to achieve.
+	 */
+	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+		cpu_relax();
+
 	/* enable local interrupts */
 	local_irq_enable();
 
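The new wait loop in start_secondary() orders "the bringup CPU marked us active" before local_irq_enable(), so nothing woken from an interrupt can be scheduled onto a CPU the scheduler does not yet consider active. A user-space analogue of the same publish-then-spin handshake, as a sketch only (pthreads and a C11 atomic stand in for the CPU masks):

	#include <pthread.h>
	#include <sched.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int cpu_active;	/* analogue of the cpu_active_mask bit */

	static void *secondary(void *arg)
	{
		(void)arg;
		/* analogue of: while (!cpumask_test_cpu(...)) cpu_relax(); */
		while (!atomic_load(&cpu_active))
			sched_yield();
		puts("secondary: marked active, safe to enable interrupts");
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		pthread_create(&t, NULL, secondary, NULL);
		/* bringup side: publish "active", then let the secondary run on */
		atomic_store(&cpu_active, 1);
		pthread_join(t, NULL);
		return 0;
	}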
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d6e2477feb18..6df88c7885c0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -47,38 +47,40 @@
 #define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
 #define DstMem64    (6<<1)	/* 64bit memory operand */
 #define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
-#define DstMask     (7<<1)
+#define DstDX       (8<<1)	/* Destination is in DX register */
+#define DstMask     (0xf<<1)
 /* Source operand type. */
-#define SrcNone     (0<<4)	/* No source operand. */
-#define SrcReg      (1<<4)	/* Register operand. */
-#define SrcMem      (2<<4)	/* Memory operand. */
-#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
-#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
-#define SrcImm      (5<<4)	/* Immediate operand. */
-#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
-#define SrcOne      (7<<4)	/* Implied '1' */
-#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
-#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
-#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
-#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
-#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
-#define SrcAcc      (0xd<<4)	/* Source Accumulator */
-#define SrcImmU16   (0xe<<4)	/* Immediate operand, unsigned, 16 bits */
-#define SrcMask     (0xf<<4)
+#define SrcNone     (0<<5)	/* No source operand. */
+#define SrcReg      (1<<5)	/* Register operand. */
+#define SrcMem      (2<<5)	/* Memory operand. */
+#define SrcMem16    (3<<5)	/* Memory operand (16-bit). */
+#define SrcMem32    (4<<5)	/* Memory operand (32-bit). */
+#define SrcImm      (5<<5)	/* Immediate operand. */
+#define SrcImmByte  (6<<5)	/* 8-bit sign-extended immediate operand. */
+#define SrcOne      (7<<5)	/* Implied '1' */
+#define SrcImmUByte (8<<5)	/* 8-bit unsigned immediate operand. */
+#define SrcImmU     (9<<5)	/* Immediate operand, unsigned */
+#define SrcSI       (0xa<<5)	/* Source is in the DS:RSI */
+#define SrcImmFAddr (0xb<<5)	/* Source is immediate far address */
+#define SrcMemFAddr (0xc<<5)	/* Source is far address in memory */
+#define SrcAcc      (0xd<<5)	/* Source Accumulator */
+#define SrcImmU16   (0xe<<5)	/* Immediate operand, unsigned, 16 bits */
+#define SrcDX       (0xf<<5)	/* Source is in DX register */
+#define SrcMask     (0xf<<5)
 /* Generic ModRM decode. */
-#define ModRM       (1<<8)
+#define ModRM       (1<<9)
 /* Destination is only written; never read. */
-#define Mov         (1<<9)
-#define BitOp       (1<<10)
-#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
-#define String      (1<<12)	/* String instruction (rep capable) */
-#define Stack       (1<<13)	/* Stack instruction (push/pop) */
-#define GroupMask   (7<<14)	/* Opcode uses one of the group mechanisms */
-#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
-#define GroupDual   (2<<14)	/* Alternate decoding of mod == 3 */
-#define Prefix      (3<<14)	/* Instruction varies with 66/f2/f3 prefix */
-#define RMExt       (4<<14)	/* Opcode extension in ModRM r/m if mod == 3 */
-#define Sse         (1<<17)	/* SSE Vector instruction */
+#define Mov         (1<<10)
+#define BitOp       (1<<11)
+#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
+#define String      (1<<13)	/* String instruction (rep capable) */
+#define Stack       (1<<14)	/* Stack instruction (push/pop) */
+#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
+#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
+#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
+#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
+#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
+#define Sse         (1<<18)	/* SSE Vector instruction */
 /* Misc flags */
 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
 #define VendorSpecific (1<<22) /* Vendor specific instruction */
@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
 	I(SrcImmByte | Mov | Stack, em_push),
 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
-	D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
-	D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
+	D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
+	D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
 	/* 0x70 - 0x7F */
 	X16(D(SrcImmByte)),
 	/* 0x80 - 0x87 */
@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
 	/* 0xE8 - 0xEF */
 	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
 	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
-	D2bvIP(SrcNone | DstAcc, in, check_perm_in),
-	D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
+	D2bvIP(SrcDX | DstAcc, in, check_perm_in),
+	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
 	/* 0xF0 - 0xF7 */
 	N, DI(ImplicitOps, icebp), N, N,
 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3613,6 +3615,12 @@ done_prefixes:
 		memop.bytes = c->op_bytes + 2;
 		goto srcmem_common;
 		break;
+	case SrcDX:
+		c->src.type = OP_REG;
+		c->src.bytes = 2;
+		c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
+		fetch_register_operand(&c->src);
+		break;
 	}
 
 	if (rc != X86EMUL_CONTINUE)
@@ -3682,6 +3690,12 @@ done_prefixes:
 		c->dst.addr.mem.seg = VCPU_SREG_ES;
 		c->dst.val = 0;
 		break;
+	case DstDX:
+		c->dst.type = OP_REG;
+		c->dst.bytes = 2;
+		c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
+		fetch_register_operand(&c->dst);
+		break;
 	case ImplicitOps:
 		/* Special instructions do their own operand decoding. */
 	default:
@@ -4027,7 +4041,6 @@ special_insn:
 		break;
 	case 0xec: /* in al,dx */
 	case 0xed: /* in (e/r)ax,dx */
-		c->src.val = c->regs[VCPU_REGS_RDX];
 	do_io_in:
 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
 				     &c->dst.val))
@@ -4035,7 +4048,6 @@ special_insn:
 		break;
 	case 0xee: /* out dx,al */
 	case 0xef: /* out dx,(e/r)ax */
-		c->dst.val = c->regs[VCPU_REGS_RDX];
 	do_io_out:
 		ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
 				      &c->src.val, 1);
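Most of the emulate.c churn is a bitfield repack: the destination field previously held values 0-7 in bits 1-3, so adding DstDX (value 8) required widening DstMask to four bits, which shifts every Src* encoding from <<4 to <<5 and moves ModRM through Sse up one bit; SrcDX then takes the last free source encoding. With DX decoded as a regular operand, the open-coded RDX loads in the in/out cases become redundant and are removed. A small sketch of how decode flags are tested against the new masks; the flag values are copied from the patch, but the combination built in main() is hypothetical:

	#include <stdio.h>

	#define DstDX   (8 << 1)
	#define DstMask (0xf << 1)	/* was (7<<1): four bits now needed to hold 8 */
	#define SrcDX   (0xf << 5)	/* Src* moved from <<4 to <<5 to make room */
	#define SrcMask (0xf << 5)

	int main(void)
	{
		unsigned flags = DstDX | SrcDX;	/* hypothetical flag word for the demo */

		if ((flags & DstMask) == DstDX)
			puts("destination operand: DX");
		if ((flags & SrcMask) == SrcDX)
			puts("source operand: DX");
		return 0;
	}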
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 8bff7e7c290b..1b2b73ff0a6e 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -189,10 +189,10 @@ struct multicall_space __xen_mc_entry(size_t args)
 	unsigned argidx = roundup(b->argidx, sizeof(u64));
 
 	BUG_ON(preemptible());
-	BUG_ON(b->argidx > MC_ARGS);
+	BUG_ON(b->argidx >= MC_ARGS);
 
 	if (b->mcidx == MC_BATCH ||
-	    (argidx + args) > MC_ARGS) {
+	    (argidx + args) >= MC_ARGS) {
 		mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
 		xen_mc_flush();
 		argidx = roundup(b->argidx, sizeof(u64));
@@ -206,7 +206,7 @@ struct multicall_space __xen_mc_entry(size_t args)
 	ret.args = &b->args[argidx];
 	b->argidx = argidx + args;
 
-	BUG_ON(b->argidx > MC_ARGS);
+	BUG_ON(b->argidx >= MC_ARGS);
 	return ret;
 }
 
@@ -216,7 +216,7 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
 	struct multicall_space ret = { NULL, NULL };
 
 	BUG_ON(preemptible());
-	BUG_ON(b->argidx > MC_ARGS);
+	BUG_ON(b->argidx >= MC_ARGS);
 
 	if (b->mcidx == 0)
 		return ret;
@@ -224,14 +224,14 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
 	if (b->entries[b->mcidx - 1].op != op)
 		return ret;
 
-	if ((b->argidx + size) > MC_ARGS)
+	if ((b->argidx + size) >= MC_ARGS)
 		return ret;
 
 	ret.mc = &b->entries[b->mcidx - 1];
 	ret.args = &b->args[b->argidx];
 	b->argidx += size;
 
-	BUG_ON(b->argidx > MC_ARGS);
+	BUG_ON(b->argidx >= MC_ARGS);
 	return ret;
 }
 
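The multicalls.c change tightens every boundary test from > to >=, keeping b->argidx strictly below MC_ARGS: a reservation that would leave the argument buffer exactly full now flushes instead, so &b->args[b->argidx] always names a real slot and the BUG_ON assertions catch the boundary state rather than admitting it. A small sketch of the invariant; the capacity and reservation sizes are made up for the demo:

	#include <assert.h>
	#include <stdio.h>
	#include <string.h>

	#define MC_ARGS 16	/* hypothetical capacity for the demo */

	static unsigned char args[MC_ARGS];
	static unsigned argidx;

	static void flush(void)
	{
		printf("flush at argidx=%u\n", argidx);
		argidx = 0;
	}

	static void *reserve(unsigned size)
	{
		if (argidx + size >= MC_ARGS)	/* with '>', an exact fill would slip through */
			flush();
		void *p = &args[argidx];
		argidx += size;
		assert(argidx < MC_ARGS);	/* the tightened BUG_ON invariant */
		return p;
	}

	int main(void)
	{
		/* With '>' instead of '>=', the fourth call would leave
		 * argidx == MC_ARGS and trip the assert. */
		for (int i = 0; i < 5; i++)
			memset(reserve(4), 0, 4);
		return 0;
	}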