-rw-r--r--  MAINTAINERS                            |   6
-rw-r--r--  arch/x86/include/asm/lguest.h          |   7
-rw-r--r--  arch/x86/lguest/boot.c                 |   7
-rw-r--r--  arch/x86/lguest/head_32.S              |  30
-rw-r--r--  drivers/lguest/hypercalls.c            |   5
-rw-r--r--  drivers/lguest/interrupts_and_traps.c  | 105
-rw-r--r--  drivers/lguest/lg.h                    |   2
-rw-r--r--  drivers/lguest/lguest_user.c           |   8
-rw-r--r--  drivers/s390/kvm/virtio_ccw.c          |  10
-rw-r--r--  drivers/virtio/Kconfig                 |  10
-rw-r--r--  drivers/virtio/Makefile                |   1
-rw-r--r--  drivers/virtio/virtio.c                |   6
-rw-r--r--  drivers/virtio/virtio_balloon.c        |  21
-rw-r--r--  drivers/virtio/virtio_input.c          | 384
-rw-r--r--  drivers/virtio/virtio_mmio.c           |   8
-rw-r--r--  drivers/virtio/virtio_pci_modern.c     | 123
-rw-r--r--  include/linux/lguest.h                 |   4
-rw-r--r--  include/linux/virtio.h                 |   2
-rw-r--r--  include/linux/virtio_config.h          |  16
-rw-r--r--  include/linux/virtio_ring.h            |  23
-rw-r--r--  include/uapi/linux/Kbuild              |   1
-rw-r--r--  include/uapi/linux/virtio_balloon.h    |  32
-rw-r--r--  include/uapi/linux/virtio_ids.h        |   1
-rw-r--r--  include/uapi/linux/virtio_input.h      |  76
24 files changed, 721 insertions(+), 167 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index f6f595021d6b..df536b1207ee 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10517,6 +10517,12 @@ S:	Maintained
 F:	drivers/vhost/
 F:	include/uapi/linux/vhost.h
 
+VIRTIO INPUT DRIVER
+M:	Gerd Hoffmann <kraxel@redhat.com>
+S:	Maintained
+F:	drivers/virtio/virtio_input.c
+F:	include/uapi/linux/virtio_input.h
+
 VIA RHINE NETWORK DRIVER
 M:	Roger Luethi <rl@hellgate.ch>
 S:	Maintained
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index e2d4a4afa8c3..3bbc07a57a31 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -20,13 +20,10 @@ extern unsigned long switcher_addr;
 /* Found in switcher.S */
 extern unsigned long default_idt_entries[];
 
-/* Declarations for definitions in lguest_guest.S */
-extern char lguest_noirq_start[], lguest_noirq_end[];
+/* Declarations for definitions in arch/x86/lguest/head_32.S */
+extern char lguest_noirq_iret[];
 extern const char lgstart_cli[], lgend_cli[];
-extern const char lgstart_sti[], lgend_sti[];
-extern const char lgstart_popf[], lgend_popf[];
 extern const char lgstart_pushf[], lgend_pushf[];
-extern const char lgstart_iret[], lgend_iret[];
 
 extern void lguest_iret(void);
 extern void lguest_init(void);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 717908b16037..8f9a133cc099 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -87,8 +87,7 @@
 
 struct lguest_data lguest_data = {
 	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
-	.noirq_start = (u32)lguest_noirq_start,
-	.noirq_end = (u32)lguest_noirq_end,
+	.noirq_iret = (u32)lguest_noirq_iret,
 	.kernel_address = PAGE_OFFSET,
 	.blocked_interrupts = { 1 }, /* Block timer interrupts */
 	.syscall_vec = SYSCALL_VECTOR,
@@ -262,7 +261,7 @@ PV_CALLEE_SAVE_REGS_THUNK(lguest_save_fl);
 PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable);
 /*:*/
 
-/* These are in i386_head.S */
+/* These are in head_32.S */
 extern void lg_irq_enable(void);
 extern void lg_restore_fl(unsigned long flags);
 
@@ -1368,7 +1367,7 @@ static void lguest_restart(char *reason)
  * fit comfortably.
  *
  * First we need assembly templates of each of the patchable Guest operations,
- * and these are in i386_head.S.
+ * and these are in head_32.S.
  */
 
 /*G:060 We construct a table from the assembler templates: */
diff --git a/arch/x86/lguest/head_32.S b/arch/x86/lguest/head_32.S
index 6ddfe4fc23c3..d5ae63f5ec5d 100644
--- a/arch/x86/lguest/head_32.S
+++ b/arch/x86/lguest/head_32.S
@@ -84,7 +84,7 @@ ENTRY(lg_irq_enable)
 	 * set lguest_data.irq_pending to X86_EFLAGS_IF.  If it's not zero, we
 	 * jump to send_interrupts, otherwise we're done.
 	 */
-	testl $0, lguest_data+LGUEST_DATA_irq_pending
+	cmpl $0, lguest_data+LGUEST_DATA_irq_pending
 	jnz send_interrupts
 	/*
 	 * One cool thing about x86 is that you can do many things without using
@@ -133,9 +133,8 @@ ENTRY(lg_restore_fl)
 	ret
 /*:*/
 
-/* These demark the EIP range where host should never deliver interrupts. */
-.global lguest_noirq_start
-.global lguest_noirq_end
+/* These demark the EIP where host should never deliver interrupts. */
+.global lguest_noirq_iret
 
 /*M:004
  * When the Host reflects a trap or injects an interrupt into the Guest, it
@@ -168,29 +167,26 @@ ENTRY(lg_restore_fl)
  * So we have to copy eflags from the stack to lguest_data.irq_enabled before
  * we do the "iret".
  *
- * There are two problems with this: firstly, we need to use a register to do
- * the copy and secondly, the whole thing needs to be atomic.  The first
- * problem is easy to solve: push %eax on the stack so we can use it, and then
- * restore it at the end just before the real "iret".
+ * There are two problems with this: firstly, we can't clobber any registers
+ * and secondly, the whole thing needs to be atomic.  The first problem
+ * is solved by using "push memory"/"pop memory" instruction pair for copying.
  *
 * The second is harder: copying eflags to lguest_data.irq_enabled will turn
 * interrupts on before we're finished, so we could be interrupted before we
- * return to userspace or wherever.  Our solution to this is to surround the
- * code with lguest_noirq_start: and lguest_noirq_end: labels.  We tell the
+ * return to userspace or wherever.  Our solution to this is to tell the
 * Host that it is *never* to interrupt us there, even if interrupts seem to be
- * enabled.
+ * enabled. (It's not necessary to protect pop instruction, since
+ * data gets updated only after it completes, so we only need to protect
+ * one instruction, iret).
  */
 ENTRY(lguest_iret)
-	pushl %eax
-	movl 12(%esp), %eax
-lguest_noirq_start:
+	pushl 2*4(%esp)
 	/*
 	 * Note the %ss: segment prefix here.  Normal data accesses use the
 	 * "ds" segment, but that will have already been restored for whatever
 	 * we're returning to (such as userspace): we can't trust it.  The %ss:
 	 * prefix makes sure we use the stack segment, which is still valid.
 	 */
-	movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
-	popl %eax
+	popl %ss:lguest_data+LGUEST_DATA_irq_enabled
+lguest_noirq_iret:
 	iret
-lguest_noirq_end:
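Note: to make the atomicity argument above concrete, here is a rough C rendering of what the new lguest_iret sequence does. It is an illustration only; iret_frame and hardware_iret are hypothetical names, and the real implementation is the assembly above.

	struct iret_frame {		/* what the CPU pushed for the trap */
		unsigned long eip;	/* at (%esp) on entry to lguest_iret */
		unsigned long cs;	/* at 4(%esp) */
		unsigned long eflags;	/* at 8(%esp); duplicated by "pushl 2*4(%esp)" */
	};

	static void lguest_iret_in_c(struct iret_frame *frame)
	{
		/*
		 * This copy may re-enable interrupts, but it is one
		 * memory-to-memory move (the push/pop pair), so no register
		 * is clobbered and nothing before the final return needs
		 * protecting from the Host.
		 */
		lguest_data.irq_enabled = frame->eflags;
		hardware_iret(frame);	/* hypothetical stand-in for the "iret" insn */
	}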
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 1219af493c0f..19a32280731d 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -211,10 +211,9 @@ static void initialize(struct lg_cpu *cpu)
 
 	/*
 	 * The Guest tells us where we're not to deliver interrupts by putting
-	 * the range of addresses into "struct lguest_data".
+	 * the instruction address into "struct lguest_data".
 	 */
-	if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
-	    || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
+	if (get_user(cpu->lg->noirq_iret, &cpu->lg->lguest_data->noirq_iret))
 		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
 
 	/*
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 70dfcdc29f1f..5e7559be222a 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -56,21 +56,16 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
 }
 
 /*H:210
- * The set_guest_interrupt() routine actually delivers the interrupt or
- * trap.  The mechanics of delivering traps and interrupts to the Guest are the
- * same, except some traps have an "error code" which gets pushed onto the
- * stack as well: the caller tells us if this is one.
- *
- * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
- * interrupt or trap.  It's split into two parts for traditional reasons: gcc
- * on i386 used to be frightened by 64 bit numbers.
+ * The push_guest_interrupt_stack() routine saves Guest state on the stack for
+ * an interrupt or trap.  The mechanics of delivering traps and interrupts to
+ * the Guest are the same, except some traps have an "error code" which gets
+ * pushed onto the stack as well: the caller tells us if this is one.
  *
 * We set up the stack just like the CPU does for a real interrupt, so it's
 * identical for the Guest (and the standard "iret" instruction will undo
 * it).
  */
-static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
-				bool has_err)
+static void push_guest_interrupt_stack(struct lg_cpu *cpu, bool has_err)
 {
 	unsigned long gstack, origstack;
 	u32 eflags, ss, irq_enable;
@@ -130,12 +125,28 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
 	if (has_err)
 		push_guest_stack(cpu, &gstack, cpu->regs->errcode);
 
-	/*
-	 * Now we've pushed all the old state, we change the stack, the code
-	 * segment and the address to execute.
-	 */
+	/* Adjust the stack pointer and stack segment. */
 	cpu->regs->ss = ss;
 	cpu->regs->esp = virtstack + (gstack - origstack);
+}
+
+/*
+ * This actually makes the Guest start executing the given interrupt/trap
+ * handler.
+ *
+ * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
+ * interrupt or trap.  It's split into two parts for traditional reasons: gcc
+ * on i386 used to be frightened by 64 bit numbers.
+ */
+static void guest_run_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi)
+{
+	/* If we're already in the kernel, we don't change stacks. */
+	if ((cpu->regs->ss&0x3) != GUEST_PL)
+		cpu->regs->ss = cpu->esp1;
+
+	/*
+	 * Set the code segment and the address to execute.
+	 */
 	cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
 	cpu->regs->eip = idt_address(lo, hi);
 
@@ -158,6 +169,24 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
 		kill_guest(cpu, "Disabling interrupts");
 }
 
+/* This restores the eflags word which was pushed on the stack by a trap */
+static void restore_eflags(struct lg_cpu *cpu)
+{
+	/* This is the physical address of the stack. */
+	unsigned long stack_pa = guest_pa(cpu, cpu->regs->esp);
+
+	/*
+	 * Stack looks like this:
+	 * Address	Contents
+	 * esp		EIP
+	 * esp + 4	CS
+	 * esp + 8	EFLAGS
+	 */
+	cpu->regs->eflags = lgread(cpu, stack_pa + 8, u32);
+	cpu->regs->eflags &=
+		~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
+}
+
 /*H:205
  * Virtual Interrupts.
  *
@@ -200,14 +229,6 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
 
 	BUG_ON(irq >= LGUEST_IRQS);
 
-	/*
-	 * They may be in the middle of an iret, where they asked us never to
-	 * deliver interrupts.
-	 */
-	if (cpu->regs->eip >= cpu->lg->noirq_start &&
-	    (cpu->regs->eip < cpu->lg->noirq_end))
-		return;
-
 	/* If they're halted, interrupts restart them. */
 	if (cpu->halted) {
 		/* Re-enable interrupts. */
@@ -237,12 +258,34 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
 	if (idt_present(idt->a, idt->b)) {
 		/* OK, mark it no longer pending and deliver it. */
 		clear_bit(irq, cpu->irqs_pending);
+
 		/*
-		 * set_guest_interrupt() takes the interrupt descriptor and a
-		 * flag to say whether this interrupt pushes an error code onto
-		 * the stack as well: virtual interrupts never do.
+		 * They may be about to iret, where they asked us never to
+		 * deliver interrupts.  In this case, we can emulate that iret
+		 * then immediately deliver the interrupt.  This is basically
+		 * a noop: the iret would pop the interrupt frame and restore
+		 * eflags, and then we'd set it up again.  So just restore the
+		 * eflags word and jump straight to the handler in this case.
+		 *
+		 * Denys Vlasenko points out that this isn't quite right: if
+		 * the iret was returning to userspace, then that interrupt
+		 * would reset the stack pointer (which the Guest told us
+		 * about via LHCALL_SET_STACK).  But unless the Guest is being
+		 * *really* weird, that will be the same as the current stack
+		 * anyway.
 		 */
-		set_guest_interrupt(cpu, idt->a, idt->b, false);
+		if (cpu->regs->eip == cpu->lg->noirq_iret) {
+			restore_eflags(cpu);
+		} else {
+			/*
+			 * set_guest_interrupt() takes a flag to say whether
+			 * this interrupt pushes an error code onto the stack
+			 * as well: virtual interrupts never do.
+			 */
+			push_guest_interrupt_stack(cpu, false);
+		}
+		/* Actually make Guest cpu jump to handler. */
+		guest_run_interrupt(cpu, idt->a, idt->b);
 	}
 
 	/*
@@ -353,8 +396,9 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
 	 */
 	if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
 		return false;
-	set_guest_interrupt(cpu, cpu->arch.idt[num].a,
-			    cpu->arch.idt[num].b, has_err(num));
+	push_guest_interrupt_stack(cpu, has_err(num));
+	guest_run_interrupt(cpu, cpu->arch.idt[num].a,
+			    cpu->arch.idt[num].b);
 	return true;
 }
 
@@ -395,8 +439,9 @@ static bool direct_trap(unsigned int num)
 * The Guest has the ability to turn its interrupt gates into trap gates,
 * if it is careful.  The Host will let trap gates can go directly to the
 * Guest, but the Guest needs the interrupts atomically disabled for an
- * interrupt gate.  It can do this by pointing the trap gate at instructions
- * within noirq_start and noirq_end, where it can safely disable interrupts.
+ * interrupt gate.  The Host could provide a mechanism to register more
+ * "no-interrupt" regions, and the Guest could point the trap gate at
+ * instructions within that region, where it can safely disable interrupts.
  */
 
 /*M:006
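Note: for readers comparing old and new behaviour, the Host-side change in this file boils down to the following contrast. This is a condensed paraphrase of the hunks above, not extra code in the patch.

	/* Before: anywhere inside the no-IRQ range, delivery was simply deferred. */
	if (cpu->regs->eip >= cpu->lg->noirq_start &&
	    cpu->regs->eip <  cpu->lg->noirq_end)
		return;

	/* After: only the "iret" itself is special, and rather than deferring,
	 * the Host emulates that iret (restore_eflags) and delivers at once. */
	if (cpu->regs->eip == cpu->lg->noirq_iret)
		restore_eflags(cpu);
	else
		push_guest_interrupt_stack(cpu, false);
	guest_run_interrupt(cpu, idt->a, idt->b);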
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 307e8b39e7d1..ac8ad0461e80 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -102,7 +102,7 @@ struct lguest {
 
 	struct pgdir pgdirs[4];
 
-	unsigned long noirq_start, noirq_end;
+	unsigned long noirq_iret;
 
 	unsigned int stack_pages;
 	u32 tsc_khz;
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index c4c6113eb9a6..30c60687d277 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -339,6 +339,13 @@ static ssize_t write(struct file *file, const char __user *in,
 	}
 }
 
+static int open(struct inode *inode, struct file *file)
+{
+	file->private_data = NULL;
+
+	return 0;
+}
+
 /*L:060
 * The final piece of interface code is the close() routine.  It reverses
 * everything done in initialize().  This is usually called because the
@@ -409,6 +416,7 @@
  */
 static const struct file_operations lguest_fops = {
 	.owner	 = THIS_MODULE,
+	.open	 = open,
 	.release = close,
 	.write	 = write,
 	.read	 = read,
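Note: the rest of lguest_user.c (not shown in these hunks) uses file->private_data as "the Guest, once LHREQ_INITIALIZE has run", so the new open() makes the starting state explicit. A minimal sketch of the invariant it establishes, purely for illustration:

	/* Illustration only: the invariant the explicit open() establishes. */
	static int guest_is_initialized(struct file *file)
	{
		/* Before LHREQ_INITIALIZE, private_data must read back as NULL. */
		return file->private_data != NULL;
	}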
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 71d7802aa8b4..6f1fa1773e76 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -1201,13 +1201,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
 	vcdev->vdev.id.vendor = cdev->id.cu_type;
 	vcdev->vdev.id.device = cdev->id.cu_model;
 
-	if (virtio_device_is_legacy_only(vcdev->vdev.id)) {
-		vcdev->revision = 0;
-	} else {
-		ret = virtio_ccw_set_transport_rev(vcdev);
-		if (ret)
-			goto out_free;
-	}
+	ret = virtio_ccw_set_transport_rev(vcdev);
+	if (ret)
+		goto out_free;
 
 	ret = register_virtio_device(&vcdev->vdev);
 	if (ret) {
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index b546da5d8ea3..cab9f3f63a38 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -48,6 +48,16 @@ config VIRTIO_BALLOON
 
 	 If unsure, say M.
 
+config VIRTIO_INPUT
+	tristate "Virtio input driver"
+	depends on VIRTIO
+	depends on INPUT
+	---help---
+	 This driver supports virtio input devices such as
+	 keyboards, mice and tablets.
+
+	 If unsure, say M.
+
 config VIRTIO_MMIO
 	tristate "Platform bus driver for memory mapped virtio devices"
 	depends on HAS_IOMEM
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index d85565b8ea46..41e30e3dc842 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
 virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
 virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
+obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 5ce2aa48fc6e..b1877d73fa56 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -278,12 +278,6 @@ static struct bus_type virtio_bus = {
 	.remove = virtio_dev_remove,
 };
 
-bool virtio_device_is_legacy_only(struct virtio_device_id id)
-{
-	return id.device == VIRTIO_ID_BALLOON;
-}
-EXPORT_SYMBOL_GPL(virtio_device_is_legacy_only);
-
 int register_virtio_driver(struct virtio_driver *driver)
 {
 	/* Catch this early. */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 6a356e344f82..82e80e034f25 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -214,8 +214,8 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
 			       u16 tag, u64 val)
 {
 	BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
-	vb->stats[idx].tag = tag;
-	vb->stats[idx].val = val;
+	vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
+	vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
 }
 
 #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
@@ -283,18 +283,27 @@ static void virtballoon_changed(struct virtio_device *vdev)
 
 static inline s64 towards_target(struct virtio_balloon *vb)
 {
-	__le32 v;
 	s64 target;
+	u32 num_pages;
 
-	virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+	virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
+		     &num_pages);
 
-	target = le32_to_cpu(v);
+	/* Legacy balloon config space is LE, unlike all other devices. */
+	if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+		num_pages = le32_to_cpu((__force __le32)num_pages);
+
+	target = num_pages;
 	return target - vb->num_pages;
 }
 
 static void update_balloon_size(struct virtio_balloon *vb)
 {
-	__le32 actual = cpu_to_le32(vb->num_pages);
+	u32 actual = vb->num_pages;
+
+	/* Legacy balloon config space is LE, unlike all other devices. */
+	if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+		actual = (__force u32)cpu_to_le32(actual);
 
 	virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
 		      &actual);
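Note: a small worked example of the units involved, using the pages_to_bytes() helper visible in the first hunk. The 512 MiB figure is ours, purely for illustration (code fragment, not part of the patch):

	/* Illustration only: a device asking for a 512 MiB balloon target. */
	u32 num_pages = 131072;				/* value read from config space   */
	u64 target_bytes = pages_to_bytes(num_pages);	/* 131072 << PAGE_SHIFT; with 4 KiB
							   pages that is 536870912 = 512 MiB */
	s64 delta = (s64)num_pages - vb->num_pages;	/* pages still to inflate/deflate  */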
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
new file mode 100644
index 000000000000..60e2a1677563
--- /dev/null
+++ b/drivers/virtio/virtio_input.c
@@ -0,0 +1,384 @@
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/input.h>
+
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_input.h>
+
+struct virtio_input {
+	struct virtio_device *vdev;
+	struct input_dev *idev;
+	char name[64];
+	char serial[64];
+	char phys[64];
+	struct virtqueue *evt, *sts;
+	struct virtio_input_event evts[64];
+	spinlock_t lock;
+	bool ready;
+};
+
+static void virtinput_queue_evtbuf(struct virtio_input *vi,
+				   struct virtio_input_event *evtbuf)
+{
+	struct scatterlist sg[1];
+
+	sg_init_one(sg, evtbuf, sizeof(*evtbuf));
+	virtqueue_add_inbuf(vi->evt, sg, 1, evtbuf, GFP_ATOMIC);
+}
+
+static void virtinput_recv_events(struct virtqueue *vq)
+{
+	struct virtio_input *vi = vq->vdev->priv;
+	struct virtio_input_event *event;
+	unsigned long flags;
+	unsigned int len;
+
+	spin_lock_irqsave(&vi->lock, flags);
+	if (vi->ready) {
+		while ((event = virtqueue_get_buf(vi->evt, &len)) != NULL) {
+			spin_unlock_irqrestore(&vi->lock, flags);
+			input_event(vi->idev,
+				    le16_to_cpu(event->type),
+				    le16_to_cpu(event->code),
+				    le32_to_cpu(event->value));
+			spin_lock_irqsave(&vi->lock, flags);
+			virtinput_queue_evtbuf(vi, event);
+		}
+		virtqueue_kick(vq);
+	}
+	spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+/*
+ * On error we are losing the status update, which isn't critical as
+ * this is typically used for stuff like keyboard leds.
+ */
+static int virtinput_send_status(struct virtio_input *vi,
+				 u16 type, u16 code, s32 value)
+{
+	struct virtio_input_event *stsbuf;
+	struct scatterlist sg[1];
+	unsigned long flags;
+	int rc;
+
+	stsbuf = kzalloc(sizeof(*stsbuf), GFP_ATOMIC);
+	if (!stsbuf)
+		return -ENOMEM;
+
+	stsbuf->type  = cpu_to_le16(type);
+	stsbuf->code  = cpu_to_le16(code);
+	stsbuf->value = cpu_to_le32(value);
+	sg_init_one(sg, stsbuf, sizeof(*stsbuf));
+
+	spin_lock_irqsave(&vi->lock, flags);
+	if (vi->ready) {
+		rc = virtqueue_add_outbuf(vi->sts, sg, 1, stsbuf, GFP_ATOMIC);
+		virtqueue_kick(vi->sts);
+	} else {
+		rc = -ENODEV;
+	}
+	spin_unlock_irqrestore(&vi->lock, flags);
+
+	if (rc != 0)
+		kfree(stsbuf);
+	return rc;
+}
+
+static void virtinput_recv_status(struct virtqueue *vq)
+{
+	struct virtio_input *vi = vq->vdev->priv;
+	struct virtio_input_event *stsbuf;
+	unsigned long flags;
+	unsigned int len;
+
+	spin_lock_irqsave(&vi->lock, flags);
+	while ((stsbuf = virtqueue_get_buf(vi->sts, &len)) != NULL)
+		kfree(stsbuf);
+	spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+static int virtinput_status(struct input_dev *idev, unsigned int type,
+			    unsigned int code, int value)
+{
+	struct virtio_input *vi = input_get_drvdata(idev);
+
+	return virtinput_send_status(vi, type, code, value);
+}
+
+static u8 virtinput_cfg_select(struct virtio_input *vi,
+			       u8 select, u8 subsel)
+{
+	u8 size;
+
+	virtio_cwrite(vi->vdev, struct virtio_input_config, select, &select);
+	virtio_cwrite(vi->vdev, struct virtio_input_config, subsel, &subsel);
+	virtio_cread(vi->vdev, struct virtio_input_config, size, &size);
+	return size;
+}
+
+static void virtinput_cfg_bits(struct virtio_input *vi, int select, int subsel,
+			       unsigned long *bits, unsigned int bitcount)
+{
+	unsigned int bit;
+	u8 *virtio_bits;
+	u8 bytes;
+
+	bytes = virtinput_cfg_select(vi, select, subsel);
+	if (!bytes)
+		return;
+	if (bitcount > bytes * 8)
+		bitcount = bytes * 8;
+
+	/*
+	 * Bitmap in virtio config space is a simple stream of bytes,
+	 * with the first byte carrying bits 0-7, second bits 8-15 and
+	 * so on.
+	 */
+	virtio_bits = kzalloc(bytes, GFP_KERNEL);
+	if (!virtio_bits)
+		return;
+	virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+					      u.bitmap),
+			   virtio_bits, bytes);
+	for (bit = 0; bit < bitcount; bit++) {
+		if (virtio_bits[bit / 8] & (1 << (bit % 8)))
+			__set_bit(bit, bits);
+	}
+	kfree(virtio_bits);
+
+	if (select == VIRTIO_INPUT_CFG_EV_BITS)
+		__set_bit(subsel, vi->idev->evbit);
+}
+
+static void virtinput_cfg_abs(struct virtio_input *vi, int abs)
+{
+	u32 mi, ma, re, fu, fl;
+
+	virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ABS_INFO, abs);
+	virtio_cread(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
+	virtio_cread(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
+	virtio_cread(vi->vdev, struct virtio_input_config, u.abs.res, &re);
+	virtio_cread(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
+	virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
+	input_set_abs_params(vi->idev, abs, mi, ma, fu, fl);
+	input_abs_set_res(vi->idev, abs, re);
+}
+
+static int virtinput_init_vqs(struct virtio_input *vi)
+{
+	struct virtqueue *vqs[2];
+	vq_callback_t *cbs[] = { virtinput_recv_events,
+				 virtinput_recv_status };
+	static const char *names[] = { "events", "status" };
+	int err;
+
+	err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names);
+	if (err)
+		return err;
+	vi->evt = vqs[0];
+	vi->sts = vqs[1];
+
+	return 0;
+}
+
+static void virtinput_fill_evt(struct virtio_input *vi)
+{
+	unsigned long flags;
+	int i, size;
+
+	spin_lock_irqsave(&vi->lock, flags);
+	size = virtqueue_get_vring_size(vi->evt);
+	if (size > ARRAY_SIZE(vi->evts))
+		size = ARRAY_SIZE(vi->evts);
+	for (i = 0; i < size; i++)
+		virtinput_queue_evtbuf(vi, &vi->evts[i]);
+	virtqueue_kick(vi->evt);
+	spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+static int virtinput_probe(struct virtio_device *vdev)
+{
+	struct virtio_input *vi;
+	unsigned long flags;
+	size_t size;
+	int abs, err;
+
+	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+		return -ENODEV;
+
+	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
+	if (!vi)
+		return -ENOMEM;
+
+	vdev->priv = vi;
+	vi->vdev = vdev;
+	spin_lock_init(&vi->lock);
+
+	err = virtinput_init_vqs(vi);
+	if (err)
+		goto err_init_vq;
+
+	vi->idev = input_allocate_device();
+	if (!vi->idev) {
+		err = -ENOMEM;
+		goto err_input_alloc;
+	}
+	input_set_drvdata(vi->idev, vi);
+
+	size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_NAME, 0);
+	virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+					      u.string),
+			   vi->name, min(size, sizeof(vi->name)));
+	size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_SERIAL, 0);
+	virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+					      u.string),
+			   vi->serial, min(size, sizeof(vi->serial)));
+	snprintf(vi->phys, sizeof(vi->phys),
+		 "virtio%d/input0", vdev->index);
+	vi->idev->name = vi->name;
+	vi->idev->phys = vi->phys;
+	vi->idev->uniq = vi->serial;
+
+	size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_DEVIDS, 0);
+	if (size >= sizeof(struct virtio_input_devids)) {
+		virtio_cread(vi->vdev, struct virtio_input_config,
+			     u.ids.bustype, &vi->idev->id.bustype);
+		virtio_cread(vi->vdev, struct virtio_input_config,
+			     u.ids.vendor, &vi->idev->id.vendor);
+		virtio_cread(vi->vdev, struct virtio_input_config,
+			     u.ids.product, &vi->idev->id.product);
+		virtio_cread(vi->vdev, struct virtio_input_config,
+			     u.ids.version, &vi->idev->id.version);
+	} else {
+		vi->idev->id.bustype = BUS_VIRTUAL;
+	}
+
+	virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_PROP_BITS, 0,
+			   vi->idev->propbit, INPUT_PROP_CNT);
+	size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_REP);
+	if (size)
+		__set_bit(EV_REP, vi->idev->evbit);
+
+	vi->idev->dev.parent = &vdev->dev;
+	vi->idev->event = virtinput_status;
+
+	/* device -> kernel */
+	virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_KEY,
+			   vi->idev->keybit, KEY_CNT);
+	virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_REL,
+			   vi->idev->relbit, REL_CNT);
+	virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_ABS,
+			   vi->idev->absbit, ABS_CNT);
+	virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_MSC,
+			   vi->idev->mscbit, MSC_CNT);
+	virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_SW,
+			   vi->idev->swbit, SW_CNT);
+
+	/* kernel -> device */
+	virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_LED,
+			   vi->idev->ledbit, LED_CNT);
+	virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_SND,
+			   vi->idev->sndbit, SND_CNT);
+
+	if (test_bit(EV_ABS, vi->idev->evbit)) {
+		for (abs = 0; abs < ABS_CNT; abs++) {
+			if (!test_bit(abs, vi->idev->absbit))
+				continue;
+			virtinput_cfg_abs(vi, abs);
+		}
+	}
+
+	virtio_device_ready(vdev);
+	vi->ready = true;
+	err = input_register_device(vi->idev);
+	if (err)
+		goto err_input_register;
+
+	virtinput_fill_evt(vi);
+	return 0;
+
+err_input_register:
+	spin_lock_irqsave(&vi->lock, flags);
+	vi->ready = false;
+	spin_unlock_irqrestore(&vi->lock, flags);
+	input_free_device(vi->idev);
+err_input_alloc:
+	vdev->config->del_vqs(vdev);
+err_init_vq:
+	kfree(vi);
+	return err;
+}
+
+static void virtinput_remove(struct virtio_device *vdev)
+{
+	struct virtio_input *vi = vdev->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vi->lock, flags);
+	vi->ready = false;
+	spin_unlock_irqrestore(&vi->lock, flags);
+
+	input_unregister_device(vi->idev);
+	vdev->config->del_vqs(vdev);
+	kfree(vi);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtinput_freeze(struct virtio_device *vdev)
+{
+	struct virtio_input *vi = vdev->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vi->lock, flags);
+	vi->ready = false;
+	spin_unlock_irqrestore(&vi->lock, flags);
+
+	vdev->config->del_vqs(vdev);
+	return 0;
+}
+
+static int virtinput_restore(struct virtio_device *vdev)
+{
+	struct virtio_input *vi = vdev->priv;
+	int err;
+
+	err = virtinput_init_vqs(vi);
+	if (err)
+		return err;
+
+	virtio_device_ready(vdev);
+	vi->ready = true;
+	virtinput_fill_evt(vi);
+	return 0;
+}
+#endif
+
+static unsigned int features[] = {
+	/* none */
+};
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_INPUT, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_input_driver = {
+	.driver.name        = KBUILD_MODNAME,
+	.driver.owner       = THIS_MODULE,
+	.feature_table      = features,
+	.feature_table_size = ARRAY_SIZE(features),
+	.id_table           = id_table,
+	.probe              = virtinput_probe,
+	.remove             = virtinput_remove,
+#ifdef CONFIG_PM_SLEEP
+	.freeze             = virtinput_freeze,
+	.restore            = virtinput_restore,
+#endif
+};
+
+module_virtio_driver(virtio_input_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Virtio input device driver");
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
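Note: once this driver binds, the input_event() calls above surface through an ordinary evdev node. A minimal userspace sketch for poking at it (the event node number is an assumption; real code would use libevdev or scan /dev/input):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		struct input_event ev;
		int fd = open("/dev/input/event3", O_RDONLY);	/* hypothetical node */

		if (fd < 0)
			return 1;
		while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
			printf("type %u code %u value %d\n", ev.type, ev.code, ev.value);
		close(fd);
		return 0;
	}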
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 6010d7ec0a0f..7a5e60dea6c5 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -581,14 +581,6 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	}
 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-	/* Reject legacy-only IDs for version 2 devices */
-	if (vm_dev->version == 2 &&
-			virtio_device_is_legacy_only(vm_dev->vdev.id)) {
-		dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n",
-				vm_dev->vdev.id.device);
-		return -ENODEV;
-	}
-
 	if (vm_dev->version == 1)
 		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 2aa38e59db2e..e88e0997a889 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -20,6 +20,50 @@
 #define VIRTIO_PCI_NO_LEGACY
 #include "virtio_pci_common.h"
 
+/*
+ * Type-safe wrappers for io accesses.
+ * Use these to enforce at compile time the following spec requirement:
+ *
+ * The driver MUST access each field using the “natural” access
+ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
+ * for 16-bit fields and 8-bit accesses for 8-bit fields.
+ */
+static inline u8 vp_ioread8(u8 __iomem *addr)
+{
+	return ioread8(addr);
+}
+static inline u16 vp_ioread16 (u16 __iomem *addr)
+{
+	return ioread16(addr);
+}
+
+static inline u32 vp_ioread32(u32 __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
+{
+	iowrite8(value, addr);
+}
+
+static inline void vp_iowrite16(u16 value, u16 __iomem *addr)
+{
+	iowrite16(value, addr);
+}
+
+static inline void vp_iowrite32(u32 value, u32 __iomem *addr)
+{
+	iowrite32(value, addr);
+}
+
+static void vp_iowrite64_twopart(u64 val,
+				 __le32 __iomem *lo, __le32 __iomem *hi)
+{
+	vp_iowrite32((u32)val, lo);
+	vp_iowrite32(val >> 32, hi);
+}
+
 static void __iomem *map_capability(struct pci_dev *dev, int off,
 				    size_t minlen,
 				    u32 align,
@@ -94,22 +138,16 @@ static void __iomem *map_capability(struct pci_dev *dev, int off,
 	return p;
 }
 
-static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi)
-{
-	iowrite32((u32)val, lo);
-	iowrite32(val >> 32, hi);
-}
-
 /* virtio config->get_features() implementation */
 static u64 vp_get_features(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	u64 features;
 
-	iowrite32(0, &vp_dev->common->device_feature_select);
-	features = ioread32(&vp_dev->common->device_feature);
-	iowrite32(1, &vp_dev->common->device_feature_select);
-	features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32);
+	vp_iowrite32(0, &vp_dev->common->device_feature_select);
+	features = vp_ioread32(&vp_dev->common->device_feature);
+	vp_iowrite32(1, &vp_dev->common->device_feature_select);
+	features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);
 
 	return features;
 }
@@ -128,10 +166,10 @@ static int vp_finalize_features(struct virtio_device *vdev)
 		return -EINVAL;
 	}
 
-	iowrite32(0, &vp_dev->common->guest_feature_select);
-	iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
-	iowrite32(1, &vp_dev->common->guest_feature_select);
-	iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
+	vp_iowrite32(0, &vp_dev->common->guest_feature_select);
+	vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
+	vp_iowrite32(1, &vp_dev->common->guest_feature_select);
+	vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
 
 	return 0;
 }
@@ -210,14 +248,14 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
 static u32 vp_generation(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	return ioread8(&vp_dev->common->config_generation);
+	return vp_ioread8(&vp_dev->common->config_generation);
 }
 
 /* config->{get,set}_status() implementations */
 static u8 vp_get_status(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	return ioread8(&vp_dev->common->device_status);
+	return vp_ioread8(&vp_dev->common->device_status);
 }
 
 static void vp_set_status(struct virtio_device *vdev, u8 status)
@@ -225,17 +263,17 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	/* We should never be setting status to 0. */
 	BUG_ON(status == 0);
-	iowrite8(status, &vp_dev->common->device_status);
+	vp_iowrite8(status, &vp_dev->common->device_status);
 }
 
 static void vp_reset(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	/* 0 status means a reset. */
-	iowrite8(0, &vp_dev->common->device_status);
+	vp_iowrite8(0, &vp_dev->common->device_status);
 	/* Flush out the status write, and flush in device writes,
 	 * including MSI-X interrupts, if any. */
-	ioread8(&vp_dev->common->device_status);
+	vp_ioread8(&vp_dev->common->device_status);
 	/* Flush pending VQ/configuration callbacks. */
 	vp_synchronize_vectors(vdev);
 }
@@ -243,10 +281,10 @@ static void vp_reset(struct virtio_device *vdev)
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 {
 	/* Setup the vector used for configuration events */
-	iowrite16(vector, &vp_dev->common->msix_config);
+	vp_iowrite16(vector, &vp_dev->common->msix_config);
 	/* Verify we had enough resources to assign the vector */
 	/* Will also flush the write out to device */
-	return ioread16(&vp_dev->common->msix_config);
+	return vp_ioread16(&vp_dev->common->msix_config);
 }
 
 static size_t vring_pci_size(u16 num)
@@ -286,15 +324,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	u16 num, off;
 	int err;
 
-	if (index >= ioread16(&cfg->num_queues))
+	if (index >= vp_ioread16(&cfg->num_queues))
 		return ERR_PTR(-ENOENT);
 
 	/* Select the queue we're interested in */
-	iowrite16(index, &cfg->queue_select);
+	vp_iowrite16(index, &cfg->queue_select);
 
 	/* Check if queue is either not available or already active. */
-	num = ioread16(&cfg->queue_size);
-	if (!num || ioread16(&cfg->queue_enable))
+	num = vp_ioread16(&cfg->queue_size);
+	if (!num || vp_ioread16(&cfg->queue_enable))
 		return ERR_PTR(-ENOENT);
 
 	if (num & (num - 1)) {
@@ -303,7 +341,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	}
 
 	/* get offset of notification word for this vq */
-	off = ioread16(&cfg->queue_notify_off);
+	off = vp_ioread16(&cfg->queue_notify_off);
 
 	info->num = num;
 	info->msix_vector = msix_vec;
@@ -322,13 +360,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	}
 
 	/* activate the queue */
-	iowrite16(num, &cfg->queue_size);
-	iowrite64_twopart(virt_to_phys(info->queue),
-			  &cfg->queue_desc_lo, &cfg->queue_desc_hi);
-	iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
-			  &cfg->queue_avail_lo, &cfg->queue_avail_hi);
-	iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
-			  &cfg->queue_used_lo, &cfg->queue_used_hi);
+	vp_iowrite16(num, &cfg->queue_size);
+	vp_iowrite64_twopart(virt_to_phys(info->queue),
+			     &cfg->queue_desc_lo, &cfg->queue_desc_hi);
+	vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
+			     &cfg->queue_avail_lo, &cfg->queue_avail_hi);
+	vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
+			     &cfg->queue_used_lo, &cfg->queue_used_hi);
 
 	if (vp_dev->notify_base) {
 		/* offset should not wrap */
@@ -357,8 +395,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	}
 
 	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
-		iowrite16(msix_vec, &cfg->queue_msix_vector);
-		msix_vec = ioread16(&cfg->queue_msix_vector);
+		vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
+		msix_vec = vp_ioread16(&cfg->queue_msix_vector);
 		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
 			err = -EBUSY;
 			goto err_assign_vector;
@@ -393,8 +431,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	 * this, there's no way to go back except reset.
 	 */
 	list_for_each_entry(vq, &vdev->vqs, list) {
-		iowrite16(vq->index, &vp_dev->common->queue_select);
-		iowrite16(1, &vp_dev->common->queue_enable);
+		vp_iowrite16(vq->index, &vp_dev->common->queue_select);
+		vp_iowrite16(1, &vp_dev->common->queue_enable);
 	}
 
 	return 0;
@@ -405,13 +443,13 @@ static void del_vq(struct virtio_pci_vq_info *info)
 	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
-	iowrite16(vq->index, &vp_dev->common->queue_select);
+	vp_iowrite16(vq->index, &vp_dev->common->queue_select);
 
 	if (vp_dev->msix_enabled) {
-		iowrite16(VIRTIO_MSI_NO_VECTOR,
-			  &vp_dev->common->queue_msix_vector);
+		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
+			     &vp_dev->common->queue_msix_vector);
 		/* Flush the write out to device */
-		ioread16(&vp_dev->common->queue_msix_vector);
+		vp_ioread16(&vp_dev->common->queue_msix_vector);
 	}
 
 	if (!vp_dev->notify_base)
@@ -577,9 +615,6 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
 	}
 	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
 
-	if (virtio_device_is_legacy_only(vp_dev->vdev.id))
-		return -ENODEV;
-
 	/* check for a common config: if not, use legacy mode (bar 0). */
 	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
 					    IORESOURCE_IO | IORESOURCE_MEM);
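Note: to see what the typed wrappers buy, consider a 16-bit field such as queue_size (read with vp_ioread16() in setup_vq() above). A sketch of the compile-time effect, assuming the common config struct declares it as a 16-bit __iomem field:

	u16 num;

	num = vp_ioread16(&cfg->queue_size);	/* correct width, compiles cleanly      */
	num = vp_ioread32(&cfg->queue_size);	/* wrong width: incompatible-pointer
						   warning, instead of a silent 32-bit
						   read as plain ioread32() would allow */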
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 9962c6bb1311..6db19f35f7c5 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -61,8 +61,8 @@ struct lguest_data {
 	u32 tsc_khz;
 
 	/* Fields initialized by the Guest at boot: */
-	/* Instruction range to suppress interrupts even if enabled */
-	unsigned long noirq_start, noirq_end;
+	/* Instruction to suppress interrupts even if enabled */
+	unsigned long noirq_iret;
 	/* Address above which page tables are all identical. */
 	unsigned long kernel_address;
 	/* The vector to try to use for system calls (0x40 or 0x80). */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 28f0e65b9a11..8f4d4bfa6d46 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -108,8 +108,6 @@ struct virtio_device {
 	void *priv;
 };
 
-bool virtio_device_is_legacy_only(struct virtio_device_id id);
-
 static inline struct virtio_device *dev_to_virtio(struct device *_dev)
 {
 	return container_of(_dev, struct virtio_device, dev);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index ca3ed78e5ec7..1e306f727edc 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
@@ -298,13 +298,6 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) | |||
298 | } \ | 298 | } \ |
299 | } while(0) | 299 | } while(0) |
300 | 300 | ||
301 | static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset) | ||
302 | { | ||
303 | u8 ret; | ||
304 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | ||
305 | return ret; | ||
306 | } | ||
307 | |||
308 | /* Read @count fields, @bytes each. */ | 301 | /* Read @count fields, @bytes each. */ |
309 | static inline void __virtio_cread_many(struct virtio_device *vdev, | 302 | static inline void __virtio_cread_many(struct virtio_device *vdev, |
310 | unsigned int offset, | 303 | unsigned int offset, |
@@ -326,7 +319,6 @@ static inline void __virtio_cread_many(struct virtio_device *vdev, | |||
326 | } while (gen != old); | 319 | } while (gen != old); |
327 | } | 320 | } |
328 | 321 | ||
329 | |||
330 | static inline void virtio_cread_bytes(struct virtio_device *vdev, | 322 | static inline void virtio_cread_bytes(struct virtio_device *vdev, |
331 | unsigned int offset, | 323 | unsigned int offset, |
332 | void *buf, size_t len) | 324 | void *buf, size_t len) |
@@ -334,6 +326,13 @@ static inline void virtio_cread_bytes(struct virtio_device *vdev, | |||
334 | __virtio_cread_many(vdev, offset, buf, len, 1); | 326 | __virtio_cread_many(vdev, offset, buf, len, 1); |
335 | } | 327 | } |
336 | 328 | ||
329 | static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset) | ||
330 | { | ||
331 | u8 ret; | ||
332 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | ||
333 | return ret; | ||
334 | } | ||
335 | |||
337 | static inline void virtio_cwrite8(struct virtio_device *vdev, | 336 | static inline void virtio_cwrite8(struct virtio_device *vdev, |
338 | unsigned int offset, u8 val) | 337 | unsigned int offset, u8 val) |
339 | { | 338 | { |
@@ -374,7 +373,6 @@ static inline u64 virtio_cread64(struct virtio_device *vdev, | |||
374 | unsigned int offset) | 373 | unsigned int offset) |
375 | { | 374 | { |
376 | u64 ret; | 375 | u64 ret; |
377 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | ||
378 | __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret)); | 376 | __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret)); |
379 | return virtio64_to_cpu(vdev, (__force __virtio64)ret); | 377 | return virtio64_to_cpu(vdev, (__force __virtio64)ret); |
380 | } | 378 | } |
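Two things happen in the virtio_config.h hunks. virtio_cread8() simply moves below __virtio_cread_many(), a grouping cleanup. More interesting is virtio_cread64(), which loses a leftover raw ->get() call: the surviving read goes through __virtio_cread_many(), whose config-generation loop guarantees a multi-byte field cannot be torn by the device updating it mid-read, so the extra unprotected read was at best redundant. A hedged usage sketch; virtio_blk's 64-bit capacity field is used only as a familiar example:

#include <linux/virtio_config.h>
#include <linux/virtio_blk.h>

/* Sketch: the driver-facing virtio_cread() macro resolves to
 * virtio_cread64() for an 8-byte member, so one call yields a
 * generation-protected, endian-converted value. */
static u64 demo_read_capacity(struct virtio_device *vdev)
{
        u64 capacity;

        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
        return capacity;
}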
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 67e06fe18c03..8e50888a6d59 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h | |||
@@ -21,19 +21,20 @@ | |||
21 | * actually quite cheap. | 21 | * actually quite cheap. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #ifdef CONFIG_SMP | ||
25 | static inline void virtio_mb(bool weak_barriers) | 24 | static inline void virtio_mb(bool weak_barriers) |
26 | { | 25 | { |
26 | #ifdef CONFIG_SMP | ||
27 | if (weak_barriers) | 27 | if (weak_barriers) |
28 | smp_mb(); | 28 | smp_mb(); |
29 | else | 29 | else |
30 | #endif | ||
30 | mb(); | 31 | mb(); |
31 | } | 32 | } |
32 | 33 | ||
33 | static inline void virtio_rmb(bool weak_barriers) | 34 | static inline void virtio_rmb(bool weak_barriers) |
34 | { | 35 | { |
35 | if (weak_barriers) | 36 | if (weak_barriers) |
36 | smp_rmb(); | 37 | dma_rmb(); |
37 | else | 38 | else |
38 | rmb(); | 39 | rmb(); |
39 | } | 40 | } |
@@ -41,26 +42,10 @@ static inline void virtio_rmb(bool weak_barriers) | |||
41 | static inline void virtio_wmb(bool weak_barriers) | 42 | static inline void virtio_wmb(bool weak_barriers) |
42 | { | 43 | { |
43 | if (weak_barriers) | 44 | if (weak_barriers) |
44 | smp_wmb(); | 45 | dma_wmb(); |
45 | else | 46 | else |
46 | wmb(); | 47 | wmb(); |
47 | } | 48 | } |
48 | #else | ||
49 | static inline void virtio_mb(bool weak_barriers) | ||
50 | { | ||
51 | mb(); | ||
52 | } | ||
53 | |||
54 | static inline void virtio_rmb(bool weak_barriers) | ||
55 | { | ||
56 | rmb(); | ||
57 | } | ||
58 | |||
59 | static inline void virtio_wmb(bool weak_barriers) | ||
60 | { | ||
61 | wmb(); | ||
62 | } | ||
63 | #endif | ||
64 | 49 | ||
65 | struct virtio_device; | 50 | struct virtio_device; |
66 | struct virtqueue; | 51 | struct virtqueue; |
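The barrier rework folds the SMP and non-SMP variants into a single set of helpers. The key point is that smp_rmb()/smp_wmb() degrade to plain compiler barriers on a CONFIG_SMP=n build, which is not enough when the other observer is the host running on a different physical CPU; dma_rmb()/dma_wmb() give the required ordering regardless of how the guest kernel is configured, so the #ifdef can disappear from the read and write variants. virtio_mb() keeps its CONFIG_SMP case only because there is no dma_mb() equivalent. A sketch of the usual pairing, using a purely illustrative ring type rather than the real vring structures:

#include <linux/types.h>
#include <linux/virtio_ring.h>

/* Illustrative ring only (not the in-tree vring layout).  weak_barriers
 * is true for ordinary virtio devices, where the "device" is really the
 * hypervisor reading guest memory. */
struct demo_ring {
        u16 entries[256];
        u16 idx;                        /* producer-advanced index */
};

static void demo_produce(struct demo_ring *r, u16 val, bool weak_barriers)
{
        r->entries[r->idx % 256] = val;
        /* Entry contents must be visible before the index that exposes them. */
        virtio_wmb(weak_barriers);
        r->idx++;
}

static bool demo_consume(struct demo_ring *r, u16 *last, u16 *val,
                         bool weak_barriers)
{
        if (*last == r->idx)
                return false;
        /* Pairs with the virtio_wmb() above: settle the index read before
         * reading the entry it advertises. */
        virtio_rmb(weak_barriers);
        *val = r->entries[*last % 256];
        (*last)++;
        return true;
}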
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 640954b9ecf9..1a0006a76b00 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild | |||
@@ -431,6 +431,7 @@ header-y += virtio_blk.h | |||
431 | header-y += virtio_config.h | 431 | header-y += virtio_config.h |
432 | header-y += virtio_console.h | 432 | header-y += virtio_console.h |
433 | header-y += virtio_ids.h | 433 | header-y += virtio_ids.h |
434 | header-y += virtio_input.h | ||
434 | header-y += virtio_net.h | 435 | header-y += virtio_net.h |
435 | header-y += virtio_pci.h | 436 | header-y += virtio_pci.h |
436 | header-y += virtio_ring.h | 437 | header-y += virtio_ring.h |
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index 4b0488f20b2e..984169a819ee 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h | |||
@@ -25,6 +25,7 @@ | |||
25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | 25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 | * SUCH DAMAGE. */ | 27 | * SUCH DAMAGE. */ |
28 | #include <linux/types.h> | ||
28 | #include <linux/virtio_ids.h> | 29 | #include <linux/virtio_ids.h> |
29 | #include <linux/virtio_config.h> | 30 | #include <linux/virtio_config.h> |
30 | 31 | ||
@@ -38,9 +39,9 @@ | |||
38 | 39 | ||
39 | struct virtio_balloon_config { | 40 | struct virtio_balloon_config { |
40 | /* Number of pages host wants Guest to give up. */ | 41 | /* Number of pages host wants Guest to give up. */ |
41 | __le32 num_pages; | 42 | __u32 num_pages; |
42 | /* Number of pages we've actually got in balloon. */ | 43 | /* Number of pages we've actually got in balloon. */ |
43 | __le32 actual; | 44 | __u32 actual; |
44 | }; | 45 | }; |
45 | 46 | ||
46 | #define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */ | 47 | #define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */ |
@@ -51,9 +52,32 @@ struct virtio_balloon_config { | |||
51 | #define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */ | 52 | #define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */ |
52 | #define VIRTIO_BALLOON_S_NR 6 | 53 | #define VIRTIO_BALLOON_S_NR 6 |
53 | 54 | ||
55 | /* | ||
56 | * Memory statistics structure. | ||
57 | * Driver fills an array of these structures and passes to device. | ||
58 | * | ||
59 | * NOTE: fields are laid out in a way that would make compiler add padding | ||
60 | * between and after fields, so we have to use compiler-specific attributes to | ||
61 | * pack it, to disable this padding. This also often causes compiler to | ||
62 | * generate suboptimal code. | ||
63 | * | ||
64 | * We maintain this statistics structure format for backwards compatibility, | ||
65 | * but don't follow this example. | ||
66 | * | ||
67 | * If implementing a similar structure, do something like the below instead: | ||
68 | * struct virtio_balloon_stat { | ||
69 | * __virtio16 tag; | ||
70 | * __u8 reserved[6]; | ||
71 | * __virtio64 val; | ||
72 | * }; | ||
73 | * | ||
74 | * In other words, add explicit reserved fields to align field and | ||
75 | * structure boundaries at field size, avoiding compiler padding | ||
76 | * without the packed attribute. | ||
77 | */ | ||
54 | struct virtio_balloon_stat { | 78 | struct virtio_balloon_stat { |
55 | __u16 tag; | 79 | __virtio16 tag; |
56 | __u64 val; | 80 | __virtio64 val; |
57 | } __attribute__((packed)); | 81 | } __attribute__((packed)); |
58 | 82 | ||
59 | #endif /* _LINUX_VIRTIO_BALLOON_H */ | 83 | #endif /* _LINUX_VIRTIO_BALLOON_H */ |
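Two separate cleanups meet in this header. The config fields lose their __le32 tag and the stat fields become __virtio16/__virtio64 because the balloon is now a transitional device: legacy guests use native endianness while virtio 1.0 uses little-endian, and the virtio_cread/virtio_cwrite accessors together with the __virtio types express that, where a fixed __le annotation was misleading. The NOTE block is the instructive part: the original stat layout only needs __attribute__((packed)) because the 64-bit value would otherwise sit after six bytes of compiler padding, and the recommended shape sidesteps that with an explicit reserved field. A quick check of the two layouts (struct names local to this example):

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/bug.h>

/* Legacy layout: without "packed" the compiler would insert 6 bytes of
 * padding after tag; with it the struct is 10 bytes, but val is
 * misaligned and some compilers fall back to slower byte-wise access. */
struct demo_stat_legacy {
        __u16 tag;
        __u64 val;
} __attribute__((packed));

/* Recommended shape from the comment above: explicit reserved bytes keep
 * every field naturally aligned, so no attribute is needed at all. */
struct demo_stat_recommended {
        __u16 tag;
        __u8  reserved[6];
        __u64 val;
};

static inline void demo_stat_layout_check(void)
{
        BUILD_BUG_ON(sizeof(struct demo_stat_legacy) != 10);
        BUILD_BUG_ON(sizeof(struct demo_stat_recommended) != 16);
        BUILD_BUG_ON(offsetof(struct demo_stat_recommended, val) != 8);
}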
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index 284fc3a05f7b..5f60aa4be50a 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h | |||
@@ -39,5 +39,6 @@ | |||
39 | #define VIRTIO_ID_9P 9 /* 9p virtio console */ | 39 | #define VIRTIO_ID_9P 9 /* 9p virtio console */ |
40 | #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */ | 40 | #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */ |
41 | #define VIRTIO_ID_CAIF 12 /* Virtio caif */ | 41 | #define VIRTIO_ID_CAIF 12 /* Virtio caif */ |
42 | #define VIRTIO_ID_INPUT 18 /* virtio input */ | ||
42 | 43 | ||
43 | #endif /* _LINUX_VIRTIO_IDS_H */ | 44 | #endif /* _LINUX_VIRTIO_IDS_H */ |
diff --git a/include/uapi/linux/virtio_input.h b/include/uapi/linux/virtio_input.h new file mode 100644 index 000000000000..a7fe5c8fb135 --- /dev/null +++ b/include/uapi/linux/virtio_input.h | |||
@@ -0,0 +1,76 @@ | |||
1 | #ifndef _LINUX_VIRTIO_INPUT_H | ||
2 | #define _LINUX_VIRTIO_INPUT_H | ||
3 | /* This header is BSD licensed so anyone can use the definitions to implement | ||
4 | * compatible drivers/servers. | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions | ||
8 | * are met: | ||
9 | * 1. Redistributions of source code must retain the above copyright | ||
10 | * notice, this list of conditions and the following disclaimer. | ||
11 | * 2. Redistributions in binary form must reproduce the above copyright | ||
12 | * notice, this list of conditions and the following disclaimer in the | ||
13 | * documentation and/or other materials provided with the distribution. | ||
14 | * 3. Neither the name of IBM nor the names of its contributors | ||
15 | * may be used to endorse or promote products derived from this software | ||
16 | * without specific prior written permission. | ||
17 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
18 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
19 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
20 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR | ||
21 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
22 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
23 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
24 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
25 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | ||
26 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | ||
27 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
28 | * SUCH DAMAGE. */ | ||
29 | |||
30 | #include <linux/types.h> | ||
31 | |||
32 | enum virtio_input_config_select { | ||
33 | VIRTIO_INPUT_CFG_UNSET = 0x00, | ||
34 | VIRTIO_INPUT_CFG_ID_NAME = 0x01, | ||
35 | VIRTIO_INPUT_CFG_ID_SERIAL = 0x02, | ||
36 | VIRTIO_INPUT_CFG_ID_DEVIDS = 0x03, | ||
37 | VIRTIO_INPUT_CFG_PROP_BITS = 0x10, | ||
38 | VIRTIO_INPUT_CFG_EV_BITS = 0x11, | ||
39 | VIRTIO_INPUT_CFG_ABS_INFO = 0x12, | ||
40 | }; | ||
41 | |||
42 | struct virtio_input_absinfo { | ||
43 | __u32 min; | ||
44 | __u32 max; | ||
45 | __u32 fuzz; | ||
46 | __u32 flat; | ||
47 | __u32 res; | ||
48 | }; | ||
49 | |||
50 | struct virtio_input_devids { | ||
51 | __u16 bustype; | ||
52 | __u16 vendor; | ||
53 | __u16 product; | ||
54 | __u16 version; | ||
55 | }; | ||
56 | |||
57 | struct virtio_input_config { | ||
58 | __u8 select; | ||
59 | __u8 subsel; | ||
60 | __u8 size; | ||
61 | __u8 reserved[5]; | ||
62 | union { | ||
63 | char string[128]; | ||
64 | __u8 bitmap[128]; | ||
65 | struct virtio_input_absinfo abs; | ||
66 | struct virtio_input_devids ids; | ||
67 | } u; | ||
68 | }; | ||
69 | |||
70 | struct virtio_input_event { | ||
71 | __le16 type; | ||
72 | __le16 code; | ||
73 | __le32 value; | ||
74 | }; | ||
75 | |||
76 | #endif /* _LINUX_VIRTIO_INPUT_H */ | ||
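The new config struct implies a small handshake: the driver writes select and subsel to name the item it wants (for example VIRTIO_INPUT_CFG_EV_BITS with subsel EV_KEY), reads back size to learn how many bytes of the union are valid, then reads the union itself. The virtqueue-side virtio_input_event uses __le16/__le32 since virtio-input is defined as a virtio 1.0 device, whose fields are little-endian on the wire. A hedged sketch of the query side, close in spirit to what drivers/virtio/virtio_input.c does (helper names here are illustrative):

#include <linux/kernel.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_input.h>

/* Sketch: select a config item, then learn how much of u.* is valid. */
static u8 demo_input_cfg_select(struct virtio_device *vdev, u8 select, u8 subsel)
{
        virtio_cwrite8(vdev, offsetof(struct virtio_input_config, select), select);
        virtio_cwrite8(vdev, offsetof(struct virtio_input_config, subsel), subsel);
        return virtio_cread8(vdev, offsetof(struct virtio_input_config, size));
}

/* Example: fetch the device name advertised via VIRTIO_INPUT_CFG_ID_NAME. */
static void demo_input_read_name(struct virtio_device *vdev, char *buf, size_t len)
{
        u8 size = demo_input_cfg_select(vdev, VIRTIO_INPUT_CFG_ID_NAME, 0);

        size = min_t(size_t, size, len - 1);
        virtio_cread_bytes(vdev,
                           offsetof(struct virtio_input_config, u.string),
                           buf, size);
        buf[size] = 0;
}

Events received on the event virtqueue are forwarded to the input core essentially unmodified: the in-tree driver converts type/code/value with le16_to_cpu()/le32_to_cpu() and hands them to input_event().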