path: root/arch/x86/kvm/i8254.c
Diffstat (limited to 'arch/x86/kvm/i8254.c')
-rw-r--r--  arch/x86/kvm/i8254.c  350
1 file changed, 151 insertions(+), 199 deletions(-)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index b0ea42b78ccd..a4bf5b45d65a 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -51,32 +51,9 @@
 #define RW_STATE_WORD0 3
 #define RW_STATE_WORD1 4
 
-/* Compute with 96 bit intermediate result: (a*b)/c */
-static u64 muldiv64(u64 a, u32 b, u32 c)
-{
-	union {
-		u64 ll;
-		struct {
-			u32 low, high;
-		} l;
-	} u, res;
-	u64 rl, rh;
-
-	u.ll = a;
-	rl = (u64)u.l.low * (u64)b;
-	rh = (u64)u.l.high * (u64)b;
-	rh += (rl >> 32);
-	res.l.high = div64_u64(rh, c);
-	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
-	return res.ll;
-}
-
-static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
+static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)
 {
-	struct kvm_kpit_channel_state *c =
-		&kvm->arch.vpit->pit_state.channels[channel];
-
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
 
 	switch (c->mode) {
 	default:
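A note on the deletion above: the open-coded muldiv64() duplicated mul_u64_u32_div() from include/linux/math64.h, which likewise computes (a * b) / c with a wide intermediate so that a * b cannot wrap at 64 bits; the callers below switch to it. A user-space sketch of why the widening matters for the PIT conversions (illustration only, using the GCC/Clang unsigned __int128 extension as an oracle; the constants approximate KVM_PIT_FREQ and NSEC_PER_SEC):

    #include <assert.h>
    #include <stdint.h>

    /* (a * b) / c with a 128-bit intermediate, mirroring what the removed
     * muldiv64() computed and what mul_u64_u32_div() provides. */
    static uint64_t muldiv64_wide(uint64_t a, uint32_t b, uint32_t c)
    {
        return (uint64_t)(((unsigned __int128)a * b) / c);
    }

    int main(void)
    {
        uint64_t t = 24ULL * 3600 * 1000000000;  /* a day of elapsed ns */

        /* t * 1193182 (~KVM_PIT_FREQ) wraps past 2^64, so the naive
         * 64-bit expression silently drops the high bits: */
        assert((t * 1193182) / 1000000000 !=
               muldiv64_wide(t, 1193182, 1000000000));
        return 0;
    }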
@@ -97,18 +74,16 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
 	c->gate = val;
 }
 
-static int pit_get_gate(struct kvm *kvm, int channel)
+static int pit_get_gate(struct kvm_pit *pit, int channel)
 {
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
-	return kvm->arch.vpit->pit_state.channels[channel].gate;
+	return pit->pit_state.channels[channel].gate;
 }
 
-static s64 __kpit_elapsed(struct kvm *kvm)
+static s64 __kpit_elapsed(struct kvm_pit *pit)
 {
 	s64 elapsed;
 	ktime_t remaining;
-	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+	struct kvm_kpit_state *ps = &pit->pit_state;
 
 	if (!ps->period)
 		return 0;
@@ -128,26 +103,23 @@ static s64 __kpit_elapsed(struct kvm *kvm)
 	return elapsed;
 }
 
-static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
+static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c,
 			int channel)
 {
 	if (channel == 0)
-		return __kpit_elapsed(kvm);
+		return __kpit_elapsed(pit);
 
 	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
 }
 
-static int pit_get_count(struct kvm *kvm, int channel)
+static int pit_get_count(struct kvm_pit *pit, int channel)
 {
-	struct kvm_kpit_channel_state *c =
-		&kvm->arch.vpit->pit_state.channels[channel];
+	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
 	s64 d, t;
 	int counter;
 
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
-	t = kpit_elapsed(kvm, c, channel);
-	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+	t = kpit_elapsed(pit, c, channel);
+	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);
 
 	switch (c->mode) {
 	case 0:
@@ -167,17 +139,14 @@ static int pit_get_count(struct kvm *kvm, int channel)
 	return counter;
 }
 
-static int pit_get_out(struct kvm *kvm, int channel)
+static int pit_get_out(struct kvm_pit *pit, int channel)
 {
-	struct kvm_kpit_channel_state *c =
-		&kvm->arch.vpit->pit_state.channels[channel];
+	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
 	s64 d, t;
 	int out;
 
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
-	t = kpit_elapsed(kvm, c, channel);
-	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+	t = kpit_elapsed(pit, c, channel);
+	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);
 
 	switch (c->mode) {
 	default:
@@ -202,29 +171,23 @@ static int pit_get_out(struct kvm *kvm, int channel)
 	return out;
 }
 
-static void pit_latch_count(struct kvm *kvm, int channel)
+static void pit_latch_count(struct kvm_pit *pit, int channel)
 {
-	struct kvm_kpit_channel_state *c =
-		&kvm->arch.vpit->pit_state.channels[channel];
-
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
 
 	if (!c->count_latched) {
-		c->latched_count = pit_get_count(kvm, channel);
+		c->latched_count = pit_get_count(pit, channel);
 		c->count_latched = c->rw_mode;
 	}
 }
 
-static void pit_latch_status(struct kvm *kvm, int channel)
+static void pit_latch_status(struct kvm_pit *pit, int channel)
 {
-	struct kvm_kpit_channel_state *c =
-		&kvm->arch.vpit->pit_state.channels[channel];
-
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
 
 	if (!c->status_latched) {
 		/* TODO: Return NULL COUNT (bit 6). */
-		c->status = ((pit_get_out(kvm, channel) << 7) |
+		c->status = ((pit_get_out(pit, channel) << 7) |
 			     (c->rw_mode << 4) |
 			     (c->mode << 1) |
 			     c->bcd);
@@ -232,26 +195,24 @@ static void pit_latch_status(struct kvm *kvm, int channel)
 	}
 }
 
+static inline struct kvm_pit *pit_state_to_pit(struct kvm_kpit_state *ps)
+{
+	return container_of(ps, struct kvm_pit, pit_state);
+}
+
 static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
 	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
 						 irq_ack_notifier);
-	int value;
+	struct kvm_pit *pit = pit_state_to_pit(ps);
 
-	spin_lock(&ps->inject_lock);
-	value = atomic_dec_return(&ps->pending);
-	if (value < 0)
-		/* spurious acks can be generated if, for example, the
-		 * PIC is being reset.  Handle it gracefully here
-		 */
-		atomic_inc(&ps->pending);
-	else if (value > 0)
-		/* in this case, we had multiple outstanding pit interrupts
-		 * that we needed to inject.  Reinject
-		 */
-		queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
-	ps->irq_ack = 1;
-	spin_unlock(&ps->inject_lock);
+	atomic_set(&ps->irq_ack, 1);
+	/* irq_ack should be set before pending is read.  Order accesses with
+	 * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work.
+	 */
+	smp_mb();
+	if (atomic_dec_if_positive(&ps->pending) > 0)
+		queue_kthread_work(&pit->worker, &pit->expired);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
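The hunk above replaces the inject_lock critical section with a lock-free handshake: the ack side publishes irq_ack = 1, issues smp_mb() so that store is ordered before the read of pending, and re-queues the injection work only if ticks are still outstanding. The one less-common primitive is atomic_dec_if_positive(), which never drives the counter negative (the old code had to undo spurious decrements by hand). A user-space sketch of its semantics in C11 atomics (an illustration, not the kernel's implementation):

    #include <stdatomic.h>

    /* Semantics of the kernel's atomic_dec_if_positive(): decrement only
     * if the result stays non-negative; return the would-be result either
     * way, so callers can distinguish "consumed one" from "was empty". */
    static int dec_if_positive(atomic_int *v)
    {
        int old = atomic_load(v);

        do {
            if (old <= 0)
                return old - 1;  /* nothing stored; report would-be value */
        } while (!atomic_compare_exchange_weak(v, &old, old - 1));

        return old - 1;
    }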
@@ -282,45 +243,36 @@ static void pit_do_work(struct kthread_work *work)
 	struct kvm_vcpu *vcpu;
 	int i;
 	struct kvm_kpit_state *ps = &pit->pit_state;
-	int inject = 0;
 
-	/* Try to inject pending interrupts when
-	 * last one has been acked.
+	if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
+		return;
+
+	kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false);
+	kvm_set_irq(kvm, pit->irq_source_id, 0, 0, false);
+
+	/*
+	 * Provides NMI watchdog support via Virtual Wire mode.
+	 * The route is: PIT -> LVT0 in NMI mode.
+	 *
+	 * Note: Our Virtual Wire implementation does not follow
+	 * the MP specification.  We propagate a PIT interrupt to all
+	 * VCPUs and only when LVT0 is in NMI mode.  The interrupt can
+	 * also be simultaneously delivered through PIC and IOAPIC.
 	 */
-	spin_lock(&ps->inject_lock);
-	if (ps->irq_ack) {
-		ps->irq_ack = 0;
-		inject = 1;
-	}
-	spin_unlock(&ps->inject_lock);
-	if (inject) {
-		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
-		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
-
-		/*
-		 * Provides NMI watchdog support via Virtual Wire mode.
-		 * The route is: PIT -> PIC -> LVT0 in NMI mode.
-		 *
-		 * Note: Our Virtual Wire implementation is simplified, only
-		 * propagating PIT interrupts to all VCPUs when they have set
-		 * LVT0 to NMI delivery.  Other PIC interrupts are just sent to
-		 * VCPU0, and only if its LVT0 is in EXTINT mode.
-		 */
-		if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
-			kvm_for_each_vcpu(i, vcpu, kvm)
-				kvm_apic_nmi_wd_deliver(vcpu);
-	}
+	if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			kvm_apic_nmi_wd_deliver(vcpu);
 }
 
 static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 {
 	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
-	struct kvm_pit *pt = ps->kvm->arch.vpit;
+	struct kvm_pit *pt = pit_state_to_pit(ps);
 
-	if (ps->reinject || !atomic_read(&ps->pending)) {
+	if (atomic_read(&ps->reinject))
 		atomic_inc(&ps->pending);
-		queue_kthread_work(&pt->worker, &pt->expired);
-	}
+
+	queue_kthread_work(&pt->worker, &pt->expired);
 
 	if (ps->is_periodic) {
 		hrtimer_add_expires_ns(&ps->timer, ps->period);
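With the ps->kvm and ps->pit back-pointers gone, pit_timer_fn() recovers the owning kvm_pit from the embedded kvm_kpit_state via pit_state_to_pit(), which is plain container_of(). A self-contained sketch of the pointer arithmetic behind that macro (the struct layout here is a stand-in, not the real kvm_pit):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pit_state { int pending; };  /* stand-in for kvm_kpit_state */

    struct pit {                        /* stand-in for kvm_pit */
        int irq_source_id;
        struct pit_state pit_state;     /* embedded member */
    };

    int main(void)
    {
        struct pit p = { .irq_source_id = 7 };
        struct pit_state *ps = &p.pit_state;

        /* Subtracting the member's offset recovers the enclosing object. */
        struct pit *back = container_of(ps, struct pit, pit_state);

        printf("%d\n", back->irq_source_id);  /* prints 7 */
        return 0;
    }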
@@ -329,30 +281,54 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 	return HRTIMER_NORESTART;
 }
 
-static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
+static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
 {
-	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+	atomic_set(&pit->pit_state.pending, 0);
+	atomic_set(&pit->pit_state.irq_ack, 1);
+}
+
+void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
+{
+	struct kvm_kpit_state *ps = &pit->pit_state;
+	struct kvm *kvm = pit->kvm;
+
+	if (atomic_read(&ps->reinject) == reinject)
+		return;
+
+	if (reinject) {
+		/* The initial state is preserved while ps->reinject == 0. */
+		kvm_pit_reset_reinject(pit);
+		kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
+		kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+	} else {
+		kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
+		kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+	}
+
+	atomic_set(&ps->reinject, reinject);
+}
+
+static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
+{
+	struct kvm_kpit_state *ps = &pit->pit_state;
+	struct kvm *kvm = pit->kvm;
 	s64 interval;
 
 	if (!ioapic_in_kernel(kvm) ||
 	    ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
 		return;
 
-	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
+	interval = mul_u64_u32_div(val, NSEC_PER_SEC, KVM_PIT_FREQ);
 
 	pr_debug("create pit timer, interval is %llu nsec\n", interval);
 
 	/* TODO The new value only affected after the retriggered */
 	hrtimer_cancel(&ps->timer);
-	flush_kthread_work(&ps->pit->expired);
+	flush_kthread_work(&pit->expired);
 	ps->period = interval;
 	ps->is_periodic = is_period;
 
-	ps->timer.function = pit_timer_fn;
-	ps->kvm = ps->pit->kvm;
-
-	atomic_set(&ps->pending, 0);
-	ps->irq_ack = 1;
+	kvm_pit_reset_reinject(pit);
 
 	/*
 	 * Do not allow the guest to program periodic timers with small
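kvm_pit_set_reinject() gives the reinject/discard policy switch a single entry point and only keeps the ack and mask notifiers registered while reinject mode is on, so discard mode no longer pays for them. Userspace drives this through the KVM_REINJECT_CONTROL vm ioctl; a minimal sketch of the call (assumes a vm_fd from KVM_CREATE_VM with an in-kernel PIT, error handling elided):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Flip the in-kernel PIT between reinject (pit_reinject = 1) and
     * discard (pit_reinject = 0) handling of lost timer ticks. */
    static int set_pit_reinject(int vm_fd, int on)
    {
        struct kvm_reinject_control ctl = { .pit_reinject = on };

        return ioctl(vm_fd, KVM_REINJECT_CONTROL, &ctl);
    }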
@@ -375,11 +351,9 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
 			      HRTIMER_MODE_ABS);
 }
 
-static void pit_load_count(struct kvm *kvm, int channel, u32 val)
+static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
 {
-	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
-
-	WARN_ON(!mutex_is_locked(&ps->lock));
+	struct kvm_kpit_state *ps = &pit->pit_state;
 
 	pr_debug("load_count val is %d, channel is %d\n", val, channel);
 
@@ -404,29 +378,33 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 	case 1:
 		/* FIXME: enhance mode 4 precision */
 	case 4:
-		create_pit_timer(kvm, val, 0);
+		create_pit_timer(pit, val, 0);
 		break;
 	case 2:
 	case 3:
-		create_pit_timer(kvm, val, 1);
+		create_pit_timer(pit, val, 1);
 		break;
 	default:
-		destroy_pit_timer(kvm->arch.vpit);
+		destroy_pit_timer(pit);
 	}
 }
 
-void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start)
+void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
+		int hpet_legacy_start)
 {
 	u8 saved_mode;
+
+	WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock));
+
 	if (hpet_legacy_start) {
 		/* save existing mode for later reenablement */
 		WARN_ON(channel != 0);
-		saved_mode = kvm->arch.vpit->pit_state.channels[0].mode;
-		kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */
-		pit_load_count(kvm, channel, val);
-		kvm->arch.vpit->pit_state.channels[0].mode = saved_mode;
+		saved_mode = pit->pit_state.channels[0].mode;
+		pit->pit_state.channels[0].mode = 0xff; /* disable timer */
+		pit_load_count(pit, channel, val);
+		pit->pit_state.channels[0].mode = saved_mode;
 	} else {
-		pit_load_count(kvm, channel, val);
+		pit_load_count(pit, channel, val);
 	}
 }
 
@@ -452,7 +430,6 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu,
 {
 	struct kvm_pit *pit = dev_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
-	struct kvm *kvm = pit->kvm;
 	int channel, access;
 	struct kvm_kpit_channel_state *s;
 	u32 val = *(u32 *) data;
@@ -476,9 +453,9 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu,
 			s = &pit_state->channels[channel];
 			if (val & (2 << channel)) {
 				if (!(val & 0x20))
-					pit_latch_count(kvm, channel);
+					pit_latch_count(pit, channel);
 				if (!(val & 0x10))
-					pit_latch_status(kvm, channel);
+					pit_latch_status(pit, channel);
 			}
 		}
 	} else {
@@ -486,7 +463,7 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu,
 		s = &pit_state->channels[channel];
 		access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
 		if (access == 0) {
-			pit_latch_count(kvm, channel);
+			pit_latch_count(pit, channel);
 		} else {
 			s->rw_mode = access;
 			s->read_state = access;
@@ -503,17 +480,17 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu,
 		switch (s->write_state) {
 		default:
 		case RW_STATE_LSB:
-			pit_load_count(kvm, addr, val);
+			pit_load_count(pit, addr, val);
 			break;
 		case RW_STATE_MSB:
-			pit_load_count(kvm, addr, val << 8);
+			pit_load_count(pit, addr, val << 8);
 			break;
 		case RW_STATE_WORD0:
 			s->write_latch = val;
 			s->write_state = RW_STATE_WORD1;
 			break;
 		case RW_STATE_WORD1:
-			pit_load_count(kvm, addr, s->write_latch | (val << 8));
+			pit_load_count(pit, addr, s->write_latch | (val << 8));
 			s->write_state = RW_STATE_WORD0;
 			break;
 		}
@@ -529,7 +506,6 @@ static int pit_ioport_read(struct kvm_vcpu *vcpu,
 {
 	struct kvm_pit *pit = dev_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
-	struct kvm *kvm = pit->kvm;
 	int ret, count;
 	struct kvm_kpit_channel_state *s;
 	if (!pit_in_range(addr))
@@ -566,20 +542,20 @@ static int pit_ioport_read(struct kvm_vcpu *vcpu,
 	switch (s->read_state) {
 	default:
 	case RW_STATE_LSB:
-		count = pit_get_count(kvm, addr);
+		count = pit_get_count(pit, addr);
 		ret = count & 0xff;
 		break;
 	case RW_STATE_MSB:
-		count = pit_get_count(kvm, addr);
+		count = pit_get_count(pit, addr);
 		ret = (count >> 8) & 0xff;
 		break;
 	case RW_STATE_WORD0:
-		count = pit_get_count(kvm, addr);
+		count = pit_get_count(pit, addr);
 		ret = count & 0xff;
 		s->read_state = RW_STATE_WORD1;
 		break;
 	case RW_STATE_WORD1:
-		count = pit_get_count(kvm, addr);
+		count = pit_get_count(pit, addr);
 		ret = (count >> 8) & 0xff;
 		s->read_state = RW_STATE_WORD0;
 		break;
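The RW_STATE_WORD0/RW_STATE_WORD1 states above implement the 8254's two-byte access mode: a 16-bit counter is transferred as two consecutive byte reads of the same port, low byte first, with the channel remembering which half comes next. A trimmed-down sketch of the read side (stand-in types, not the kernel's structs):

    #include <stdint.h>

    enum { RW_STATE_WORD0 = 3, RW_STATE_WORD1 = 4 };

    struct channel { int read_state; };  /* stand-in for kvm_kpit_channel_state */

    static uint8_t read_count_byte(struct channel *s, uint16_t count)
    {
        if (s->read_state == RW_STATE_WORD0) {
            s->read_state = RW_STATE_WORD1;
            return count & 0xff;             /* first access: low byte */
        }
        s->read_state = RW_STATE_WORD0;      /* second access: high byte */
        return (count >> 8) & 0xff;
    }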
@@ -600,14 +576,13 @@ static int speaker_ioport_write(struct kvm_vcpu *vcpu,
 {
 	struct kvm_pit *pit = speaker_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
-	struct kvm *kvm = pit->kvm;
 	u32 val = *(u32 *) data;
 	if (addr != KVM_SPEAKER_BASE_ADDRESS)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&pit_state->lock);
 	pit_state->speaker_data_on = (val >> 1) & 1;
-	pit_set_gate(kvm, 2, val & 1);
+	pit_set_gate(pit, 2, val & 1);
 	mutex_unlock(&pit_state->lock);
 	return 0;
 }
@@ -618,7 +593,6 @@ static int speaker_ioport_read(struct kvm_vcpu *vcpu,
 {
 	struct kvm_pit *pit = speaker_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
-	struct kvm *kvm = pit->kvm;
 	unsigned int refresh_clock;
 	int ret;
 	if (addr != KVM_SPEAKER_BASE_ADDRESS)
@@ -628,8 +602,8 @@ static int speaker_ioport_read(struct kvm_vcpu *vcpu,
 	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
 
 	mutex_lock(&pit_state->lock);
-	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
-		(pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
+	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(pit, 2) |
+		(pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
 	if (len > sizeof(ret))
 		len = sizeof(ret);
 	memcpy(data, (char *)&ret, len);
@@ -637,33 +611,28 @@ static int speaker_ioport_read(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-void kvm_pit_reset(struct kvm_pit *pit)
+static void kvm_pit_reset(struct kvm_pit *pit)
 {
 	int i;
 	struct kvm_kpit_channel_state *c;
 
-	mutex_lock(&pit->pit_state.lock);
 	pit->pit_state.flags = 0;
 	for (i = 0; i < 3; i++) {
 		c = &pit->pit_state.channels[i];
 		c->mode = 0xff;
 		c->gate = (i != 2);
-		pit_load_count(pit->kvm, i, 0);
+		pit_load_count(pit, i, 0);
 	}
-	mutex_unlock(&pit->pit_state.lock);
 
-	atomic_set(&pit->pit_state.pending, 0);
-	pit->pit_state.irq_ack = 1;
+	kvm_pit_reset_reinject(pit);
 }
 
 static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
 {
 	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
 
-	if (!mask) {
-		atomic_set(&pit->pit_state.pending, 0);
-		pit->pit_state.irq_ack = 1;
-	}
+	if (!mask)
+		kvm_pit_reset_reinject(pit);
 }
 
 static const struct kvm_io_device_ops pit_dev_ops = {
@@ -690,14 +659,10 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 		return NULL;
 
 	pit->irq_source_id = kvm_request_irq_source_id(kvm);
-	if (pit->irq_source_id < 0) {
-		kfree(pit);
-		return NULL;
-	}
+	if (pit->irq_source_id < 0)
+		goto fail_request;
 
 	mutex_init(&pit->pit_state.lock);
-	mutex_lock(&pit->pit_state.lock);
-	spin_lock_init(&pit->pit_state.inject_lock);
 
 	pid = get_pid(task_tgid(current));
 	pid_nr = pid_vnr(pid);
@@ -706,36 +671,30 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 	init_kthread_worker(&pit->worker);
 	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
 				       "kvm-pit/%d", pid_nr);
-	if (IS_ERR(pit->worker_task)) {
-		mutex_unlock(&pit->pit_state.lock);
-		kvm_free_irq_source_id(kvm, pit->irq_source_id);
-		kfree(pit);
-		return NULL;
-	}
+	if (IS_ERR(pit->worker_task))
+		goto fail_kthread;
+
 	init_kthread_work(&pit->expired, pit_do_work);
 
-	kvm->arch.vpit = pit;
 	pit->kvm = kvm;
 
 	pit_state = &pit->pit_state;
-	pit_state->pit = pit;
 	hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	pit_state->timer.function = pit_timer_fn;
+
 	pit_state->irq_ack_notifier.gsi = 0;
 	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
-	kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
-	pit_state->reinject = true;
-	mutex_unlock(&pit->pit_state.lock);
+	pit->mask_notifier.func = pit_mask_notifer;
 
 	kvm_pit_reset(pit);
 
-	pit->mask_notifier.func = pit_mask_notifer;
-	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+	kvm_pit_set_reinject(pit, true);
 
 	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
 	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
 				      KVM_PIT_MEM_LENGTH, &pit->dev);
 	if (ret < 0)
-		goto fail;
+		goto fail_register_pit;
 
 	if (flags & KVM_PIT_SPEAKER_DUMMY) {
 		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
@@ -743,42 +702,35 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 					  KVM_SPEAKER_BASE_ADDRESS, 4,
 					  &pit->speaker_dev);
 		if (ret < 0)
-			goto fail_unregister;
+			goto fail_register_speaker;
 	}
 
 	return pit;
 
-fail_unregister:
+fail_register_speaker:
 	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
-
-fail:
-	kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
-	kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
-	kvm_free_irq_source_id(kvm, pit->irq_source_id);
+fail_register_pit:
+	kvm_pit_set_reinject(pit, false);
 	kthread_stop(pit->worker_task);
+fail_kthread:
+	kvm_free_irq_source_id(kvm, pit->irq_source_id);
+fail_request:
 	kfree(pit);
 	return NULL;
 }
 
 void kvm_free_pit(struct kvm *kvm)
 {
-	struct hrtimer *timer;
+	struct kvm_pit *pit = kvm->arch.vpit;
 
-	if (kvm->arch.vpit) {
-		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->dev);
-		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
-					  &kvm->arch.vpit->speaker_dev);
-		kvm_unregister_irq_mask_notifier(kvm, 0,
-						 &kvm->arch.vpit->mask_notifier);
-		kvm_unregister_irq_ack_notifier(kvm,
-				&kvm->arch.vpit->pit_state.irq_ack_notifier);
-		mutex_lock(&kvm->arch.vpit->pit_state.lock);
-		timer = &kvm->arch.vpit->pit_state.timer;
-		hrtimer_cancel(timer);
-		flush_kthread_work(&kvm->arch.vpit->expired);
-		kthread_stop(kvm->arch.vpit->worker_task);
-		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
-		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
-		kfree(kvm->arch.vpit);
+	if (pit) {
+		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
+		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
+		kvm_pit_set_reinject(pit, false);
+		hrtimer_cancel(&pit->pit_state.timer);
+		flush_kthread_work(&pit->expired);
+		kthread_stop(pit->worker_task);
+		kvm_free_irq_source_id(kvm, pit->irq_source_id);
+		kfree(pit);
 	}
 }
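The reworked error path in kvm_create_pit() follows the usual kernel unwind idiom: each label undoes, in reverse order, exactly the steps that had succeeded before the failure, and kvm_free_pit() now performs the same teardown for a fully constructed PIT. The shape of the idiom reduced to a schematic (the step/undo helpers are hypothetical):

    #include <stdbool.h>

    /* Hypothetical construction steps; each returns true on success. */
    static bool step_a(void) { return true; }
    static bool step_b(void) { return true; }
    static bool step_c(void) { return false; }  /* simulate a failure */
    static void undo_a(void) { }
    static void undo_b(void) { }

    static int create(void)
    {
        if (!step_a())
            goto fail_a;
        if (!step_b())
            goto fail_b;
        if (!step_c())
            goto fail_c;
        return 0;

    fail_c:         /* step_c failed: unwind b, then a, newest first */
        undo_b();
    fail_b:
        undo_a();
    fail_a:
        return -1;
    }

    int main(void)
    {
        return create() == -1 ? 0 : 1;  /* expect the unwound failure */
    }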