diff options
author | Radim Krčmář <rkrcmar@redhat.com> | 2016-03-02 16:56:43 -0500 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2016-03-04 03:29:55 -0500 |
commit | 09edea72b7f9fd8a8d26c1f7504d989b9773ee5e (patch) | |
tree | 41d3482e71883d6ebabce616bc91c8d6d982da9c /arch/x86/kvm/i8254.c | |
parent | b69d920f68b119bdc0483f0c33d34fd0c57724f5 (diff) |
KVM: i8254: pass struct kvm_pit instead of kvm in PIT
This patch passes struct kvm_pit into internal PIT functions.
Those functions used to get PIT through kvm->arch.vpit, even though most
of them never used *kvm for other purposes. Another benefit is that we
don't need to set kvm->arch.vpit during initialization.
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/i8254.c')
-rw-r--r-- | arch/x86/kvm/i8254.c | 112 |
1 file changed, 52 insertions, 60 deletions
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index e5a3e8015e30..2afe09b054e7 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -71,10 +71,9 @@ static u64 muldiv64(u64 a, u32 b, u32 c) | |||
71 | return res.ll; | 71 | return res.ll; |
72 | } | 72 | } |
73 | 73 | ||
74 | static void pit_set_gate(struct kvm *kvm, int channel, u32 val) | 74 | static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val) |
75 | { | 75 | { |
76 | struct kvm_kpit_channel_state *c = | 76 | struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; |
77 | &kvm->arch.vpit->pit_state.channels[channel]; | ||
78 | 77 | ||
79 | switch (c->mode) { | 78 | switch (c->mode) { |
80 | default: | 79 | default: |
@@ -95,16 +94,16 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val) | |||
95 | c->gate = val; | 94 | c->gate = val; |
96 | } | 95 | } |
97 | 96 | ||
98 | static int pit_get_gate(struct kvm *kvm, int channel) | 97 | static int pit_get_gate(struct kvm_pit *pit, int channel) |
99 | { | 98 | { |
100 | return kvm->arch.vpit->pit_state.channels[channel].gate; | 99 | return pit->pit_state.channels[channel].gate; |
101 | } | 100 | } |
102 | 101 | ||
103 | static s64 __kpit_elapsed(struct kvm *kvm) | 102 | static s64 __kpit_elapsed(struct kvm_pit *pit) |
104 | { | 103 | { |
105 | s64 elapsed; | 104 | s64 elapsed; |
106 | ktime_t remaining; | 105 | ktime_t remaining; |
107 | struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; | 106 | struct kvm_kpit_state *ps = &pit->pit_state; |
108 | 107 | ||
109 | if (!ps->period) | 108 | if (!ps->period) |
110 | return 0; | 109 | return 0; |
@@ -124,23 +123,22 @@ static s64 __kpit_elapsed(struct kvm *kvm) | |||
124 | return elapsed; | 123 | return elapsed; |
125 | } | 124 | } |
126 | 125 | ||
127 | static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c, | 126 | static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c, |
128 | int channel) | 127 | int channel) |
129 | { | 128 | { |
130 | if (channel == 0) | 129 | if (channel == 0) |
131 | return __kpit_elapsed(kvm); | 130 | return __kpit_elapsed(pit); |
132 | 131 | ||
133 | return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time)); | 132 | return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time)); |
134 | } | 133 | } |
135 | 134 | ||
136 | static int pit_get_count(struct kvm *kvm, int channel) | 135 | static int pit_get_count(struct kvm_pit *pit, int channel) |
137 | { | 136 | { |
138 | struct kvm_kpit_channel_state *c = | 137 | struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; |
139 | &kvm->arch.vpit->pit_state.channels[channel]; | ||
140 | s64 d, t; | 138 | s64 d, t; |
141 | int counter; | 139 | int counter; |
142 | 140 | ||
143 | t = kpit_elapsed(kvm, c, channel); | 141 | t = kpit_elapsed(pit, c, channel); |
144 | d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); | 142 | d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); |
145 | 143 | ||
146 | switch (c->mode) { | 144 | switch (c->mode) { |
@@ -161,14 +159,13 @@ static int pit_get_count(struct kvm *kvm, int channel) | |||
161 | return counter; | 159 | return counter; |
162 | } | 160 | } |
163 | 161 | ||
164 | static int pit_get_out(struct kvm *kvm, int channel) | 162 | static int pit_get_out(struct kvm_pit *pit, int channel) |
165 | { | 163 | { |
166 | struct kvm_kpit_channel_state *c = | 164 | struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; |
167 | &kvm->arch.vpit->pit_state.channels[channel]; | ||
168 | s64 d, t; | 165 | s64 d, t; |
169 | int out; | 166 | int out; |
170 | 167 | ||
171 | t = kpit_elapsed(kvm, c, channel); | 168 | t = kpit_elapsed(pit, c, channel); |
172 | d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); | 169 | d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); |
173 | 170 | ||
174 | switch (c->mode) { | 171 | switch (c->mode) { |
@@ -194,25 +191,23 @@ static int pit_get_out(struct kvm *kvm, int channel) | |||
194 | return out; | 191 | return out; |
195 | } | 192 | } |
196 | 193 | ||
197 | static void pit_latch_count(struct kvm *kvm, int channel) | 194 | static void pit_latch_count(struct kvm_pit *pit, int channel) |
198 | { | 195 | { |
199 | struct kvm_kpit_channel_state *c = | 196 | struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; |
200 | &kvm->arch.vpit->pit_state.channels[channel]; | ||
201 | 197 | ||
202 | if (!c->count_latched) { | 198 | if (!c->count_latched) { |
203 | c->latched_count = pit_get_count(kvm, channel); | 199 | c->latched_count = pit_get_count(pit, channel); |
204 | c->count_latched = c->rw_mode; | 200 | c->count_latched = c->rw_mode; |
205 | } | 201 | } |
206 | } | 202 | } |
207 | 203 | ||
208 | static void pit_latch_status(struct kvm *kvm, int channel) | 204 | static void pit_latch_status(struct kvm_pit *pit, int channel) |
209 | { | 205 | { |
210 | struct kvm_kpit_channel_state *c = | 206 | struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; |
211 | &kvm->arch.vpit->pit_state.channels[channel]; | ||
212 | 207 | ||
213 | if (!c->status_latched) { | 208 | if (!c->status_latched) { |
214 | /* TODO: Return NULL COUNT (bit 6). */ | 209 | /* TODO: Return NULL COUNT (bit 6). */ |
215 | c->status = ((pit_get_out(kvm, channel) << 7) | | 210 | c->status = ((pit_get_out(pit, channel) << 7) | |
216 | (c->rw_mode << 4) | | 211 | (c->rw_mode << 4) | |
217 | (c->mode << 1) | | 212 | (c->mode << 1) | |
218 | c->bcd); | 213 | c->bcd); |
@@ -306,9 +301,10 @@ static inline void kvm_pit_reset_reinject(struct kvm_pit *pit) | |||
306 | atomic_set(&pit->pit_state.irq_ack, 1); | 301 | atomic_set(&pit->pit_state.irq_ack, 1); |
307 | } | 302 | } |
308 | 303 | ||
309 | static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) | 304 | static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period) |
310 | { | 305 | { |
311 | struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; | 306 | struct kvm_kpit_state *ps = &pit->pit_state; |
307 | struct kvm *kvm = pit->kvm; | ||
312 | s64 interval; | 308 | s64 interval; |
313 | 309 | ||
314 | if (!ioapic_in_kernel(kvm) || | 310 | if (!ioapic_in_kernel(kvm) || |
@@ -326,9 +322,9 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) | |||
326 | ps->is_periodic = is_period; | 322 | ps->is_periodic = is_period; |
327 | 323 | ||
328 | ps->timer.function = pit_timer_fn; | 324 | ps->timer.function = pit_timer_fn; |
329 | ps->kvm = ps->pit->kvm; | 325 | ps->kvm = pit->kvm; |
330 | 326 | ||
331 | kvm_pit_reset_reinject(ps->pit); | 327 | kvm_pit_reset_reinject(pit); |
332 | 328 | ||
333 | /* | 329 | /* |
334 | * Do not allow the guest to program periodic timers with small | 330 | * Do not allow the guest to program periodic timers with small |
@@ -351,9 +347,9 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) | |||
351 | HRTIMER_MODE_ABS); | 347 | HRTIMER_MODE_ABS); |
352 | } | 348 | } |
353 | 349 | ||
354 | static void pit_load_count(struct kvm *kvm, int channel, u32 val) | 350 | static void pit_load_count(struct kvm_pit *pit, int channel, u32 val) |
355 | { | 351 | { |
356 | struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; | 352 | struct kvm_kpit_state *ps = &pit->pit_state; |
357 | 353 | ||
358 | pr_debug("load_count val is %d, channel is %d\n", val, channel); | 354 | pr_debug("load_count val is %d, channel is %d\n", val, channel); |
359 | 355 | ||
@@ -378,32 +374,33 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) | |||
378 | case 1: | 374 | case 1: |
379 | /* FIXME: enhance mode 4 precision */ | 375 | /* FIXME: enhance mode 4 precision */ |
380 | case 4: | 376 | case 4: |
381 | create_pit_timer(kvm, val, 0); | 377 | create_pit_timer(pit, val, 0); |
382 | break; | 378 | break; |
383 | case 2: | 379 | case 2: |
384 | case 3: | 380 | case 3: |
385 | create_pit_timer(kvm, val, 1); | 381 | create_pit_timer(pit, val, 1); |
386 | break; | 382 | break; |
387 | default: | 383 | default: |
388 | destroy_pit_timer(kvm->arch.vpit); | 384 | destroy_pit_timer(pit); |
389 | } | 385 | } |
390 | } | 386 | } |
391 | 387 | ||
392 | void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start) | 388 | void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val, |
389 | int hpet_legacy_start) | ||
393 | { | 390 | { |
394 | u8 saved_mode; | 391 | u8 saved_mode; |
395 | 392 | ||
396 | WARN_ON_ONCE(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); | 393 | WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock)); |
397 | 394 | ||
398 | if (hpet_legacy_start) { | 395 | if (hpet_legacy_start) { |
399 | /* save existing mode for later reenablement */ | 396 | /* save existing mode for later reenablement */ |
400 | WARN_ON(channel != 0); | 397 | WARN_ON(channel != 0); |
401 | saved_mode = kvm->arch.vpit->pit_state.channels[0].mode; | 398 | saved_mode = pit->pit_state.channels[0].mode; |
402 | kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */ | 399 | pit->pit_state.channels[0].mode = 0xff; /* disable timer */ |
403 | pit_load_count(kvm, channel, val); | 400 | pit_load_count(pit, channel, val); |
404 | kvm->arch.vpit->pit_state.channels[0].mode = saved_mode; | 401 | pit->pit_state.channels[0].mode = saved_mode; |
405 | } else { | 402 | } else { |
406 | pit_load_count(kvm, channel, val); | 403 | pit_load_count(pit, channel, val); |
407 | } | 404 | } |
408 | } | 405 | } |
409 | 406 | ||
@@ -429,7 +426,6 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu, | |||
429 | { | 426 | { |
430 | struct kvm_pit *pit = dev_to_pit(this); | 427 | struct kvm_pit *pit = dev_to_pit(this); |
431 | struct kvm_kpit_state *pit_state = &pit->pit_state; | 428 | struct kvm_kpit_state *pit_state = &pit->pit_state; |
432 | struct kvm *kvm = pit->kvm; | ||
433 | int channel, access; | 429 | int channel, access; |
434 | struct kvm_kpit_channel_state *s; | 430 | struct kvm_kpit_channel_state *s; |
435 | u32 val = *(u32 *) data; | 431 | u32 val = *(u32 *) data; |
@@ -453,9 +449,9 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu, | |||
453 | s = &pit_state->channels[channel]; | 449 | s = &pit_state->channels[channel]; |
454 | if (val & (2 << channel)) { | 450 | if (val & (2 << channel)) { |
455 | if (!(val & 0x20)) | 451 | if (!(val & 0x20)) |
456 | pit_latch_count(kvm, channel); | 452 | pit_latch_count(pit, channel); |
457 | if (!(val & 0x10)) | 453 | if (!(val & 0x10)) |
458 | pit_latch_status(kvm, channel); | 454 | pit_latch_status(pit, channel); |
459 | } | 455 | } |
460 | } | 456 | } |
461 | } else { | 457 | } else { |
@@ -463,7 +459,7 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu, | |||
463 | s = &pit_state->channels[channel]; | 459 | s = &pit_state->channels[channel]; |
464 | access = (val >> 4) & KVM_PIT_CHANNEL_MASK; | 460 | access = (val >> 4) & KVM_PIT_CHANNEL_MASK; |
465 | if (access == 0) { | 461 | if (access == 0) { |
466 | pit_latch_count(kvm, channel); | 462 | pit_latch_count(pit, channel); |
467 | } else { | 463 | } else { |
468 | s->rw_mode = access; | 464 | s->rw_mode = access; |
469 | s->read_state = access; | 465 | s->read_state = access; |
@@ -480,17 +476,17 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu, | |||
480 | switch (s->write_state) { | 476 | switch (s->write_state) { |
481 | default: | 477 | default: |
482 | case RW_STATE_LSB: | 478 | case RW_STATE_LSB: |
483 | pit_load_count(kvm, addr, val); | 479 | pit_load_count(pit, addr, val); |
484 | break; | 480 | break; |
485 | case RW_STATE_MSB: | 481 | case RW_STATE_MSB: |
486 | pit_load_count(kvm, addr, val << 8); | 482 | pit_load_count(pit, addr, val << 8); |
487 | break; | 483 | break; |
488 | case RW_STATE_WORD0: | 484 | case RW_STATE_WORD0: |
489 | s->write_latch = val; | 485 | s->write_latch = val; |
490 | s->write_state = RW_STATE_WORD1; | 486 | s->write_state = RW_STATE_WORD1; |
491 | break; | 487 | break; |
492 | case RW_STATE_WORD1: | 488 | case RW_STATE_WORD1: |
493 | pit_load_count(kvm, addr, s->write_latch | (val << 8)); | 489 | pit_load_count(pit, addr, s->write_latch | (val << 8)); |
494 | s->write_state = RW_STATE_WORD0; | 490 | s->write_state = RW_STATE_WORD0; |
495 | break; | 491 | break; |
496 | } | 492 | } |
@@ -506,7 +502,6 @@ static int pit_ioport_read(struct kvm_vcpu *vcpu, | |||
506 | { | 502 | { |
507 | struct kvm_pit *pit = dev_to_pit(this); | 503 | struct kvm_pit *pit = dev_to_pit(this); |
508 | struct kvm_kpit_state *pit_state = &pit->pit_state; | 504 | struct kvm_kpit_state *pit_state = &pit->pit_state; |
509 | struct kvm *kvm = pit->kvm; | ||
510 | int ret, count; | 505 | int ret, count; |
511 | struct kvm_kpit_channel_state *s; | 506 | struct kvm_kpit_channel_state *s; |
512 | if (!pit_in_range(addr)) | 507 | if (!pit_in_range(addr)) |
@@ -543,20 +538,20 @@ static int pit_ioport_read(struct kvm_vcpu *vcpu, | |||
543 | switch (s->read_state) { | 538 | switch (s->read_state) { |
544 | default: | 539 | default: |
545 | case RW_STATE_LSB: | 540 | case RW_STATE_LSB: |
546 | count = pit_get_count(kvm, addr); | 541 | count = pit_get_count(pit, addr); |
547 | ret = count & 0xff; | 542 | ret = count & 0xff; |
548 | break; | 543 | break; |
549 | case RW_STATE_MSB: | 544 | case RW_STATE_MSB: |
550 | count = pit_get_count(kvm, addr); | 545 | count = pit_get_count(pit, addr); |
551 | ret = (count >> 8) & 0xff; | 546 | ret = (count >> 8) & 0xff; |
552 | break; | 547 | break; |
553 | case RW_STATE_WORD0: | 548 | case RW_STATE_WORD0: |
554 | count = pit_get_count(kvm, addr); | 549 | count = pit_get_count(pit, addr); |
555 | ret = count & 0xff; | 550 | ret = count & 0xff; |
556 | s->read_state = RW_STATE_WORD1; | 551 | s->read_state = RW_STATE_WORD1; |
557 | break; | 552 | break; |
558 | case RW_STATE_WORD1: | 553 | case RW_STATE_WORD1: |
559 | count = pit_get_count(kvm, addr); | 554 | count = pit_get_count(pit, addr); |
560 | ret = (count >> 8) & 0xff; | 555 | ret = (count >> 8) & 0xff; |
561 | s->read_state = RW_STATE_WORD0; | 556 | s->read_state = RW_STATE_WORD0; |
562 | break; | 557 | break; |
@@ -577,14 +572,13 @@ static int speaker_ioport_write(struct kvm_vcpu *vcpu, | |||
577 | { | 572 | { |
578 | struct kvm_pit *pit = speaker_to_pit(this); | 573 | struct kvm_pit *pit = speaker_to_pit(this); |
579 | struct kvm_kpit_state *pit_state = &pit->pit_state; | 574 | struct kvm_kpit_state *pit_state = &pit->pit_state; |
580 | struct kvm *kvm = pit->kvm; | ||
581 | u32 val = *(u32 *) data; | 575 | u32 val = *(u32 *) data; |
582 | if (addr != KVM_SPEAKER_BASE_ADDRESS) | 576 | if (addr != KVM_SPEAKER_BASE_ADDRESS) |
583 | return -EOPNOTSUPP; | 577 | return -EOPNOTSUPP; |
584 | 578 | ||
585 | mutex_lock(&pit_state->lock); | 579 | mutex_lock(&pit_state->lock); |
586 | pit_state->speaker_data_on = (val >> 1) & 1; | 580 | pit_state->speaker_data_on = (val >> 1) & 1; |
587 | pit_set_gate(kvm, 2, val & 1); | 581 | pit_set_gate(pit, 2, val & 1); |
588 | mutex_unlock(&pit_state->lock); | 582 | mutex_unlock(&pit_state->lock); |
589 | return 0; | 583 | return 0; |
590 | } | 584 | } |
@@ -595,7 +589,6 @@ static int speaker_ioport_read(struct kvm_vcpu *vcpu, | |||
595 | { | 589 | { |
596 | struct kvm_pit *pit = speaker_to_pit(this); | 590 | struct kvm_pit *pit = speaker_to_pit(this); |
597 | struct kvm_kpit_state *pit_state = &pit->pit_state; | 591 | struct kvm_kpit_state *pit_state = &pit->pit_state; |
598 | struct kvm *kvm = pit->kvm; | ||
599 | unsigned int refresh_clock; | 592 | unsigned int refresh_clock; |
600 | int ret; | 593 | int ret; |
601 | if (addr != KVM_SPEAKER_BASE_ADDRESS) | 594 | if (addr != KVM_SPEAKER_BASE_ADDRESS) |
@@ -605,8 +598,8 @@ static int speaker_ioport_read(struct kvm_vcpu *vcpu, | |||
605 | refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1; | 598 | refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1; |
606 | 599 | ||
607 | mutex_lock(&pit_state->lock); | 600 | mutex_lock(&pit_state->lock); |
608 | ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) | | 601 | ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(pit, 2) | |
609 | (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4)); | 602 | (pit_get_out(pit, 2) << 5) | (refresh_clock << 4)); |
610 | if (len > sizeof(ret)) | 603 | if (len > sizeof(ret)) |
611 | len = sizeof(ret); | 604 | len = sizeof(ret); |
612 | memcpy(data, (char *)&ret, len); | 605 | memcpy(data, (char *)&ret, len); |
@@ -625,7 +618,7 @@ void kvm_pit_reset(struct kvm_pit *pit) | |||
625 | c = &pit->pit_state.channels[i]; | 618 | c = &pit->pit_state.channels[i]; |
626 | c->mode = 0xff; | 619 | c->mode = 0xff; |
627 | c->gate = (i != 2); | 620 | c->gate = (i != 2); |
628 | pit_load_count(pit->kvm, i, 0); | 621 | pit_load_count(pit, i, 0); |
629 | } | 622 | } |
630 | mutex_unlock(&pit->pit_state.lock); | 623 | mutex_unlock(&pit->pit_state.lock); |
631 | 624 | ||
@@ -687,7 +680,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) | |||
687 | } | 680 | } |
688 | init_kthread_work(&pit->expired, pit_do_work); | 681 | init_kthread_work(&pit->expired, pit_do_work); |
689 | 682 | ||
690 | kvm->arch.vpit = pit; | ||
691 | pit->kvm = kvm; | 683 | pit->kvm = kvm; |
692 | 684 | ||
693 | pit_state = &pit->pit_state; | 685 | pit_state = &pit->pit_state; |