/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <sheng.yang@intel.com>
 *   Based on QEMU and Xen.
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "i8254.h"

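/*
 * Note: on 32-bit hosts a plain 64-bit '%' would pull in a libgcc helper
 * the kernel does not provide, so the modulus is built from div64_u64()
 * there; 64-bit hosts can use the native operator directly.
 */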
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

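/*
 * A straight 64-bit multiply of an elapsed-nanosecond count by
 * KVM_PIT_FREQ can overflow once a few hours of elapsed time are
 * involved, so the helper below splits the 64-bit operand into 32-bit
 * halves and divides each partial product separately.
 */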
/* Compute with 96 bit intermediate result: (a*b)/c */
static u64 muldiv64(u64 a, u32 b, u32 c)
{
	union {
		u64 ll;
		struct {
			u32 low, high;
		} l;
	} u, res;
	u64 rl, rh;

	u.ll = a;
	rl = (u64)u.l.low * (u64)b;
	rh = (u64)u.l.high * (u64)b;
	rh += (rl >> 32);
	res.l.high = div64_u64(rh, c);
	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
	return res.ll;
}

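/*
 * The GATE input enables counting. Per the 8254 datasheet, a rising
 * edge on GATE reloads the count in modes 1, 2, 3 and 5, while in
 * modes 0 and 4 the gate level should merely pause/resume counting,
 * which this model does not implement (see the XXX below).
 */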
static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	switch (c->mode) {
	default:
	case 0:
	case 4:
		/* XXX: just disable/enable counting */
		break;
	case 1:
	case 2:
	case 3:
	case 5:
		/* Restart counting on rising edge. */
		if (c->gate < val)
			c->count_load_time = ktime_get();
		break;
	}

	c->gate = val;
}

static int pit_get_gate(struct kvm *kvm, int channel)
{
	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	return kvm->arch.vpit->pit_state.channels[channel].gate;
}

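/*
 * Channel 0 is backed by an hrtimer, so its elapsed time is derived
 * from how much of the current timer period remains rather than from
 * a wall-clock delta.
 */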
static s64 __kpit_elapsed(struct kvm *kvm)
{
	s64 elapsed;
	ktime_t remaining;
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	remaining = hrtimer_expires_remaining(&ps->pit_timer.timer);
	if (ktime_to_ns(remaining) < 0)
		remaining = ktime_set(0, 0);

	elapsed = ps->pit_timer.period;
	if (ktime_to_ns(remaining) <= ps->pit_timer.period)
		elapsed = ps->pit_timer.period - ktime_to_ns(remaining);

	return elapsed;
}

static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
			int channel)
{
	if (channel == 0)
		return __kpit_elapsed(kvm);

	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

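/*
 * Elapsed nanoseconds are converted to PIT ticks below at the 8254
 * input frequency (KVM_PIT_FREQ, 1193182 Hz): for example, 1 ms of
 * elapsed time is muldiv64(1000000, 1193182, NSEC_PER_SEC), about
 * 1193 ticks.
 */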
static int pit_get_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		/* XXX: may be incorrect for odd counts */
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		counter = c->count - mod_64(d, c->count);
		break;
	}
	return counter;
}

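/*
 * Compute the level of the OUT pin from the elapsed tick count: high
 * after the count expires in mode 0, high while the one-shot runs in
 * mode 1, a pulse at each period boundary in mode 2, a roughly 50%
 * duty-cycle square wave in mode 3, and a single-tick strobe in
 * modes 4 and 5.
 */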
static int pit_get_out(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int out;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		out = (d >= c->count);
		break;
	case 1:
		out = (d < c->count);
		break;
	case 2:
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		out = (d == c->count);
		break;
	}

	return out;
}

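/*
 * A latch command freezes the current count (or status) in a side
 * buffer so the guest can read a consistent snapshot byte by byte;
 * as on real hardware, further latch commands are ignored until the
 * latched value has been read out.
 */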
static void pit_latch_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->count_latched) {
		c->latched_count = pit_get_count(kvm, channel);
		c->count_latched = c->rw_mode;
	}
}

static void pit_latch_status(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->status_latched) {
		/* TODO: Return NULL COUNT (bit 6). */
		c->status = ((pit_get_out(kvm, channel) << 7) |
			     (c->rw_mode << 4) |
			     (c->mode << 1) |
			     c->bcd);
		c->status_latched = 1;
	}
}

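/*
 * Timer ticks that fire while a previous IRQ0 is still unacked are
 * accumulated in 'pending'; a pending tick is only reported for VCPU0,
 * which owns the PIT channel-0 interrupt.
 */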
int pit_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;

	if (pit && vcpu->vcpu_id == 0 && pit->pit_state.irq_ack)
		return atomic_read(&pit->pit_state.pit_timer.pending);

	return 0;
}

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);

	spin_lock(&ps->inject_lock);
	if (atomic_dec_return(&ps->pit_timer.pending) < 0)
		atomic_inc(&ps->pit_timer.pending);
	ps->irq_ack = 1;
	spin_unlock(&ps->inject_lock);
}

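/*
 * When VCPU0 migrates to another physical CPU, cancel and re-queue the
 * hrtimer with its original expiry so it fires on the CPU the vcpu now
 * runs on.
 */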
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	if (vcpu->vcpu_id != 0 || !pit)
		return;

	timer = &pit->pit_state.pit_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

static void destroy_pit_timer(struct kvm_timer *pt)
{
	pr_debug("pit: destroy pit timer\n");
	hrtimer_cancel(&pt->timer);
}

static bool kpit_is_periodic(struct kvm_timer *ktimer)
{
	struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
						 pit_timer);
	return ps->is_periodic;
}

struct kvm_timer_ops kpit_ops = {
	.is_periodic = kpit_is_periodic,
};

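/*
 * The hrtimer interval is the load value scaled from PIT ticks to
 * nanoseconds: interval = val * NSEC_PER_SEC / KVM_PIT_FREQ. For the
 * classic 100 Hz programming of val = 11932, that is about 10 ms.
 */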
static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
{
	struct kvm_timer *pt = &ps->pit_timer;
	s64 interval;

	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);

	/* TODO: the new value should only take effect after the counter
	 * is retriggered. */
	hrtimer_cancel(&pt->timer);
	pt->period = (is_period == 0) ? 0 : interval;
	ps->is_periodic = is_period;

	pt->timer.function = kvm_timer_fn;
	pt->t_ops = &kpit_ops;
	pt->kvm = ps->pit->kvm;
	pt->vcpu_id = 0;

	atomic_set(&pt->pending, 0);
	ps->irq_ack = 1;

	hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}

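/*
 * Only channel 0 is wired to the interrupt logic, so only a load of
 * channel 0 (re)programs the hrtimer; channels 1 and 2 just record
 * when the count was loaded.
 */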
static void pit_load_count(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	WARN_ON(!mutex_is_locked(&ps->lock));

	pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);

	/*
	 * Although the spec says the state of the 8254 is undefined after
	 * power-up, some tricky OSes (e.g. Windows XP) depend on IRQ0
	 * firing while they boot. So give the counter a sensible initial
	 * rate here rather than a specific number: a load value of 0
	 * means the maximum count, 0x10000.
	 */
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/*
	 * Two types of timer: modes 0, 1 and 4 are one-shot, modes 2 and 3
	 * are periodic; any other mode tears the timer down.
	 */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
		/* FIXME: enhance mode 4 precision */
	case 4:
		create_pit_timer(ps, val, 0);
		break;
	case 2:
	case 3:
		create_pit_timer(ps, val, 1);
		break;
	default:
		destroy_pit_timer(&ps->pit_timer);
	}
}

void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
{
	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	pit_load_count(kvm, channel, val);
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
}

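/*
 * Port 0x43 command byte layout: bits 7:6 select the counter (11b is
 * the read-back command), bits 5:4 the access mode (00b latches the
 * count), bits 3:1 the counting mode, and bit 0 selects BCD.
 */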
static void pit_ioport_write(struct kvm_io_device *this,
			     gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = (struct kvm_pit *)this->private;
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	u32 val = *(u32 *) data;

	val  &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				s = &pit_state->channels[channel];
				if (val & (2 << channel)) {
					if (!(val & 0x20))
						pit_latch_count(kvm, channel);
					if (!(val & 0x10))
						pit_latch_status(kvm, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				pit_latch_count(kvm, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;

				s->mode = (val >> 1) & 7;
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(kvm, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(kvm, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(kvm, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
}

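/*
 * Reads return, in priority order: a latched status byte, then any
 * latched count bytes, then the live count, honoring the programmed
 * LSB/MSB/word access sequence.
 */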
static void pit_ioport_read(struct kvm_io_device *this,
			    gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = (struct kvm_pit *)this->private;
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int ret, count;
	struct kvm_kpit_channel_state *s;

	addr &= KVM_PIT_CHANNEL_MASK;
	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
}

static int pit_in_range(struct kvm_io_device *this, gpa_t addr,
			int len, int is_write)
{
	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

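/*
 * Port 0x61: bit 0 drives the channel-2 GATE and bit 1 gates speaker
 * data; reads below also report GATE (bit 0), OUT2 (bit 5) and the
 * toggling refresh clock (bit 4).
 */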
static void speaker_ioport_write(struct kvm_io_device *this,
				 gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = (struct kvm_pit *)this->private;
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	u32 val = *(u32 *) data;

	mutex_lock(&pit_state->lock);
	pit_state->speaker_data_on = (val >> 1) & 1;
	pit_set_gate(kvm, 2, val & 1);
	mutex_unlock(&pit_state->lock);
}

static void speaker_ioport_read(struct kvm_io_device *this,
				gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = (struct kvm_pit *)this->private;
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	unsigned int refresh_clock;
	int ret;

	/* Refresh clock toggles at about 15 us. We approximate as 2^14 ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
	       (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
}

static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
			    int len, int is_write)
{
	return (addr == KVM_SPEAKER_BASE_ADDRESS);
}

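/*
 * Reset programs every channel with an out-of-range mode and a maximum
 * count, and raises the gates of channels 0 and 1 (channel 2's gate is
 * controlled by port 0x61), matching what guests expect after firmware
 * initialization.
 */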
void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

	mutex_lock(&pit->pit_state.lock);
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;
		c->gate = (i != 2);
		pit_load_count(pit->kvm, i, 0);
	}
	mutex_unlock(&pit->pit_state.lock);

	atomic_set(&pit->pit_state.pit_timer.pending, 0);
	pit->pit_state.irq_ack = 1;
}

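/*
 * When the guest unmasks IRQ0 at the interrupt controller, drop any
 * backlog of pending ticks so it does not receive a burst of stale
 * timer interrupts.
 */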
static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask) {
		atomic_set(&pit->pit_state.pit_timer.pending, 0);
		pit->pit_state.irq_ack = 1;
	}
}

struct kvm_pit *kvm_create_pit(struct kvm *kvm)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0) {
		kfree(pit);
		return NULL;
	}

	mutex_init(&pit->pit_state.lock);
	mutex_lock(&pit->pit_state.lock);
	spin_lock_init(&pit->pit_state.inject_lock);

	/* Initialize PIO device */
	pit->dev.read = pit_ioport_read;
	pit->dev.write = pit_ioport_write;
	pit->dev.in_range = pit_in_range;
	pit->dev.private = pit;
	kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);

	pit->speaker_dev.read = speaker_ioport_read;
	pit->speaker_dev.write = speaker_ioport_write;
	pit->speaker_dev.in_range = speaker_in_range;
	pit->speaker_dev.private = pit;
	kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);

	kvm->arch.vpit = pit;
	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	pit_state->pit = pit;
	hrtimer_init(&pit_state->pit_timer.timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
	pit_state->pit_timer.reinject = true;
	mutex_unlock(&pit->pit_state.lock);

	kvm_pit_reset(pit);

	pit->mask_notifier.func = pit_mask_notifier;
	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);

	return pit;
}

void kvm_free_pit(struct kvm *kvm)
{
	struct hrtimer *timer;

	if (kvm->arch.vpit) {
		kvm_unregister_irq_mask_notifier(kvm, 0,
						 &kvm->arch.vpit->mask_notifier);
		mutex_lock(&kvm->arch.vpit->pit_state.lock);
		timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
		hrtimer_cancel(timer);
		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
		kfree(kvm->arch.vpit);
	}
}

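/*
 * IRQ0 is raised and immediately lowered below: the interrupt is
 * delivered on the edge, so the pulse injects exactly one timer tick.
 */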
static void __inject_pit_timer_intr(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
	mutex_unlock(&kvm->lock);

	/*
	 * Provides NMI watchdog support via Virtual Wire mode.
	 * The route is: PIT -> PIC -> LVT0 in NMI mode.
	 *
	 * Note: Our Virtual Wire implementation is simplified, only
	 * propagating PIT interrupts to all VCPUs when they have set
	 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
	 * VCPU0, and only if its LVT0 is in EXTINT mode.
	 */
	if (kvm->arch.vapics_in_nmi_mode > 0)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				kvm_apic_nmi_wd_deliver(vcpu);
		}
}

void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_kpit_state *ps;

	if (vcpu && pit) {
		int inject = 0;
		ps = &pit->pit_state;

		/*
		 * Try to inject pending interrupts when the last one
		 * has been acked.
		 */
		spin_lock(&ps->inject_lock);
		if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
			ps->irq_ack = 0;
			inject = 1;
		}
		spin_unlock(&ps->inject_lock);
		if (inject)
			__inject_pit_timer_intr(kvm);
	}
}