KVM: x86: Convert tsc_write_lock to raw_spinlock
Author:     Jan Kiszka <jan.kiszka@siemens.com>
AuthorDate: Fri, 4 Feb 2011 09:49:11 +0000 (10:49 +0100)
Commit:     Marcelo Tosatti <mtosatti@redhat.com>
CommitDate: Thu, 17 Mar 2011 16:08:30 +0000 (13:08 -0300)
Code under this lock requires non-preemptibility. Ensure this also holds
on -rt by converting it to a raw spinlock.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a58aebe..37bd730 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -448,7 +448,7 @@ struct kvm_arch {
 
        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
-       spinlock_t tsc_write_lock;
+       raw_spinlock_t tsc_write_lock;
        u64 last_tsc_nsec;
        u64 last_tsc_offset;
        u64 last_tsc_write;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9000829..17af71d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1017,7 +1017,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
        unsigned long flags;
        s64 sdiff;
 
-       spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
+       raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
        offset = data - native_read_tsc();
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
@@ -1050,7 +1050,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
        kvm->arch.last_tsc_write = data;
        kvm->arch.last_tsc_offset = offset;
        kvm_x86_ops->write_tsc_offset(vcpu, offset);
-       spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+       raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
        /* Reset of TSC must disable overshoot protection below */
        vcpu->arch.hv_clock.tsc_timestamp = 0;
@@ -6004,7 +6004,7 @@ int kvm_arch_init_vm(struct kvm *kvm)
        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
 
-       spin_lock_init(&kvm->arch.tsc_write_lock);
+       raw_spin_lock_init(&kvm->arch.tsc_write_lock);
 
        return 0;
 }
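
For reference, a minimal sketch of the locking pattern this patch converges on
is shown below. It is illustrative only and not part of the patch: the
demo_clock structure and functions are made-up names, while the raw_spinlock_t
primitives are the stock kernel API that keeps the critical section
non-preemptible even on PREEMPT_RT, where an ordinary spinlock_t becomes a
sleeping lock.

	#include <linux/spinlock.h>
	#include <linux/types.h>

	/* Hypothetical clock state guarded by a raw spinlock. */
	struct demo_clock {
		raw_spinlock_t lock;	/* must stay non-preemptible, also on -rt */
		u64 last_write;
	};

	static struct demo_clock demo;

	static void demo_clock_init(void)
	{
		raw_spin_lock_init(&demo.lock);
	}

	static void demo_clock_update(u64 data)
	{
		unsigned long flags;

		/*
		 * Disables interrupts and preemption on all configurations,
		 * including PREEMPT_RT, where spin_lock_irqsave() on a
		 * spinlock_t would only acquire a sleeping rtmutex.
		 */
		raw_spin_lock_irqsave(&demo.lock, flags);
		demo.last_write = data;
		raw_spin_unlock_irqrestore(&demo.lock, flags);
	}

With kvm->arch.tsc_write_lock converted the same way, kvm_write_tsc() keeps
its critical section atomic under -rt as well.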