KVM: struct kvm_memory_slot.user_alloc -> bool
author      Alex Williamson <alex.williamson@redhat.com>
            Mon, 10 Dec 2012 17:33:21 +0000 (10:33 -0700)
committer   Marcelo Tosatti <mtosatti@redhat.com>
            Fri, 14 Dec 2012 01:24:38 +0000 (23:24 -0200)
There's no need for this to be an int; it only ever holds a boolean.
Move it to the end of the struct for alignment.

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
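
For context on the "alignment" note: a one-byte bool placed between an
unsigned long and an int leaves an interior padding hole, while placing it
last pushes any hole to the tail of the struct, where a future small member
can reuse it. The sketch below is a standalone userspace illustration, not
kernel code; the field names only loosely mirror kvm_memory_slot, and the
layout comments assume a typical LP64 build (on such a build both layouts
happen to end up the same total size, the difference is where the unused
bytes sit).

/* Standalone sketch of the padding argument (not from the kernel tree). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct slot_bool_middle {               /* bool kept where the int used to be */
        unsigned long userspace_addr;   /* 8 bytes */
        bool user_alloc;                /* 1 byte, then 3 bytes of padding */
        int id;                         /* 4 bytes */
};

struct slot_bool_last {                 /* layout after this patch */
        unsigned long userspace_addr;
        int id;
        bool user_alloc;                /* any hole is now trailing padding */
};

int main(void)
{
        printf("bool in the middle: size %zu, id at offset %zu\n",
               sizeof(struct slot_bool_middle),
               offsetof(struct slot_bool_middle, id));
        printf("bool at the end:    size %zu, user_alloc at offset %zu\n",
               sizeof(struct slot_bool_last),
               offsetof(struct slot_bool_last, user_alloc));
        return 0;
}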
arch/ia64/kvm/kvm-ia64.c
arch/powerpc/kvm/powerpc.c
arch/s390/kvm/kvm-s390.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 9bacfe2..ad3126a 100644
@@ -955,7 +955,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                                        kvm_mem.guest_phys_addr;
                kvm_userspace_mem.memory_size = kvm_mem.memory_size;
                r = kvm_vm_ioctl_set_memory_region(kvm,
-                                       &kvm_userspace_mem, 0);
+                                       &kvm_userspace_mem, false);
                if (r)
                        goto out;
                break;
@@ -1580,7 +1580,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                struct kvm_memory_slot *memslot,
                struct kvm_memory_slot old,
                struct kvm_userspace_memory_region *mem,
-               int user_alloc)
+               bool user_alloc)
 {
        unsigned long i;
        unsigned long pfn;
@@ -1611,7 +1611,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
                struct kvm_memory_slot old,
-               int user_alloc)
+               bool user_alloc)
 {
        return;
 }
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 70739a0..be83fca 100644
@@ -412,7 +412,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot,
                                    struct kvm_memory_slot old,
                                    struct kvm_userspace_memory_region *mem,
-                                   int user_alloc)
+                                   bool user_alloc)
 {
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
 }
@@ -420,7 +420,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
                struct kvm_memory_slot old,
-               int user_alloc)
+               bool user_alloc)
 {
        kvmppc_core_commit_memory_region(kvm, mem, old);
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c9011bf..f718bc6 100644
@@ -928,7 +928,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
-                                  int user_alloc)
+                                  bool user_alloc)
 {
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
@@ -958,7 +958,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
-                               int user_alloc)
+                               bool user_alloc)
 {
        int rc;
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9120ae1..b3101e3 100644
@@ -3667,7 +3667,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
        kvm_userspace_mem.flags = 0;
        kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
        kvm_userspace_mem.memory_size = PAGE_SIZE;
-       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
        if (r)
                goto out;
 
@@ -3697,7 +3697,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
        kvm_userspace_mem.guest_phys_addr =
                kvm->arch.ept_identity_map_addr;
        kvm_userspace_mem.memory_size = PAGE_SIZE;
-       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
        if (r)
                goto out;
 
@@ -4251,7 +4251,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
                .flags = 0,
        };
 
-       ret = kvm_set_memory_region(kvm, &tss_mem, 0);
+       ret = kvm_set_memory_region(kvm, &tss_mem, false);
        if (ret)
                return ret;
        kvm->arch.tss_addr = addr;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8160747..1c9c834 100644
@@ -6839,7 +6839,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
-                               int user_alloc)
+                               bool user_alloc)
 {
        int npages = memslot->npages;
        int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
@@ -6875,7 +6875,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
-                               int user_alloc)
+                               bool user_alloc)
 {
 
        int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5a3581c..d897f03 100644
@@ -270,8 +270,8 @@ struct kvm_memory_slot {
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
-       int user_alloc;
        int id;
+       bool user_alloc;
 };
 
 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
@@ -451,10 +451,10 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 
 int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
-                         int user_alloc);
+                         bool user_alloc);
 int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
-                           int user_alloc);
+                           bool user_alloc);
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
@@ -462,11 +462,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
-                               int user_alloc);
+                               bool user_alloc);
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
-                               int user_alloc);
+                               bool user_alloc);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
 /* flush all memory translations */
@@ -553,7 +553,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct
                                   kvm_userspace_memory_region *mem,
-                                  int user_alloc);
+                                  bool user_alloc);
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
 long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5f0638c..42c1eb7 100644
@@ -709,7 +709,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
  */
 int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
-                           int user_alloc)
+                           bool user_alloc)
 {
        int r;
        gfn_t base_gfn;
@@ -889,7 +889,7 @@ EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 
 int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
-                         int user_alloc)
+                         bool user_alloc)
 {
        int r;
 
@@ -903,7 +903,7 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct
                                   kvm_userspace_memory_region *mem,
-                                  int user_alloc)
+                                  bool user_alloc)
 {
        if (mem->slot >= KVM_USER_MEM_SLOTS)
                return -EINVAL;
@@ -2148,7 +2148,7 @@ static long kvm_vm_ioctl(struct file *filp,
                                                sizeof kvm_userspace_mem))
                        goto out;
 
-               r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
+               r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true);
                break;
        }
        case KVM_GET_DIRTY_LOG: {