Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-udf-2.6
author    Linus Torvalds <torvalds@linux-foundation.org>    Sun, 29 Jun 2008 19:19:02 +0000 (12:19 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>    Sun, 29 Jun 2008 19:19:02 +0000 (12:19 -0700)
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-udf-2.6:
  udf: Fix regression in UDF anchor block detection

48 files changed:
.gitignore
Documentation/DocBook/kgdb.tmpl
Makefile
arch/blackfin/kernel/cplb-nompu/cplbinit.c
arch/blackfin/kernel/irqchip.c
arch/ia64/kernel/iosapic.c
arch/ia64/kernel/setup.c
arch/ia64/sn/kernel/sn2/sn2_smp.c
arch/x86/Kconfig
arch/x86/kernel/Makefile
arch/x86/kernel/kvmclock.c
arch/x86/kernel/pvclock.c [new file with mode: 0644]
arch/x86/kvm/i8254.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/xen/Kconfig
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/mmu.h
arch/x86/xen/time.c
arch/x86/xen/xen-head.S
drivers/char/drm/i915_drv.c
drivers/watchdog/Makefile
fs/block_dev.c
fs/dcache.c
fs/gfs2/bmap.c
fs/gfs2/rgrp.c
fs/locks.c
fs/namei.c
fs/pipe.c
fs/utimes.c
include/asm-alpha/percpu.h
include/asm-x86/kvm_host.h
include/asm-x86/kvm_para.h
include/asm-x86/pvclock-abi.h [new file with mode: 0644]
include/asm-x86/pvclock.h [new file with mode: 0644]
include/asm-x86/xen/page.h
include/linux/audit.h
include/linux/dcache.h
include/linux/fs.h
include/linux/kvm_host.h
include/xen/interface/xen.h
kernel/audit.c
kernel/auditfilter.c
kernel/kgdb.c
virt/kvm/ioapic.c

index 9bb1cb6..869e1a3 100644 (file)
@@ -3,6 +3,10 @@
 # subdirectories here. Add them in the ".gitignore" file
 # in that subdirectory instead.
 #
+# NOTE! Please use 'git-ls-files -i --exclude-standard'
+# command after changing this file, to see if there are
+# any tracked files which get ignored after the change.
+#
 # Normal rules
 #
 .*
 *.lst
 *.symtypes
 *.order
+*.elf
+*.bin
+*.gz
 
 #
 # Top-level generic files
 #
 tags
 TAGS
-vmlinux*
-!vmlinux.lds.S
-!vmlinux.lds.h
+vmlinux
 System.map
 Module.markers
 Module.symvers
 !.gitignore
+!.mailmap
 
 #
 # Generated include files
index 028a844..e8acd1f 100644 (file)
     runs an instance of gdb against the vmlinux file which contains
     the symbols (not boot image such as bzImage, zImage, uImage...).
     In gdb the developer specifies the connection parameters and
-    connects to kgdb.  Depending on which kgdb I/O modules exist in
-    the kernel for a given architecture, it may be possible to debug
-    the test machine's kernel with the development machine using a
-    rs232 or ethernet connection.
+    connects to kgdb.  The type of connection a developer makes with
+    gdb depends on the availability of kgdb I/O modules compiled as
+    built-ins or kernel modules in the test machine's kernel.
     </para>
   </chapter>
   <chapter id="CompilingAKernel">
   </para>
   <para>
   IMPORTANT NOTE: Using this option with kgdb over the console
-  (kgdboc) or kgdb over ethernet (kgdboe) is not supported.
+  (kgdboc) is not supported.
   </para>
   </sect1>
   </chapter>
     (gdb) target remote /dev/ttyS0
     </programlisting>
     <para>
-    Example (kgdb to a terminal server):
+    Example (kgdb to a terminal server on tcp port 2012):
     </para>
     <programlisting>
     % gdb ./vmlinux
-    (gdb) target remote udp:192.168.2.2:6443
-    </programlisting>
-    <para>
-    Example (kgdb over ethernet):
-    </para>
-    <programlisting>
-    % gdb ./vmlinux
-    (gdb) target remote udp:192.168.2.2:6443
+    (gdb) target remote 192.168.2.2:2012
     </programlisting>
     <para>
     Once connected, you can debug a kernel the way you would debug an
index 2b4977c..6aff5f4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 26
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Rotary Wombat
 
 # *DOCUMENTATION*
index 917325b..6be0c50 100644 (file)
@@ -254,7 +254,8 @@ close_cplbtab(struct cplb_tab *table)
 }
 
 /* helper function */
-static void __fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
+static void __init
+__fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
 {
        if (cplb_data[i].psize) {
                fill_cplbtab(t,
@@ -291,7 +292,8 @@ static void __fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_en
        }
 }
 
-static void __fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
+static void __init
+__fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
 {
        if (cplb_data[i].psize) {
                fill_cplbtab(t,
index 73647c1..07402f5 100644 (file)
@@ -60,9 +60,14 @@ static struct irq_chip bad_chip = {
 };
 
 static struct irq_desc bad_irq_desc = {
+       .status = IRQ_DISABLED,
        .chip = &bad_chip,
        .handle_irq = handle_bad_irq,
        .depth = 1,
+       .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+#ifdef CONFIG_SMP
+       .affinity = CPU_MASK_ALL
+#endif
 };
 
 int show_interrupts(struct seq_file *p, void *v)
index 082c31d..39752cd 100644 (file)
@@ -558,8 +558,6 @@ static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
        if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
                rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
                                    NR_PREALLOCATE_RTE_ENTRIES);
-               if (!rte)
-                       return NULL;
                for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
                        list_add(&rte->rte_list, &free_rte_list);
        }
index f48a809..4ae15c8 100644 (file)
@@ -578,8 +578,6 @@ setup_arch (char **cmdline_p)
        cpu_init();     /* initialize the bootstrap CPU */
        mmu_context_init();     /* initialize context_id bitmap */
 
-       check_sal_cache_flush();
-
 #ifdef CONFIG_ACPI
        acpi_boot_init();
 #endif
@@ -607,6 +605,7 @@ setup_arch (char **cmdline_p)
                ia64_mca_init();
 
        platform_setup(cmdline_p);
+       check_sal_cache_flush();
        paging_init();
 }
 
index 6dd886c..e585f9a 100644 (file)
@@ -512,7 +512,7 @@ static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, si
        int cpu;
        char optstr[64];
 
-       if (count > sizeof(optstr))
+       if (count == 0 || count > sizeof(optstr))
                return -EINVAL;
        if (copy_from_user(optstr, user, count))
                return -EFAULT;
index 52e18e6..e0edaaa 100644 (file)
@@ -383,6 +383,7 @@ config VMI
 config KVM_CLOCK
        bool "KVM paravirtualized clock"
        select PARAVIRT
+       select PARAVIRT_CLOCK
        depends on !(X86_VISWS || X86_VOYAGER)
        help
          Turning on this option will allow you to run a paravirtualized clock
@@ -410,6 +411,10 @@ config PARAVIRT
          over full virtualization.  However, when run without a hypervisor
          the kernel is theoretically slower and slightly larger.
 
+config PARAVIRT_CLOCK
+       bool
+       default n
+
 endif
 
 config MEMTEST_BOOTPARAM
index 5e618c3..77807d4 100644 (file)
@@ -82,6 +82,7 @@ obj-$(CONFIG_VMI)             += vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST)                += kvm.o
 obj-$(CONFIG_KVM_CLOCK)                += kvmclock.o
 obj-$(CONFIG_PARAVIRT)         += paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT_CLOCK)   += pvclock.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM)  += pcspeaker.o
 
index 08a3098..87edf1c 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/clocksource.h>
 #include <linux/kvm_para.h>
+#include <asm/pvclock.h>
 #include <asm/arch_hooks.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
@@ -36,18 +37,9 @@ static int parse_no_kvmclock(char *arg)
 early_param("no-kvmclock", parse_no_kvmclock);
 
 /* The hypervisor will put information about time periodically here */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct kvm_vcpu_time_info, hv_clock);
-#define get_clock(cpu, field) per_cpu(hv_clock, cpu).field
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct pvclock_vcpu_time_info, hv_clock);
+static struct pvclock_wall_clock wall_clock;
 
-static inline u64 kvm_get_delta(u64 last_tsc)
-{
-       int cpu = smp_processor_id();
-       u64 delta = native_read_tsc() - last_tsc;
-       return (delta * get_clock(cpu, tsc_to_system_mul)) >> KVM_SCALE;
-}
-
-static struct kvm_wall_clock wall_clock;
-static cycle_t kvm_clock_read(void);
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
  * have elapsed since the hypervisor wrote the data. So we try to account for
@@ -55,64 +47,37 @@ static cycle_t kvm_clock_read(void);
  */
 static unsigned long kvm_get_wallclock(void)
 {
-       u32 wc_sec, wc_nsec;
-       u64 delta;
+       struct pvclock_vcpu_time_info *vcpu_time;
        struct timespec ts;
-       int version, nsec;
        int low, high;
 
        low = (int)__pa(&wall_clock);
        high = ((u64)__pa(&wall_clock) >> 32);
+       native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
 
-       delta = kvm_clock_read();
+       vcpu_time = &get_cpu_var(hv_clock);
+       pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
+       put_cpu_var(hv_clock);
 
-       native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
-       do {
-               version = wall_clock.wc_version;
-               rmb();
-               wc_sec = wall_clock.wc_sec;
-               wc_nsec = wall_clock.wc_nsec;
-               rmb();
-       } while ((wall_clock.wc_version != version) || (version & 1));
-
-       delta = kvm_clock_read() - delta;
-       delta += wc_nsec;
-       nsec = do_div(delta, NSEC_PER_SEC);
-       set_normalized_timespec(&ts, wc_sec + delta, nsec);
-       /*
-        * Of all mechanisms of time adjustment I've tested, this one
-        * was the champion!
-        */
-       return ts.tv_sec + 1;
+       return ts.tv_sec;
 }
 
 static int kvm_set_wallclock(unsigned long now)
 {
-       return 0;
+       return -1;
 }
 
-/*
- * This is our read_clock function. The host puts an tsc timestamp each time
- * it updates a new time. Without the tsc adjustment, we can have a situation
- * in which a vcpu starts to run earlier (smaller system_time), but probes
- * time later (compared to another vcpu), leading to backwards time
- */
 static cycle_t kvm_clock_read(void)
 {
-       u64 last_tsc, now;
-       int cpu;
+       struct pvclock_vcpu_time_info *src;
+       cycle_t ret;
 
-       preempt_disable();
-       cpu = smp_processor_id();
-
-       last_tsc = get_clock(cpu, tsc_timestamp);
-       now = get_clock(cpu, system_time);
-
-       now += kvm_get_delta(last_tsc);
-       preempt_enable();
-
-       return now;
+       src = &get_cpu_var(hv_clock);
+       ret = pvclock_clocksource_read(src);
+       put_cpu_var(hv_clock);
+       return ret;
 }
+
 static struct clocksource kvm_clock = {
        .name = "kvm-clock",
        .read = kvm_clock_read,
@@ -123,13 +88,14 @@ static struct clocksource kvm_clock = {
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static int kvm_register_clock(void)
+static int kvm_register_clock(char *txt)
 {
        int cpu = smp_processor_id();
        int low, high;
        low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
        high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
-
+       printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
+              cpu, high, low, txt);
        return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
 }
 
@@ -140,12 +106,20 @@ static void kvm_setup_secondary_clock(void)
         * Now that the first cpu already had this clocksource initialized,
         * we shouldn't fail.
         */
-       WARN_ON(kvm_register_clock());
+       WARN_ON(kvm_register_clock("secondary cpu clock"));
        /* ok, done with our trickery, call native */
        setup_secondary_APIC_clock();
 }
 #endif
 
+#ifdef CONFIG_SMP
+void __init kvm_smp_prepare_boot_cpu(void)
+{
+       WARN_ON(kvm_register_clock("primary cpu clock"));
+       native_smp_prepare_boot_cpu();
+}
+#endif
+
 /*
  * After the clock is registered, the host will keep writing to the
  * registered memory location. If the guest happens to shutdown, this memory
@@ -174,13 +148,16 @@ void __init kvmclock_init(void)
                return;
 
        if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
-               if (kvm_register_clock())
+               if (kvm_register_clock("boot clock"))
                        return;
                pv_time_ops.get_wallclock = kvm_get_wallclock;
                pv_time_ops.set_wallclock = kvm_set_wallclock;
                pv_time_ops.sched_clock = kvm_clock_read;
 #ifdef CONFIG_X86_LOCAL_APIC
                pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
+#endif
+#ifdef CONFIG_SMP
+               smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
 #endif
                machine_ops.shutdown  = kvm_shutdown;
 #ifdef CONFIG_KEXEC
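
The registration above hands the hypervisor a guest physical address through MSR_KVM_SYSTEM_TIME, split into two 32-bit halves, with bit 0 of the low half doubling as the enable flag. A minimal userspace sketch of that encoding and its inverse (illustrative only, not part of the patch; assumes the address is at least 4-byte aligned so bit 0 is free):

    #include <stdint.h>
    #include <assert.h>

    /* Split a physical address into the low/high MSR halves used by
     * kvm_register_clock() above; bit 0 of the low half enables the clock. */
    static void encode_msr(uint64_t pa, uint32_t *low, uint32_t *high)
    {
            *low  = (uint32_t)pa | 1;               /* enable bit */
            *high = (uint32_t)(pa >> 32);
    }

    static uint64_t decode_msr(uint32_t low, uint32_t high)
    {
            return ((uint64_t)high << 32) | (low & ~1u);
    }

    int main(void)
    {
            uint32_t lo, hi;
            uint64_t pa = 0x123456789abcull;        /* hypothetical, aligned */

            encode_msr(pa, &lo, &hi);
            assert(decode_msr(lo, hi) == pa);
            return 0;
    }
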
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
new file mode 100644 (file)
index 0000000..05fbe9a
--- /dev/null
@@ -0,0 +1,141 @@
+/*  paravirtual clock -- common code used by kvm/xen
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <asm/pvclock.h>
+
+/*
+ * These are periodically updated
+ *    xen: magic shared_info page
+ *    kvm: gpa registered via msr
+ * and then copied here.
+ */
+struct pvclock_shadow_time {
+       u64 tsc_timestamp;     /* TSC at last update of time vals.  */
+       u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
+       u32 tsc_to_nsec_mul;
+       int tsc_shift;
+       u32 version;
+};
+
+/*
+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
+ * yielding a 64-bit result.
+ */
+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
+{
+       u64 product;
+#ifdef __i386__
+       u32 tmp1, tmp2;
+#endif
+
+       if (shift < 0)
+               delta >>= -shift;
+       else
+               delta <<= shift;
+
+#ifdef __i386__
+       __asm__ (
+               "mul  %5       ; "
+               "mov  %4,%%eax ; "
+               "mov  %%edx,%4 ; "
+               "mul  %5       ; "
+               "xor  %5,%5    ; "
+               "add  %4,%%eax ; "
+               "adc  %5,%%edx ; "
+               : "=A" (product), "=r" (tmp1), "=r" (tmp2)
+               : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
+#elif __x86_64__
+       __asm__ (
+               "mul %%rdx ; shrd $32,%%rdx,%%rax"
+               : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+#else
+#error implement me!
+#endif
+
+       return product;
+}
+
+static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
+{
+       u64 delta = native_read_tsc() - shadow->tsc_timestamp;
+       return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
+}
+
+/*
+ * Reads a consistent set of time-base values from hypervisor,
+ * into a shadow data area.
+ */
+static unsigned pvclock_get_time_values(struct pvclock_shadow_time *dst,
+                                       struct pvclock_vcpu_time_info *src)
+{
+       do {
+               dst->version = src->version;
+               rmb();          /* fetch version before data */
+               dst->tsc_timestamp     = src->tsc_timestamp;
+               dst->system_timestamp  = src->system_time;
+               dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
+               dst->tsc_shift         = src->tsc_shift;
+               rmb();          /* test version after fetching data */
+       } while ((src->version & 1) || (dst->version != src->version));
+
+       return dst->version;
+}
+
+cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+{
+       struct pvclock_shadow_time shadow;
+       unsigned version;
+       cycle_t ret, offset;
+
+       do {
+               version = pvclock_get_time_values(&shadow, src);
+               barrier();
+               offset = pvclock_get_nsec_offset(&shadow);
+               ret = shadow.system_timestamp + offset;
+               barrier();
+       } while (version != src->version);
+
+       return ret;
+}
+
+void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
+                           struct pvclock_vcpu_time_info *vcpu_time,
+                           struct timespec *ts)
+{
+       u32 version;
+       u64 delta;
+       struct timespec now;
+
+       /* get wallclock at system boot */
+       do {
+               version = wall_clock->version;
+               rmb();          /* fetch version before time */
+               now.tv_sec  = wall_clock->sec;
+               now.tv_nsec = wall_clock->nsec;
+               rmb();          /* fetch time before checking version */
+       } while ((wall_clock->version & 1) || (version != wall_clock->version));
+
+       delta = pvclock_clocksource_read(vcpu_time);    /* time since system boot */
+       delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;
+
+       now.tv_nsec = do_div(delta, NSEC_PER_SEC);
+       now.tv_sec = delta;
+
+       set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
+}
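
The inline assembly in scale_delta() computes ((delta << shift) * mul_frac) >> 32, keeping the upper 64 bits of the 96-bit product; that is what turns a raw TSC delta into nanoseconds once tsc_to_system_mul has been set up. A portable reference version for cross-checking the math, assuming a compiler with a 128-bit integer extension (a sketch, not kernel code):

    #include <stdint.h>

    /* Portable equivalent of pvclock's scale_delta(): pre-shift the
     * delta, multiply by a 32.32 fixed-point fraction, and keep the
     * top 64 bits of the product. */
    static uint64_t scale_delta_ref(uint64_t delta, uint32_t mul_frac, int shift)
    {
            if (shift < 0)
                    delta >>= -shift;
            else
                    delta <<= shift;
            return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
    }

With mul_frac chosen so that mul_frac / 2^32 is nanoseconds per (shifted) TSC tick, the result is nanoseconds since tsc_timestamp, which pvclock_clocksource_read() adds to system_timestamp.
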
index f2f5d26..3829aa7 100644 (file)
@@ -200,9 +200,12 @@ int __pit_timer_fn(struct kvm_kpit_state *ps)
 
        atomic_inc(&pt->pending);
        smp_mb__after_atomic_inc();
-       if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
-               vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-               wake_up_interruptible(&vcpu0->wq);
+       if (vcpu0) {
+               set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
+               if (waitqueue_active(&vcpu0->wq)) {
+                       vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+                       wake_up_interruptible(&vcpu0->wq);
+               }
        }
 
        pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
index c297c50..ebc03f5 100644 (file)
@@ -940,6 +940,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
        wait_queue_head_t *q = &apic->vcpu->wq;
 
        atomic_inc(&apic->timer.pending);
+       set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
        if (waitqueue_active(q)) {
                apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                wake_up_interruptible(q);
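
Both timer paths now publish the event with the same request-bit protocol that the vcpu loop consumes in x86.c below: atomically set the request bit, then wake the sleeper, so a vcpu that races past the wait queue still sees the bit before re-entering the guest. A stripped-down sketch of the idea in C11 atomics (names are made up for illustration; the kernel uses set_bit()/test_and_clear_bit() on vcpu->requests):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define REQ_PENDING_TIMER 0

    static _Atomic unsigned long requests;

    /* Producer (timer callback): record the event, then wake the vcpu. */
    static void timer_fired(void)
    {
            atomic_fetch_or(&requests, 1UL << REQ_PENDING_TIMER);
            /* ...wake up the vcpu's wait queue here... */
    }

    /* Consumer (vcpu loop): clear the bit and report whether it was set. */
    static bool timer_was_pending(void)
    {
            unsigned long old = atomic_fetch_and(&requests,
                                                 ~(1UL << REQ_PENDING_TIMER));
            return old & (1UL << REQ_PENDING_TIMER);
    }
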
index ee3f530..7e7c396 100644 (file)
@@ -640,6 +640,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
                        rmap_remove(kvm, spte);
                        --kvm->stat.lpages;
                        set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+                       spte = NULL;
                        write_protected = 1;
                }
                spte = rmap_next(kvm, rmapp, spte);
@@ -1082,10 +1083,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                struct kvm_mmu_page *shadow;
 
                spte |= PT_WRITABLE_MASK;
-               if (user_fault) {
-                       mmu_unshadow(vcpu->kvm, gfn);
-                       goto unshadowed;
-               }
 
                shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
                if (shadow ||
@@ -1102,8 +1099,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                }
        }
 
-unshadowed:
-
        if (pte_access & ACC_WRITE_MASK)
                mark_page_dirty(vcpu->kvm, gfn);
 
@@ -1580,11 +1575,13 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  u64 *spte,
                                  const void *new)
 {
-       if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
-           && !vcpu->arch.update_pte.largepage) {
-               ++vcpu->kvm->stat.mmu_pde_zapped;
-               return;
-       }
+       if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+               if (!vcpu->arch.update_pte.largepage ||
+                   sp->role.glevels == PT32_ROOT_LEVEL) {
+                       ++vcpu->kvm->stat.mmu_pde_zapped;
+                       return;
+               }
+        }
 
        ++vcpu->kvm->stat.mmu_pte_updated;
        if (sp->role.glevels == PT32_ROOT_LEVEL)
index 02efbe7..540e951 100644 (file)
@@ -566,7 +566,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        load_transition_efer(vmx);
 }
 
-static void vmx_load_host_state(struct vcpu_vmx *vmx)
+static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 {
        unsigned long flags;
 
@@ -596,6 +596,13 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
        reload_host_efer(vmx);
 }
 
+static void vmx_load_host_state(struct vcpu_vmx *vmx)
+{
+       preempt_disable();
+       __vmx_load_host_state(vmx);
+       preempt_enable();
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -654,7 +661,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
-       vmx_load_host_state(to_vmx(vcpu));
+       __vmx_load_host_state(to_vmx(vcpu));
 }
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
@@ -884,11 +891,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
        switch (msr_index) {
 #ifdef CONFIG_X86_64
        case MSR_EFER:
+               vmx_load_host_state(vmx);
                ret = kvm_set_msr_common(vcpu, msr_index, data);
-               if (vmx->host_state.loaded) {
-                       reload_host_efer(vmx);
-                       load_transition_efer(vmx);
-               }
                break;
        case MSR_FS_BASE:
                vmcs_writel(GUEST_FS_BASE, data);
@@ -910,11 +914,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                guest_write_tsc(data);
                break;
        default:
+               vmx_load_host_state(vmx);
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
                        msr->data = data;
-                       if (vmx->host_state.loaded)
-                               load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
                        break;
                }
                ret = kvm_set_msr_common(vcpu, msr_index, data);
index 00acf13..63a77ca 100644 (file)
@@ -492,8 +492,8 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 {
        static int version;
-       struct kvm_wall_clock wc;
-       struct timespec wc_ts;
+       struct pvclock_wall_clock wc;
+       struct timespec now, sys, boot;
 
        if (!wall_clock)
                return;
@@ -502,10 +502,19 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
 
-       wc_ts = current_kernel_time();
-       wc.wc_sec = wc_ts.tv_sec;
-       wc.wc_nsec = wc_ts.tv_nsec;
-       wc.wc_version = version;
+       /*
+        * The guest calculates current wall clock time by adding
+        * system time (updated by kvm_write_guest_time below) to the
+        * wall clock specified here.  guest system time equals host
+        * system time for us, thus we must fill in host boot time here.
+        */
+       now = current_kernel_time();
+       ktime_get_ts(&sys);
+       boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
+
+       wc.sec = boot.tv_sec;
+       wc.nsec = boot.tv_nsec;
+       wc.version = version;
 
        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
 
@@ -513,6 +522,45 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
 }
 
+static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
+{
+       uint32_t quotient, remainder;
+
+       /* Don't try to replace with do_div(), this one calculates
+        * "(dividend << 32) / divisor" */
+       __asm__ ( "divl %4"
+                 : "=a" (quotient), "=d" (remainder)
+                 : "0" (0), "1" (dividend), "r" (divisor) );
+       return quotient;
+}
+
+static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
+{
+       uint64_t nsecs = 1000000000LL;
+       int32_t  shift = 0;
+       uint64_t tps64;
+       uint32_t tps32;
+
+       tps64 = tsc_khz * 1000LL;
+       while (tps64 > nsecs*2) {
+               tps64 >>= 1;
+               shift--;
+       }
+
+       tps32 = (uint32_t)tps64;
+       while (tps32 <= (uint32_t)nsecs) {
+               tps32 <<= 1;
+               shift++;
+       }
+
+       hv_clock->tsc_shift = shift;
+       hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
+
+       pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
+                __FUNCTION__, tsc_khz, hv_clock->tsc_shift,
+                hv_clock->tsc_to_system_mul);
+}
+
 static void kvm_write_guest_time(struct kvm_vcpu *v)
 {
        struct timespec ts;
@@ -523,6 +571,11 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
        if ((!vcpu->time_page))
                return;
 
+       if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) {
+               kvm_set_time_scale(tsc_khz, &vcpu->hv_clock);
+               vcpu->hv_clock_tsc_khz = tsc_khz;
+       }
+
        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
@@ -537,14 +590,14 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
        /*
         * The interface expects us to write an even number signaling that the
         * update is finished. Since the guest won't see the intermediate
-        * state, we just write "2" at the end
+        * state, we just increase by 2 at the end.
         */
-       vcpu->hv_clock.version = 2;
+       vcpu->hv_clock.version += 2;
 
        shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
 
        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-               sizeof(vcpu->hv_clock));
+              sizeof(vcpu->hv_clock));
 
        kunmap_atomic(shared_kaddr, KM_USER0);
 
@@ -599,10 +652,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
 
-               vcpu->arch.hv_clock.tsc_to_system_mul =
-                                       clocksource_khz2mult(tsc_khz, 22);
-               vcpu->arch.hv_clock.tsc_shift = 22;
-
                down_read(&current->mm->mmap_sem);
                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
@@ -2759,6 +2808,8 @@ again:
        if (vcpu->requests) {
                if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
                        __kvm_migrate_timers(vcpu);
+               if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+                       kvm_x86_ops->tlb_flush(vcpu);
                if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
                                       &vcpu->requests)) {
                        kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -2772,6 +2823,7 @@ again:
                }
        }
 
+       clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
        kvm_inject_pending_timer_irqs(vcpu);
 
        preempt_disable();
@@ -2781,21 +2833,13 @@ again:
 
        local_irq_disable();
 
-       if (need_resched()) {
+       if (vcpu->requests || need_resched()) {
                local_irq_enable();
                preempt_enable();
                r = 1;
                goto out;
        }
 
-       if (vcpu->requests)
-               if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
-                       local_irq_enable();
-                       preempt_enable();
-                       r = 1;
-                       goto out;
-               }
-
        if (signal_pending(current)) {
                local_irq_enable();
                preempt_enable();
@@ -2825,9 +2869,6 @@ again:
 
        kvm_guest_enter();
 
-       if (vcpu->requests)
-               if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
-                       kvm_x86_ops->tlb_flush(vcpu);
 
        KVMTRACE_0D(VMENTRY, vcpu, entryexit);
        kvm_x86_ops->run(vcpu, kvm_run);
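
kvm_set_time_scale() above normalizes the guest's TSC rate into a (tsc_shift, tsc_to_system_mul) pair such that nsec = ((tsc_delta << tsc_shift) * mul) >> 32, with the multiplier produced by div_frac()'s "(dividend << 32) / divisor". A userspace re-derivation of the same arithmetic for a hypothetical 2.6 GHz TSC (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t nsecs = 1000000000ULL;
            uint32_t tsc_khz = 2600000;        /* 2.6 GHz, example value */
            uint64_t tps64 = tsc_khz * 1000ULL;
            int32_t shift = 0;
            uint32_t tps32, mul;

            while (tps64 > nsecs * 2) {        /* bring ticks/sec into range */
                    tps64 >>= 1;
                    shift--;
            }
            tps32 = (uint32_t)tps64;
            while (tps32 <= (uint32_t)nsecs) {
                    tps32 <<= 1;
                    shift++;
            }
            /* (nsecs << 32) / tps32: exactly what div_frac() computes */
            mul = (uint32_t)(((uint64_t)nsecs << 32) / tps32);
            printf("tsc_shift=%d tsc_to_system_mul=%u\n", shift, mul);
            return 0;
    }

For this rate it prints tsc_shift=-1, i.e. halve the delta first, then multiply by roughly 0.77 * 2^32 to get nanoseconds.
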
index 2e641be..6c388e5 100644 (file)
@@ -5,8 +5,9 @@
 config XEN
        bool "Xen guest support"
        select PARAVIRT
+       select PARAVIRT_CLOCK
        depends on X86_32
-       depends on X86_CMPXCHG && X86_TSC && !(X86_VISWS || X86_VOYAGER)
+       depends on X86_CMPXCHG && X86_TSC && X86_PAE && !(X86_VISWS || X86_VOYAGER)
        help
          This is the Linux Xen port.  Enabling this will allow the
          kernel to boot in a paravirtualized environment under the
index c048de3..f09c1c6 100644 (file)
@@ -785,38 +785,35 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 static __init void xen_pagetable_setup_start(pgd_t *base)
 {
        pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
+       int i;
 
        /* special set_pte for pagetable initialization */
        pv_mmu_ops.set_pte = xen_set_pte_init;
 
        init_mm.pgd = base;
        /*
-        * copy top-level of Xen-supplied pagetable into place.  For
-        * !PAE we can use this as-is, but for PAE it is a stand-in
-        * while we copy the pmd pages.
+        * copy top-level of Xen-supplied pagetable into place.  This
+        * is a stand-in while we copy the pmd pages.
         */
        memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
-       if (PTRS_PER_PMD > 1) {
-               int i;
-               /*
-                * For PAE, need to allocate new pmds, rather than
-                * share Xen's, since Xen doesn't like pmd's being
-                * shared between address spaces.
-                */
-               for (i = 0; i < PTRS_PER_PGD; i++) {
-                       if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
-                               pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+       /*
+        * For PAE, need to allocate new pmds, rather than
+        * share Xen's, since Xen doesn't like pmd's being
+        * shared between address spaces.
+        */
+       for (i = 0; i < PTRS_PER_PGD; i++) {
+               if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
+                       pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 
-                               memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
-                                      PAGE_SIZE);
+                       memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
+                              PAGE_SIZE);
 
-                               make_lowmem_page_readonly(pmd);
+                       make_lowmem_page_readonly(pmd);
 
-                               set_pgd(&base[i], __pgd(1 + __pa(pmd)));
-                       } else
-                               pgd_clear(&base[i]);
-               }
+                       set_pgd(&base[i], __pgd(1 + __pa(pmd)));
+               } else
+                       pgd_clear(&base[i]);
        }
 
        /* make sure zero_page is mapped RO so we can use it in pagetables */
@@ -873,17 +870,7 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
 
        /* Actually pin the pagetable down, but we can't set PG_pinned
           yet because the page structures don't exist yet. */
-       {
-               unsigned level;
-
-#ifdef CONFIG_X86_PAE
-               level = MMUEXT_PIN_L3_TABLE;
-#else
-               level = MMUEXT_PIN_L2_TABLE;
-#endif
-
-               pin_pagetable_pfn(level, PFN_DOWN(__pa(base)));
-       }
+       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base)));
 }
 
 /* This is called once we have the cpu_possible_map */
@@ -1093,7 +1080,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .make_pte = xen_make_pte,
        .make_pgd = xen_make_pgd,
 
-#ifdef CONFIG_X86_PAE
        .set_pte_atomic = xen_set_pte_atomic,
        .set_pte_present = xen_set_pte_at,
        .set_pud = xen_set_pud,
@@ -1102,7 +1088,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
        .make_pmd = xen_make_pmd,
        .pmd_val = xen_pmd_val,
-#endif /* PAE */
 
        .activate_mm = xen_activate_mm,
        .dup_mmap = xen_dup_mmap,
index 265601d..df40bf7 100644 (file)
@@ -228,7 +228,7 @@ pmdval_t xen_pmd_val(pmd_t pmd)
 {
        return pte_mfn_to_pfn(pmd.pmd);
 }
-#ifdef CONFIG_X86_PAE
+
 void xen_set_pud(pud_t *ptr, pud_t val)
 {
        struct multicall_space mcs;
@@ -276,12 +276,6 @@ pmd_t xen_make_pmd(pmdval_t pmd)
        pmd = pte_pfn_to_mfn(pmd);
        return native_make_pmd(pmd);
 }
-#else  /* !PAE */
-void xen_set_pte(pte_t *ptep, pte_t pte)
-{
-       *ptep = pte;
-}
-#endif /* CONFIG_X86_PAE */
 
 /*
   (Yet another) pagetable walker.  This one is intended for pinning a
@@ -434,8 +428,6 @@ static int pin_page(struct page *page, enum pt_level level)
    read-only, and can be pinned. */
 void xen_pgd_pin(pgd_t *pgd)
 {
-       unsigned level;
-
        xen_mc_batch();
 
        if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
@@ -445,14 +437,7 @@ void xen_pgd_pin(pgd_t *pgd)
                xen_mc_batch();
        }
 
-#ifdef CONFIG_X86_PAE
-       level = MMUEXT_PIN_L3_TABLE;
-#else
-       level = MMUEXT_PIN_L2_TABLE;
-#endif
-
-       xen_do_pin(level, PFN_DOWN(__pa(pgd)));
-
+       xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
        xen_mc_issue(0);
 }
 
index b5e189b..5fe961c 100644 (file)
@@ -37,14 +37,13 @@ void xen_exit_mmap(struct mm_struct *mm);
 void xen_pgd_pin(pgd_t *pgd);
 //void xen_pgd_unpin(pgd_t *pgd);
 
-#ifdef CONFIG_X86_PAE
-unsigned long long xen_pte_val(pte_t);
-unsigned long long xen_pmd_val(pmd_t);
-unsigned long long xen_pgd_val(pgd_t);
+pteval_t xen_pte_val(pte_t);
+pmdval_t xen_pmd_val(pmd_t);
+pgdval_t xen_pgd_val(pgd_t);
 
-pte_t xen_make_pte(unsigned long long);
-pmd_t xen_make_pmd(unsigned long long);
-pgd_t xen_make_pgd(unsigned long long);
+pte_t xen_make_pte(pteval_t);
+pmd_t xen_make_pmd(pmdval_t);
+pgd_t xen_make_pgd(pgdval_t);
 
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval);
@@ -53,15 +52,4 @@ void xen_set_pud(pud_t *ptr, pud_t val);
 void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 void xen_pmd_clear(pmd_t *pmdp);
 
-
-#else
-unsigned long xen_pte_val(pte_t);
-unsigned long xen_pmd_val(pmd_t);
-unsigned long xen_pgd_val(pgd_t);
-
-pte_t xen_make_pte(unsigned long);
-pmd_t xen_make_pmd(unsigned long);
-pgd_t xen_make_pgd(unsigned long);
-#endif
-
 #endif /* _XEN_MMU_H */
index 52b2e38..41e2175 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/math64.h>
 
+#include <asm/pvclock.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
 
 static cycle_t xen_clocksource_read(void);
 
-/* These are perodically updated in shared_info, and then copied here. */
-struct shadow_time_info {
-       u64 tsc_timestamp;     /* TSC at last update of time vals.  */
-       u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
-       u32 tsc_to_nsec_mul;
-       int tsc_shift;
-       u32 version;
-};
-
-static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
-
 /* runstate info updated by Xen */
 static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
 
@@ -211,7 +201,7 @@ unsigned long long xen_sched_clock(void)
 unsigned long xen_cpu_khz(void)
 {
        u64 xen_khz = 1000000ULL << 32;
-       const struct vcpu_time_info *info =
+       const struct pvclock_vcpu_time_info *info =
                &HYPERVISOR_shared_info->vcpu_info[0].time;
 
        do_div(xen_khz, info->tsc_to_system_mul);
@@ -223,121 +213,26 @@ unsigned long xen_cpu_khz(void)
        return xen_khz;
 }
 
-/*
- * Reads a consistent set of time-base values from Xen, into a shadow data
- * area.
- */
-static unsigned get_time_values_from_xen(void)
-{
-       struct vcpu_time_info   *src;
-       struct shadow_time_info *dst;
-
-       /* src is shared memory with the hypervisor, so we need to
-          make sure we get a consistent snapshot, even in the face of
-          being preempted. */
-       src = &__get_cpu_var(xen_vcpu)->time;
-       dst = &__get_cpu_var(shadow_time);
-
-       do {
-               dst->version = src->version;
-               rmb();          /* fetch version before data */
-               dst->tsc_timestamp     = src->tsc_timestamp;
-               dst->system_timestamp  = src->system_time;
-               dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
-               dst->tsc_shift         = src->tsc_shift;
-               rmb();          /* test version after fetching data */
-       } while ((src->version & 1) | (dst->version ^ src->version));
-
-       return dst->version;
-}
-
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-{
-       u64 product;
-#ifdef __i386__
-       u32 tmp1, tmp2;
-#endif
-
-       if (shift < 0)
-               delta >>= -shift;
-       else
-               delta <<= shift;
-
-#ifdef __i386__
-       __asm__ (
-               "mul  %5       ; "
-               "mov  %4,%%eax ; "
-               "mov  %%edx,%4 ; "
-               "mul  %5       ; "
-               "xor  %5,%5    ; "
-               "add  %4,%%eax ; "
-               "adc  %5,%%edx ; "
-               : "=A" (product), "=r" (tmp1), "=r" (tmp2)
-               : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-#elif __x86_64__
-       __asm__ (
-               "mul %%rdx ; shrd $32,%%rdx,%%rax"
-               : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-#else
-#error implement me!
-#endif
-
-       return product;
-}
-
-static u64 get_nsec_offset(struct shadow_time_info *shadow)
-{
-       u64 now, delta;
-       now = native_read_tsc();
-       delta = now - shadow->tsc_timestamp;
-       return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
-}
-
 static cycle_t xen_clocksource_read(void)
 {
-       struct shadow_time_info *shadow = &get_cpu_var(shadow_time);
+        struct pvclock_vcpu_time_info *src;
        cycle_t ret;
-       unsigned version;
-
-       do {
-               version = get_time_values_from_xen();
-               barrier();
-               ret = shadow->system_timestamp + get_nsec_offset(shadow);
-               barrier();
-       } while (version != __get_cpu_var(xen_vcpu)->time.version);
-
-       put_cpu_var(shadow_time);
 
+       src = &get_cpu_var(xen_vcpu)->time;
+       ret = pvclock_clocksource_read(src);
+       put_cpu_var(xen_vcpu);
        return ret;
 }
 
 static void xen_read_wallclock(struct timespec *ts)
 {
-       const struct shared_info *s = HYPERVISOR_shared_info;
-       u32 version;
-       u64 delta;
-       struct timespec now;
-
-       /* get wallclock at system boot */
-       do {
-               version = s->wc_version;
-               rmb();          /* fetch version before time */
-               now.tv_sec  = s->wc_sec;
-               now.tv_nsec = s->wc_nsec;
-               rmb();          /* fetch time before checking version */
-       } while ((s->wc_version & 1) | (version ^ s->wc_version));
+       struct shared_info *s = HYPERVISOR_shared_info;
+       struct pvclock_wall_clock *wall_clock = &(s->wc);
+        struct pvclock_vcpu_time_info *vcpu_time;
 
-       delta = xen_clocksource_read(); /* time since system boot */
-       delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;
-
-       now.tv_nsec = do_div(delta, NSEC_PER_SEC);
-       now.tv_sec = delta;
-
-       set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
+       vcpu_time = &get_cpu_var(xen_vcpu)->time;
+       pvclock_read_wallclock(wall_clock, vcpu_time, ts);
+       put_cpu_var(xen_vcpu);
 }
 
 unsigned long xen_get_wallclock(void)
@@ -345,7 +240,6 @@ unsigned long xen_get_wallclock(void)
        struct timespec ts;
 
        xen_read_wallclock(&ts);
-
        return ts.tv_sec;
 }
 
@@ -569,8 +463,6 @@ __init void xen_time_init(void)
 {
        int cpu = smp_processor_id();
 
-       get_time_values_from_xen();
-
        clocksource_register(&xen_clocksource);
 
        if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
index 3175e97..6ec3b4f 100644 (file)
@@ -30,11 +30,7 @@ ENTRY(hypercall_page)
        ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          .long  startup_xen)
        ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long  hypercall_page)
        ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .asciz "!writable_page_tables|pae_pgdir_above_4gb")
-#ifdef CONFIG_X86_PAE
        ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz "yes")
-#else
-       ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz "no")
-#endif
        ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz "generic")
 
 #endif /*CONFIG_XEN */
index e8f3d68..93aed1c 100644 (file)
@@ -389,6 +389,7 @@ static int i915_resume(struct drm_device *dev)
        pci_restore_state(dev->pdev);
        if (pci_enable_device(dev->pdev))
                return -1;
+       pci_set_master(dev->pdev);
 
        pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
index 8662a6b..25b352b 100644 (file)
@@ -68,7 +68,6 @@ obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
 obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o
 obj-$(CONFIG_ITCO_WDT) += iTCO_wdt.o iTCO_vendor_support.o
 obj-$(CONFIG_IT8712F_WDT) += it8712f_wdt.o
-CFLAGS_hpwdt.o += -O
 obj-$(CONFIG_HP_WATCHDOG) += hpwdt.o
 obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
 obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
index 470c10c..10d8a0a 100644 (file)
@@ -931,8 +931,16 @@ static int do_open(struct block_device *bdev, struct file *file, int for_part)
        struct gendisk *disk;
        int ret;
        int part;
+       int perm = 0;
 
-       ret = devcgroup_inode_permission(bdev->bd_inode, file->f_mode);
+       if (file->f_mode & FMODE_READ)
+               perm |= MAY_READ;
+       if (file->f_mode & FMODE_WRITE)
+               perm |= MAY_WRITE;
+       /*
+        * hooks: /n/, see "layering violations".
+        */
+       ret = devcgroup_inode_permission(bdev->bd_inode, perm);
        if (ret != 0)
                return ret;
 
index 3ee588d..6068c25 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/syscalls.h>
 #include <linux/string.h>
 #include <linux/mm.h>
+#include <linux/fdtable.h>
 #include <linux/fs.h>
 #include <linux/fsnotify.h>
 #include <linux/slab.h>
@@ -106,9 +107,10 @@ static void dentry_lru_remove(struct dentry *dentry)
 /*
  * Release the dentry's inode, using the filesystem
  * d_iput() operation if defined.
- * Called with dcache_lock and per dentry lock held, drops both.
  */
 static void dentry_iput(struct dentry * dentry)
+       __releases(dentry->d_lock)
+       __releases(dcache_lock)
 {
        struct inode *inode = dentry->d_inode;
        if (inode) {
@@ -132,12 +134,13 @@ static void dentry_iput(struct dentry * dentry)
  * d_kill - kill dentry and return parent
  * @dentry: dentry to kill
  *
- * Called with dcache_lock and d_lock, releases both.  The dentry must
- * already be unhashed and removed from the LRU.
+ * The dentry must already be unhashed and removed from the LRU.
  *
  * If this is the root of the dentry tree, return NULL.
  */
 static struct dentry *d_kill(struct dentry *dentry)
+       __releases(dentry->d_lock)
+       __releases(dcache_lock)
 {
        struct dentry *parent;
 
@@ -383,11 +386,11 @@ restart:
  * Try to prune ancestors as well.  This is necessary to prevent
  * quadratic behavior of shrink_dcache_parent(), but is also expected
  * to be beneficial in reducing dentry cache fragmentation.
- *
- * Called with dcache_lock, drops it and then regains.
- * Called with dentry->d_lock held, drops it.
  */
 static void prune_one_dentry(struct dentry * dentry)
+       __releases(dentry->d_lock)
+       __releases(dcache_lock)
+       __acquires(dcache_lock)
 {
        __d_drop(dentry);
        dentry = d_kill(dentry);
@@ -1604,10 +1607,9 @@ static int d_isparent(struct dentry *p1, struct dentry *p2)
  *
  * Note: If ever the locking in lock_rename() changes, then please
  * remember to update this too...
- *
- * On return, dcache_lock will have been unlocked.
  */
 static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
+       __releases(dcache_lock)
 {
        struct mutex *m1 = NULL, *m2 = NULL;
        struct dentry *ret;
@@ -1743,11 +1745,9 @@ out_nolock:
 shouldnt_be_hashed:
        spin_unlock(&dcache_lock);
        BUG();
-       goto shouldnt_be_hashed;
 }
 
-static int prepend(char **buffer, int *buflen, const char *str,
-                         int namelen)
+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
 {
        *buflen -= namelen;
        if (*buflen < 0)
@@ -1757,8 +1757,13 @@ static int prepend(char **buffer, int *buflen, const char *str,
        return 0;
 }
 
+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+{
+       return prepend(buffer, buflen, name->name, name->len);
+}
+
 /**
- * d_path - return the path of a dentry
+ * __d_path - return the path of a dentry
  * @path: the dentry/vfsmount to report
  * @root: root vfsmnt/dentry (may be modified by this function)
  * @buffer: buffer to return value in
@@ -1779,9 +1784,10 @@ char *__d_path(const struct path *path, struct path *root,
 {
        struct dentry *dentry = path->dentry;
        struct vfsmount *vfsmnt = path->mnt;
-       char * end = buffer+buflen;
-       char * retval;
+       char *end = buffer + buflen;
+       char *retval;
 
+       spin_lock(&vfsmount_lock);
        prepend(&end, &buflen, "\0", 1);
        if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
                (prepend(&end, &buflen, " (deleted)", 10) != 0))
@@ -1800,38 +1806,37 @@ char *__d_path(const struct path *path, struct path *root,
                        break;
                if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
                        /* Global root? */
-                       spin_lock(&vfsmount_lock);
                        if (vfsmnt->mnt_parent == vfsmnt) {
-                               spin_unlock(&vfsmount_lock);
                                goto global_root;
                        }
                        dentry = vfsmnt->mnt_mountpoint;
                        vfsmnt = vfsmnt->mnt_parent;
-                       spin_unlock(&vfsmount_lock);
                        continue;
                }
                parent = dentry->d_parent;
                prefetch(parent);
-               if ((prepend(&end, &buflen, dentry->d_name.name,
-                               dentry->d_name.len) != 0) ||
+               if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
                    (prepend(&end, &buflen, "/", 1) != 0))
                        goto Elong;
                retval = end;
                dentry = parent;
        }
 
+out:
+       spin_unlock(&vfsmount_lock);
        return retval;
 
 global_root:
        retval += 1;    /* hit the slash */
-       if (prepend(&retval, &buflen, dentry->d_name.name,
-                   dentry->d_name.len) != 0)
+       if (prepend_name(&retval, &buflen, &dentry->d_name) != 0)
                goto Elong;
        root->mnt = vfsmnt;
        root->dentry = dentry;
-       return retval;
+       goto out;
+
 Elong:
-       return ERR_PTR(-ENAMETOOLONG);
+       retval = ERR_PTR(-ENAMETOOLONG);
+       goto out;
 }
 
 /**
@@ -1845,9 +1850,9 @@ Elong:
  *
  * Returns the buffer or an error code if the path was too long.
  *
- * "buflen" should be positive. Caller holds the dcache_lock.
+ * "buflen" should be positive.
  */
-char *d_path(struct path *path, char *buf, int buflen)
+char *d_path(const struct path *path, char *buf, int buflen)
 {
        char *res;
        struct path root;
@@ -1915,16 +1920,11 @@ char *dentry_path(struct dentry *dentry, char *buf, int buflen)
        retval = end-1;
        *retval = '/';
 
-       for (;;) {
-               struct dentry *parent;
-               if (IS_ROOT(dentry))
-                       break;
+       while (!IS_ROOT(dentry)) {
+               struct dentry *parent = dentry->d_parent;
 
-               parent = dentry->d_parent;
                prefetch(parent);
-
-               if ((prepend(&end, &buflen, dentry->d_name.name,
-                               dentry->d_name.len) != 0) ||
+               if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
                    (prepend(&end, &buflen, "/", 1) != 0))
                        goto Elong;
 
@@ -1975,7 +1975,7 @@ asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
        error = -ENOENT;
        /* Has the current directory been unlinked? */
        spin_lock(&dcache_lock);
-       if (pwd.dentry->d_parent == pwd.dentry || !d_unhashed(pwd.dentry)) {
+       if (IS_ROOT(pwd.dentry) || !d_unhashed(pwd.dentry)) {
                unsigned long len;
                struct path tmp = root;
                char * cwd;
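
The prepend()/prepend_name() helpers that __d_path() and dentry_path() now share build the result backwards, from the end of the caller's buffer toward the front, so each path component costs one copy and no memmove. A standalone sketch of the same technique (hypothetical userspace code, not the kernel API):

    #include <stdio.h>
    #include <string.h>

    static int prepend(char **buffer, int *buflen, const char *str, int namelen)
    {
            *buflen -= namelen;
            if (*buflen < 0)
                    return -1;      /* -ENAMETOOLONG in the kernel */
            *buffer -= namelen;
            memcpy(*buffer, str, namelen);
            return 0;
    }

    int main(void)
    {
            char buf[64];
            char *end = buf + sizeof(buf);
            int buflen = sizeof(buf);
            /* leaf-to-root order, as a dentry walk would visit them */
            const char *components[] = { "sshd_config", "ssh", "etc" };

            prepend(&end, &buflen, "\0", 1);
            for (int i = 0; i < 3; i++) {
                    prepend(&end, &buflen, components[i], strlen(components[i]));
                    prepend(&end, &buflen, "/", 1);
            }
            printf("%s\n", end);    /* /etc/ssh/sshd_config */
            return 0;
    }
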
index c19184f..bec76b1 100644 (file)
@@ -246,15 +246,11 @@ static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
 
 }
 
-static inline unsigned int zero_metapath_length(const struct metapath *mp,
-                                               unsigned height)
+static inline unsigned int metapath_branch_start(const struct metapath *mp)
 {
-       unsigned int i;
-       for (i = 0; i < height - 1; i++) {
-               if (mp->mp_list[i] != 0)
-                       return i;
-       }
-       return height;
+       if (mp->mp_list[0] == 0)
+               return 2;
+       return 1;
 }
 
 /**
@@ -436,7 +432,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *dibh = mp->mp_bh[0];
        u64 bn, dblock = 0;
-       unsigned n, i, blks, alloced = 0, iblks = 0, zmpl = 0;
+       unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
        unsigned dblks = 0;
        unsigned ptrs_per_blk;
        const unsigned end_of_metadata = height - 1;
@@ -471,9 +467,8 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
                        /* Building up tree height */
                        state = ALLOC_GROW_HEIGHT;
                        iblks = height - ip->i_height;
-                       zmpl = zero_metapath_length(mp, height);
-                       iblks -= zmpl;
-                       iblks += height;
+                       branch_start = metapath_branch_start(mp);
+                       iblks += (height - branch_start);
                }
        }
 
@@ -509,13 +504,13 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
                                        sizeof(struct gfs2_meta_header));
                                *ptr = zero_bn;
                                state = ALLOC_GROW_DEPTH;
-                               for(i = zmpl; i < height; i++) {
+                               for(i = branch_start; i < height; i++) {
                                        if (mp->mp_bh[i] == NULL)
                                                break;
                                        brelse(mp->mp_bh[i]);
                                        mp->mp_bh[i] = NULL;
                                }
-                               i = zmpl;
+                               i = branch_start;
                        }
                        if (n == 0)
                                break;
index 6387523..3401628 100644 (file)
@@ -195,7 +195,7 @@ ulong_aligned:
           depending on architecture.  I've experimented with several ways
           of writing this section such as using an else before the goto
           but this one seems to be the fastest. */
-       while ((unsigned char *)plong < end - 1) {
+       while ((unsigned char *)plong < end - sizeof(unsigned long)) {
                prefetch(plong + 1);
                if (((*plong) & LBITMASK) != lskipval)
                        break;
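
The bound fix above is worth spelling out: the scan dereferences a full unsigned long per iteration, so the last legal starting byte is end - sizeof(unsigned long); the old "end - 1" bound let the final read run up to sizeof(unsigned long) - 1 bytes past the buffer. A minimal sketch of the corrected loop shape (illustrative; assumes the buffer is word-aligned, as the on-disk bitmaps are):

    #include <stddef.h>

    static const unsigned char *skip_full_words(const unsigned char *p,
                                                const unsigned char *end,
                                                unsigned long skipval)
    {
            const unsigned long *plong = (const unsigned long *)p;

            /* Stop before the last (possibly partial) word: reading
             * *plong touches sizeof(unsigned long) bytes at once. */
            while ((const unsigned char *)plong < end - sizeof(unsigned long)) {
                    if (*plong != skipval)
                            break;
                    plong++;
            }
            return (const unsigned char *)plong;
    }
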
index 11dbf08..dce8c74 100644 (file)
@@ -561,9 +561,6 @@ static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
        /* insert into file's list */
        fl->fl_next = *pos;
        *pos = fl;
-
-       if (fl->fl_ops && fl->fl_ops->fl_insert)
-               fl->fl_ops->fl_insert(fl);
 }
 
 /*
@@ -586,9 +583,6 @@ static void locks_delete_lock(struct file_lock **thisfl_p)
                fl->fl_fasync = NULL;
        }
 
-       if (fl->fl_ops && fl->fl_ops->fl_remove)
-               fl->fl_ops->fl_remove(fl);
-
        if (fl->fl_nspid) {
                put_pid(fl->fl_nspid);
                fl->fl_nspid = NULL;
index c7e4353..01e67dd 100644 (file)
@@ -581,15 +581,13 @@ static __always_inline int link_path_walk(const char *name, struct nameidata *nd
        int result;
 
        /* make sure the stuff we saved doesn't go away */
-       dget(save.dentry);
-       mntget(save.mnt);
+       path_get(&save);
 
        result = __link_path_walk(name, nd);
        if (result == -ESTALE) {
                /* nd->path had been dropped */
                nd->path = save;
-               dget(nd->path.dentry);
-               mntget(nd->path.mnt);
+               path_get(&nd->path);
                nd->flags |= LOOKUP_REVAL;
                result = __link_path_walk(name, nd);
        }
@@ -1216,8 +1214,9 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
        nd->flags = flags;
        nd->depth = 0;
 
-       nd->path.mnt = mntget(mnt);
-       nd->path.dentry = dget(dentry);
+       nd->path.dentry = dentry;
+       nd->path.mnt = mnt;
+       path_get(&nd->path);
 
        retval = path_walk(name, nd);
        if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
@@ -2857,16 +2856,17 @@ int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
 {
        struct nameidata nd;
        void *cookie;
+       int res;
 
        nd.depth = 0;
        cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
-       if (!IS_ERR(cookie)) {
-               int res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
-               if (dentry->d_inode->i_op->put_link)
-                       dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
-               cookie = ERR_PTR(res);
-       }
-       return PTR_ERR(cookie);
+       if (IS_ERR(cookie))
+               return PTR_ERR(cookie);
+
+       res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
+       if (dentry->d_inode->i_op->put_link)
+               dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
+       return res;
 }
 
 int vfs_follow_link(struct nameidata *nd, const char *link)
index ec228bc..700f4e0 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1003,8 +1003,7 @@ struct file *create_write_pipe(void)
 void free_write_pipe(struct file *f)
 {
        free_pipe_info(f->f_dentry->d_inode);
-       dput(f->f_path.dentry);
-       mntput(f->f_path.mnt);
+       path_put(&f->f_path);
        put_filp(f);
 }
 
@@ -1015,8 +1014,8 @@ struct file *create_read_pipe(struct file *wrf)
                return ERR_PTR(-ENFILE);
 
        /* Grab pipe from the writer */
-       f->f_path.mnt = mntget(wrf->f_path.mnt);
-       f->f_path.dentry = dget(wrf->f_path.dentry);
+       f->f_path = wrf->f_path;
+       path_get(&wrf->f_path);
        f->f_mapping = wrf->f_path.dentry->d_inode->i_mapping;
 
        f->f_pos = 0;
@@ -1068,8 +1067,7 @@ int do_pipe(int *fd)
  err_fdr:
        put_unused_fd(fdr);
  err_read_pipe:
-       dput(fr->f_dentry);
-       mntput(fr->f_vfsmnt);
+       path_put(&fr->f_path);
        put_filp(fr);
  err_write_pipe:
        free_write_pipe(fw);
index af059d5..b6b664e 100644 (file)
@@ -40,14 +40,9 @@ asmlinkage long sys_utime(char __user *filename, struct utimbuf __user *times)
 
 #endif
 
-static bool nsec_special(long nsec)
-{
-       return nsec == UTIME_OMIT || nsec == UTIME_NOW;
-}
-
 static bool nsec_valid(long nsec)
 {
-       if (nsec_special(nsec))
+       if (nsec == UTIME_OMIT || nsec == UTIME_NOW)
                return true;
 
        return nsec >= 0 && nsec <= 999999999;
@@ -102,7 +97,11 @@ long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags
        if (error)
                goto dput_and_out;
 
-       /* Don't worry, the checks are done in inode_change_ok() */
+       if (times && times[0].tv_nsec == UTIME_NOW &&
+                    times[1].tv_nsec == UTIME_NOW)
+               times = NULL;
+
+       /* In most cases, the checks are done in inode_change_ok() */
        newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
        if (times) {
                error = -EPERM;
@@ -124,28 +123,34 @@ long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags
                        newattrs.ia_mtime.tv_nsec = times[1].tv_nsec;
                        newattrs.ia_valid |= ATTR_MTIME_SET;
                }
-       }
 
-       /*
-        * If times is NULL or both times are either UTIME_OMIT or
-        * UTIME_NOW, then need to check permissions, because
-        * inode_change_ok() won't do it.
-        */
-       if (!times || (nsec_special(times[0].tv_nsec) &&
-                      nsec_special(times[1].tv_nsec))) {
+               /*
+                * For the UTIME_OMIT/UTIME_NOW and UTIME_NOW/UTIME_OMIT
+                * cases, we need to make an extra check that is not done by
+                * inode_change_ok().
+                */
+               if (((times[0].tv_nsec == UTIME_NOW &&
+                           times[1].tv_nsec == UTIME_OMIT)
+                    ||
+                    (times[0].tv_nsec == UTIME_OMIT &&
+                           times[1].tv_nsec == UTIME_NOW))
+                   && !is_owner_or_cap(inode))
+                       goto mnt_drop_write_and_out;
+       } else {
+
+               /*
+                * If times is NULL (or both times are UTIME_NOW),
+                * then we need to check permissions, because
+                * inode_change_ok() won't do it.
+                */
                error = -EACCES;
                 if (IS_IMMUTABLE(inode))
                        goto mnt_drop_write_and_out;
 
                if (!is_owner_or_cap(inode)) {
-                       if (f) {
-                               if (!(f->f_mode & FMODE_WRITE))
-                                       goto mnt_drop_write_and_out;
-                       } else {
-                               error = vfs_permission(&nd, MAY_WRITE);
-                               if (error)
-                                       goto mnt_drop_write_and_out;
-                       }
+                       error = permission(inode, MAY_WRITE, NULL);
+                       if (error)
+                               goto mnt_drop_write_and_out;
                }
        }
        mutex_lock(&inode->i_mutex);
@@ -169,14 +174,6 @@ asmlinkage long sys_utimensat(int dfd, char __user *filename, struct timespec __
        if (utimes) {
                if (copy_from_user(&tstimes, utimes, sizeof(tstimes)))
                        return -EFAULT;
-               if ((tstimes[0].tv_nsec == UTIME_OMIT ||
-                    tstimes[0].tv_nsec == UTIME_NOW) &&
-                   tstimes[0].tv_sec != 0)
-                       return -EINVAL;
-               if ((tstimes[1].tv_nsec == UTIME_OMIT ||
-                    tstimes[1].tv_nsec == UTIME_NOW) &&
-                   tstimes[1].tv_sec != 0)
-                       return -EINVAL;
 
                /* Nothing to do, we must not even check the path.  */
                if (tstimes[0].tv_nsec == UTIME_OMIT &&
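
The fs/utimes.c changes above pin down the userspace-visible rules for
utimensat(): a NULL times pointer and a {UTIME_NOW, UTIME_NOW} pair now
take the same code path (write access or ownership suffices), while the
mixed UTIME_NOW/UTIME_OMIT combinations get the extra ownership check the
hunk adds, since inode_change_ok() does not cover them.  A small userspace
sketch of the two cases, assuming a glibc with the utimensat() wrapper
(the file name is illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>      /* AT_FDCWD */
    #include <stdio.h>
    #include <sys/stat.h>   /* utimensat(), UTIME_NOW, UTIME_OMIT */
    #include <time.h>

    int main(void)
    {
            struct timespec ts[2] = { { 0, UTIME_NOW },    /* atime */
                                      { 0, UTIME_NOW } };  /* mtime */

            /* Equivalent to passing NULL: sets both times to "now". */
            if (utimensat(AT_FDCWD, "/tmp/example", ts, 0))
                    perror("UTIME_NOW/UTIME_NOW");

            /* Mixed case: update mtime only.  With the hunk above this
             * requires ownership or CAP_FOWNER, not just write access. */
            ts[0].tv_nsec = UTIME_OMIT;
            if (utimensat(AT_FDCWD, "/tmp/example", ts, 0))
                    perror("UTIME_OMIT/UTIME_NOW");

            return 0;
    }
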
index 82e8a94..3495e8e 100644 (file)
--- a/include/asm-alpha/percpu.h
+++ b/include/asm-alpha/percpu.h
@@ -69,6 +69,8 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define __get_cpu_var(var)             per_cpu_var(var)
 #define __raw_get_cpu_var(var)         per_cpu_var(var)
 
+#define PER_CPU_ATTRIBUTES
+
 #endif /* SMP */
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
index 1d8cd01..844f2a8 100644 (file)
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -18,6 +18,7 @@
 #include <linux/kvm_para.h>
 #include <linux/kvm_types.h>
 
+#include <asm/pvclock-abi.h>
 #include <asm/desc.h>
 
 #define KVM_MAX_VCPUS 16
@@ -282,7 +283,8 @@ struct kvm_vcpu_arch {
        struct x86_emulate_ctxt emulate_ctxt;
 
        gpa_t time;
-       struct kvm_vcpu_time_info hv_clock;
+       struct pvclock_vcpu_time_info hv_clock;
+       unsigned int hv_clock_tsc_khz;
        unsigned int time_offset;
        struct page *time_page;
 };
index 5098459..bfd9900 100644 (file)
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -48,24 +48,6 @@ struct kvm_mmu_op_release_pt {
 #ifdef __KERNEL__
 #include <asm/processor.h>
 
-/* xen binary-compatible interface. See xen headers for details */
-struct kvm_vcpu_time_info {
-       uint32_t version;
-       uint32_t pad0;
-       uint64_t tsc_timestamp;
-       uint64_t system_time;
-       uint32_t tsc_to_system_mul;
-       int8_t   tsc_shift;
-       int8_t   pad[3];
-} __attribute__((__packed__)); /* 32 bytes */
-
-struct kvm_wall_clock {
-       uint32_t wc_version;
-       uint32_t wc_sec;
-       uint32_t wc_nsec;
-} __attribute__((__packed__));
-
-
 extern void kvmclock_init(void);
 
 
diff --git a/include/asm-x86/pvclock-abi.h b/include/asm-x86/pvclock-abi.h
new file mode 100644 (file)
index 0000000..6857f84
--- /dev/null
+++ b/include/asm-x86/pvclock-abi.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_X86_PVCLOCK_ABI_H_
+#define _ASM_X86_PVCLOCK_ABI_H_
+#ifndef __ASSEMBLY__
+
+/*
+ * These structs MUST NOT be changed.
+ * They are the ABI between hypervisor and guest OS.
+ * Both Xen and KVM use it.
+ *
+ * pvclock_vcpu_time_info holds the system time and the tsc timestamp
+ * of the last update. So the guest can use the tsc delta to get a
+ * more precise system time.  There is one per virtual cpu.
+ *
+ * pvclock_wall_clock references the point in time when the system
+ * time was zero (usually boot time), so the guest computes the
+ * current wall clock by adding the system time to it.
+ *
+ * Protocol for the "version" fields: the hypervisor increments the
+ * version (making it odd) before it starts updating the fields and
+ * increments it again (making it even) when done.  Thus the guest
+ * can make sure the time values it read are consistent by checking
+ * that the version is even and unchanged before and after reading.
+ */
+
+struct pvclock_vcpu_time_info {
+       u32   version;
+       u32   pad0;
+       u64   tsc_timestamp;
+       u64   system_time;
+       u32   tsc_to_system_mul;
+       s8    tsc_shift;
+       u8    pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct pvclock_wall_clock {
+       u32   version;
+       u32   sec;
+       u32   nsec;
+} __attribute__((__packed__));
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_PVCLOCK_ABI_H_ */
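
The version protocol described in the header comment is effectively a
sequence lock with the hypervisor as the only writer.  A minimal
kernel-style sketch of the guest-side read loop, leaving out the TSC-delta
scaling (tsc_to_system_mul/tsc_shift) that the real reader applies on top:

    #include <asm/pvclock-abi.h>
    #include <asm/system.h>         /* rmb() */

    /* Retry until the version is even (no update in progress) and
     * unchanged across the reads. */
    static u64 pvclock_read_system_time(struct pvclock_vcpu_time_info *src)
    {
            u32 version;
            u64 system_time;

            do {
                    version = src->version;
                    rmb();          /* read version before the fields */
                    system_time = src->system_time;
                    rmb();          /* read fields before the re-check */
            } while ((version & 1) || version != src->version);

            return system_time;     /* ns at the last hypervisor update */
    }
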
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h
new file mode 100644 (file)
index 0000000..85b1bba
--- /dev/null
+++ b/include/asm-x86/pvclock.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_X86_PVCLOCK_H_
+#define _ASM_X86_PVCLOCK_H_
+
+#include <linux/clocksource.h>
+#include <asm/pvclock-abi.h>
+
+/* Helper functions for the Xen and KVM paravirtual clock sources */
+cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
+                           struct pvclock_vcpu_time_info *vcpu,
+                           struct timespec *ts);
+
+#endif /* _ASM_X86_PVCLOCK_H_ */
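
A sketch of how a guest clocksource might sit on top of these helpers,
assuming a hypothetical this_cpu_time_info() accessor for the shared page
the hypervisor updates (in this kernel the clocksource read hook takes no
arguments, and pvclock_clocksource_read() already returns nanoseconds):

    #include <linux/clocksource.h>
    #include <asm/pvclock.h>

    /* Hypothetical: this CPU's pvclock_vcpu_time_info page. */
    extern struct pvclock_vcpu_time_info *this_cpu_time_info(void);

    static cycle_t example_clock_read(void)
    {
            return pvclock_clocksource_read(this_cpu_time_info());
    }

    static struct clocksource example_clocksource = {
            .name   = "example-pvclock",
            .rating = 400,
            .read   = example_clock_read,
            .mask   = CLOCKSOURCE_MASK(64),
            .mult   = 1,    /* read() already yields nanoseconds */
            .shift  = 0,
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };
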
index baf3a4d..e11f240 100644 (file)
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -150,13 +150,9 @@ static inline pte_t __pte_ma(pteval_t x)
        return (pte_t) { .pte = x };
 }
 
-#ifdef CONFIG_X86_PAE
 #define pmd_val_ma(v) ((v).pmd)
 #define pud_val_ma(v) ((v).pgd.pgd)
 #define __pmd_ma(x)    ((pmd_t) { (x) } )
-#else  /* !X86_PAE */
-#define pmd_val_ma(v)  ((v).pud.pgd.pgd)
-#endif /* CONFIG_X86_PAE */
 
 #define pgd_val_ma(x)  ((x).pgd)
 
index 63c3bb9..8b82974 100644 (file)
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -571,7 +571,7 @@ extern void             audit_log_lost(const char *message);
 extern int                 audit_update_lsm_rules(void);
 
                                /* Private API (for audit.c only) */
-extern int audit_filter_user(struct netlink_skb_parms *cb, int type);
+extern int audit_filter_user(struct netlink_skb_parms *cb);
 extern int audit_filter_type(int type);
 extern int  audit_receive_filter(int type, int pid, int uid, int seq,
                                void *data, size_t datasz, uid_t loginuid,
index 2a66394..d982eb8 100644 (file)
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -300,7 +300,7 @@ extern int d_validate(struct dentry *, struct dentry *);
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
 
 extern char *__d_path(const struct path *path, struct path *root, char *, int);
-extern char *d_path(struct path *, char *, int);
+extern char *d_path(const struct path *, char *, int);
 extern char *dentry_path(struct dentry *, char *, int);
 
 /* Allocation counts.. */
index d490779..7c10808 100644 (file)
@@ -894,8 +894,6 @@ static inline int file_check_writeable(struct file *filp)
 typedef struct files_struct *fl_owner_t;
 
 struct file_lock_operations {
-       void (*fl_insert)(struct file_lock *);  /* lock insertion callback */
-       void (*fl_remove)(struct file_lock *);  /* lock removal callback */
        void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
        void (*fl_release_private)(struct file_lock *);
 };
index 092b1b2..de9d1df 100644 (file)
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -33,6 +33,7 @@
 #define KVM_REQ_REPORT_TPR_ACCESS  2
 #define KVM_REQ_MMU_RELOAD         3
 #define KVM_REQ_TRIPLE_FAULT       4
+#define KVM_REQ_PENDING_TIMER      5
 
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
index 9b018da..819a033 100644 (file)
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -10,6 +10,7 @@
 #define __XEN_PUBLIC_XEN_H__
 
 #include <asm/xen/interface.h>
+#include <asm/pvclock-abi.h>
 
 /*
  * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
@@ -336,7 +337,7 @@ struct vcpu_info {
        uint8_t evtchn_upcall_mask;
        unsigned long evtchn_pending_sel;
        struct arch_vcpu_info arch;
-       struct vcpu_time_info time;
+       struct pvclock_vcpu_time_info time;
 }; /* 64 bytes (x86) */
 
 /*
@@ -384,9 +385,7 @@ struct shared_info {
         * Wallclock time: updated only by control software. Guests should base
         * their gettimeofday() syscall on this wallclock-base value.
         */
-       uint32_t wc_version;      /* Version counter: see vcpu_time_info_t. */
-       uint32_t wc_sec;          /* Secs  00:00:00 UTC, Jan 1, 1970.  */
-       uint32_t wc_nsec;         /* Nsecs 00:00:00 UTC, Jan 1, 1970.  */
+       struct pvclock_wall_clock wc;
 
        struct arch_shared_info arch;
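
With the three wc_* fields folded into one struct pvclock_wall_clock, a
Xen guest can hand the shared-info wallclock straight to the common
helper.  A sketch, assuming the usual HYPERVISOR_shared_info mapping and
a hypothetical accessor for this vcpu's time info:

    #include <asm/pvclock.h>
    #include <xen/interface/xen.h>

    extern struct shared_info *HYPERVISOR_shared_info;
    /* Hypothetical: this vcpu's pvclock_vcpu_time_info. */
    extern struct pvclock_vcpu_time_info *this_vcpu_time_info(void);

    static void example_read_wallclock(struct timespec *ts)
    {
            /* wc is the wall-clock value at system-time zero; the helper
             * adds the current system time on top of it. */
            pvclock_read_wallclock(&HYPERVISOR_shared_info->wc,
                                   this_vcpu_time_info(), ts);
    }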
 
index e8692a5..e092f1c 100644 (file)
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -738,7 +738,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (!audit_enabled && msg_type != AUDIT_USER_AVC)
                        return 0;
 
-               err = audit_filter_user(&NETLINK_CB(skb), msg_type);
+               err = audit_filter_user(&NETLINK_CB(skb));
                if (err == 1) {
                        err = 0;
                        if (msg_type == AUDIT_USER_TTY) {
@@ -779,7 +779,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                }
                /* fallthrough */
        case AUDIT_LIST:
-               err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
+               err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
                                           uid, seq, data, nlmsg_len(nlh),
                                           loginuid, sessionid, sid);
                break;
@@ -798,7 +798,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                }
                /* fallthrough */
        case AUDIT_LIST_RULES:
-               err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
+               err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
                                           uid, seq, data, nlmsg_len(nlh),
                                           loginuid, sessionid, sid);
                break;
index 0e0bd27..98c50cc 100644 (file)
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1544,6 +1544,7 @@ static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid,
  * @data: payload data
  * @datasz: size of payload data
  * @loginuid: loginuid of sender
+ * @sessionid: sessionid of sender
  * @sid: SE Linux Security ID of sender
  */
 int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
@@ -1720,7 +1721,7 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
        return 1;
 }
 
-int audit_filter_user(struct netlink_skb_parms *cb, int type)
+int audit_filter_user(struct netlink_skb_parms *cb)
 {
        enum audit_state state = AUDIT_DISABLED;
        struct audit_entry *e;
index 79e3c90..3ec23c3 100644 (file)
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -1499,7 +1499,8 @@ int kgdb_nmicallback(int cpu, void *regs)
        return 1;
 }
 
-void kgdb_console_write(struct console *co, const char *s, unsigned count)
+static void kgdb_console_write(struct console *co, const char *s,
+                              unsigned count)
 {
        unsigned long flags;
 
index 98778cb..1dcf9f3 100644 (file)
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -269,28 +269,9 @@ void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
        }
 }
 
-static int get_eoi_gsi(struct kvm_ioapic *ioapic, int vector)
+static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int gsi)
 {
-       int i;
-
-       for (i = 0; i < IOAPIC_NUM_PINS; i++)
-               if (ioapic->redirtbl[i].fields.vector == vector)
-                       return i;
-       return -1;
-}
-
-void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
-{
-       struct kvm_ioapic *ioapic = kvm->arch.vioapic;
        union ioapic_redir_entry *ent;
-       int gsi;
-
-       gsi = get_eoi_gsi(ioapic, vector);
-       if (gsi == -1) {
-               printk(KERN_WARNING "Can't find redir item for %d EOI\n",
-                      vector);
-               return;
-       }
 
        ent = &ioapic->redirtbl[gsi];
        ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
@@ -300,6 +281,16 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
                ioapic_deliver(ioapic, gsi);
 }
 
+void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
+{
+       struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+       int i;
+
+       for (i = 0; i < IOAPIC_NUM_PINS; i++)
+               if (ioapic->redirtbl[i].fields.vector == vector)
+                       __kvm_ioapic_update_eoi(ioapic, i);
+}
+
 static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr)
 {
        struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;