Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 16 Apr 2010 14:25:48 +0000 (07:25 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 16 Apr 2010 14:25:48 +0000 (07:25 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6:
  ASoC: imx-ssi: do not call hrtimer_disable in trigger function
  ALSA: hda - Add position_fix quirk for Biostar mobo
  ALSA: hda - add a quirk for Clevo M570U laptop
  ASoC: imx-ssi: increase minimum periods to 4
  ALSA: hda - Avoid invalid "Independent HP" control for VIA codecs
  ALSA: hda - Fix control element allocations in VIA codec parser
  ALSA: aaci - Fix alignment faults on ARM Cortex introduced by commit 29a4f2d3
  ALSA: hda - Add fix-up for Sony VAIO with ALC269
  ALSA: hda - Enhance fix-up table for Realtek codecs
  ALSA: usb - Fix Oops after usb-midi disconnection
  ALSA: hda - Fix initial capture source connections of ALC880/260
  ALSA: hda - Fix setup for ALC269vb amic and dmic models
  ALSA: hda - Fix auto-parser of ALC269vb for HP pin NID 0x21
  ASoC: imx-ssi: Use a hrtimer in FIQ mode
  ASoC: imx-pcm-dma-mx2: restart DMA after an error
  ASoC: imx-ssi: honor IMX_SSI_DMA flag
  ASoC: wm2000: remove unused #include <linux/version.h>
  ALSA: hda: Add support for Medion WIM2160

98 files changed:
Documentation/input/multi-touch-protocol.txt
Documentation/kernel-parameters.txt
Documentation/networking/timestamping.txt
MAINTAINERS
arch/m68k/include/asm/atomic_mm.h
arch/m68k/include/asm/sigcontext.h
arch/sparc/Kconfig
arch/sparc/Kconfig.debug
arch/sparc/include/asm/cpudata_64.h
arch/sparc/include/asm/irqflags_64.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/ftrace.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/kgdb_64.c
arch/sparc/kernel/nmi.c
arch/sparc/kernel/pci_common.c
arch/sparc/kernel/pcr.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/time_64.c
arch/sparc/kernel/traps_64.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/lib/mcount.S
arch/x86/include/asm/amd_iommu_types.h
arch/x86/include/asm/lguest_hcall.h
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/aperture_64.c
arch/x86/kernel/crash.c
arch/x86/kernel/pci-gart_64.c
arch/x86/lguest/boot.c
arch/x86/lguest/i386_head.S
drivers/firewire/core-cdev.c
drivers/hwmon/applesmc.c
drivers/hwmon/it87.c
drivers/hwmon/sht15.c
drivers/input/input.c
drivers/input/keyboard/matrix_keypad.c
drivers/input/mouse/alps.c
drivers/input/mouse/bcm5974.c
drivers/input/serio/i8042.c
drivers/input/sparse-keymap.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/lguest/lguest_device.c
drivers/lguest/x86/core.c
drivers/net/cnic.c
drivers/net/e1000e/netdev.c
drivers/net/igb/igb_ethtool.c
drivers/net/igb/igb_main.c
drivers/net/myri10ge/myri10ge.c
drivers/net/pcmcia/smc91c92_cs.c
drivers/net/qlcnic/qlcnic_hw.c
drivers/net/r6040.c
drivers/net/stmmac/stmmac_main.c
drivers/net/virtio_net.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/vhost/vhost.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/messenger.c
fs/ceph/osdmap.c
fs/ceph/osdmap.h
fs/ceph/rados.h
fs/ceph/snap.c
fs/ceph/super.h
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/inode.c
fs/nfs/nfs4proc.c
fs/nfs/write.c
include/linux/firewire-cdev.h
include/linux/firewire-constants.h
include/linux/input/matrix_keypad.h
include/linux/nfs_fs_sb.h
include/net/x25.h
kernel/power/user.c
lib/Kconfig.debug
lib/dma-debug.c
lib/vsprintf.c
net/bridge/br_multicast.c
net/can/raw.c
net/ipv4/udp.c
net/ipv6/udp.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/x25/af_x25.c
net/x25/x25_facilities.c
net/x25/x25_in.c
security/selinux/ss/avtab.h

diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 8490480..c0fc1c7 100644
@@ -68,6 +68,22 @@ like:
    SYN_MT_REPORT
    SYN_REPORT
 
+Here is the sequence after lifting one of the fingers:
+
+   ABS_MT_POSITION_X
+   ABS_MT_POSITION_Y
+   SYN_MT_REPORT
+   SYN_REPORT
+
+And here is the sequence after lifting the remaining finger:
+
+   SYN_MT_REPORT
+   SYN_REPORT
+
+If the driver reports one of BTN_TOUCH or ABS_PRESSURE in addition to the
+ABS_MT events, the last SYN_MT_REPORT event may be omitted. Otherwise, the
+last SYN_REPORT will be dropped by the input core, resulting in no
+zero-finger event reaching userland.
 
 Event Semantics
 ---------------
@@ -217,11 +233,6 @@ where examples can be found.
 difference between the contact position and the approaching tool position
 could be used to derive tilt.
 [2] The list can of course be extended.
-[3] The multi-touch X driver is currently in the prototyping stage. At the
-time of writing (April 2009), the MT protocol is not yet merged, and the
-prototype implements finger matching, basic mouse support and two-finger
-scrolling. The project aims at improving the quality of current multi-touch
-functionality available in the Synaptics X driver, and in addition
-implement more advanced gestures.
+[3] Multitouch X driver project: http://bitmath.org/code/multitouch/.
 [4] See the section on event computation.
 [5] See the section on finger tracking.
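For readers of the protocol text patched above, here is a minimal driver-side sketch of how these sequences can be produced with the in-kernel input helpers (input_report_abs(), input_mt_sync(), input_sync(), input_report_key()); the report_fingers() function and its struct finger are illustrative names only, not code from this merge.

#include <linux/input.h>

struct finger {
	int x, y;
};

/*
 * Report one frame for 'count' active contacts on an already-registered
 * input device.  input_mt_sync() emits SYN_MT_REPORT and input_sync()
 * emits SYN_REPORT, so zero contacts degenerates into the
 * SYN_MT_REPORT + SYN_REPORT pair shown in the text above.
 */
static void report_fingers(struct input_dev *dev,
			   const struct finger *f, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		input_report_abs(dev, ABS_MT_POSITION_X, f[i].x);
		input_report_abs(dev, ABS_MT_POSITION_Y, f[i].y);
		input_mt_sync(dev);		/* SYN_MT_REPORT */
	}
	if (!count)
		input_mt_sync(dev);		/* empty zero-finger frame */

	/* Optional single-touch companion data, as discussed above. */
	input_report_key(dev, BTN_TOUCH, count > 0);

	input_sync(dev);			/* SYN_REPORT */
}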
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e4cbca5..e2202e9 100644
@@ -320,11 +320,6 @@ and is between 256 and 4096 characters. It is defined in the file
        amd_iommu=      [HW,X86-64]
                        Pass parameters to the AMD IOMMU driver in the system.
                        Possible values are:
-                       isolate - enable device isolation (each device, as far
-                                 as possible, will get its own protection
-                                 domain) [default]
-                       share - put every device behind one IOMMU into the
-                               same protection domain
                        fullflush - enable flushing of IO/TLB entries when
                                    they are unmapped. Otherwise they are
                                    flushed before they will be reused, which
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 0e58b45..e8c8f4f 100644
@@ -41,11 +41,12 @@ SOF_TIMESTAMPING_SOFTWARE:     return system time stamp generated in
 SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
 SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
 following control message:
-    struct scm_timestamping {
-           struct timespec systime;
-           struct timespec hwtimetrans;
-           struct timespec hwtimeraw;
-    };
+
+struct scm_timestamping {
+       struct timespec systime;
+       struct timespec hwtimetrans;
+       struct timespec hwtimeraw;
+};
 
 recvmsg() can be used to get this control message for regular incoming
 packets. For send time stamps the outgoing packet is looped back to
@@ -87,12 +88,13 @@ by the network device and will be empty without that support.
 SIOCSHWTSTAMP:
 
 Hardware time stamping must also be initialized for each device driver
-that is expected to do hardware time stamping. The parameter is:
+that is expected to do hardware time stamping. The parameter is defined in
+/include/linux/net_tstamp.h as:
 
 struct hwtstamp_config {
-    int flags;           /* no flags defined right now, must be zero */
-    int tx_type;         /* HWTSTAMP_TX_* */
-    int rx_filter;       /* HWTSTAMP_FILTER_* */
+       int flags;      /* no flags defined right now, must be zero */
+       int tx_type;    /* HWTSTAMP_TX_* */
+       int rx_filter;  /* HWTSTAMP_FILTER_* */
 };
 
 Desired behavior is passed into the kernel and to a specific device by
@@ -139,42 +141,56 @@ enum {
        /* time stamp any incoming packet */
        HWTSTAMP_FILTER_ALL,
 
-        /* return value: time stamp all packets requested plus some others */
-        HWTSTAMP_FILTER_SOME,
+       /* return value: time stamp all packets requested plus some others */
+       HWTSTAMP_FILTER_SOME,
 
        /* PTP v1, UDP, any kind of event packet */
        HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
 
-        ...
+       /* for the complete list of values, please check
+        * the include file /include/linux/net_tstamp.h
+        */
 };
 
 
 DEVICE IMPLEMENTATION
 
 A driver which supports hardware time stamping must support the
-SIOCSHWTSTAMP ioctl. Time stamps for received packets must be stored
-in the skb with skb_hwtstamp_set().
+SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with
+the actual values as described in the section on SIOCSHWTSTAMP.
+
+Time stamps for received packets must be stored in the skb. To get a pointer
+to the shared time stamp structure of the skb call skb_hwtstamps(). Then
+set the time stamps in the structure:
+
+struct skb_shared_hwtstamps {
+       /* hardware time stamp transformed into duration
+        * since arbitrary point in time
+        */
+       ktime_t hwtstamp;
+       ktime_t syststamp; /* hwtstamp transformed to system time base */
+};
 
 Time stamps for outgoing packets are to be generated as follows:
-- In hard_start_xmit(), check if skb_hwtstamp_check_tx_hardware()
-  returns non-zero. If yes, then the driver is expected
-  to do hardware time stamping.
+- In hard_start_xmit(), check if skb_tx(skb)->hardware is set non-zero.
+  If yes, then the driver is expected to do hardware time stamping.
 - If this is possible for the skb and requested, then declare
-  that the driver is doing the time stamping by calling
-  skb_hwtstamp_tx_in_progress(). A driver not supporting
-  hardware time stamping doesn't do that. A driver must never
-  touch sk_buff::tstamp! It is used to store how time stamping
-  for an outgoing packets is to be done.
+  that the driver is doing the time stamping by setting the field
+  skb_tx(skb)->in_progress non-zero. You might want to keep a pointer
+  to the associated skb for the next step and not free the skb. A driver
+  not supporting hardware time stamping doesn't do that. A driver must
+  never touch sk_buff::tstamp! It is used to store software generated
+  time stamps by the network subsystem.
 - As soon as the driver has sent the packet and/or obtained a
   hardware time stamp for it, it passes the time stamp back by
   calling skb_hwtstamp_tx() with the original skb, the raw
-  hardware time stamp and a handle to the device (necessary
-  to convert the hardware time stamp to system time). If obtaining
-  the hardware time stamp somehow fails, then the driver should
-  not fall back to software time stamping. The rationale is that
-  this would occur at a later time in the processing pipeline
-  than other software time stamping and therefore could lead
-  to unexpected deltas between time stamps.
-- If the driver did not call skb_hwtstamp_tx_in_progress(), then
+  hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+  adds the timestamps, therefore the original skb has to be freed now.
+  If obtaining the hardware time stamp somehow fails, then the driver
+  should not fall back to software time stamping. The rationale is that
+  this would occur at a later time in the processing pipeline than other
+  software time stamping and therefore could lead to unexpected deltas
+  between time stamps.
+- If the driver did not set skb_tx(skb)->in_progress, then
   dev_hard_start_xmit() checks whether software time stamping
   is wanted as fallback and potentially generates the time stamp.
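To make the transmit-side rules above concrete, here is a minimal sketch of that flow written against the helper names used in this document (skb_tx(), skb_hwtstamp_tx(), struct skb_shared_hwtstamps); the my_*() functions are placeholders for driver-specific code, and the helper names should be checked against include/linux/skbuff.h of the kernel at hand.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ktime.h>

/* Placeholder: turn the NIC's raw counter into a ktime_t. */
static ktime_t my_hw_read_tstamp(struct net_device *dev)
{
	u64 raw_ns = 0;		/* fetch and scale the hardware counter here */

	return ns_to_ktime(raw_ns);
}

static netdev_tx_t my_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	union skb_shared_tx *shtx = skb_tx(skb);

	if (shtx->hardware) {
		/* Claim the stamp; keep the skb until the stamp is ready. */
		shtx->in_progress = 1;
	}

	/* ... hand the frame to the hardware, remember skb for completion ... */
	return NETDEV_TX_OK;
}

/* TX completion path, once the hardware stamp for 'skb' is available. */
static void my_tx_complete(struct sk_buff *skb, struct net_device *dev)
{
	struct skb_shared_hwtstamps hwts = {
		.hwtstamp = my_hw_read_tstamp(dev),
	};

	/* The helper clones the skb; we still free our own reference. */
	skb_hwtstamp_tx(skb, &hwts);
	dev_kfree_skb_any(skb);
}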
diff --git a/MAINTAINERS b/MAINTAINERS
index 5b42290..a0e3c3a 100644
@@ -485,8 +485,8 @@ S:  Maintained
 F:     drivers/input/mouse/bcm5974.c
 
 APPLE SMC DRIVER
-M:     Nicolas Boichat <nicolas@boichat.ch>
-L:     mactel-linux-devel@lists.sourceforge.net
+M:     Henrik Rydberg <rydberg@euromail.se>
+L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     drivers/hwmon/applesmc.c
 
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h
index 88b7af2..d9d2ed6 100644
@@ -148,14 +148,18 @@ static inline int atomic_xchg(atomic_t *v, int new)
 static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
        char c;
-       __asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i));
+       __asm__ __volatile__("subl %2,%1; seq %0"
+                            : "=d" (c), "+m" (*v)
+                            : "id" (i));
        return c != 0;
 }
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
        char c;
-       __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
+       __asm__ __volatile__("addl %2,%1; smi %0"
+                            : "=d" (c), "+m" (*v)
+                            : "id" (i));
        return c != 0;
 }
 
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h
index 1320eaa..a29dd74 100644
@@ -17,13 +17,11 @@ struct sigcontext {
 #ifndef __uClinux__
 # ifdef __mcoldfire__
        unsigned long  sc_fpregs[2][2]; /* room for two fp registers */
-       unsigned long  sc_fpcntl[3];
-       unsigned char  sc_fpstate[16+6*8];
 # else
        unsigned long  sc_fpregs[2*3];  /* room for two fp registers */
+# endif
        unsigned long  sc_fpcntl[3];
        unsigned char  sc_fpstate[216];
-# endif
 #endif
 };
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6db5136..9908d47 100644
@@ -37,6 +37,9 @@ config SPARC64
        def_bool 64BIT
        select ARCH_SUPPORTS_MSI
        select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_GRAPH_FP_TEST
+       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KRETPROBES
        select HAVE_KPROBES
        select HAVE_LMB
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 9d3c889..1b4a831 100644
@@ -19,13 +19,10 @@ config DEBUG_DCFLUSH
        bool "D-cache flush debugging"
        depends on SPARC64 && DEBUG_KERNEL
 
-config STACK_DEBUG
-       bool "Stack Overflow Detection Support"
-
 config MCOUNT
        bool
        depends on SPARC64
-       depends on STACK_DEBUG || FUNCTION_TRACER
+       depends on FUNCTION_TRACER
        default y
 
 config FRAME_POINTER
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 926397d..050ef35 100644
@@ -17,7 +17,7 @@ typedef struct {
        unsigned int    __nmi_count;
        unsigned long   clock_tick;     /* %tick's per second */
        unsigned long   __pad;
-       unsigned int    __pad1;
+       unsigned int    irq0_irqs;
        unsigned int    __pad2;
 
        /* Dcache line 2, rarely used */
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index 8b49bf9..bfa1ea4 100644
@@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void)
  */
 static inline unsigned long __raw_local_irq_save(void)
 {
-       unsigned long flags = __raw_local_save_flags();
-
-       raw_local_irq_disable();
+       unsigned long flags, tmp;
+
+       /* Disable interrupts to PIL_NORMAL_MAX unless we already
+        * are using PIL_NMI, in which case PIL_NMI is retained.
+        *
+        * The only values we ever program into the %pil are 0,
+        * PIL_NORMAL_MAX and PIL_NMI.
+        *
+        * Since PIL_NMI is the largest %pil value and all bits are
+        * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
+        * actually is.
+        */
+       __asm__ __volatile__(
+               "rdpr   %%pil, %0\n\t"
+               "or     %0, %2, %1\n\t"
+               "wrpr   %1, 0x0, %%pil"
+               : "=r" (flags), "=r" (tmp)
+               : "i" (PIL_NORMAL_MAX)
+               : "memory"
+       );
 
        return flags;
 }
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index c631614..0c2dc1f 100644
@@ -13,6 +13,14 @@ extra-y     += init_task.o
 CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
 extra-y              += vmlinux.lds
 
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o := -pg
+CFLAGS_REMOVE_time_$(BITS).o := -pg
+CFLAGS_REMOVE_perf_event.o := -pg
+CFLAGS_REMOVE_pcr.o := -pg
+endif
+
 obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
 obj-$(CONFIG_SPARC32)   += etrap_32.o
 obj-$(CONFIG_SPARC32)   += rtrap_32.o
@@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB)        += kgdb_$(BITS).o
 
 
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-CFLAGS_REMOVE_ftrace.o := -pg
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 obj-$(CONFIG_EARLYFB) += btext.o
 obj-$(CONFIG_STACKTRACE)     += stacktrace.o
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 9103a56..03ab022 100644
@@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000;
 
 static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
-       static u32 call;
+       u32 call;
        s32 off;
 
        off = ((s32)addr - (s32)ip);
@@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data)
        return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       unsigned long ip = (unsigned long)(&ftrace_graph_call);
+       u32 old, new;
+
+       old = *(u32 *) &ftrace_graph_call;
+       new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
+       return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       unsigned long ip = (unsigned long)(&ftrace_graph_call);
+       u32 old, new;
+
+       old = *(u32 *) &ftrace_graph_call;
+       new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
+
+       return ftrace_modify_code(ip, old, new);
+}
+
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long parent,
+                                   unsigned long self_addr,
+                                   unsigned long frame_pointer)
+{
+       unsigned long return_hooker = (unsigned long) &return_to_handler;
+       struct ftrace_graph_ent trace;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return parent + 8UL;
+
+       if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+                                    frame_pointer) == -EBUSY)
+               return parent + 8UL;
+
+       trace.func = self_addr;
+
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace)) {
+               current->curr_ret_stack--;
+               return parent + 8UL;
+       }
+
+       return return_hooker;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e1cbdb9..454ce3a 100644
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/ftrace.h>
 #include <linux/irq.h>
 
 #include <asm/ptrace.h>
@@ -647,6 +648,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
        if (unlikely(!bucket))
                return 0;
+
+       /* The only reference we store to the IRQ bucket is
+        * by physical address which kmemleak can't see, tell
+        * it that this object explicitly is not a leak and
+        * should be scanned.
+        */
+       kmemleak_not_leak(bucket);
+
        __flush_dcache_range((unsigned long) bucket,
                             ((unsigned long) bucket +
                              sizeof(struct ino_bucket)));
@@ -721,7 +730,7 @@ static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
        __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
 }
 
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index f5a0fd4..0a2bd0f 100644
@@ -5,6 +5,7 @@
 
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
+#include <linux/ftrace.h>
 
 #include <asm/kdebug.h>
 #include <asm/ptrace.h>
@@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_SMP
-void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
 {
        unsigned long flags;
 
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b287b62..75a3d1a 100644
@@ -92,7 +92,6 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
        unsigned int sum, touched = 0;
-       int cpu = smp_processor_id();
 
        clear_softint(1 << irq);
 
@@ -106,7 +105,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
        else
                pcr_ops->write(PCR_PIC_PRIV);
 
-       sum = kstat_irqs_cpu(0, cpu);
+       sum = local_cpu_data().irq0_irqs;
        if (__get_cpu_var(nmi_touch)) {
                __get_cpu_var(nmi_touch) = 0;
                touched = 1;
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index b775658..8a00058 100644
@@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
                struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
 
                if (!rp) {
-                       prom_printf("Cannot allocate IOMMU resource.\n");
-                       prom_halt();
+                       pr_info("%s: Cannot allocate IOMMU resource.\n",
+                               pbm->name);
+                       return;
                }
                rp->name = "IOMMU";
                rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
                rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
                rp->flags = IORESOURCE_BUSY;
-               request_resource(&pbm->mem_space, rp);
+               if (request_resource(&pbm->mem_space, rp)) {
+                       pr_info("%s: Unable to request IOMMU resource.\n",
+                               pbm->name);
+                       kfree(rp);
+               }
        }
 }
 
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 2d94e7a..c4a6a50 100644
@@ -8,6 +8,7 @@
 #include <linux/irq.h>
 
 #include <linux/perf_event.h>
+#include <linux/ftrace.h>
 
 #include <asm/pil.h>
 #include <asm/pcr.h>
@@ -34,7 +35,7 @@ unsigned int picl_shift;
  * Therefore in such situations we defer the work by signalling
  * a lower level cpu IRQ.
  */
-void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 {
        struct pt_regs *old_regs;
 
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 4c53345..b6a2b8f 100644
@@ -22,6 +22,7 @@
 #include <linux/profile.h>
 #include <linux/bootmem.h>
 #include <linux/vmalloc.h>
+#include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/slab.h>
 
@@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(int cpu)
                      &cpumask_of_cpu(cpu));
 }
 
-void smp_call_function_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
        generic_smp_call_function_interrupt();
 }
 
-void smp_call_function_single_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
        generic_smp_call_function_single_interrupt();
@@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        put_cpu();
 }
 
-void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
        struct mm_struct *mm;
        unsigned long flags;
@@ -1149,7 +1150,7 @@ void smp_release(void)
  */
 extern void prom_world(int);
 
-void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
 
@@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu)
                      &cpumask_of_cpu(cpu));
 }
 
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
 }
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 67e1651..c7bbe6c 100644
@@ -35,6 +35,7 @@
 #include <linux/clocksource.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/ftrace.h>
 
 #include <asm/oplib.h>
 #include <asm/timer.h>
@@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = {
 };
 static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
-void timer_interrupt(int irq, struct pt_regs *regs)
+void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned long tick_mask = tick_ops->softint_mask;
@@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
 
        irq_enter();
 
+       local_cpu_data().irq0_irqs++;
        kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
 
        if (unlikely(!evt->event_handler)) {
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 837dfc2..9da57f0 100644
@@ -2203,27 +2203,6 @@ void dump_stack(void)
 
 EXPORT_SYMBOL(dump_stack);
 
-static inline int is_kernel_stack(struct task_struct *task,
-                                 struct reg_window *rw)
-{
-       unsigned long rw_addr = (unsigned long) rw;
-       unsigned long thread_base, thread_end;
-
-       if (rw_addr < PAGE_OFFSET) {
-               if (task != &init_task)
-                       return 0;
-       }
-
-       thread_base = (unsigned long) task_stack_page(task);
-       thread_end = thread_base + sizeof(union thread_union);
-       if (rw_addr >= thread_base &&
-           rw_addr < thread_end &&
-           !(rw_addr & 0x7UL))
-               return 1;
-
-       return 0;
-}
-
 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 {
        unsigned long fp = rw->ins[6];
@@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
        show_regs(regs);
        add_taint(TAINT_DIE);
        if (regs->tstate & TSTATE_PRIV) {
+               struct thread_info *tp = current_thread_info();
                struct reg_window *rw = (struct reg_window *)
                        (regs->u_regs[UREG_FP] + STACK_BIAS);
 
@@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
                 * find some badly aligned kernel stack.
                 */
                while (rw &&
-                      count++ < 30&&
-                      is_kernel_stack(current, rw)) {
+                      count++ < 30 &&
+                      kstack_valid(tp, (unsigned long) rw)) {
                        printk("Caller[%016lx]: %pS\n", rw->ins[7],
                               (void *) rw->ins[7]);
 
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 4e59925..0c1e678 100644
@@ -46,11 +46,16 @@ SECTIONS
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
+               IRQENTRY_TEXT
                *(.gnu.warning)
        } = 0
        _etext = .;
 
        RO_DATA(PAGE_SIZE)
+
+       /* Start of data section */
+       _sdata = .;
+
        .data1 : {
                *(.data1)
        }
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 24b8b12..3753e3c 100644
@@ -7,26 +7,11 @@
 
 #include <linux/linkage.h>
 
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
 /*
  * This is the main variant and is called by C code.  GCC's -pg option
  * automatically instruments every C function with a call to this.
  */
 
-#ifdef CONFIG_STACK_DEBUG
-
-#define OVSTACKSIZE    4096            /* lets hope this is enough */
-
-       .data
-       .align          8
-panicstring:
-       .asciz          "Stack overflow\n"
-       .align          8
-ovstack:
-       .skip           OVSTACKSIZE
-#endif
        .text
        .align          32
        .globl          _mcount
@@ -35,84 +20,48 @@ ovstack:
        .type           mcount,#function
 _mcount:
 mcount:
-#ifdef CONFIG_STACK_DEBUG
-       /*
-        * Check whether %sp is dangerously low.
-        */
-       ldub            [%g6 + TI_FPDEPTH], %g1
-       srl             %g1, 1, %g3
-       add             %g3, 1, %g3
-       sllx            %g3, 8, %g3                     ! each fpregs frame is 256b
-       add             %g3, 192, %g3
-       add             %g6, %g3, %g3                   ! where does task_struct+frame end?
-       sub             %g3, STACK_BIAS, %g3
-       cmp             %sp, %g3
-       bg,pt           %xcc, 1f
-        nop
-       lduh            [%g6 + TI_CPU], %g1
-       sethi           %hi(hardirq_stack), %g3
-       or              %g3, %lo(hardirq_stack), %g3
-       sllx            %g1, 3, %g1
-       ldx             [%g3 + %g1], %g7
-       sub             %g7, STACK_BIAS, %g7
-       cmp             %sp, %g7
-       bleu,pt         %xcc, 2f
-        sethi          %hi(THREAD_SIZE), %g3
-       add             %g7, %g3, %g7
-       cmp             %sp, %g7
-       blu,pn          %xcc, 1f
-2:      sethi          %hi(softirq_stack), %g3
-       or              %g3, %lo(softirq_stack), %g3
-       ldx             [%g3 + %g1], %g7
-       sub             %g7, STACK_BIAS, %g7
-       cmp             %sp, %g7
-       bleu,pt         %xcc, 3f
-        sethi          %hi(THREAD_SIZE), %g3
-       add             %g7, %g3, %g7
-       cmp             %sp, %g7
-       blu,pn          %xcc, 1f
-        nop
-       /* If we are already on ovstack, don't hop onto it
-        * again, we are already trying to output the stack overflow
-        * message.
-        */
-3:     sethi           %hi(ovstack), %g7               ! cant move to panic stack fast enough
-        or             %g7, %lo(ovstack), %g7
-       add             %g7, OVSTACKSIZE, %g3
-       sub             %g3, STACK_BIAS + 192, %g3
-       sub             %g7, STACK_BIAS, %g7
-       cmp             %sp, %g7
-       blu,pn          %xcc, 2f
-        cmp            %sp, %g3
-       bleu,pn         %xcc, 1f
-        nop
-2:     mov             %g3, %sp
-       sethi           %hi(panicstring), %g3
-       call            prom_printf
-        or             %g3, %lo(panicstring), %o0
-       call            prom_halt
-        nop
-1:
-#endif
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
-       mov             %o7, %o0
-       .globl          mcount_call
-mcount_call:
-       call            ftrace_stub
-        mov            %o0, %o7
+       /* Do nothing, the retl/nop below is all we need.  */
 #else
-       sethi           %hi(ftrace_trace_function), %g1
+       sethi           %hi(function_trace_stop), %g1
+       lduw            [%g1 + %lo(function_trace_stop)], %g2
+       brnz,pn         %g2, 2f
+        sethi          %hi(ftrace_trace_function), %g1
        sethi           %hi(ftrace_stub), %g2
        ldx             [%g1 + %lo(ftrace_trace_function)], %g1
        or              %g2, %lo(ftrace_stub), %g2
        cmp             %g1, %g2
        be,pn           %icc, 1f
-        mov            %i7, %o1
-       jmpl            %g1, %g0
-        mov            %o7, %o0
+        mov            %i7, %g3
+       save            %sp, -128, %sp
+       mov             %g3, %o1
+       jmpl            %g1, %o7
+        mov            %i7, %o0
+       ret
+        restore
        /* not reached */
 1:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       sethi           %hi(ftrace_graph_return), %g1
+       ldx             [%g1 + %lo(ftrace_graph_return)], %g3
+       cmp             %g2, %g3
+       bne,pn          %xcc, 5f
+        sethi          %hi(ftrace_graph_entry_stub), %g2
+       sethi           %hi(ftrace_graph_entry), %g1
+       or              %g2, %lo(ftrace_graph_entry_stub), %g2
+       ldx             [%g1 + %lo(ftrace_graph_entry)], %g1
+       cmp             %g1, %g2
+       be,pt           %xcc, 2f
+        nop
+5:     mov             %i7, %g2
+       mov             %fp, %g3
+       save            %sp, -128, %sp
+       mov             %g2, %l0
+       ba,pt           %xcc, ftrace_graph_caller
+        mov            %g3, %l1
+#endif
+2:
 #endif
 #endif
        retl
@@ -131,14 +80,50 @@ ftrace_stub:
        .globl          ftrace_caller
        .type           ftrace_caller,#function
 ftrace_caller:
-       mov             %i7, %o1
-       mov             %o7, %o0
+       sethi           %hi(function_trace_stop), %g1
+       mov             %i7, %g2
+       lduw            [%g1 + %lo(function_trace_stop)], %g1
+       brnz,pn         %g1, ftrace_stub
+        mov            %fp, %g3
+       save            %sp, -128, %sp
+       mov             %g2, %o1
+       mov             %g2, %l0
+       mov             %g3, %l1
        .globl          ftrace_call
 ftrace_call:
        call            ftrace_stub
-        mov            %o0, %o7
-       retl
+        mov            %i7, %o0
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .globl          ftrace_graph_call
+ftrace_graph_call:
+       call            ftrace_stub
         nop
+#endif
+       ret
+        restore
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .size           ftrace_graph_call,.-ftrace_graph_call
+#endif
+       .size           ftrace_call,.-ftrace_call
        .size           ftrace_caller,.-ftrace_caller
 #endif
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+       mov             %l0, %o0
+       mov             %i7, %o1
+       call            prepare_ftrace_return
+        mov            %l1, %o2
+       ret
+        restore        %o0, -8, %i7
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+       save            %sp, -128, %sp
+       call            ftrace_return_to_handler
+        mov            %fp, %o0
+       jmpl            %o0 + 8, %g0
+        restore
+END(return_to_handler)
+#endif
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ba19ad4..86a0ff0 100644
@@ -21,6 +21,7 @@
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
 
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE    8192
+#define CMD_BUFFER_UNINITIALIZED 1
 #define CMD_BUFFER_ENTRIES 512
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
@@ -237,6 +239,7 @@ struct protection_domain {
        struct list_head list;  /* for list of all protection domains */
        struct list_head dev_list; /* List of all devices in this domain */
        spinlock_t lock;        /* mostly used to lock the page table*/
+       struct mutex api_lock;  /* protect page tables in the iommu-api path */
        u16 id;                 /* the domain id written to the device table */
        int mode;               /* paging mode (0-6 levels) */
        u64 *pt_root;           /* page table root pointer */
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index ba0eed8..b60f292 100644
 
 #ifndef __ASSEMBLY__
 #include <asm/hw_irq.h>
-#include <asm/kvm_para.h>
 
 /*G:030
  * But first, how does our Guest contact the Host to ask for privileged
  * operations?  There are two ways: the direct way is to make a "hypercall",
  * to make requests of the Host Itself.
  *
- * We use the KVM hypercall mechanism, though completely different hypercall
- * numbers. Seventeen hypercalls are available: the hypercall number is put in
- * the %eax register, and the arguments (when required) are placed in %ebx,
- * %ecx, %edx and %esi.  If a return value makes sense, it's returned in %eax.
+ * Our hypercall mechanism uses the highest unused trap code (traps 32 and
+ * above are used by real hardware interrupts).  Seventeen hypercalls are
+ * available: the hypercall number is put in the %eax register, and the
+ * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
+ * If a return value makes sense, it's returned in %eax.
  *
  * Grossly invalid calls result in Sudden Death at the hands of the vengeful
  * Host, rather than returning failure.  This reflects Winston Churchill's
  * definition of a gentleman: "someone who is only rude intentionally".
-:*/
+ */
+static inline unsigned long
+hcall(unsigned long call,
+      unsigned long arg1, unsigned long arg2, unsigned long arg3,
+      unsigned long arg4)
+{
+       /* "int" is the Intel instruction to trigger a trap. */
+       asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
+                    /* The call in %eax (aka "a") might be overwritten */
+                    : "=a"(call)
+                      /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */
+                    : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
+                      /* "memory" means this might write somewhere in memory.
+                       * This isn't true for all calls, but it's safe to tell
+                       * gcc that it might happen so it doesn't get clever. */
+                    : "memory");
+       return call;
+}
 
 /* Can't use our min() macro here: needs to be a constant */
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
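As a usage note, the boot.c hunks later in this merge convert every guest-side call site from the kvm_hypercall*() wrappers to direct hcall() invocations; a representative call, modelled on the console path (early_put_chars()) shown further down, might look like this (example_notify() is an illustrative name, not code from this merge):

#include <asm/lguest_hcall.h>
#include <asm/page.h>		/* __pa() */

/* Hand the physical address of a guest buffer to the Host. */
static void example_notify(const char *buf)
{
	hcall(LHCALL_NOTIFY, __pa(buf), 0, 0, 0);
}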
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f3dadb5..f854d89 100644
@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
                return false;
 
        /* No device or no PCI device */
-       if (!dev || dev->bus != &pci_bus_type)
+       if (dev->bus != &pci_bus_type)
                return false;
 
        devid = get_device_id(dev);
@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
        u32 tail, head;
        u8 *target;
 
+       WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
        tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        target = iommu->cmd_buf + tail;
        memcpy_toio(target, cmd, sizeof(*cmd));
@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void)
        struct dma_ops_domain *dma_dom;
        u16 devid;
 
-       while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+       for_each_pci_dev(dev) {
 
                /* Do we handle this device? */
                if (!check_device(&dev->dev))
@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain)
        list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
                struct device *dev = dev_data->dev;
 
-               do_detach(dev);
+               __detach_device(dev);
                atomic_set(&dev_data->bind, 0);
        }
 
@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void)
                return NULL;
 
        spin_lock_init(&domain->lock);
+       mutex_init(&domain->api_lock);
        domain->id = domain_id_alloc();
        if (!domain->id)
                goto out_err;
@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
        free_pagetable(domain);
 
-       domain_id_free(domain->id);
-
-       kfree(domain);
+       protection_domain_free(domain);
 
        dom->priv = NULL;
 }
@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
        iova  &= PAGE_MASK;
        paddr &= PAGE_MASK;
 
+       mutex_lock(&domain->api_lock);
+
        for (i = 0; i < npages; ++i) {
                ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
                if (ret)
@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
                paddr += PAGE_SIZE;
        }
 
+       mutex_unlock(&domain->api_lock);
+
        return 0;
 }
 
@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 
        iova  &= PAGE_MASK;
 
+       mutex_lock(&domain->api_lock);
+
        for (i = 0; i < npages; ++i) {
                iommu_unmap_page(domain, iova, PM_MAP_4k);
                iova  += PAGE_SIZE;
        }
 
        iommu_flush_tlb_pde(domain);
+
+       mutex_unlock(&domain->api_lock);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42f5350..6360abf 100644
@@ -138,9 +138,9 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 
 /*
- * Set to true if ACPI table parsing and hardware intialization went properly
+ * The ACPI table parsing functions set this variable on an error
  */
-static bool amd_iommu_initialized;
+static int __initdata amd_iommu_init_err;
 
 /*
  * List of protection domains - used during resume
@@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
-       if (checksum != 0)
+       if (checksum != 0) {
                /* ACPI table corrupt */
-               return -ENODEV;
+               amd_iommu_init_err = -ENODEV;
+               return 0;
+       }
 
        p += IVRS_HEADER_LENGTH;
 
@@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
        if (cmd_buf == NULL)
                return NULL;
 
-       iommu->cmd_buf_size = CMD_BUFFER_SIZE;
+       iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
 
        return cmd_buf;
 }
@@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
                    &entry, sizeof(entry));
 
        amd_iommu_reset_cmd_buffer(iommu);
+       iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
        free_pages((unsigned long)iommu->cmd_buf,
-                  get_order(iommu->cmd_buf_size));
+                  get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
 }
 
 /* allocates the memory where the IOMMU will log its events to */
@@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
                                    h->mmio_phys);
 
                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-                       if (iommu == NULL)
-                               return -ENOMEM;
+                       if (iommu == NULL) {
+                               amd_iommu_init_err = -ENOMEM;
+                               return 0;
+                       }
+
                        ret = init_iommu_one(iommu, h);
-                       if (ret)
-                               return ret;
+                       if (ret) {
+                               amd_iommu_init_err = ret;
+                               return 0;
+                       }
                        break;
                default:
                        break;
@@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
        }
        WARN_ON(p != end);
 
-       amd_iommu_initialized = true;
-
        return 0;
 }
 
@@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void)
        if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                return -ENODEV;
 
+       ret = amd_iommu_init_err;
+       if (ret)
+               goto out;
+
        dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
@@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void)
        if (acpi_table_parse("IVRS", init_iommu_all) != 0)
                goto free;
 
-       if (!amd_iommu_initialized)
+       if (amd_iommu_init_err) {
+               ret = amd_iommu_init_err;
                goto free;
+       }
 
        if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                goto free;
 
+       if (amd_iommu_init_err) {
+               ret = amd_iommu_init_err;
+               goto free;
+       }
+
        ret = sysdev_class_register(&amd_iommu_sysdev_class);
        if (ret)
                goto free;
@@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void)
        if (ret)
                goto free;
 
+       enable_iommus();
+
        if (iommu_pass_through)
                ret = amd_iommu_init_passthrough();
        else
@@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void)
 
        amd_iommu_init_notifier();
 
-       enable_iommus();
-
        if (iommu_pass_through)
                goto out;
 
@@ -1315,6 +1332,7 @@ out:
        return ret;
 
 free:
+       disable_iommus();
 
        amd_iommu_uninit_devices();
 
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997..b5d8b0b 100644
@@ -393,6 +393,7 @@ void __init gart_iommu_hole_init(void)
        for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
                int bus;
                int dev_base, dev_limit;
+               u32 ctl;
 
                bus = bus_dev_ranges[i].bus;
                dev_base = bus_dev_ranges[i].dev_base;
@@ -406,7 +407,19 @@ void __init gart_iommu_hole_init(void)
                        gart_iommu_aperture = 1;
                        x86_init.iommu.iommu_init = gart_iommu_init;
 
-                       aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+                       ctl = read_pci_config(bus, slot, 3,
+                                             AMD64_GARTAPERTURECTL);
+
+                       /*
+                        * Before we do anything else disable the GART. It may
+                        * still be enabled if we boot into a crash-kernel here.
+                        * Reconfiguring the GART while it is enabled could have
+                        * unknown side-effects.
+                        */
+                       ctl &= ~GARTEN;
+                       write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+                       aper_order = (ctl >> 1) & 7;
                        aper_size = (32 * 1024 * 1024) << aper_order;
                        aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
                        aper_base <<= 25;
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a4849c1..ebd4c51 100644
@@ -27,7 +27,6 @@
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
-#include <asm/x86_init.h>
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 #ifdef CONFIG_HPET_TIMER
        hpet_disable();
 #endif
-
-#ifdef CONFIG_X86_64
-       x86_platform.iommu_shutdown();
-#endif
-
        crash_save_cpu(regs, safe_smp_processor_id());
 }
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 68cd24f..0f7f130 100644
@@ -565,6 +565,9 @@ static void enable_gart_translations(void)
 
                enable_gart_translation(dev, __pa(agp_gatt_table));
        }
+
+       /* Flush the GART-TLB to remove stale entries */
+       k8_flush_garts();
 }
 
 /*
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 7e59dc1..2bdf628 100644
@@ -115,7 +115,7 @@ static void async_hcall(unsigned long call, unsigned long arg1,
        local_irq_save(flags);
        if (lguest_data.hcall_status[next_call] != 0xFF) {
                /* Table full, so do normal hcall which will flush table. */
-               kvm_hypercall4(call, arg1, arg2, arg3, arg4);
+               hcall(call, arg1, arg2, arg3, arg4);
        } else {
                lguest_data.hcalls[next_call].arg0 = call;
                lguest_data.hcalls[next_call].arg1 = arg1;
@@ -145,46 +145,45 @@ static void async_hcall(unsigned long call, unsigned long arg1,
  * So, when we're in lazy mode, we call async_hcall() to store the call for
  * future processing:
  */
-static void lazy_hcall1(unsigned long call,
-                      unsigned long arg1)
+static void lazy_hcall1(unsigned long call, unsigned long arg1)
 {
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-               kvm_hypercall1(call, arg1);
+               hcall(call, arg1, 0, 0, 0);
        else
                async_hcall(call, arg1, 0, 0, 0);
 }
 
 /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
 static void lazy_hcall2(unsigned long call,
-                      unsigned long arg1,
-                      unsigned long arg2)
+                       unsigned long arg1,
+                       unsigned long arg2)
 {
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-               kvm_hypercall2(call, arg1, arg2);
+               hcall(call, arg1, arg2, 0, 0);
        else
                async_hcall(call, arg1, arg2, 0, 0);
 }
 
 static void lazy_hcall3(unsigned long call,
-                      unsigned long arg1,
-                      unsigned long arg2,
-                      unsigned long arg3)
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3)
 {
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-               kvm_hypercall3(call, arg1, arg2, arg3);
+               hcall(call, arg1, arg2, arg3, 0);
        else
                async_hcall(call, arg1, arg2, arg3, 0);
 }
 
 #ifdef CONFIG_X86_PAE
 static void lazy_hcall4(unsigned long call,
-                      unsigned long arg1,
-                      unsigned long arg2,
-                      unsigned long arg3,
-                      unsigned long arg4)
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3,
+                       unsigned long arg4)
 {
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-               kvm_hypercall4(call, arg1, arg2, arg3, arg4);
+               hcall(call, arg1, arg2, arg3, arg4);
        else
                async_hcall(call, arg1, arg2, arg3, arg4);
 }
@@ -196,13 +195,13 @@ static void lazy_hcall4(unsigned long call,
 :*/
 static void lguest_leave_lazy_mmu_mode(void)
 {
-       kvm_hypercall0(LHCALL_FLUSH_ASYNC);
+       hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
        paravirt_leave_lazy_mmu();
 }
 
 static void lguest_end_context_switch(struct task_struct *next)
 {
-       kvm_hypercall0(LHCALL_FLUSH_ASYNC);
+       hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
        paravirt_end_context_switch(next);
 }
 
@@ -286,7 +285,7 @@ static void lguest_write_idt_entry(gate_desc *dt,
        /* Keep the local copy up to date. */
        native_write_idt_entry(dt, entrynum, g);
        /* Tell Host about this new entry. */
-       kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]);
+       hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
 }
 
 /*
@@ -300,7 +299,7 @@ static void lguest_load_idt(const struct desc_ptr *desc)
        struct desc_struct *idt = (void *)desc->address;
 
        for (i = 0; i < (desc->size+1)/8; i++)
-               kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
+               hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
 }
 
 /*
@@ -321,7 +320,7 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
        struct desc_struct *gdt = (void *)desc->address;
 
        for (i = 0; i < (desc->size+1)/8; i++)
-               kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
+               hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
 }
 
 /*
@@ -334,8 +333,8 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
 {
        native_write_gdt_entry(dt, entrynum, desc, type);
        /* Tell Host about this new entry. */
-       kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
-                      dt[entrynum].a, dt[entrynum].b);
+       hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
+             dt[entrynum].a, dt[entrynum].b, 0);
 }
 
 /*
@@ -931,7 +930,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta,
        }
 
        /* Please wake us this far in the future. */
-       kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta);
+       hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
        return 0;
 }
 
@@ -942,7 +941,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode,
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                /* A 0 argument shuts the clock down. */
-               kvm_hypercall0(LHCALL_SET_CLOCKEVENT);
+               hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                /* This is what we expect. */
@@ -1100,7 +1099,7 @@ static void set_lguest_basic_apic_ops(void)
 /* STOP!  Until an interrupt comes in. */
 static void lguest_safe_halt(void)
 {
-       kvm_hypercall0(LHCALL_HALT);
+       hcall(LHCALL_HALT, 0, 0, 0, 0);
 }
 
 /*
@@ -1112,8 +1111,8 @@ static void lguest_safe_halt(void)
  */
 static void lguest_power_off(void)
 {
-       kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"),
-                                       LGUEST_SHUTDOWN_POWEROFF);
+       hcall(LHCALL_SHUTDOWN, __pa("Power down"),
+             LGUEST_SHUTDOWN_POWEROFF, 0, 0);
 }
 
 /*
@@ -1123,7 +1122,7 @@ static void lguest_power_off(void)
  */
 static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
 {
-       kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF);
+       hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
        /* The hcall won't return, but to keep gcc happy, we're "done". */
        return NOTIFY_DONE;
 }
@@ -1162,7 +1161,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
                len = sizeof(scratch) - 1;
        scratch[len] = '\0';
        memcpy(scratch, buf, len);
-       kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch));
+       hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);
 
        /* This routine returns the number of bytes actually written. */
        return len;
@@ -1174,7 +1173,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
  */
 static void lguest_restart(char *reason)
 {
-       kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
+       hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
 }
 
 /*G:050
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 27eac0f..4f420c2 100644
@@ -32,7 +32,7 @@ ENTRY(lguest_entry)
         */
        movl $LHCALL_LGUEST_INIT, %eax
        movl $lguest_data - __PAGE_OFFSET, %ebx
-       .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
+       int $LGUEST_TRAP_ENTRY
 
        /* Set up the initial stack so we can run C code. */
        movl $(init_thread_union+THREAD_SIZE),%esp
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 702dcc9..14a34d9 100644
@@ -960,6 +960,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
                u.packet.header_length = GET_HEADER_LENGTH(control);
 
                if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
+                       if (u.packet.header_length % 4 != 0)
+                               return -EINVAL;
                        header_length = u.packet.header_length;
                } else {
                        /*
@@ -969,7 +971,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
                        if (ctx->header_size == 0) {
                                if (u.packet.header_length > 0)
                                        return -EINVAL;
-                       } else if (u.packet.header_length % ctx->header_size != 0) {
+                       } else if (u.packet.header_length == 0 ||
+                                  u.packet.header_length % ctx->header_size != 0) {
                                return -EINVAL;
                        }
                        header_length = 0;
@@ -1354,24 +1357,24 @@ static int dispatch_ioctl(struct client *client,
                return -ENODEV;
 
        if (_IOC_TYPE(cmd) != '#' ||
-           _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+           _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
+           _IOC_SIZE(cmd) > sizeof(buffer))
                return -EINVAL;
 
-       if (_IOC_DIR(cmd) & _IOC_WRITE) {
-               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-                   copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
+       if (_IOC_DIR(cmd) == _IOC_READ)
+               memset(&buffer, 0, _IOC_SIZE(cmd));
+
+       if (_IOC_DIR(cmd) & _IOC_WRITE)
+               if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
-       }
 
        ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
        if (ret < 0)
                return ret;
 
-       if (_IOC_DIR(cmd) & _IOC_READ) {
-               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-                   copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
+       if (_IOC_DIR(cmd) & _IOC_READ)
+               if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;
-       }
 
        return ret;
 }
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index c1605b5..0f28d91 100644
@@ -142,6 +142,12 @@ static const char *temperature_sensors_sets[][41] = {
          "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S",
          "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S",
          NULL },
+/* Set 17: iMac 9,1 */
+       { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P",
+         "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL },
+/* Set 18: MacBook Pro 2,2 */
+       { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
+         "Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
 };
 
 /* List of keys used to read/write fan speeds */
@@ -1350,6 +1356,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
        { .accelerometer = 1, .light = 1, .temperature_set = 15 },
 /* MacPro3,1: temperature set 16 */
        { .accelerometer = 0, .light = 0, .temperature_set = 16 },
+/* iMac 9,1: light sensor only, temperature set 17 */
+       { .accelerometer = 0, .light = 0, .temperature_set = 17 },
+/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
+       { .accelerometer = 1, .light = 1, .temperature_set = 18 },
 };
 
 /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1375,6 +1385,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
          DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
          DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") },
                &applesmc_dmi_data[9]},
+       { applesmc_dmi_match, "Apple MacBook Pro 2,2", {
+         DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."),
+         DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") },
+               &applesmc_dmi_data[18]},
        { applesmc_dmi_match, "Apple MacBook Pro", {
          DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
          DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
@@ -1415,6 +1429,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
          DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
          DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
                &applesmc_dmi_data[4]},
+       { applesmc_dmi_match, "Apple iMac 9,1", {
+         DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+         DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") },
+               &applesmc_dmi_data[17]},
        { applesmc_dmi_match, "Apple iMac 8", {
          DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
          DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
index 1002bef..5be09c0 100644 (file)
@@ -539,14 +539,14 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
 
        struct it87_data *data = dev_get_drvdata(dev);
        long val;
+       u8 reg;
 
        if (strict_strtol(buf, 10, &val) < 0)
                return -EINVAL;
 
-       mutex_lock(&data->update_lock);
-
-       data->sensor &= ~(1 << nr);
-       data->sensor &= ~(8 << nr);
+       reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
+       reg &= ~(1 << nr);
+       reg &= ~(8 << nr);
        if (val == 2) { /* backwards compatibility */
                dev_warn(dev, "Sensor type 2 is deprecated, please use 4 "
                         "instead\n");
@@ -554,14 +554,16 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
        }
        /* 3 = thermal diode; 4 = thermistor; 0 = disabled */
        if (val == 3)
-               data->sensor |= 1 << nr;
+               reg |= 1 << nr;
        else if (val == 4)
-               data->sensor |= 8 << nr;
-       else if (val != 0) {
-               mutex_unlock(&data->update_lock);
+               reg |= 8 << nr;
+       else if (val != 0)
                return -EINVAL;
-       }
+
+       mutex_lock(&data->update_lock);
+       data->sensor = reg;
        it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
+       data->valid = 0;        /* Force cache refresh */
        mutex_unlock(&data->update_lock);
        return count;
 }
@@ -1841,14 +1843,10 @@ static void __devinit it87_init_device(struct platform_device *pdev)
                        it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
        }
 
-       /* Check if temperature channels are reset manually or by some reason */
-       tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE);
-       if ((tmp & 0x3f) == 0) {
-               /* Temp1,Temp3=thermistor; Temp2=thermal diode */
-               tmp = (tmp & 0xc0) | 0x2a;
-               it87_write_value(data, IT87_REG_TEMP_ENABLE, tmp);
-       }
-       data->sensor = tmp;
+       /* Temperature channels are not forcibly enabled, as they can be
+        * set to two different sensor types and we can't guess which one
+        * is correct for a given system. These channels can be enabled at
+        * run-time through the temp{1-3}_type sysfs accessors if needed. */
 
        /* Check if voltage monitors are reset manually or by some reason */
        tmp = it87_read_value(data, IT87_REG_VIN_ENABLE);
index 6b2d8ae..a610e78 100644 (file)
@@ -303,13 +303,13 @@ error_ret:
  **/
 static inline int sht15_calc_temp(struct sht15_data *data)
 {
-       int d1 = 0;
+       int d1 = temppoints[0].d1;
        int i;
 
-       for (i = 1; i < ARRAY_SIZE(temppoints); i++)
+       for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
                /* Find pointer to interpolate */
                if (data->supply_uV > temppoints[i - 1].vdd) {
-                       d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
+                       d1 = (data->supply_uV - temppoints[i - 1].vdd)
                                * (temppoints[i].d1 - temppoints[i - 1].d1)
                                / (temppoints[i].vdd - temppoints[i - 1].vdd)
                                + temppoints[i - 1].d1;
@@ -542,7 +542,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
 /* If a regulator is available, query what the supply voltage actually is!*/
        data->reg = regulator_get(data->dev, "vcc");
        if (!IS_ERR(data->reg)) {
-               data->supply_uV = regulator_get_voltage(data->reg);
+               int voltage;
+
+               voltage = regulator_get_voltage(data->reg);
+               if (voltage)
+                       data->supply_uV = voltage;
+
                regulator_enable(data->reg);
                /* setup a notifier block to update this if another device
                 *  causes the voltage to change */
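
The sht15_calc_temp() fix above seeds d1 with the first table entry, walks the table from the top down, and interpolates in microvolts throughout. A stand-alone sketch of that piecewise-linear interpolation; the two calibration pairs below are placeholders, not the driver's temppoints[] data:

/* Sketch: piecewise-linear interpolation of the d1 coefficient against the
 * supply voltage in microvolts. */
struct demo_temppoint {
        int vdd;        /* supply voltage, microvolts */
        int d1;         /* temperature coefficient at that voltage */
};

static const struct demo_temppoint demo_points[] = {
        { 2500000, -3940 },     /* placeholder pair */
        { 5000000, -4010 },     /* placeholder pair */
};

static int demo_calc_d1(int supply_uV)
{
        int n = sizeof(demo_points) / sizeof(demo_points[0]);
        int d1 = demo_points[0].d1;     /* below the table: clamp to the first entry */
        int i;

        for (i = n - 1; i > 0; i--)
                if (supply_uV > demo_points[i - 1].vdd) {
                        /* Interpolate between entries i-1 and i. */
                        d1 = (supply_uV - demo_points[i - 1].vdd)
                                * (demo_points[i].d1 - demo_points[i - 1].d1)
                                / (demo_points[i].vdd - demo_points[i - 1].vdd)
                                + demo_points[i - 1].d1;
                        break;
                }
        return d1;
}
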
index afd4e2b..9c79bd5 100644 (file)
@@ -660,7 +660,14 @@ static int input_default_setkeycode(struct input_dev *dev,
 int input_get_keycode(struct input_dev *dev,
                      unsigned int scancode, unsigned int *keycode)
 {
-       return dev->getkeycode(dev, scancode, keycode);
+       unsigned long flags;
+       int retval;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       retval = dev->getkeycode(dev, scancode, keycode);
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       return retval;
 }
 EXPORT_SYMBOL(input_get_keycode);
 
index ffc25cf..b443e08 100644 (file)
@@ -374,7 +374,9 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
        input_dev->name         = pdev->name;
        input_dev->id.bustype   = BUS_HOST;
        input_dev->dev.parent   = &pdev->dev;
-       input_dev->evbit[0]     = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+       input_dev->evbit[0]     = BIT_MASK(EV_KEY);
+       if (!pdata->no_autorepeat)
+               input_dev->evbit[0] |= BIT_MASK(EV_REP);
        input_dev->open         = matrix_keypad_start;
        input_dev->close        = matrix_keypad_stop;
 
index 99d5876..0d22cb9 100644 (file)
@@ -64,6 +64,7 @@ static const struct alps_model_info alps_model_data[] = {
        { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
                ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
        { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },          /* Dell Vostro 1400 */
+       { { 0x73, 0x02, 0x64 }, 0xf8, 0xf8, 0 },                          /* HP Pavilion dm3 */
        { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
                ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },      /* Toshiba Tecra A11-11L */
 };
index 4f8fe08..b89879b 100644 (file)
@@ -803,7 +803,6 @@ static struct usb_driver bcm5974_driver = {
        .disconnect             = bcm5974_disconnect,
        .suspend                = bcm5974_suspend,
        .resume                 = bcm5974_resume,
-       .reset_resume           = bcm5974_resume,
        .id_table               = bcm5974_table,
        .supports_autosuspend   = 1,
 };
index 577688b..6440a8f 100644 (file)
@@ -39,7 +39,7 @@ MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port.");
 
 static bool i8042_nomux;
 module_param_named(nomux, i8042_nomux, bool, 0);
-MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing conrtoller is present.");
+MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present.");
 
 static bool i8042_unlock;
 module_param_named(unlock, i8042_unlock, bool, 0);
index 82ae18d..0142483 100644 (file)
@@ -68,12 +68,14 @@ static int sparse_keymap_getkeycode(struct input_dev *dev,
                                    unsigned int scancode,
                                    unsigned int *keycode)
 {
-       const struct key_entry *key =
-                       sparse_keymap_entry_from_scancode(dev, scancode);
+       const struct key_entry *key;
 
-       if (key && key->type == KE_KEY) {
-               *keycode = key->keycode;
-               return 0;
+       if (dev->keycode) {
+               key = sparse_keymap_entry_from_scancode(dev, scancode);
+               if (key && key->type == KE_KEY) {
+                       *keycode = key->keycode;
+                       return 0;
+               }
        }
 
        return -EINVAL;
@@ -86,17 +88,16 @@ static int sparse_keymap_setkeycode(struct input_dev *dev,
        struct key_entry *key;
        int old_keycode;
 
-       if (keycode < 0 || keycode > KEY_MAX)
-               return -EINVAL;
-
-       key = sparse_keymap_entry_from_scancode(dev, scancode);
-       if (key && key->type == KE_KEY) {
-               old_keycode = key->keycode;
-               key->keycode = keycode;
-               set_bit(keycode, dev->keybit);
-               if (!sparse_keymap_entry_from_keycode(dev, old_keycode))
-                       clear_bit(old_keycode, dev->keybit);
-               return 0;
+       if (dev->keycode) {
+               key = sparse_keymap_entry_from_scancode(dev, scancode);
+               if (key && key->type == KE_KEY) {
+                       old_keycode = key->keycode;
+                       key->keycode = keycode;
+                       set_bit(keycode, dev->keybit);
+                       if (!sparse_keymap_entry_from_keycode(dev, old_keycode))
+                               clear_bit(old_keycode, dev->keybit);
+                       return 0;
+               }
        }
 
        return -EINVAL;
@@ -164,7 +165,7 @@ int sparse_keymap_setup(struct input_dev *dev,
        return 0;
 
  err_out:
-       kfree(keymap);
+       kfree(map);
        return error;
 
 }
@@ -176,14 +177,27 @@ EXPORT_SYMBOL(sparse_keymap_setup);
  *
  * This function is used to free memory allocated by sparse keymap
  * in an input device that was set up by sparse_keymap_setup().
+ * NOTE: It is safe to call this function while the input device is
+ * still registered (however, drivers must take care not to use the
+ * freed keymap afterwards, and thus have to shut off interrupts/polling
+ * before freeing the keymap).
  */
 void sparse_keymap_free(struct input_dev *dev)
 {
+       unsigned long flags;
+
+       /*
+        * Take event lock to prevent racing with input_get_keycode()
+        * and input_set_keycode() if we are called while input device
+        * is still registered.
+        */
+       spin_lock_irqsave(&dev->event_lock, flags);
+
        kfree(dev->keycode);
        dev->keycode = NULL;
        dev->keycodemax = 0;
-       dev->getkeycode = NULL;
-       dev->setkeycode = NULL;
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 EXPORT_SYMBOL(sparse_keymap_free);
 
index 8b5d287..f465025 100644 (file)
@@ -673,13 +673,15 @@ static int wacom_resume(struct usb_interface *intf)
        int rv;
 
        mutex_lock(&wacom->lock);
-       if (wacom->open) {
+
+       /* switch to wacom mode first */
+       wacom_query_tablet_data(intf, features);
+
+       if (wacom->open)
                rv = usb_submit_urb(wacom->irq, GFP_NOIO);
-               /* switch to wacom mode if needed */
-               if (!wacom_retrieve_hid_descriptor(intf, features))
-                       wacom_query_tablet_data(intf, features);
-       } else
+       else
                rv = 0;
+
        mutex_unlock(&wacom->lock);
 
        return rv;
index b3ba343..4a852d8 100644 (file)
@@ -155,19 +155,19 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
 {
        struct wacom_features *features = &wacom->features;
        unsigned char *data = wacom->data;
-       int x, y, prox;
-       int rw = 0;
-       int retval = 0;
+       int x, y, rw;
+       static int penData = 0;
 
        if (data[0] != WACOM_REPORT_PENABLED) {
                dbg("wacom_graphire_irq: received unknown report #%d", data[0]);
-               goto exit;
+               return 0;
        }
 
-       prox = data[1] & 0x80;
-       if (prox || wacom->id[0]) {
-               if (prox) {
-                       switch ((data[1] >> 5) & 3) {
+       if (data[1] & 0x80) {
+               /* in prox and not pad data */
+               penData = 1;
+
+               switch ((data[1] >> 5) & 3) {
 
                        case 0: /* Pen */
                                wacom->tool[0] = BTN_TOOL_PEN;
@@ -181,13 +181,23 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
 
                        case 2: /* Mouse with wheel */
                                wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04);
+                               if (features->type == WACOM_G4 || features->type == WACOM_MO) {
+                                       rw = data[7] & 0x04 ? (data[7] & 0x03)-4 : (data[7] & 0x03);
+                                       wacom_report_rel(wcombo, REL_WHEEL, -rw);
+                               } else
+                                       wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]);
                                /* fall through */
 
                        case 3: /* Mouse without wheel */
                                wacom->tool[0] = BTN_TOOL_MOUSE;
                                wacom->id[0] = CURSOR_DEVICE_ID;
+                               wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
+                               wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
+                               if (features->type == WACOM_G4 || features->type == WACOM_MO)
+                                       wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
+                               else
+                                       wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
                                break;
-                       }
                }
                x = wacom_le16_to_cpu(&data[2]);
                y = wacom_le16_to_cpu(&data[4]);
@@ -198,32 +208,36 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
                        wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01);
                        wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
                        wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04);
-               } else {
-                       wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
-                       wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
-                       if (features->type == WACOM_G4 ||
-                                       features->type == WACOM_MO) {
-                               wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
-                               rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
-                       } else {
-                               wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
-                               rw = -(signed)data[6];
-                       }
-                       wacom_report_rel(wcombo, REL_WHEEL, rw);
                }
-
-               if (!prox)
-                       wacom->id[0] = 0;
                wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
-               wacom_report_key(wcombo, wacom->tool[0], prox);
-               wacom_input_sync(wcombo); /* sync last event */
+               wacom_report_key(wcombo, wacom->tool[0], 1);
+       } else if (wacom->id[0]) {
+               wacom_report_abs(wcombo, ABS_X, 0);
+               wacom_report_abs(wcombo, ABS_Y, 0);
+               if (wacom->tool[0] == BTN_TOOL_MOUSE) {
+                       wacom_report_key(wcombo, BTN_LEFT, 0);
+                       wacom_report_key(wcombo, BTN_RIGHT, 0);
+                       wacom_report_abs(wcombo, ABS_DISTANCE, 0);
+               } else {
+                       wacom_report_abs(wcombo, ABS_PRESSURE, 0);
+                       wacom_report_key(wcombo, BTN_TOUCH, 0);
+                       wacom_report_key(wcombo, BTN_STYLUS, 0);
+                       wacom_report_key(wcombo, BTN_STYLUS2, 0);
+               }
+               wacom->id[0] = 0;
+               wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
+               wacom_report_key(wcombo, wacom->tool[0], 0);
        }
 
        /* send pad data */
        switch (features->type) {
            case WACOM_G4:
-               prox = data[7] & 0xf8;
-               if (prox || wacom->id[1]) {
+               if (data[7] & 0xf8) {
+                       if (penData) {
+                               wacom_input_sync(wcombo); /* sync last event */
+                               if (!wacom->id[0])
+                                       penData = 0;
+                       }
                        wacom->id[1] = PAD_DEVICE_ID;
                        wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
                        wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
@@ -231,16 +245,29 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
                        wacom_report_rel(wcombo, REL_WHEEL, rw);
                        wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
                        wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
-                       if (!prox)
-                               wacom->id[1] = 0;
-                       wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
+                       wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+               } else if (wacom->id[1]) {
+                       if (penData) {
+                               wacom_input_sync(wcombo); /* sync last event */
+                               if (!wacom->id[0])
+                                       penData = 0;
+                       }
+                       wacom->id[1] = 0;
+                       wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
+                       wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
+                       wacom_report_rel(wcombo, REL_WHEEL, 0);
+                       wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
+                       wacom_report_abs(wcombo, ABS_MISC, 0);
                        wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
                }
-               retval = 1;
                break;
            case WACOM_MO:
-               prox = (data[7] & 0xf8) || data[8];
-               if (prox || wacom->id[1]) {
+               if ((data[7] & 0xf8) || (data[8] & 0xff)) {
+                       if (penData) {
+                               wacom_input_sync(wcombo); /* sync last event */
+                               if (!wacom->id[0])
+                                       penData = 0;
+                       }
                        wacom->id[1] = PAD_DEVICE_ID;
                        wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
                        wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
@@ -248,16 +275,27 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
                        wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
                        wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
                        wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
-                       if (!prox)
-                               wacom->id[1] = 0;
                        wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
                        wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+               } else if (wacom->id[1]) {
+                       if (penData) {
+                               wacom_input_sync(wcombo); /* sync last event */
+                               if (!wacom->id[0])
+                                       penData = 0;
+                       }
+                       wacom->id[1] = 0;
+                       wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
+                       wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
+                       wacom_report_key(wcombo, BTN_4, (data[7] & 0x10));
+                       wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
+                       wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
+                       wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
+                       wacom_report_abs(wcombo, ABS_MISC, 0);
+                       wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
                }
-               retval = 1;
                break;
        }
-exit:
-       return retval;
+       return 1;
 }
 
 static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
@@ -598,9 +636,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
 static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx)
 {
        wacom_report_abs(wcombo, ABS_X,
-               data[2 + idx * 2] | ((data[3 + idx * 2] & 0x7f) << 8));
+               (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8));
        wacom_report_abs(wcombo, ABS_Y,
-               data[6 + idx * 2] | ((data[7 + idx * 2] & 0x7f) << 8));
+               (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8));
        wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
        wacom_report_key(wcombo, wacom->tool[idx], 1);
        if (idx)
@@ -744,24 +782,31 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
 
                touchInProx = 0;
 
-               if (!wacom->id[0]) { /* first in prox */
-                       /* Going into proximity select tool */
-                       wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
-                       if (wacom->tool[0] == BTN_TOOL_PEN)
-                               wacom->id[0] = STYLUS_DEVICE_ID;
-                       else
-                               wacom->id[0] = ERASER_DEVICE_ID;
-               }
-               wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
-               wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
-               wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
-               wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
-               pressure = ((data[7] & 0x01) << 8) | data[6];
-               if (pressure < 0)
-                       pressure = features->pressure_max + pressure + 1;
-               wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
-               wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
-               if (!prox) { /* out-prox */
+               if (prox) { /* in prox */
+                       if (!wacom->id[0]) {
+                               /* Going into proximity select tool */
+                               wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+                               if (wacom->tool[0] == BTN_TOOL_PEN)
+                                       wacom->id[0] = STYLUS_DEVICE_ID;
+                               else
+                                       wacom->id[0] = ERASER_DEVICE_ID;
+                       }
+                       wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
+                       wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
+                       wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
+                       wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
+                       pressure = ((data[7] & 0x01) << 8) | data[6];
+                       if (pressure < 0)
+                               pressure = features->pressure_max + pressure + 1;
+                       wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
+                       wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
+               } else {
+                       wacom_report_abs(wcombo, ABS_X, 0);
+                       wacom_report_abs(wcombo, ABS_Y, 0);
+                       wacom_report_abs(wcombo, ABS_PRESSURE, 0);
+                       wacom_report_key(wcombo, BTN_STYLUS, 0);
+                       wacom_report_key(wcombo, BTN_STYLUS2, 0);
+                       wacom_report_key(wcombo, BTN_TOUCH, 0);
                        wacom->id[0] = 0;
                        /* pen is out so touch can be enabled now */
                        touchInProx = 1;
index 07090f3..69c84a1 100644 (file)
@@ -178,7 +178,7 @@ static void set_status(struct virtio_device *vdev, u8 status)
 
        /* We set the status. */
        to_lgdev(vdev)->desc->status = status;
-       kvm_hypercall1(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset);
+       hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
 }
 
 static void lg_set_status(struct virtio_device *vdev, u8 status)
@@ -229,7 +229,7 @@ static void lg_notify(struct virtqueue *vq)
         */
        struct lguest_vq_info *lvq = vq->priv;
 
-       kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT);
+       hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
 }
 
 /* An extern declaration inside a C file is bad form.  Don't do it. */
index fb2b7ef..b4eb675 100644 (file)
@@ -287,6 +287,18 @@ static int emulate_insn(struct lg_cpu *cpu)
        /* Decoding x86 instructions is icky. */
        insn = lgread(cpu, physaddr, u8);
 
+       /*
+        * Around 2.6.33, the kernel started using an emulation for the
+        * cmpxchg8b instruction in early boot on many configurations.  This
+        * code isn't paravirtualized, and it tries to disable interrupts.
+        * Ignore it, which will Mostly Work.
+        */
+       if (insn == 0xfa) {
+               /* "cli", or Clear Interrupt Enable instruction.  Skip it. */
+               cpu->regs->eip++;
+               return 1;
+       }
+
        /*
         * 0x66 is an "operand prefix".  It means it's using the upper 16 bits
         * of the eax register.
index 9781942..4b451a7 100644 (file)
@@ -2334,13 +2334,13 @@ static int cnic_service_bnx2x(void *data, void *status_blk)
        struct cnic_local *cp = dev->cnic_priv;
        u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
 
-       prefetch(cp->status_blk.bnx2x);
-       prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+       if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+               prefetch(cp->status_blk.bnx2x);
+               prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
 
-       if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
                tasklet_schedule(&cp->cnic_irq_task);
-
-       cnic_chk_pkt_rings(cp);
+               cnic_chk_pkt_rings(cp);
+       }
 
        return 0;
 }
index cfd09ce..73d43c5 100644 (file)
@@ -661,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                                i = 0;
                }
 
+               if (i == tx_ring->next_to_use)
+                       break;
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }
index d313fae..7430384 100644 (file)
@@ -1814,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
                retval = 0;
                break;
        case E1000_DEV_ID_82576_QUAD_COPPER:
+       case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
                /* quad port adapters only support WoL on port A */
                if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
                        wol->supported = 0;
index 9b3c51a..c9baa2a 100644 (file)
@@ -1612,6 +1612,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                        adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82576_QUAD_COPPER:
+       case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
index 4718877..ecde087 100644 (file)
@@ -1690,7 +1690,7 @@ myri10ge_set_pauseparam(struct net_device *netdev,
        if (pause->tx_pause != mgp->pause)
                return myri10ge_change_pause(mgp, pause->tx_pause);
        if (pause->rx_pause != mgp->pause)
-               return myri10ge_change_pause(mgp, pause->tx_pause);
+               return myri10ge_change_pause(mgp, pause->rx_pause);
        if (pause->autoneg != 0)
                return -EINVAL;
        return 0;
index ff7eb91..fd9d6e3 100644 (file)
@@ -1608,9 +1608,12 @@ static void set_rx_mode(struct net_device *dev)
 {
     unsigned int ioaddr = dev->base_addr;
     struct smc_private *smc = netdev_priv(dev);
-    u_int multicast_table[ 2 ] = { 0, };
+    unsigned char multicast_table[8];
     unsigned long flags;
     u_short rx_cfg_setting;
+    int i;
+
+    memset(multicast_table, 0, sizeof(multicast_table));
 
     if (dev->flags & IFF_PROMISC) {
        rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
@@ -1622,10 +1625,6 @@ static void set_rx_mode(struct net_device *dev)
 
            netdev_for_each_mc_addr(mc_addr, dev) {
                u_int position = ether_crc(6, mc_addr->dmi_addr);
-#ifndef final_version          /* Verify multicast address. */
-               if ((mc_addr->dmi_addr[0] & 1) == 0)
-                   continue;
-#endif
                multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
            }
        }
@@ -1635,8 +1634,8 @@ static void set_rx_mode(struct net_device *dev)
     /* Load MC table and Rx setting into the chip without interrupts. */
     spin_lock_irqsave(&smc->lock, flags);
     SMC_SELECT_BANK(3);
-    outl(multicast_table[0], ioaddr + MULTICAST0);
-    outl(multicast_table[1], ioaddr + MULTICAST4);
+    for (i = 0; i < 8; i++)
+       outb(multicast_table[i], ioaddr + MULTICAST0 + i);
     SMC_SELECT_BANK(0);
     outw(rx_cfg_setting, ioaddr + RCR);
     SMC_SELECT_BANK(2);
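
The rewritten set_rx_mode() above hashes each multicast address with ether_crc() and uses only the top six bits of the result: bits 31..29 select one of the eight filter bytes, bits 28..26 the bit within it. A hedged sketch of just that bit selection; the CRC value is assumed to come from a helper equivalent to ether_crc(6, addr):

/* Sketch: set the filter bit selected by the top six CRC bits. */
static void demo_set_mcast_bit(unsigned char table[8], unsigned int crc)
{
        unsigned int byte = crc >> 29;          /* bits 31..29: which of the 8 bytes */
        unsigned int bit  = (crc >> 26) & 7;    /* bits 28..26: which bit within it  */

        table[byte] |= 1u << bit;
}

Keeping the table as eight bytes rather than two 32-bit words is what allows the filter to be loaded with the per-register outb() loop in the hunk.
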
index a6ef266..e73ba45 100644 (file)
@@ -431,6 +431,9 @@ void qlcnic_set_multi(struct net_device *netdev)
        u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        u32 mode = VPORT_MISS_MODE_DROP;
 
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return;
+
        qlcnic_nic_add_mac(adapter, adapter->mac_addr);
        qlcnic_nic_add_mac(adapter, bcast_addr);
 
index 43afdb6..0298d8c 100644 (file)
 #define RX_DESC_SIZE   (RX_DCNT * sizeof(struct r6040_descriptor))
 #define TX_DESC_SIZE   (TX_DCNT * sizeof(struct r6040_descriptor))
 #define MBCR_DEFAULT   0x012A  /* MAC Bus Control Register */
-#define MCAST_MAX      4       /* Max number multicast addresses to filter */
+#define MCAST_MAX      3       /* Max number multicast addresses to filter */
 
 /* Descriptor status */
 #define DSC_OWNER_MAC  0x8000  /* MAC is the owner of this descriptor */
@@ -982,9 +982,6 @@ static void r6040_multicast_list(struct net_device *dev)
                        crc >>= 26;
                        hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
                }
-               /* Write the index of the hash table */
-               for (i = 0; i < 4; i++)
-                       iowrite16(hash_table[i] << 14, ioaddr + MCR1);
                /* Fill the MAC hash tables with their values */
                iowrite16(hash_table[0], ioaddr + MAR0);
                iowrite16(hash_table[1], ioaddr + MAR1);
@@ -1000,9 +997,9 @@ static void r6040_multicast_list(struct net_device *dev)
                        iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
                        iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
                } else {
-                       iowrite16(0xffff, ioaddr + MID_0L + 8 * i);
-                       iowrite16(0xffff, ioaddr + MID_0M + 8 * i);
-                       iowrite16(0xffff, ioaddr + MID_0H + 8 * i);
+                       iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
+                       iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
+                       iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
                }
                i++;
        }
index a214a16..4111a85 100644 (file)
@@ -1686,7 +1686,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
        }
        pr_info("done!\n");
 
-       if (!request_mem_region(res->start, (res->end - res->start),
+       if (!request_mem_region(res->start, resource_size(res),
                                pdev->name)) {
                pr_err("%s: ERROR: memory allocation failed"
                       "cannot get the I/O addr 0x%x\n",
@@ -1695,9 +1695,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
                goto out;
        }
 
-       addr = ioremap(res->start, (res->end - res->start));
+       addr = ioremap(res->start, resource_size(res));
        if (!addr) {
-               pr_err("%s: ERROR: memory mapping failed \n", __func__);
+               pr_err("%s: ERROR: memory mapping failed\n", __func__);
                ret = -ENOMEM;
                goto out;
        }
@@ -1775,7 +1775,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
 out:
        if (ret < 0) {
                platform_set_drvdata(pdev, NULL);
-               release_mem_region(res->start, (res->end - res->start));
+               release_mem_region(res->start, resource_size(res));
                if (addr != NULL)
                        iounmap(addr);
        }
@@ -1813,7 +1813,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
 
        iounmap((void *)ndev->base_addr);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, (res->end - res->start));
+       release_mem_region(res->start, resource_size(res));
 
        free_netdev(ndev);
 
index 6fb783c..b0577dd 100644 (file)
@@ -327,6 +327,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
        struct scatterlist sg[2];
        int err;
 
+       sg_init_table(sg, 2);
        skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
        if (unlikely(!skb))
                return -ENOMEM;
@@ -352,6 +353,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
        char *p;
        int i, err, offset;
 
+       sg_init_table(sg, MAX_SKB_FRAGS + 2);
        /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
        for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
                first = get_a_page(vi, gfp);
index 67ca4e5..115e1ae 100644 (file)
@@ -1532,8 +1532,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                all_wiphys_idle =  ath9k_all_wiphys_idle(sc);
                ath9k_set_wiphy_idle(aphy, idle);
 
-               if (!idle && all_wiphys_idle)
-                       enable_radio = true;
+               enable_radio = (!idle && all_wiphys_idle);
 
                /*
                 * After we unlock here its possible another wiphy
index 83c52a6..8972166 100644 (file)
@@ -2015,7 +2015,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
                        IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
                                           "%d index %d\n", scd_ssn , index);
                        freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-                       iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+                       if (qc)
+                               iwl_free_tfds_in_queue(priv, sta_id,
+                                                      tid, freed);
 
                        if (priv->mac80211_registered &&
                            (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -2041,14 +2043,17 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
                                   tx_resp->failure_frame);
 
                freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-               iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+               if (qc && likely(sta_id != IWL_INVALID_STATION))
+                       iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+               else if (sta_id == IWL_INVALID_STATION)
+                       IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
 
                if (priv->mac80211_registered &&
                    (iwl_queue_space(&txq->q) > txq->q.low_mark))
                        iwl_wake_queue(priv, txq_id);
        }
-
-       iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+       if (qc && likely(sta_id != IWL_INVALID_STATION))
+               iwl_txq_check_empty(priv, sta_id, tid, txq_id);
 
        if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
                IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
index 35f819a..1460116 100644 (file)
@@ -346,6 +346,17 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
               !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
 }
 
+/*
+ * Static function to get the expected throughput from an iwl_scale_tbl_info
+ * that wraps a NULL pointer check
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+       if (tbl->expected_tpt)
+               return tbl->expected_tpt[rs_index];
+       return 0;
+}
+
 /**
  * rs_collect_tx_data - Update the success/failure sliding window
  *
@@ -353,19 +364,21 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
  * at this rate.  window->data contains the bitmask of successful
  * packets.
  */
-static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
-                             int scale_index, s32 tpt, int attempts,
-                             int successes)
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+                             int scale_index, int attempts, int successes)
 {
        struct iwl_rate_scale_data *window = NULL;
        static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
-       s32 fail_count;
+       s32 fail_count, tpt;
 
        if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
                return -EINVAL;
 
        /* Select window for current tx bit rate */
-       window = &(windows[scale_index]);
+       window = &(tbl->win[scale_index]);
+
+       /* Get expected throughput */
+       tpt = get_expected_tpt(tbl, scale_index);
 
        /*
         * Keep track of only the latest 62 tx frame attempts in this rate's
@@ -739,16 +752,6 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
        return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
                (a->is_SGI == b->is_SGI);
 }
-/*
- * Static function to get the expected throughput from an iwl_scale_tbl_info
- * that wraps a NULL pointer check
- */
-static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
-       if (tbl->expected_tpt)
-               return tbl->expected_tpt[rs_index];
-       return 0;
-}
 
 /*
  * mac80211 sends us Tx status
@@ -765,12 +768,10 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct iwl_priv *priv = (struct iwl_priv *)priv_r;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct iwl_rate_scale_data *window = NULL;
        enum mac80211_rate_control_flags mac_flags;
        u32 tx_rate;
        struct iwl_scale_tbl_info tbl_type;
-       struct iwl_scale_tbl_info *curr_tbl, *other_tbl;
-       s32 tpt = 0;
+       struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
 
        IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
 
@@ -853,7 +854,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
                IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
                return;
        }
-       window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
 
        /*
         * Updating the frame history depends on whether packets were
@@ -866,8 +866,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
                tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
                rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
                                &rs_index);
-               tpt = get_expected_tpt(curr_tbl, rs_index);
-               rs_collect_tx_data(window, rs_index, tpt,
+               rs_collect_tx_data(curr_tbl, rs_index,
                                   info->status.ampdu_ack_len,
                                   info->status.ampdu_ack_map);
 
@@ -897,19 +896,13 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
                         * table as active/search.
                         */
                        if (table_type_matches(&tbl_type, curr_tbl))
-                               tpt = get_expected_tpt(curr_tbl, rs_index);
+                               tmp_tbl = curr_tbl;
                        else if (table_type_matches(&tbl_type, other_tbl))
-                               tpt = get_expected_tpt(other_tbl, rs_index);
+                               tmp_tbl = other_tbl;
                        else
                                continue;
-
-                       /* Constants mean 1 transmission, 0 successes */
-                       if (i < retries)
-                               rs_collect_tx_data(window, rs_index, tpt, 1,
-                                               0);
-                       else
-                               rs_collect_tx_data(window, rs_index, tpt, 1,
-                                               legacy_success);
+                       rs_collect_tx_data(tmp_tbl, rs_index, 1,
+                                          i < retries ? 0 : legacy_success);
                }
 
                /* Update success/fail counts if not searching for new mode */
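
rs_collect_tx_data() above still maintains the success/failure sliding window described in its comment; only the expected-throughput lookup moved inside it. For readers unfamiliar with that window, here is a minimal stand-alone counter of the same shape: a 62-bit success bitmask plus running attempt/success counts. DEMO_WINDOW and the field names are assumptions, not the driver's:

#include <stdint.h>

#define DEMO_WINDOW 62                  /* how many recent attempts to remember */

struct demo_window {
        uint64_t data;                  /* bit i set => attempt i succeeded */
        int counter;                    /* attempts currently in the window */
        int success_counter;            /* successes currently in the window */
};

/* Record one transmission attempt, evicting the oldest once the window is full. */
static void demo_collect(struct demo_window *w, int success)
{
        const uint64_t oldest = 1ULL << (DEMO_WINDOW - 1);

        if (w->counter >= DEMO_WINDOW) {
                /* Window full: the bit about to be shifted out is the oldest attempt. */
                if (w->data & oldest)
                        w->success_counter--;
                w->counter--;
        }

        w->data = (w->data << 1) & ~(~0ULL << DEMO_WINDOW);
        if (success) {
                w->data |= 1;
                w->success_counter++;
        }
        w->counter++;
}
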
index db050b8..3352f70 100644 (file)
@@ -308,10 +308,13 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       /* Allocate and init all Tx and Command queues */
-       ret = iwl_txq_ctx_reset(priv);
-       if (ret)
-               return ret;
+       /* Allocate or reset and init all Tx and Command queues */
+       if (!priv->txq) {
+               ret = iwl_txq_ctx_alloc(priv);
+               if (ret)
+                       return ret;
+       } else
+               iwl_txq_ctx_reset(priv);
 
        set_bit(STATUS_INIT, &priv->status);
 
index 4ef7739..732590f 100644 (file)
@@ -442,7 +442,8 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
 /*****************************************************
 * TX
 ******************************************************/
-int iwl_txq_ctx_reset(struct iwl_priv *priv);
+int iwl_txq_ctx_alloc(struct iwl_priv *priv);
+void iwl_txq_ctx_reset(struct iwl_priv *priv);
 void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
                                 struct iwl_tx_queue *txq,
@@ -456,6 +457,8 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
 void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id);
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+                       int slots_num, u32 txq_id);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
 int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
 int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
index f0b7e6c..8dd0c03 100644 (file)
@@ -194,10 +194,34 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
        struct iwl_queue *q = &txq->q;
        struct device *dev = &priv->pci_dev->dev;
        int i;
+       bool huge = false;
 
        if (q->n_bd == 0)
                return;
 
+       for (; q->read_ptr != q->write_ptr;
+            q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+               /* we have no way to tell if it is a huge cmd ATM */
+               i = get_cmd_index(q, q->read_ptr, 0);
+
+               if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+                       huge = true;
+                       continue;
+               }
+
+               pci_unmap_single(priv->pci_dev,
+                                pci_unmap_addr(&txq->meta[i], mapping),
+                                pci_unmap_len(&txq->meta[i], len),
+                                PCI_DMA_BIDIRECTIONAL);
+       }
+       if (huge) {
+               i = q->n_window;
+               pci_unmap_single(priv->pci_dev,
+                                pci_unmap_addr(&txq->meta[i], mapping),
+                                pci_unmap_len(&txq->meta[i], len),
+                                PCI_DMA_BIDIRECTIONAL);
+       }
+
        /* De-alloc array of command/tx buffers */
        for (i = 0; i <= TFD_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);
@@ -410,6 +434,26 @@ out_free_arrays:
 }
 EXPORT_SYMBOL(iwl_tx_queue_init);
 
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+                       int slots_num, u32 txq_id)
+{
+       int actual_slots = slots_num;
+
+       if (txq_id == IWL_CMD_QUEUE_NUM)
+               actual_slots++;
+
+       memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+       txq->need_update = 0;
+
+       /* Initialize queue's high/low-water marks, and head/tail indexes */
+       iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+       /* Tell device where to find queue */
+       priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_tx_queue_reset);
+
 /**
  * iwl_hw_txq_ctx_free - Free TXQ Context
  *
@@ -421,8 +465,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 
        /* Tx queues */
        if (priv->txq) {
-               for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
-                    txq_id++)
+               for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                        if (txq_id == IWL_CMD_QUEUE_NUM)
                                iwl_cmd_queue_free(priv);
                        else
@@ -438,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
 /**
- * iwl_txq_ctx_reset - Reset TX queue context
- * Destroys all DMA structures and initialize them again
+ * iwl_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
  *
  * @param priv
  * @return error code
  */
-int iwl_txq_ctx_reset(struct iwl_priv *priv)
+int iwl_txq_ctx_alloc(struct iwl_priv *priv)
 {
-       int ret = 0;
+       int ret;
        int txq_id, slots_num;
        unsigned long flags;
 
@@ -504,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
        return ret;
 }
 
+void iwl_txq_ctx_reset(struct iwl_priv *priv)
+{
+       int txq_id, slots_num;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Turn off all Tx DMA fifos */
+       priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+       /* Tell NIC where to find the "keep warm" buffer */
+       iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /* Alloc and init all Tx queues, including the command queue (#4) */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+               slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+                           TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+       }
+}
+
 /**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ * iwl_txq_ctx_stop - Stop all Tx DMA channels
  */
 void iwl_txq_ctx_stop(struct iwl_priv *priv)
 {
@@ -525,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
                                    1000);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
-
-       /* Deallocate memory for all Tx queues */
-       iwl_hw_txq_ctx_free(priv);
 }
 EXPORT_SYMBOL(iwl_txq_ctx_stop);
 
@@ -1050,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
        spin_lock_irqsave(&priv->hcmd_lock, flags);
 
+       /* If this is a huge cmd, also set the huge flag on the meta.flags
+        * of the _original_ cmd.  This is used for DMA mapping cleanup.
+        */
+       if (cmd->flags & CMD_SIZE_HUGE) {
+               idx = get_cmd_index(q, q->write_ptr, 0);
+               txq->meta[idx].flags = CMD_SIZE_HUGE;
+       }
+
        idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
        out_cmd = txq->cmd[idx];
        out_meta = &txq->meta[idx];
@@ -1227,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
        bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
        struct iwl_device_cmd *cmd;
        struct iwl_cmd_meta *meta;
+       struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 
        /* If a Tx command is being handled and it isn't in the actual
         * command queue then there a command routing bug has been introduced
@@ -1240,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
                return;
        }
 
-       cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
-       cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
-       meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
+       /* If this is a huge cmd, clear the huge flag on the meta.flags
+        * of the _original_ cmd, so that iwl_cmd_queue_free() won't unmap
+        * the DMA buffer for the scan (huge) command.
+        */
+       if (huge) {
+               cmd_index = get_cmd_index(&txq->q, index, 0);
+               txq->meta[cmd_index].flags = 0;
+       }
+       cmd_index = get_cmd_index(&txq->q, index, huge);
+       cmd = txq->cmd[cmd_index];
+       meta = &txq->meta[cmd_index];
 
        pci_unmap_single(priv->pci_dev,
                         pci_unmap_addr(meta, mapping),
@@ -1264,6 +1344,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
                               get_cmd_string(cmd->hdr.cmd));
                wake_up_interruptible(&priv->wait_command_queue);
        }
+       meta->flags = 0;
 }
 EXPORT_SYMBOL(iwl_tx_cmd_complete);
 
index 5be11c9..e69d238 100644 (file)
@@ -236,6 +236,10 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
                               int log_all)
 {
        int i;
+
+       if (!mem)
+               return 0;
+
        for (i = 0; i < mem->nregions; ++i) {
                struct vhost_memory_region *m = mem->regions + i;
                unsigned long a = m->userspace_addr;
index aa3cd7c..4125937 100644 (file)
@@ -337,16 +337,15 @@ out:
 /*
  * Get ref for the oldest snapc for an inode with dirty data... that is, the
  * only snap context we are allowed to write back.
- *
- * Caller holds i_lock.
  */
-static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
-                                                     u64 *snap_size)
+static struct ceph_snap_context *get_oldest_context(struct inode *inode,
+                                                   u64 *snap_size)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc = NULL;
        struct ceph_cap_snap *capsnap = NULL;
 
+       spin_lock(&inode->i_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
                     capsnap->context, capsnap->dirty_pages);
@@ -357,21 +356,11 @@ static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
                        break;
                }
        }
-       if (!snapc && ci->i_snap_realm) {
-               snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
+       if (!snapc && ci->i_head_snapc) {
+               snapc = ceph_get_snap_context(ci->i_head_snapc);
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
        }
-       return snapc;
-}
-
-static struct ceph_snap_context *get_oldest_context(struct inode *inode,
-                                                   u64 *snap_size)
-{
-       struct ceph_snap_context *snapc = NULL;
-
-       spin_lock(&inode->i_lock);
-       snapc = __get_oldest_context(inode, snap_size);
        spin_unlock(&inode->i_lock);
        return snapc;
 }
@@ -392,7 +381,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        int len = PAGE_CACHE_SIZE;
        loff_t i_size;
        int err = 0;
-       struct ceph_snap_context *snapc;
+       struct ceph_snap_context *snapc, *oldest;
        u64 snap_size = 0;
        long writeback_stat;
 
@@ -413,13 +402,16 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
                dout("writepage %p page %p not dirty?\n", inode, page);
                goto out;
        }
-       if (snapc != get_oldest_context(inode, &snap_size)) {
+       oldest = get_oldest_context(inode, &snap_size);
+       if (snapc->seq > oldest->seq) {
                dout("writepage %p page %p snapc %p not writeable - noop\n",
                     inode, page, (void *)page->private);
                /* we should only noop if called by kswapd */
                WARN_ON((current->flags & PF_MEMALLOC) == 0);
+               ceph_put_snap_context(oldest);
                goto out;
        }
+       ceph_put_snap_context(oldest);
 
        /* is this a partial page at end of file? */
        if (snap_size)
@@ -458,7 +450,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        ClearPagePrivate(page);
        end_page_writeback(page);
        ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
-       ceph_put_snap_context(snapc);
+       ceph_put_snap_context(snapc);  /* page's reference */
 out:
        return err;
 }
@@ -558,9 +550,9 @@ static void writepages_finish(struct ceph_osd_request *req,
                        dout("inode %p skipping page %p\n", inode, page);
                        wbc->pages_skipped++;
                }
+               ceph_put_snap_context((void *)page->private);
                page->private = 0;
                ClearPagePrivate(page);
-               ceph_put_snap_context(snapc);
                dout("unlocking %d %p\n", i, page);
                end_page_writeback(page);
 
@@ -618,7 +610,7 @@ static int ceph_writepages_start(struct address_space *mapping,
        int range_whole = 0;
        int should_loop = 1;
        pgoff_t max_pages = 0, max_pages_ever = 0;
-       struct ceph_snap_context *snapc = NULL, *last_snapc = NULL;
+       struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
        struct pagevec pvec;
        int done = 0;
        int rc = 0;
@@ -770,9 +762,10 @@ get_more_pages:
                        }
 
                        /* only if matching snap context */
-                       if (snapc != (void *)page->private) {
-                               dout("page snapc %p != oldest %p\n",
-                                    (void *)page->private, snapc);
+                       pgsnapc = (void *)page->private;
+                       if (pgsnapc->seq > snapc->seq) {
+                               dout("page snapc %p %lld > oldest %p %lld\n",
+                                    pgsnapc, pgsnapc->seq, snapc, snapc->seq);
                                unlock_page(page);
                                if (!locked_pages)
                                        continue; /* keep looking for snap */
@@ -914,7 +907,10 @@ static int context_is_writeable_or_written(struct inode *inode,
                                           struct ceph_snap_context *snapc)
 {
        struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
-       return !oldest || snapc->seq <= oldest->seq;
+       int ret = !oldest || snapc->seq <= oldest->seq;
+
+       ceph_put_snap_context(oldest);
+       return ret;
 }
 
 /*
@@ -936,8 +932,8 @@ static int ceph_update_writeable_page(struct file *file,
        int pos_in_page = pos & ~PAGE_CACHE_MASK;
        int end_in_page = pos_in_page + len;
        loff_t i_size;
-       struct ceph_snap_context *snapc;
        int r;
+       struct ceph_snap_context *snapc, *oldest;
 
 retry_locked:
        /* writepages currently holds page lock, but if we change that later, */
@@ -947,23 +943,24 @@ retry_locked:
        BUG_ON(!ci->i_snap_realm);
        down_read(&mdsc->snap_rwsem);
        BUG_ON(!ci->i_snap_realm->cached_context);
-       if (page->private &&
-           (void *)page->private != ci->i_snap_realm->cached_context) {
+       snapc = (void *)page->private;
+       if (snapc && snapc != ci->i_head_snapc) {
                /*
                 * this page is already dirty in another (older) snap
                 * context!  is it writeable now?
                 */
-               snapc = get_oldest_context(inode, NULL);
+               oldest = get_oldest_context(inode, NULL);
                up_read(&mdsc->snap_rwsem);
 
-               if (snapc != (void *)page->private) {
+               if (snapc->seq > oldest->seq) {
+                       ceph_put_snap_context(oldest);
                        dout(" page %p snapc %p not current or oldest\n",
-                            page, (void *)page->private);
+                            page, snapc);
                        /*
                         * queue for writeback, and wait for snapc to
                         * be writeable or written
                         */
-                       snapc = ceph_get_snap_context((void *)page->private);
+                       snapc = ceph_get_snap_context(snapc);
                        unlock_page(page);
                        ceph_queue_writeback(inode);
                        r = wait_event_interruptible(ci->i_cap_wq,
@@ -973,6 +970,7 @@ retry_locked:
                                return r;
                        return -EAGAIN;
                }
+               ceph_put_snap_context(oldest);
 
                /* yay, writeable, do it now (without dropping page lock) */
                dout(" page %p snapc %p not current, but oldest\n",
index 3710e07..aa2239f 100644
@@ -1205,6 +1205,12 @@ retry:
                if (capsnap->dirty_pages || capsnap->writing)
                        continue;
 
+               /*
+                * if cap writeback already occurred, we should have dropped
+                * the capsnap in ceph_put_wrbuffer_cap_refs.
+                */
+               BUG_ON(capsnap->dirty == 0);
+
                /* pick mds, take s_mutex */
                mds = __ceph_get_cap_mds(ci, &mseq);
                if (session && session->s_mds != mds) {
@@ -2118,8 +2124,8 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
                }
        spin_unlock(&inode->i_lock);
 
-       dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had),
-            last ? "last" : "");
+       dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
+            last ? " last" : "", put ? " put" : "");
 
        if (last && !flushsnaps)
                ceph_check_caps(ci, 0, NULL);
@@ -2143,7 +2149,8 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
 {
        struct inode *inode = &ci->vfs_inode;
        int last = 0;
-       int last_snap = 0;
+       int complete_capsnap = 0;
+       int drop_capsnap = 0;
        int found = 0;
        struct ceph_cap_snap *capsnap = NULL;
 
@@ -2166,19 +2173,32 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                        if (capsnap->context == snapc) {
                                found = 1;
-                               capsnap->dirty_pages -= nr;
-                               last_snap = !capsnap->dirty_pages;
                                break;
                        }
                }
                BUG_ON(!found);
+               capsnap->dirty_pages -= nr;
+               if (capsnap->dirty_pages == 0) {
+                       complete_capsnap = 1;
+                       if (capsnap->dirty == 0)
+                               /* cap writeback completed before we created
+                                * the cap_snap; no FLUSHSNAP is needed */
+                               drop_capsnap = 1;
+               }
                dout("put_wrbuffer_cap_refs on %p cap_snap %p "
-                    " snap %lld %d/%d -> %d/%d %s%s\n",
+                    " snap %lld %d/%d -> %d/%d %s%s%s\n",
                     inode, capsnap, capsnap->context->seq,
                     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
                     ci->i_wrbuffer_ref, capsnap->dirty_pages,
                     last ? " (wrbuffer last)" : "",
-                    last_snap ? " (capsnap last)" : "");
+                    complete_capsnap ? " (complete capsnap)" : "",
+                    drop_capsnap ? " (drop capsnap)" : "");
+               if (drop_capsnap) {
+                       ceph_put_snap_context(capsnap->context);
+                       list_del(&capsnap->ci_item);
+                       list_del(&capsnap->flushing_item);
+                       ceph_put_cap_snap(capsnap);
+               }
        }
 
        spin_unlock(&inode->i_lock);
@@ -2186,10 +2206,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
        if (last) {
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
                iput(inode);
-       } else if (last_snap) {
+       } else if (complete_capsnap) {
                ceph_flush_snaps(ci);
                wake_up(&ci->i_cap_wq);
        }
+       if (drop_capsnap)
+               iput(inode);
 }
 
 /*
@@ -2465,8 +2487,8 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
                                break;
                        }
                        WARN_ON(capsnap->dirty_pages || capsnap->writing);
-                       dout(" removing cap_snap %p follows %lld\n",
-                            capsnap, follows);
+                       dout(" removing %p cap_snap %p follows %lld\n",
+                            inode, capsnap, follows);
                        ceph_put_snap_context(capsnap->context);
                        list_del(&capsnap->ci_item);
                        list_del(&capsnap->flushing_item);
index 7261dc6..ea8ee2e 100644
@@ -171,11 +171,11 @@ more:
        spin_lock(&inode->i_lock);
        spin_lock(&dcache_lock);
 
+       last = dentry;
+
        if (err < 0)
                goto out_unlock;
 
-       last = dentry;
-
        p = p->prev;
        filp->f_pos++;
 
@@ -312,7 +312,7 @@ more:
                req->r_readdir_offset = fi->next_offset;
                req->r_args.readdir.frag = cpu_to_le32(frag);
                req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
-               req->r_num_caps = max_entries;
+               req->r_num_caps = max_entries + 1;
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                if (err < 0) {
                        ceph_mdsc_put_request(req);
@@ -489,6 +489,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                struct inode *inode = ceph_get_snapdir(parent);
                dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
                     dentry, dentry->d_name.len, dentry->d_name.name, inode);
+               BUG_ON(!d_unhashed(dentry));
                d_add(dentry, inode);
                err = 0;
        }
index aca82d5..26f883c 100644
@@ -886,6 +886,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
        struct inode *in = NULL;
        struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
+       struct ceph_client *client = ceph_sb_to_client(sb);
        int i = 0;
        int err = 0;
 
@@ -949,7 +950,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                        return err;
        }
 
-       if (rinfo->head->is_dentry && !req->r_aborted) {
+       /*
+        * ignore null lease/binding on snapdir ENOENT, or else we
+        * will have trouble splicing in the virtual snapdir later
+        */
+       if (rinfo->head->is_dentry && !req->r_aborted &&
+           (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
+                                              client->mount_args->snapdir_name,
+                                              req->r_dentry->d_name.len))) {
                /*
                 * lookup link rename   : null -> possibly existing inode
                 * mknod symlink mkdir  : null -> new inode
index 8f1715f..cdaaa13 100644
@@ -30,6 +30,10 @@ static char tag_msg = CEPH_MSGR_TAG_MSG;
 static char tag_ack = CEPH_MSGR_TAG_ACK;
 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
 
+#ifdef CONFIG_LOCKDEP
+static struct lock_class_key socket_class;
+#endif
+
 
 static void queue_con(struct ceph_connection *con);
 static void con_work(struct work_struct *);
@@ -228,6 +232,10 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con)
        con->sock = sock;
        sock->sk->sk_allocation = GFP_NOFS;
 
+#ifdef CONFIG_LOCKDEP
+       lockdep_set_class(&sock->sk->sk_lock, &socket_class);
+#endif
+
        set_sock_callbacks(sock, con);
 
        dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
@@ -333,6 +341,7 @@ static void reset_connection(struct ceph_connection *con)
                con->out_msg = NULL;
        }
        con->in_seq = 0;
+       con->in_seq_acked = 0;
 }
 
 /*
index 21c6623..2e2c15e 100644
@@ -314,71 +314,6 @@ bad:
        return ERR_PTR(err);
 }
 
-
-/*
- * osd map
- */
-void ceph_osdmap_destroy(struct ceph_osdmap *map)
-{
-       dout("osdmap_destroy %p\n", map);
-       if (map->crush)
-               crush_destroy(map->crush);
-       while (!RB_EMPTY_ROOT(&map->pg_temp)) {
-               struct ceph_pg_mapping *pg =
-                       rb_entry(rb_first(&map->pg_temp),
-                                struct ceph_pg_mapping, node);
-               rb_erase(&pg->node, &map->pg_temp);
-               kfree(pg);
-       }
-       while (!RB_EMPTY_ROOT(&map->pg_pools)) {
-               struct ceph_pg_pool_info *pi =
-                       rb_entry(rb_first(&map->pg_pools),
-                                struct ceph_pg_pool_info, node);
-               rb_erase(&pi->node, &map->pg_pools);
-               kfree(pi);
-       }
-       kfree(map->osd_state);
-       kfree(map->osd_weight);
-       kfree(map->osd_addr);
-       kfree(map);
-}
-
-/*
- * adjust max osd value.  reallocate arrays.
- */
-static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
-{
-       u8 *state;
-       struct ceph_entity_addr *addr;
-       u32 *weight;
-
-       state = kcalloc(max, sizeof(*state), GFP_NOFS);
-       addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
-       weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
-       if (state == NULL || addr == NULL || weight == NULL) {
-               kfree(state);
-               kfree(addr);
-               kfree(weight);
-               return -ENOMEM;
-       }
-
-       /* copy old? */
-       if (map->osd_state) {
-               memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
-               memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
-               memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
-               kfree(map->osd_state);
-               kfree(map->osd_addr);
-               kfree(map->osd_weight);
-       }
-
-       map->osd_state = state;
-       map->osd_weight = weight;
-       map->osd_addr = addr;
-       map->max_osd = max;
-       return 0;
-}
-
 /*
  * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
  * to a set of osds)
@@ -482,6 +417,13 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
        return NULL;
 }
 
+static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
+{
+       rb_erase(&pi->node, root);
+       kfree(pi->name);
+       kfree(pi);
+}
+
 void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
 {
        ceph_decode_copy(p, &pi->v, sizeof(pi->v));
@@ -490,6 +432,98 @@ void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
        *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
 }
 
+static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
+{
+       struct ceph_pg_pool_info *pi;
+       u32 num, len, pool;
+
+       ceph_decode_32_safe(p, end, num, bad);
+       dout(" %d pool names\n", num);
+       while (num--) {
+               ceph_decode_32_safe(p, end, pool, bad);
+               ceph_decode_32_safe(p, end, len, bad);
+               dout("  pool %d len %d\n", pool, len);
+               pi = __lookup_pg_pool(&map->pg_pools, pool);
+               if (pi) {
+                       kfree(pi->name);
+                       pi->name = kmalloc(len + 1, GFP_NOFS);
+                       if (pi->name) {
+                               memcpy(pi->name, *p, len);
+                               pi->name[len] = '\0';
+                               dout("  name is %s\n", pi->name);
+                       }
+               }
+               *p += len;
+       }
+       return 0;
+
+bad:
+       return -EINVAL;
+}
+
+/*
+ * osd map
+ */
+void ceph_osdmap_destroy(struct ceph_osdmap *map)
+{
+       dout("osdmap_destroy %p\n", map);
+       if (map->crush)
+               crush_destroy(map->crush);
+       while (!RB_EMPTY_ROOT(&map->pg_temp)) {
+               struct ceph_pg_mapping *pg =
+                       rb_entry(rb_first(&map->pg_temp),
+                                struct ceph_pg_mapping, node);
+               rb_erase(&pg->node, &map->pg_temp);
+               kfree(pg);
+       }
+       while (!RB_EMPTY_ROOT(&map->pg_pools)) {
+               struct ceph_pg_pool_info *pi =
+                       rb_entry(rb_first(&map->pg_pools),
+                                struct ceph_pg_pool_info, node);
+               __remove_pg_pool(&map->pg_pools, pi);
+       }
+       kfree(map->osd_state);
+       kfree(map->osd_weight);
+       kfree(map->osd_addr);
+       kfree(map);
+}
+
+/*
+ * adjust max osd value.  reallocate arrays.
+ */
+static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
+{
+       u8 *state;
+       struct ceph_entity_addr *addr;
+       u32 *weight;
+
+       state = kcalloc(max, sizeof(*state), GFP_NOFS);
+       addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
+       weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
+       if (state == NULL || addr == NULL || weight == NULL) {
+               kfree(state);
+               kfree(addr);
+               kfree(weight);
+               return -ENOMEM;
+       }
+
+       /* copy old? */
+       if (map->osd_state) {
+               memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
+               memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
+               memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
+               kfree(map->osd_state);
+               kfree(map->osd_addr);
+               kfree(map->osd_weight);
+       }
+
+       map->osd_state = state;
+       map->osd_weight = weight;
+       map->osd_addr = addr;
+       map->max_osd = max;
+       return 0;
+}
+
 /*
  * decode a full map.
  */
@@ -526,7 +560,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
        ceph_decode_32_safe(p, end, max, bad);
        while (max--) {
                ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
-               pi = kmalloc(sizeof(*pi), GFP_NOFS);
+               pi = kzalloc(sizeof(*pi), GFP_NOFS);
                if (!pi)
                        goto bad;
                pi->id = ceph_decode_32(p);
@@ -539,6 +573,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
                __decode_pool(p, pi);
                __insert_pg_pool(&map->pg_pools, pi);
        }
+
+       if (version >= 5 && __decode_pool_names(p, end, map) < 0)
+               goto bad;
+
        ceph_decode_32_safe(p, end, map->pool_max, bad);
 
        ceph_decode_32_safe(p, end, map->flags, bad);
@@ -712,7 +750,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                }
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (!pi) {
-                       pi = kmalloc(sizeof(*pi), GFP_NOFS);
+                       pi = kzalloc(sizeof(*pi), GFP_NOFS);
                        if (!pi) {
                                err = -ENOMEM;
                                goto bad;
@@ -722,6 +760,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                }
                __decode_pool(p, pi);
        }
+       if (version >= 5 && __decode_pool_names(p, end, map) < 0)
+               goto bad;
 
        /* old_pool */
        ceph_decode_32_safe(p, end, len, bad);
@@ -730,10 +770,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 
                ceph_decode_32_safe(p, end, pool, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
-               if (pi) {
-                       rb_erase(&pi->node, &map->pg_pools);
-                       kfree(pi);
-               }
+               if (pi)
+                       __remove_pg_pool(&map->pg_pools, pi);
        }
 
        /* new_up */
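
__decode_pool_names() above effectively documents the version >= 5 addition to the osdmap encoding: a 32-bit count followed by (pool id, name length, name bytes) triples, read with the bounds-checked ceph_decode_*_safe helpers. The stand-alone decoder below sketches the same layout over a flat buffer, assuming little-endian 32-bit integers as in the rest of the ceph wire format:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* bounds-checked little-endian u32 read, in the spirit of ceph_decode_32_safe() */
static int decode_u32(const uint8_t **p, const uint8_t *end, uint32_t *v)
{
        if (end - *p < 4)
                return -1;
        *v = (uint32_t)(*p)[0] | (uint32_t)(*p)[1] << 8 |
             (uint32_t)(*p)[2] << 16 | (uint32_t)(*p)[3] << 24;
        *p += 4;
        return 0;
}

/* pool-name section: u32 count, then { u32 pool, u32 len, char name[len] } */
static int decode_pool_names(const uint8_t **p, const uint8_t *end)
{
        uint32_t num, pool, len;
        char name[64];

        if (decode_u32(p, end, &num) < 0)
                return -1;
        while (num--) {
                if (decode_u32(p, end, &pool) < 0 ||
                    decode_u32(p, end, &len) < 0 ||
                    (uint32_t)(end - *p) < len || len >= sizeof(name))
                        return -1;
                memcpy(name, *p, len);
                name[len] = '\0';
                *p += len;
                printf("pool %u is named \"%s\"\n", pool, name);
        }
        return 0;
}

int main(void)
{
        /* one entry: pool 3 named "rbd" */
        const uint8_t buf[] = { 1,0,0,0,  3,0,0,0,  3,0,0,0,  'r','b','d' };
        const uint8_t *p = buf;

        return decode_pool_names(&p, buf + sizeof(buf)) < 0;
}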
index 1fb55af..8bc9f1e 100644
@@ -23,6 +23,7 @@ struct ceph_pg_pool_info {
        int id;
        struct ceph_pg_pool v;
        int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
+       char *name;
 };
 
 struct ceph_pg_mapping {
index 26ac8b8..a1fc1d0 100644
 /*
  * osdmap encoding versions
  */
-#define CEPH_OSDMAP_INC_VERSION 4
-#define CEPH_OSDMAP_VERSION     4
+#define CEPH_OSDMAP_INC_VERSION     5
+#define CEPH_OSDMAP_INC_VERSION_EXT 5
+#define CEPH_OSDMAP_VERSION         5
+#define CEPH_OSDMAP_VERSION_EXT     5
 
 /*
  * fs id
index e6f9bc5..2b88126 100644
@@ -431,8 +431,7 @@ static int dup_array(u64 **dst, __le64 *src, int num)
  * Caller must hold snap_rwsem for read (i.e., the realm topology won't
  * change).
  */
-void ceph_queue_cap_snap(struct ceph_inode_info *ci,
-                        struct ceph_snap_context *snapc)
+void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 {
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap_snap *capsnap;
@@ -451,10 +450,11 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
                   as no new writes are allowed to start when pending, so any
                   writes in progress now were started before the previous
                   cap_snap.  lucky us. */
-               dout("queue_cap_snap %p snapc %p seq %llu used %d"
-                    " already pending\n", inode, snapc, snapc->seq, used);
+               dout("queue_cap_snap %p already pending\n", inode);
                kfree(capsnap);
        } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
+               struct ceph_snap_context *snapc = ci->i_head_snapc;
+
                igrab(inode);
 
                atomic_set(&capsnap->nref, 1);
@@ -463,7 +463,6 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
                INIT_LIST_HEAD(&capsnap->flushing_item);
 
                capsnap->follows = snapc->seq - 1;
-               capsnap->context = ceph_get_snap_context(snapc);
                capsnap->issued = __ceph_caps_issued(ci, NULL);
                capsnap->dirty = __ceph_caps_dirty(ci);
 
@@ -480,7 +479,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
                   snapshot. */
                capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
                ci->i_wrbuffer_ref_head = 0;
-               ceph_put_snap_context(ci->i_head_snapc);
+               capsnap->context = snapc;
                ci->i_head_snapc = NULL;
                list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
 
@@ -522,15 +521,17 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
        capsnap->ctime = inode->i_ctime;
        capsnap->time_warp_seq = ci->i_time_warp_seq;
        if (capsnap->dirty_pages) {
-               dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu "
+               dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
                     "still has %d dirty pages\n", inode, capsnap,
                     capsnap->context, capsnap->context->seq,
-                    capsnap->size, capsnap->dirty_pages);
+                    ceph_cap_string(capsnap->dirty), capsnap->size,
+                    capsnap->dirty_pages);
                return 0;
        }
-       dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n",
+       dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
             inode, capsnap, capsnap->context,
-            capsnap->context->seq, capsnap->size);
+            capsnap->context->seq, ceph_cap_string(capsnap->dirty),
+            capsnap->size);
 
        spin_lock(&mdsc->snap_flush_lock);
        list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
@@ -602,7 +603,7 @@ more:
                                if (lastinode)
                                        iput(lastinode);
                                lastinode = inode;
-                               ceph_queue_cap_snap(ci, realm->cached_context);
+                               ceph_queue_cap_snap(ci);
                                spin_lock(&realm->inodes_with_caps_lock);
                        }
                        spin_unlock(&realm->inodes_with_caps_lock);
@@ -824,8 +825,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        spin_unlock(&realm->inodes_with_caps_lock);
                        spin_unlock(&inode->i_lock);
 
-                       ceph_queue_cap_snap(ci,
-                                           ci->i_snap_realm->cached_context);
+                       ceph_queue_cap_snap(ci);
 
                        iput(inode);
                        continue;
index ca702c6..e30dfbb 100644
@@ -715,8 +715,7 @@ extern int ceph_update_snap_trace(struct ceph_mds_client *m,
 extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
                             struct ceph_mds_session *session,
                             struct ceph_msg *msg);
-extern void ceph_queue_cap_snap(struct ceph_inode_info *ci,
-                               struct ceph_snap_context *snapc);
+extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
 extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
                                  struct ceph_cap_snap *capsnap);
 extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
index 2a3d352..a8766c4 100644
@@ -1294,7 +1294,8 @@ static int nfs4_init_server(struct nfs_server *server,
 
        /* Initialise the client representation from the mount data */
        server->flags = data->flags;
-       server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
+       server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
+               NFS_CAP_POSIX_LOCK;
        server->options = data->options;
 
        /* Get a client record */
index c6f2750..be46f26 100644
@@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
                                res = NULL;
                                goto out;
                        /* This turned out not to be a regular file */
+                       case -EISDIR:
                        case -ENOTDIR:
                                goto no_open;
                        case -ELOOP:
                                if (!(nd->intent.open.flags & O_NOFOLLOW))
                                        goto no_open;
-                       /* case -EISDIR: */
                        /* case -EINVAL: */
                        default:
                                goto out;
index 737128f..50a56ed 100644
@@ -623,10 +623,10 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
        list_for_each_entry(pos, &nfsi->open_files, list) {
                if (cred != NULL && pos->cred != cred)
                        continue;
-               if ((pos->mode & mode) == mode) {
-                       ctx = get_nfs_open_context(pos);
-                       break;
-               }
+               if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
+                       continue;
+               ctx = get_nfs_open_context(pos);
+               break;
        }
        spin_unlock(&inode->i_lock);
        return ctx;
index fe0cd9e..6380670 100644
@@ -1523,6 +1523,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
                nfs_post_op_update_inode(dir, o_res->dir_attr);
        } else
                nfs_refresh_inode(dir, o_res->dir_attr);
+       if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
+               server->caps &= ~NFS_CAP_POSIX_LOCK;
        if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
                status = _nfs4_proc_open_confirm(data);
                if (status != 0)
@@ -1664,7 +1666,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
        status = PTR_ERR(state);
        if (IS_ERR(state))
                goto err_opendata_put;
-       if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
+       if (server->caps & NFS_CAP_POSIX_LOCK)
                set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
        nfs4_opendata_put(opendata);
        nfs4_put_state_owner(sp);
index 53ff70e..de38d63 100644
@@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct page *page)
                struct inode *inode = page->mapping->host;
                struct nfs_server *nfss = NFS_SERVER(inode);
 
+               page_cache_get(page);
                if (atomic_long_inc_return(&nfss->writeback) >
                                NFS_CONGESTION_ON_THRESH) {
                        set_bdi_congested(&nfss->backing_dev_info,
@@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struct page *page)
        struct nfs_server *nfss = NFS_SERVER(inode);
 
        end_page_writeback(page);
+       page_cache_release(page);
        if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
@@ -421,6 +423,7 @@ static void
 nfs_mark_request_dirty(struct nfs_page *req)
 {
        __set_page_dirty_nobuffers(req->wb_page);
+       __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -660,9 +663,11 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
        req = nfs_setup_write_request(ctx, page, offset, count);
        if (IS_ERR(req))
                return PTR_ERR(req);
+       nfs_mark_request_dirty(req);
        /* Update file length */
        nfs_grow_file(page, offset, count);
        nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+       nfs_mark_request_dirty(req);
        nfs_clear_page_tag_locked(req);
        return 0;
 }
@@ -739,8 +744,6 @@ int nfs_updatepage(struct file *file, struct page *page,
        status = nfs_writepage_setup(ctx, page, offset, count);
        if (status < 0)
                nfs_set_pageerror(page);
-       else
-               __set_page_dirty_nobuffers(page);
 
        dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
                        status, (long long)i_size_read(inode));
@@ -749,13 +752,12 @@ int nfs_updatepage(struct file *file, struct page *page,
 
 static void nfs_writepage_release(struct nfs_page *req)
 {
+       struct page *page = req->wb_page;
 
-       if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
-               nfs_end_page_writeback(req->wb_page);
+       if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
                nfs_inode_remove_request(req);
-       } else
-               nfs_end_page_writeback(req->wb_page);
        nfs_clear_page_tag_locked(req);
+       nfs_end_page_writeback(page);
 }
 
 static int flush_task_priority(int how)
@@ -779,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
                int how)
 {
        struct inode *inode = req->wb_context->path.dentry->d_inode;
-       int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
        int priority = flush_task_priority(how);
        struct rpc_task *task;
        struct rpc_message msg = {
@@ -794,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
-               .flags = flags,
+               .flags = RPC_TASK_ASYNC,
                .priority = priority,
        };
+       int ret = 0;
 
        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */
@@ -835,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
                (unsigned long long)data->args.offset);
 
        task = rpc_run_task(&task_setup_data);
-       if (IS_ERR(task))
-               return PTR_ERR(task);
+       if (IS_ERR(task)) {
+               ret = PTR_ERR(task);
+               goto out;
+       }
+       if (how & FLUSH_SYNC) {
+               ret = rpc_wait_for_completion_task(task);
+               if (ret == 0)
+                       ret = task->tk_status;
+       }
        rpc_put_task(task);
-       return 0;
+out:
+       return ret;
 }
 
 /* If a nfs_flush_* function fails, it should remove reqs from @head and
@@ -847,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
  */
 static void nfs_redirty_request(struct nfs_page *req)
 {
+       struct page *page = req->wb_page;
+
        nfs_mark_request_dirty(req);
-       nfs_end_page_writeback(req->wb_page);
        nfs_clear_page_tag_locked(req);
+       nfs_end_page_writeback(page);
 }
 
 /*
@@ -1084,16 +1096,15 @@ static void nfs_writeback_release_full(void *calldata)
                if (nfs_write_need_commit(data)) {
                        memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
                        nfs_mark_request_commit(req);
-                       nfs_end_page_writeback(page);
                        dprintk(" marked for commit\n");
                        goto next;
                }
                dprintk(" OK\n");
 remove_request:
-               nfs_end_page_writeback(page);
                nfs_inode_remove_request(req);
        next:
                nfs_clear_page_tag_locked(req);
+               nfs_end_page_writeback(page);
        }
        nfs_writedata_release(calldata);
 }
@@ -1207,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct list_head *head,
 {
        struct nfs_page *first = nfs_list_entry(head->next);
        struct inode *inode = first->wb_context->path.dentry->d_inode;
-       int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
        int priority = flush_task_priority(how);
        struct rpc_task *task;
        struct rpc_message msg = {
@@ -1222,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct list_head *head,
                .callback_ops = &nfs_commit_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
-               .flags = flags,
+               .flags = RPC_TASK_ASYNC,
                .priority = priority,
        };
 
@@ -1252,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct list_head *head,
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
+       if (how & FLUSH_SYNC)
+               rpc_wait_for_completion_task(task);
        rpc_put_task(task);
        return 0;
 }
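
The nfs_write_rpcsetup() and nfs_commit_rpcsetup() hunks switch every call to RPC_TASK_ASYNC and make FLUSH_SYNC callers wait afterwards via rpc_wait_for_completion_task(), propagating tk_status as the return value. The sketch below models only that control flow; the task struct, run_task() and wait_for_completion() are trivial placeholders, not the real RPC API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define FLUSH_SYNC 1                    /* placeholder for the real flag */

struct task {
        int tk_status;                  /* result of the call */
};

/* stand-in for rpc_run_task(): always "runs" and records a status */
static struct task *run_task(int status)
{
        struct task *t = malloc(sizeof(*t));

        if (t)
                t->tk_status = status;
        return t;
}

/* stand-in for rpc_wait_for_completion_task(): nothing to wait for here */
static int wait_for_completion(struct task *t)
{
        (void)t;
        return 0;
}

/* mirrors the new tail of nfs_write_rpcsetup(): dispatch asynchronously,
 * and only block (and propagate tk_status) when FLUSH_SYNC was requested */
static int write_rpcsetup(int how, int rpc_status)
{
        struct task *task;
        int ret = 0;

        task = run_task(rpc_status);
        if (!task)
                return -ENOMEM;
        if (how & FLUSH_SYNC) {
                ret = wait_for_completion(task);
                if (ret == 0)
                        ret = task->tk_status;
        }
        free(task);                     /* stands in for rpc_put_task() */
        return ret;
}

int main(void)
{
        printf("async flush: %d\n", write_rpcsetup(0, -5));
        printf("sync flush:  %d\n", write_rpcsetup(FLUSH_SYNC, -5));
        return 0;
}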
index 40b1101..81f3b14 100644
@@ -1,21 +1,26 @@
 /*
  * Char device interface.
  *
- * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
 
 #ifndef _LINUX_FIREWIRE_CDEV_H
@@ -438,7 +443,7 @@ struct fw_cdev_remove_descriptor {
  * @type:      %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE
  * @header_size: Header size to strip for receive contexts
  * @channel:   Channel to bind to
- * @speed:     Speed to transmit at
+ * @speed:     Speed for transmit contexts
  * @closure:   To be returned in &fw_cdev_event_iso_interrupt
  * @handle:    Handle to context, written back by kernel
  *
@@ -451,6 +456,9 @@ struct fw_cdev_remove_descriptor {
  * If a context was successfully created, the kernel writes back a handle to the
  * context, which must be passed in for subsequent operations on that context.
  *
+ * For receive contexts, @header_size must be at least 4 and must be a multiple
+ * of 4.
+ *
  * Note that the effect of a @header_size > 4 depends on
  * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
  */
@@ -481,10 +489,34 @@ struct fw_cdev_create_iso_context {
  *
  * &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
  *
- * Use the FW_CDEV_ISO_ macros to fill in @control.  The sy and tag fields are
- * specified by IEEE 1394a and IEC 61883.
- *
- * FIXME - finish this documentation
+ * Use the FW_CDEV_ISO_ macros to fill in @control.
+ *
+ * For transmit packets, the header length must be a multiple of 4 and specifies
+ * the numbers of bytes in @header that will be prepended to the packet's
+ * payload; these bytes are copied into the kernel and will not be accessed
+ * after the ioctl has returned.  The sy and tag fields are copied to the iso
+ * packet header (these fields are specified by IEEE 1394a and IEC 61883-1).
+ * The skip flag specifies that no packet is to be sent in a frame; when using
+ * this, all other fields except the interrupt flag must be zero.
+ *
+ * For receive packets, the header length must be a multiple of the context's
+ * header size; if the header length is larger than the context's header size,
+ * multiple packets are queued for this entry.  The sy and tag fields are
+ * ignored.  If the sync flag is set, the context drops all packets until
+ * a packet with a matching sy field is received (the sync value to wait for is
+ * specified in the &fw_cdev_start_iso structure).  The payload length defines
+ * how many payload bytes can be received for one packet (in addition to payload
+ * quadlets that have been defined as headers and are stripped and returned in
+ * the &fw_cdev_event_iso_interrupt structure).  If more bytes are received, the
+ * additional bytes are dropped.  If less bytes are received, the remaining
+ * bytes in this part of the payload buffer will not be written to, not even by
+ * the next packet, i.e., packets received in consecutive frames will not
+ * necessarily be consecutive in memory.  If an entry has queued multiple
+ * packets, the payload length is divided equally among them.
+ *
+ * When a packet with the interrupt flag set has been completed, the
+ * &fw_cdev_event_iso_interrupt event will be sent.  An entry that has queued
+ * multiple receive packets is completed when its last packet is completed.
  */
 struct fw_cdev_iso_packet {
        __u32 control;
@@ -501,7 +533,7 @@ struct fw_cdev_iso_packet {
  * Queue a number of isochronous packets for reception or transmission.
  * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs,
  * which describe how to transmit from or receive into a contiguous region
- * of a mmap()'ed payload buffer.  As part of the packet descriptors,
+ * of a mmap()'ed payload buffer.  As part of transmit packet descriptors,
  * a series of headers can be supplied, which will be prepended to the
  * payload during DMA.
  *
@@ -620,8 +652,8 @@ struct fw_cdev_get_cycle_timer2 {
  * instead of allocated.
  * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
  *
- * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources
- * for the lifetime of the fd or handle.
+ * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
+ * for the lifetime of the fd or @handle.
  * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
  * for the duration of a bus generation.
  *
index b316770..9c63f06 100644
@@ -1,3 +1,28 @@
+/*
+ * IEEE 1394 constants.
+ *
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
 #ifndef _LINUX_FIREWIRE_CONSTANTS_H
 #define _LINUX_FIREWIRE_CONSTANTS_H
 
@@ -21,7 +46,7 @@
 #define EXTCODE_WRAP_ADD               0x6
 #define EXTCODE_VENDOR_DEPENDENT       0x7
 
-/* Juju specific tcodes */
+/* Linux firewire-core (Juju) specific tcodes */
 #define TCODE_LOCK_MASK_SWAP           (0x10 | EXTCODE_MASK_SWAP)
 #define TCODE_LOCK_COMPARE_SWAP                (0x10 | EXTCODE_COMPARE_SWAP)
 #define TCODE_LOCK_FETCH_ADD           (0x10 | EXTCODE_FETCH_ADD)
@@ -36,7 +61,7 @@
 #define RCODE_TYPE_ERROR               0x6
 #define RCODE_ADDRESS_ERROR            0x7
 
-/* Juju specific rcodes */
+/* Linux firewire-core (Juju) specific rcodes */
 #define RCODE_SEND_ERROR               0x10
 #define RCODE_CANCELLED                        0x11
 #define RCODE_BUSY                     0x12
index 3bd018b..c964cd7 100644
@@ -44,6 +44,7 @@ struct matrix_keymap_data {
  * @active_low: gpio polarity
  * @wakeup: controls whether the device should be set up as wakeup
  *     source
+ * @no_autorepeat: disable key autorepeat
  *
  * This structure represents platform-specific data that use used by
  * matrix_keypad driver to perform proper initialization.
@@ -64,6 +65,7 @@ struct matrix_keypad_platform_data {
 
        bool            active_low;
        bool            wakeup;
+       bool            no_autorepeat;
 };
 
 /**
index 717a5e5..e82957a 100644
@@ -176,6 +176,7 @@ struct nfs_server {
 #define NFS_CAP_ATIME          (1U << 11)
 #define NFS_CAP_CTIME          (1U << 12)
 #define NFS_CAP_MTIME          (1U << 13)
+#define NFS_CAP_POSIX_LOCK     (1U << 14)
 
 
 /* maximum number of slots to use */
index 15ef962..468551e 100644
@@ -183,6 +183,10 @@ extern int  sysctl_x25_clear_request_timeout;
 extern int  sysctl_x25_ack_holdback_timeout;
 extern int  sysctl_x25_forward;
 
+extern int x25_parse_address_block(struct sk_buff *skb,
+               struct x25_address *called_addr,
+               struct x25_address *calling_addr);
+
 extern int  x25_addr_ntoa(unsigned char *, struct x25_address *,
                          struct x25_address *);
 extern int  x25_addr_aton(unsigned char *, struct x25_address *,
index 4d22896..a8c9621 100644
@@ -420,7 +420,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                         * User space encodes device types as two-byte values,
                         * so we need to recode them
                         */
-                       swdev = old_decode_dev(swap_area.dev);
+                       swdev = new_decode_dev(swap_area.dev);
                        if (swdev) {
                                offset = swap_area.offset;
                                data->swap = swap_type_of(swdev, offset, NULL);
index ff01710..935248b 100644
@@ -356,7 +356,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
        bool "Kernel memory leak detector"
        depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-               (X86 || ARM || PPC || S390 || SUPERH || MICROBLAZE)
+               (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
 
        select DEBUG_FS if SYSFS
        select STACKTRACE if STACKTRACE_SUPPORT
index ba8b670..01e6427 100644
@@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
         * Now parse out the first token and use it as the name for the
         * driver to filter for.
         */
-       for (i = 0; i < NAME_MAX_LEN; ++i) {
+       for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
                current_driver_name[i] = buf[i];
                if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
                        break;
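
The filter_write() fix above is the usual reserve-a-byte bound: copying up to NAME_MAX_LEN characters into a NAME_MAX_LEN buffer leaves no room for the trailing NUL, so the loop must stop at NAME_MAX_LEN - 1. A tiny stand-alone illustration of the corrected bound (the buffer and NAME_MAX_LEN below are local stand-ins, not the driver's):

#include <ctype.h>
#include <stdio.h>

#define NAME_MAX_LEN 8                  /* small on purpose, to make truncation visible */

static char current_driver_name[NAME_MAX_LEN];

/* copy the first whitespace-delimited token, stopping one short of the
 * buffer size so index i is always a valid slot for the terminating NUL */
static void copy_token(const char *buf)
{
        int i;

        for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
                current_driver_name[i] = buf[i];
                if (isspace((unsigned char)buf[i]) || buf[i] == 0)
                        break;
        }
        current_driver_name[i] = 0;
}

int main(void)
{
        copy_token("averylongdrivername 0000:00:02.0");
        printf("filter = \"%s\"\n", current_driver_name);  /* truncated but terminated */
        return 0;
}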
index 24112e5..7376b7c 100644
@@ -408,12 +408,12 @@ enum format_type {
 };
 
 struct printf_spec {
-       u16     type;
-       s16     field_width;    /* width of output field */
+       u8      type;           /* format_type enum */
        u8      flags;          /* flags to number() */
-       u8      base;
-       s8      precision;      /* # of digits/chars */
-       u8      qualifier;
+       u8      base;           /* number base, 8, 10 or 16 only */
+       u8      qualifier;      /* number qualifier, one of 'hHlLtzZ' */
+       s16     field_width;    /* width of output field */
+       s16     precision;      /* # of digits/chars */
 };
 
 static char *number(char *buf, char *end, unsigned long long num,
index 6980625..f29ada8 100644
@@ -723,7 +723,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
                if (!pskb_may_pull(skb, len))
                        return -EINVAL;
 
-               grec = (void *)(skb->data + len);
+               grec = (void *)(skb->data + len - sizeof(*grec));
                group = grec->grec_mca;
                type = grec->grec_type;
 
index 3a7dffb..da99cf1 100644
@@ -445,7 +445,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                                return -EFAULT;
                        }
                } else if (count == 1) {
-                       if (copy_from_user(&sfilter, optval, optlen))
+                       if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
                                return -EFAULT;
                }
 
index 954bbfb..8fef859 100644
@@ -472,8 +472,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
                        if (hslot->count < hslot2->count)
                                goto begin;
 
-                       result = udp4_lib_lookup2(net, INADDR_ANY, sport,
-                                                 daddr, hnum, dif,
+                       result = udp4_lib_lookup2(net, saddr, sport,
+                                                 INADDR_ANY, hnum, dif,
                                                  hslot2, slot2);
                }
                rcu_read_unlock();
index c177aea..9082485 100644
@@ -259,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net,
                        if (hslot->count < hslot2->count)
                                goto begin;
 
-                       result = udp6_lib_lookup2(net, &in6addr_any, sport,
-                                                 daddr, hnum, dif,
+                       result = udp6_lib_lookup2(net, saddr, sport,
+                                                 &in6addr_any, hnum, dif,
                                                  hslot2, slot2);
                }
                rcu_read_unlock();
index 06c33b6..b887e48 100644
@@ -225,11 +225,11 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
                        switch (sdata->vif.type) {
                        case NL80211_IFTYPE_AP:
                                sdata->vif.bss_conf.enable_beacon =
-                                       !!rcu_dereference(sdata->u.ap.beacon);
+                                       !!sdata->u.ap.beacon;
                                break;
                        case NL80211_IFTYPE_ADHOC:
                                sdata->vif.bss_conf.enable_beacon =
-                                       !!rcu_dereference(sdata->u.ibss.presp);
+                                       !!sdata->u.ibss.presp;
                                break;
                        case NL80211_IFTYPE_MESH_POINT:
                                sdata->vif.bss_conf.enable_beacon = true;
index 58e3e3a..859ee5f 100644
@@ -750,9 +750,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
 
        switch (fc & IEEE80211_FCTL_STYPE) {
        case IEEE80211_STYPE_ACTION:
-               if (skb->len < IEEE80211_MIN_ACTION_SIZE)
-                       return RX_DROP_MONITOR;
-               /* fall through */
        case IEEE80211_STYPE_PROBE_RESP:
        case IEEE80211_STYPE_BEACON:
                skb_queue_tail(&ifmsh->skb_queue, skb);
index f0accf6..04ea07f 100644
@@ -1974,6 +1974,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                        goto handled;
                }
                break;
+       case MESH_PLINK_CATEGORY:
+       case MESH_PATH_SEL_CATEGORY:
+               if (ieee80211_vif_is_mesh(&sdata->vif))
+                       return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+               break;
        }
 
        /*
index 56422d8..fb12cec 100644
@@ -93,12 +93,18 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
 
-       sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+       sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+                                   rcu_read_lock_held() ||
+                                   lockdep_is_held(&local->sta_lock) ||
+                                   lockdep_is_held(&local->sta_mtx));
        while (sta) {
                if (sta->sdata == sdata &&
                    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
                        break;
-               sta = rcu_dereference(sta->hnext);
+               sta = rcu_dereference_check(sta->hnext,
+                                           rcu_read_lock_held() ||
+                                           lockdep_is_held(&local->sta_lock) ||
+                                           lockdep_is_held(&local->sta_mtx));
        }
        return sta;
 }
@@ -113,13 +119,19 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
 
-       sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+       sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+                                   rcu_read_lock_held() ||
+                                   lockdep_is_held(&local->sta_lock) ||
+                                   lockdep_is_held(&local->sta_mtx));
        while (sta) {
                if ((sta->sdata == sdata ||
                     sta->sdata->bss == sdata->bss) &&
                    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
                        break;
-               sta = rcu_dereference(sta->hnext);
+               sta = rcu_dereference_check(sta->hnext,
+                                           rcu_read_lock_held() ||
+                                           lockdep_is_held(&local->sta_lock) ||
+                                           lockdep_is_held(&local->sta_mtx));
        }
        return sta;
 }
index e56f711..cbddd0c 100644
@@ -83,6 +83,41 @@ struct compat_x25_subscrip_struct {
 };
 #endif
 
+
+int x25_parse_address_block(struct sk_buff *skb,
+               struct x25_address *called_addr,
+               struct x25_address *calling_addr)
+{
+       unsigned char len;
+       int needed;
+       int rc;
+
+       if (skb->len < 1) {
+               /* packet has no address block */
+               rc = 0;
+               goto empty;
+       }
+
+       len = *skb->data;
+       needed = 1 + (len >> 4) + (len & 0x0f);
+
+       if (skb->len < needed) {
+               /* packet is too short to hold the addresses it claims
+                  to hold */
+               rc = -1;
+               goto empty;
+       }
+
+       return x25_addr_ntoa(skb->data, called_addr, calling_addr);
+
+empty:
+       *called_addr->x25_addr = 0;
+       *calling_addr->x25_addr = 0;
+
+       return rc;
+}
+
+
 int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
                  struct x25_address *calling_addr)
 {
@@ -554,7 +589,8 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
        x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
        x25->facilities.pacsize_in  = X25_DEFAULT_PACKET_SIZE;
        x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
-       x25->facilities.throughput  = X25_DEFAULT_THROUGHPUT;
+       x25->facilities.throughput  = 0;        /* by default don't negotiate
+                                                  throughput */
        x25->facilities.reverse     = X25_DEFAULT_REVERSE;
        x25->dte_facilities.calling_len = 0;
        x25->dte_facilities.called_len = 0;
@@ -922,16 +958,26 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
        /*
         *      Extract the X.25 addresses and convert them to ASCII strings,
         *      and remove them.
+        *
+        *      Address block is mandatory in call request packets
         */
-       addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
+       addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
+       if (addr_len <= 0)
+               goto out_clear_request;
        skb_pull(skb, addr_len);
 
        /*
         *      Get the length of the facilities, skip past them for the moment
         *      get the call user data because this is needed to determine
         *      the correct listener
+        *
+        *      Facilities length is mandatory in call request packets
         */
+       if (skb->len < 1)
+               goto out_clear_request;
        len = skb->data[0] + 1;
+       if (skb->len < len)
+               goto out_clear_request;
        skb_pull(skb,len);
 
        /*
@@ -1415,9 +1461,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        if (facilities.winsize_in < 1 ||
                            facilities.winsize_in > 127)
                                break;
-                       if (facilities.throughput < 0x03 ||
-                           facilities.throughput > 0xDD)
-                               break;
+                       if (facilities.throughput) {
+                               int out = facilities.throughput & 0xf0;
+                               int in  = facilities.throughput & 0x0f;
+                               if (!out)
+                                       facilities.throughput |=
+                                               X25_DEFAULT_THROUGHPUT << 4;
+                               else if (out < 0x30 || out > 0xD0)
+                                       break;
+                               if (!in)
+                                       facilities.throughput |=
+                                               X25_DEFAULT_THROUGHPUT;
+                               else if (in < 0x03 || in > 0x0D)
+                                       break;
+                       }
                        if (facilities.reverse &&
                                (facilities.reverse & 0x81) != 0x81)
                                break;
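
The x25_ioctl() facilities check above treats the throughput byte as two packed nibbles: the low nibble is the inbound class and the high nibble the outbound class, each valid from 0x03 to 0x0D (0x30 to 0xD0 once shifted), with 0 meaning "fill in the default". A stand-alone version of that validation follows; the X25_DEFAULT_THROUGHPUT value is assumed here purely for the demo:

#include <stdio.h>

#define X25_DEFAULT_THROUGHPUT 0x0A     /* assumed default class, demo only */

/* returns the normalised throughput byte, or -1 if either nibble is out
 * of range; 0 stays 0, meaning "do not negotiate throughput at all" */
static int check_throughput(unsigned char throughput)
{
        int out = throughput & 0xf0;
        int in  = throughput & 0x0f;

        if (!throughput)
                return 0;
        if (!out)
                throughput |= X25_DEFAULT_THROUGHPUT << 4;
        else if (out < 0x30 || out > 0xD0)
                return -1;
        if (!in)
                throughput |= X25_DEFAULT_THROUGHPUT;
        else if (in < 0x03 || in > 0x0D)
                return -1;
        return throughput;
}

int main(void)
{
        printf("0x00 -> %#x\n", check_throughput(0x00));  /* left alone */
        printf("0x0A -> %#x\n", check_throughput(0x0A));  /* outbound defaulted */
        printf("0xA0 -> %#x\n", check_throughput(0xA0));  /* inbound defaulted */
        printf("0xEE -> %d\n",  check_throughput(0xEE));  /* rejected */
        return 0;
}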
index a21f664..771bab0 100644
@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
 {
        unsigned char *p = skb->data;
-       unsigned int len = *p++;
+       unsigned int len;
 
        *vc_fac_mask = 0;
 
@@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
        memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
        memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
 
+       if (skb->len < 1)
+               return 0;
+
+       len = *p++;
+
+       if (len >= skb->len)
+               return -1;
+
        while (len > 0) {
                switch (*p & X25_FAC_CLASS_MASK) {
                case X25_FAC_CLASS_A:
@@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
        memcpy(new, ours, sizeof(*new));
 
        len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
+       if (len < 0)
+               return len;
 
        /*
         *      They want reverse charging, we won't accept it.
@@ -259,9 +269,18 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
        new->reverse = theirs.reverse;
 
        if (theirs.throughput) {
-               if (theirs.throughput < ours->throughput) {
-                       SOCK_DEBUG(sk, "X.25: throughput negotiated down\n");
-                       new->throughput = theirs.throughput;
+               int theirs_in =  theirs.throughput & 0x0f;
+               int theirs_out = theirs.throughput & 0xf0;
+               int ours_in  = ours->throughput & 0x0f;
+               int ours_out = ours->throughput & 0xf0;
+               if (!ours_in || theirs_in < ours_in) {
+                       SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
+                       new->throughput = (new->throughput & 0xf0) | theirs_in;
+               }
+               if (!ours_out || theirs_out < ours_out) {
+                       SOCK_DEBUG(sk,
+                               "X.25: outbound throughput negotiated\n");
+                       new->throughput = (new->throughput & 0x0f) | theirs_out;
                }
        }
 
index a31b3b9..372ac22 100644
@@ -90,6 +90,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
 static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
 {
        struct x25_address source_addr, dest_addr;
+       int len;
 
        switch (frametype) {
                case X25_CALL_ACCEPTED: {
@@ -107,11 +108,17 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
                         *      Parse the data in the frame.
                         */
                        skb_pull(skb, X25_STD_MIN_LEN);
-                       skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
-                       skb_pull(skb,
-                                x25_parse_facilities(skb, &x25->facilities,
+
+                       len = x25_parse_address_block(skb, &source_addr,
+                                               &dest_addr);
+                       if (len > 0)
+                               skb_pull(skb, len);
+
+                       len = x25_parse_facilities(skb, &x25->facilities,
                                                &x25->dte_facilities,
-                                               &x25->vc_facil_mask));
+                                               &x25->vc_facil_mask);
+                       if (len > 0)
+                               skb_pull(skb, len);
                        /*
                         *      Copy any Call User Data.
                         */
index 8da6a84..cd4f734 100644
@@ -82,7 +82,7 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified
 void avtab_cache_init(void);
 void avtab_cache_destroy(void);
 
-#define MAX_AVTAB_HASH_BITS 13
+#define MAX_AVTAB_HASH_BITS 11
 #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
 #define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
 #define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS