Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
author	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Apr 2011 15:01:13 +0000 (08:01 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Apr 2011 15:01:13 +0000 (08:01 -0700)
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: only force kblockd unplugging from the schedule() path
  block: cleanup the block plug helper functions
  block, blk-sysfs: Use the variable directly instead of a function call
  block: move queue run on unplug to kblockd
  block: kill queue_sync_plugs()
  block: readd plug trace event
  block: add callback function for unplug notification
  block: add comment on why we save and disable interrupts in flush_plug_list()
  block: fixup block IO unplug trace call
  block: remove block_unplug_timer() trace point
  block: splice plug list to local context

212 files changed:
Documentation/feature-removal-schedule.txt
MAINTAINERS
Makefile
arch/avr32/include/asm/setup.h
arch/avr32/kernel/setup.c
arch/avr32/kernel/traps.c
arch/avr32/mach-at32ap/clock.c
arch/avr32/mach-at32ap/extint.c
arch/avr32/mach-at32ap/pio.c
arch/avr32/mach-at32ap/pm-at32ap700x.S
arch/blackfin/include/asm/system.h
arch/blackfin/kernel/gptimers.c
arch/blackfin/kernel/time-ts.c
arch/blackfin/mach-common/smp.c
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/entry_mm.S
arch/m68k/kernel/syscalltable.S
arch/powerpc/kernel/ibmebus.c
arch/powerpc/sysdev/fsl_rio.c
arch/um/Kconfig.x86
arch/um/include/asm/bug.h [new file with mode: 0644]
arch/x86/xen/Kconfig
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
drivers/amba/bus.c
drivers/base/platform.c
drivers/base/power/main.c
drivers/dma/fsldma.c
drivers/gpio/ml_ioh_gpio.c
drivers/gpio/pca953x.c
drivers/gpio/pch_gpio.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_perf.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv50_crtc.c
drivers/gpu/drm/nouveau/nv50_evo.c
drivers/gpu/drm/nouveau/nv50_graph.c
drivers/gpu/drm/nouveau/nvc0_vm.c
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/stub/Kconfig
drivers/leds/leds-regulator.c
drivers/mfd/mfd-core.c
drivers/misc/sgi-gru/grufile.c
drivers/net/benet/be.h
drivers/net/benet/be_main.c
drivers/net/bna/bfa_ioc.c
drivers/net/can/mcp251x.c
drivers/net/mlx4/en_rx.c
drivers/net/mlx4/main.c
drivers/net/mlx4/mlx4.h
drivers/net/mlx4/sense.c
drivers/net/pppoe.c
drivers/net/smsc911x.c
drivers/net/usb/smsc95xx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/dma.h
drivers/net/wireless/iwlwifi/iwl-eeprom.h
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rtlwifi/efuse.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/wl12xx/sdio.c
drivers/net/wireless/wl12xx/spi.c
drivers/net/wireless/wl12xx/testmode.c
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/net/wireless/zd1211rw/zd_usb.h
drivers/pci/pci-driver.c
drivers/pci/setup-bus.c
drivers/platform/x86/Kconfig
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/eeepc-wmi.c
drivers/platform/x86/intel_pmic_gpio.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/rapidio/rio.c
drivers/rapidio/switches/idt_gen2.c
drivers/rtc/rtc-mc13xxx.c
drivers/spi/amba-pl022.c
drivers/spi/dw_spi.c
drivers/spi/pxa2xx_spi.c
drivers/spi/spi_bfin5xx.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/samsung-laptop/Kconfig [deleted file]
drivers/staging/samsung-laptop/Makefile [deleted file]
drivers/staging/samsung-laptop/TODO [deleted file]
drivers/staging/samsung-laptop/samsung-laptop.c [deleted file]
drivers/xen/events.c
drivers/xen/manage.c
fs/binfmt_elf.c
fs/cifs/README
fs/cifs/cache.c
fs/cifs/cifs_debug.c
fs/cifs/cifs_spnego.c
fs/cifs/cifs_unicode.c
fs/cifs/cifs_unicode.h
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/link.c
fs/cifs/misc.c
fs/cifs/sess.c
fs/dcache.c
fs/ext4/ext4_jbd2.h
fs/ext4/fsync.c
fs/ext4/inode.c
fs/ext4/super.c
fs/fhandle.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/namespace.c
fs/nfs/write.c
fs/nfsd/lockd.c
fs/nfsd/nfs4state.c
fs/partitions/ldm.c
fs/ramfs/file-nommu.c
fs/ubifs/debug.h
fs/ubifs/file.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_message.c
fs/xfs/linux-2.6/xfs_message.h
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/linux-2.6/xfs_sync.h
fs/xfs/quota/xfs_qm.c
fs/xfs/quota/xfs_qm.h
fs/xfs/quota/xfs_qm_syscalls.c
fs/xfs/xfs_alloc.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_mount.h
fs/xfs/xfs_trans_ail.c
fs/xfs/xfs_trans_priv.h
include/linux/can/platform/mcp251x.h
include/linux/memcontrol.h
include/linux/mfd/core.h
include/linux/netfilter.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/ipset/ip_set_ahash.h
include/linux/platform_device.h
include/linux/rio.h
include/linux/rio_ids.h
include/linux/sched.h
include/linux/suspend.h
include/linux/vmstat.h
include/net/ip_vs.h
include/net/mac80211.h
include/net/route.h
kernel/power/Kconfig
kernel/sched.c
lib/kstrtox.c
lib/test-kstrtox.c
mm/huge_memory.c
mm/memory.c
mm/memory_hotplug.c
mm/mlock.c
mm/mmap.c
mm/oom_kill.c
mm/page_alloc.c
mm/shmem.c
mm/vmscan.c
mm/vmstat.c
net/ceph/osd_client.c
net/dsa/mv88e6131.c
net/dsa/mv88e6xxx.h
net/ipv4/netfilter.c
net/ipv4/route.c
net/ipv4/xfrm4_policy.c
net/ipv6/netfilter.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/rx.c
net/netfilter/Kconfig
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_h323_asn1.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_addrtype.c
net/netfilter/xt_conntrack.c

index 274b32d..492e81d 100644 (file)
@@ -387,26 +387,6 @@ Who:       Tejun Heo <tj@kernel.org>
 
 ----------------------------
 
-What:  Support for lcd_switch and display_get in asus-laptop driver
-When:  March 2010
-Why:   These two features use non-standard interfaces. There are the
-       only features that really need multiple path to guess what's
-       the right method name on a specific laptop.
-
-       Removing them will allow to remove a lot of code an significantly
-       clean the drivers.
-
-       This will affect the backlight code which won't be able to know
-       if the backlight is on or off. The platform display file will also be
-       write only (like the one in eeepc-laptop).
-
-       This should'nt affect a lot of user because they usually know
-       when their display is on or off.
-
-Who:   Corentin Chary <corentin.chary@gmail.com>
-
-----------------------------
-
 What:  sysfs-class-rfkill state file
 When:  Feb 2014
 Files: net/rfkill/core.c
index 6b4b9cd..ec36003 100644 (file)
@@ -184,10 +184,9 @@ F: Documentation/filesystems/9p.txt
 F:     fs/9p/
 
 A2232 SERIAL BOARD DRIVER
-M:     Enver Haase <A2232@gmx.net>
 L:     linux-m68k@lists.linux-m68k.org
-S:     Maintained
-F:     drivers/char/ser_a2232*
+S:     Orphan
+F:     drivers/staging/generic_serial/ser_a2232*
 
 AACRAID SCSI RAID DRIVER
 M:     Adaptec OEM Raid Solutions <aacraid@adaptec.com>
@@ -877,6 +876,13 @@ F: arch/arm/mach-mv78xx0/
 F:     arch/arm/mach-orion5x/
 F:     arch/arm/plat-orion/
 
+ARM/Orion SoC/Technologic Systems TS-78xx platform support
+M:     Alexander Clouter <alex@digriz.org.uk>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W:     http://www.digriz.org.uk/ts78xx/kernel
+S:     Maintained
+F:     arch/arm/mach-orion5x/ts78xx-*
+
 ARM/MIOA701 MACHINE SUPPORT
 M:     Robert Jarzmik <robert.jarzmik@free.fr>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1063,7 +1069,7 @@ F:        arch/arm/mach-shmobile/
 F:     drivers/sh/
 
 ARM/TELECHIPS ARM ARCHITECTURE
-M:     "Hans J. Koch" <hjk@linutronix.de>
+M:     "Hans J. Koch" <hjk@hansjkoch.de>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/plat-tcc/
@@ -1823,11 +1829,10 @@ S:      Maintained
 F:     drivers/platform/x86/compal-laptop.c
 
 COMPUTONE INTELLIPORT MULTIPORT CARD
-M:     "Michael H. Warfield" <mhw@wittsend.com>
 W:     http://www.wittsend.com/computone.html
-S:     Maintained
+S:     Orphan
 F:     Documentation/serial/computone.txt
-F:     drivers/char/ip2/
+F:     drivers/staging/tty/ip2/
 
 CONEXANT ACCESSRUNNER USB DRIVER
 M:     Simon Arlott <cxacru@fire.lp0.eu>
@@ -2010,7 +2015,7 @@ F:        drivers/net/wan/cycx*
 CYCLADES ASYNC MUX DRIVER
 W:     http://www.cyclades.com/
 S:     Orphan
-F:     drivers/char/cyclades.c
+F:     drivers/tty/cyclades.c
 F:     include/linux/cyclades.h
 
 CYCLADES PC300 DRIVER
@@ -2124,8 +2129,8 @@ L:        Eng.Linux@digi.com
 W:     http://www.digi.com
 S:     Orphan
 F:     Documentation/serial/digiepca.txt
-F:     drivers/char/epca*
-F:     drivers/char/digi*
+F:     drivers/staging/tty/epca*
+F:     drivers/staging/tty/digi*
 
 DIOLAN U2C-12 I2C DRIVER
 M:     Guenter Roeck <guenter.roeck@ericsson.com>
@@ -4077,7 +4082,7 @@ F:        drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
 
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
-M:     "Hans J. Koch" <hjk@linutronix.de>
+M:     "Hans J. Koch" <hjk@hansjkoch.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/max6650
@@ -4192,7 +4197,7 @@ MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 M:     Jiri Slaby <jirislaby@gmail.com>
 S:     Maintained
 F:     Documentation/serial/moxa-smartio
-F:     drivers/char/mxser.*
+F:     drivers/tty/mxser.*
 
 MSI LAPTOP SUPPORT
 M:     "Lee, Chun-Yi" <jlee@novell.com>
@@ -4234,7 +4239,7 @@ F:        sound/oss/msnd*
 
 MULTITECH MULTIPORT CARD (ISICOM)
 S:     Orphan
-F:     drivers/char/isicom.c
+F:     drivers/tty/isicom.c
 F:     include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
@@ -5273,14 +5278,14 @@ F:      drivers/memstick/host/r592.*
 RISCOM8 DRIVER
 S:     Orphan
 F:     Documentation/serial/riscom8.txt
-F:     drivers/char/riscom8*
+F:     drivers/staging/tty/riscom8*
 
 ROCKETPORT DRIVER
 P:     Comtrol Corp.
 W:     http://www.comtrol.com
 S:     Maintained
 F:     Documentation/serial/rocket.txt
-F:     drivers/char/rocket*
+F:     drivers/tty/rocket*
 
 ROSE NETWORK LAYER
 M:     Ralf Baechle <ralf@linux-mips.org>
@@ -5916,10 +5921,9 @@ F:       arch/arm/mach-spear6xx/spear600.c
 F:     arch/arm/mach-spear6xx/spear600_evb.c
 
 SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
-M:     Roger Wolff <R.E.Wolff@BitWizard.nl>
-S:     Supported
+S:     Orphan
 F:     Documentation/serial/specialix.txt
-F:     drivers/char/specialix*
+F:     drivers/staging/tty/specialix*
 
 SPI SUBSYSTEM
 M:     David Brownell <dbrownell@users.sourceforge.net>
@@ -5964,7 +5968,6 @@ F:        arch/alpha/kernel/srm_env.c
 
 STABLE BRANCH
 M:     Greg Kroah-Hartman <greg@kroah.com>
-M:     Chris Wright <chrisw@sous-sol.org>
 L:     stable@kernel.org
 S:     Maintained
 
@@ -6248,7 +6251,8 @@ M:        Greg Ungerer <gerg@uclinux.org>
 W:     http://www.uclinux.org/
 L:     uclinux-dev@uclinux.org  (subscribers-only)
 S:     Maintained
-F:     arch/m68knommu/
+F:     arch/m68k/*/*_no.*
+F:     arch/m68k/include/asm/*_no.*
 
 UCLINUX FOR RENESAS H8/300 (H8300)
 M:     Yoshinori Sato <ysato@users.sourceforge.jp>
@@ -6618,7 +6622,7 @@ F:        fs/hostfs/
 F:     fs/hppfs/
 
 USERSPACE I/O (UIO)
-M:     "Hans J. Koch" <hjk@linutronix.de>
+M:     "Hans J. Koch" <hjk@hansjkoch.de>
 M:     Greg Kroah-Hartman <gregkh@suse.de>
 S:     Maintained
 F:     Documentation/DocBook/uio-howto.tmpl
@@ -6916,6 +6920,13 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:     Maintained
 F:     drivers/platform/x86
 
+XEN NETWORK BACKEND DRIVER
+M:     Ian Campbell <ian.campbell@citrix.com>
+L:     xen-devel@lists.xensource.com (moderated for non-subscribers)
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/xen-netback/*
+
 XEN PCI SUBSYSTEM
 M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:     xen-devel@lists.xensource.com (moderated for non-subscribers)
index 8392b64..322e733 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 39
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
index ff5b7cf..160543d 100644 (file)
@@ -94,6 +94,13 @@ struct tag_ethernet {
 
 #define ETH_INVALID_PHY        0xff
 
+/* board information */
+#define ATAG_BOARDINFO 0x54410008
+
+struct tag_boardinfo {
+       u32     board_number;
+};
+
 struct tag {
        struct tag_header hdr;
        union {
@@ -102,6 +109,7 @@ struct tag {
                struct tag_cmdline cmdline;
                struct tag_clock clock;
                struct tag_ethernet ethernet;
+               struct tag_boardinfo boardinfo;
        } u;
 };
 
@@ -128,6 +136,7 @@ extern struct tag *bootloader_tags;
 
 extern resource_size_t fbmem_start;
 extern resource_size_t fbmem_size;
+extern u32 board_number;
 
 void setup_processor(void);
 
index 5c70839..bb0974c 100644 (file)
@@ -390,6 +390,21 @@ static int __init parse_tag_clock(struct tag *tag)
 }
 __tagtable(ATAG_CLOCK, parse_tag_clock);
 
+/*
+ * The board_number correspond to the bd->bi_board_number in U-Boot. This
+ * parameter is only available during initialisation and can be used in some
+ * kind of board identification.
+ */
+u32 __initdata board_number;
+
+static int __init parse_tag_boardinfo(struct tag *tag)
+{
+       board_number = tag->u.boardinfo.board_number;
+
+       return 0;
+}
+__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo);
+
 /*
  * Scan the tag table for this tag, and call its parse function. The
  * tag table is built by the linker from all the __tagtable
index b91b204..7aa2575 100644 (file)
@@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code,
        info.si_code = code;
        info.si_addr = (void __user *)addr;
        force_sig_info(signr, &info, current);
-
-       /*
-        * Init gets no signals that it doesn't have a handler for.
-        * That's all very well, but if it has caused a synchronous
-        * exception and we ignore the resulting signal, it will just
-        * generate the same exception over and over again and we get
-        * nowhere.  Better to kill it and let the kernel panic.
-        */
-       if (is_global_init(current)) {
-               __sighandler_t handler;
-
-               spin_lock_irq(&current->sighand->siglock);
-               handler = current->sighand->action[signr-1].sa.sa_handler;
-               spin_unlock_irq(&current->sighand->siglock);
-               if (handler == SIG_DFL) {
-                       /* init has generated a synchronous exception
-                          and it doesn't have a handler for the signal */
-                       printk(KERN_CRIT "init has generated signal %ld "
-                              "but has no handler for it\n", signr);
-                       do_exit(signr);
-               }
-       }
 }
 
 asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
index 442f08c..86925fd 100644 (file)
@@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk)
        spin_unlock(&clk_list_lock);
 }
 
-struct clk *clk_get(struct device *dev, const char *id)
+static struct clk *__clk_get(struct device *dev, const char *id)
 {
        struct clk *clk;
 
-       spin_lock(&clk_list_lock);
-
        list_for_each_entry(clk, &at32_clock_list, list) {
                if (clk->dev == dev && strcmp(id, clk->name) == 0) {
-                       spin_unlock(&clk_list_lock);
                        return clk;
                }
        }
 
-       spin_unlock(&clk_list_lock);
        return ERR_PTR(-ENOENT);
 }
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+       struct clk *clk;
+
+       spin_lock(&clk_list_lock);
+       clk = __clk_get(dev, id);
+       spin_unlock(&clk_list_lock);
+
+       return clk;
+}
+
 EXPORT_SYMBOL(clk_get);
 
 void clk_put(struct clk *clk)
@@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused)
        spin_lock(&clk_list_lock);
 
        /* show clock tree as derived from the three oscillators */
-       clk = clk_get(NULL, "osc32k");
+       clk = __clk_get(NULL, "osc32k");
        dump_clock(clk, &r);
        clk_put(clk);
 
-       clk = clk_get(NULL, "osc0");
+       clk = __clk_get(NULL, "osc0");
        dump_clock(clk, &r);
        clk_put(clk);
 
-       clk = clk_get(NULL, "osc1");
+       clk = __clk_get(NULL, "osc1");
        dump_clock(clk, &r);
        clk_put(clk);
 
index 47ba4b9..fbc2aea 100644 (file)
@@ -61,34 +61,34 @@ struct eic {
 static struct eic *nmi_eic;
 static bool nmi_enabled;
 
-static void eic_ack_irq(struct irq_chip *d)
+static void eic_ack_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_mask_irq(struct irq_chip *d)
+static void eic_mask_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_mask_ack_irq(struct irq_chip *d)
+static void eic_mask_ack_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
        eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_unmask_irq(struct irq_chip *d)
+static void eic_unmask_irq(struct irq_data *d)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        eic_writel(eic, IER, 1 << (d->irq - eic->first_irq));
 }
 
-static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type)
+static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-       struct eic *eic = irq_data_get_irq_chip_data(data);
+       struct eic *eic = irq_data_get_irq_chip_data(d);
        unsigned int irq = d->irq;
        unsigned int i = irq - eic->first_irq;
        u32 mode, edge, level;
@@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int_irq = platform_get_irq(pdev, 0);
-       if (!regs || !int_irq) {
+       if (!regs || (int)int_irq <= 0) {
                dev_dbg(&pdev->dev, "missing regs and/or irq resource\n");
                return -ENXIO;
        }
index f308e1d..2e0aa85 100644 (file)
@@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d)
        pio_writel(pio, IDR, 1 << (gpio & 0x1f));
 }
 
-static void gpio_irq_unmask(struct irq_data *d))
+static void gpio_irq_unmask(struct irq_data *d)
 {
        unsigned                gpio = irq_to_gpio(d->irq);
        struct pio_device       *pio = &pio_dev[gpio >> 5];
index 17503b0..f868f4c 100644 (file)
@@ -53,7 +53,7 @@ cpu_enter_idle:
        st.w    r8[TI_flags], r9
        unmask_interrupts
        sleep   CPU_SLEEP_IDLE
-       .size   cpu_idle_sleep, . - cpu_idle_sleep
+       .size   cpu_enter_idle, . - cpu_enter_idle
 
        /*
         * Common return path for PM functions that don't run from
index 19e2c7c..44bd0cc 100644 (file)
  * Force strict CPU ordering.
  */
 #define nop()  __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb()   __asm__ __volatile__ (""   : : : "memory")
-#define rmb()  __asm__ __volatile__ (""   : : : "memory")
-#define wmb()  __asm__ __volatile__ (""   : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends()                 do { } while(0)
+#define smp_mb()  mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends()     read_barrier_depends()
 
 #ifdef CONFIG_SMP
 asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
                                        unsigned long new, unsigned long old);
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb()      do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb()     do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb()     do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends()     do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb()  do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb() do { barrier(); smp_check_barrier(); } while (0)
+# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends()        do { barrier(); smp_check_barrier(); } while (0)
 #else
-# define smp_mb()      barrier()
-# define smp_rmb()     barrier()
-# define smp_wmb()     barrier()
-#define smp_read_barrier_depends()     barrier()
+# define mb()  barrier()
+# define rmb() barrier()
+# define wmb() barrier()
+# define read_barrier_depends()        do { } while (0)
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #else /* !CONFIG_SMP */
 
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_read_barrier_depends()     do { } while(0)
+#define mb()   barrier()
+#define rmb()  barrier()
+#define wmb()  barrier()
+#define read_barrier_depends() do { } while (0)
 
 struct __xchg_dummy {
        unsigned long a[100];
index cdbe075..8b81dc0 100644 (file)
@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
        _disable_gptimers(mask);
        for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
                if (mask & (1 << i))
-                       group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
+                       group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
        SSYNC();
 }
 EXPORT_SYMBOL(disable_gptimers);
index 8c9a43d..cdb4beb 100644 (file)
@@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = dev_id;
        smp_mb();
-       evt->event_handler(evt);
+       /*
+        * We want to ACK before we handle so that we can handle smaller timer
+        * intervals.  This way if the timer expires again while we're handling
+        * things, we're more likely to see that 2nd int rather than swallowing
+        * it by ACKing the int at the end of this handler.
+        */
        bfin_gptmr0_ack();
+       evt->event_handler(evt);
        return IRQ_HANDLED;
 }
 
index 6e17a26..8bce5ed 100644 (file)
@@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info)
        struct blackfin_flush_data *fdata = info;
 
        /* Invalidate the memory holding the bounds of the flushed region. */
-       invalidate_dcache_range((unsigned long)fdata,
-               (unsigned long)fdata + sizeof(*fdata));
+       blackfin_dcache_invalidate_range((unsigned long)fdata,
+                                        (unsigned long)fdata + sizeof(*fdata));
+
+       /* Make sure all write buffers in the data side of the core
+        * are flushed before trying to invalidate the icache.  This
+        * needs to be after the data flush and before the icache
+        * flush so that the SSYNC does the right thing in preventing
+        * the instruction prefetcher from hitting things in cached
+        * memory at the wrong time -- it runs much further ahead than
+        * the pipeline.
+        */
+       SSYNC();
 
-       flush_icache_range(fdata->start, fdata->end);
+       /* ipi_flaush_icache is invoked by generic flush_icache_range,
+        * so call blackfin arch icache flush directly here.
+        */
+       blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
 static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
index 26d851d..29e1790 100644 (file)
 #define __NR_fanotify_init     337
 #define __NR_fanotify_mark     338
 #define __NR_prlimit64         339
+#define __NR_name_to_handle_at 340
+#define __NR_open_by_handle_at 341
+#define __NR_clock_adjtime     342
+#define __NR_syncfs            343
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            340
+#define NR_syscalls            344
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index 1559dea..1359ee6 100644 (file)
@@ -750,4 +750,8 @@ sys_call_table:
        .long sys_fanotify_init
        .long sys_fanotify_mark
        .long sys_prlimit64
+       .long sys_name_to_handle_at     /* 340 */
+       .long sys_open_by_handle_at
+       .long sys_clock_adjtime
+       .long sys_syncfs
 
index 79b1ed1..9b8393d 100644 (file)
@@ -358,6 +358,10 @@ ENTRY(sys_call_table)
        .long sys_fanotify_init
        .long sys_fanotify_mark
        .long sys_prlimit64
+       .long sys_name_to_handle_at     /* 340 */
+       .long sys_open_by_handle_at
+       .long sys_clock_adjtime
+       .long sys_syncfs
 
        .rept NR_syscalls-(.-sys_call_table)/4
                .long sys_ni_syscall
index c00d4ca..28581f1 100644 (file)
@@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int ibmebus_bus_pm_freeze(struct device *dev)
 {
@@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
        return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define ibmebus_bus_pm_freeze          NULL
 #define ibmebus_bus_pm_thaw            NULL
@@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
 #define ibmebus_bus_pm_poweroff_noirq  NULL
 #define ibmebus_bus_pm_restore_noirq   NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
        .prepare = ibmebus_bus_pm_prepare,
index 14232d5..4979853 100644 (file)
@@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev)
        port->ops = ops;
        port->priv = priv;
        port->phys_efptr = 0x100;
-       rio_register_mport(port);
 
        priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
        rio_regs_win = priv->regs_win;
@@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev)
        dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
                        port->sys_size ? 65536 : 256);
 
+       if (rio_register_mport(port))
+               goto err;
+
        if (port->host_deviceid >= 0)
                out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
                        RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
index 02fb017..a9da516 100644 (file)
@@ -4,6 +4,10 @@ menu "UML-specific options"
 
 menu "Host processor type and features"
 
+config CMPXCHG_LOCAL
+       bool
+       default n
+
 source "arch/x86/Kconfig.cpu"
 
 endmenu
diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h
new file mode 100644 (file)
index 0000000..9e33b86
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __UM_BUG_H
+#define __UM_BUG_H
+
+#include <asm-generic/bug.h>
+
+#endif
index 1c7121b..5cc821c 100644 (file)
@@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY
 config XEN_SAVE_RESTORE
        bool
        depends on XEN
+       select HIBERNATE_CALLBACKS
        default y
 
 config XEN_DEBUG_FS
index 49dbd78..e3c6a06 100644 (file)
@@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
 static __init void xen_init_cpuid_mask(void)
 {
        unsigned int ax, bx, cx, dx;
+       unsigned int xsave_mask;
 
        cpuid_leaf1_edx_mask =
                ~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
@@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void)
                cpuid_leaf1_edx_mask &=
                        ~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
                          (1 << X86_FEATURE_ACPI));  /* disable ACPI */
-
        ax = 1;
-       cx = 0;
        xen_cpuid(&ax, &bx, &cx, &dx);
 
-       /* cpuid claims we support xsave; try enabling it to see what happens */
-       if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
-               unsigned long cr4;
-
-               set_in_cr4(X86_CR4_OSXSAVE);
-               
-               cr4 = read_cr4();
+       xsave_mask =
+               (1 << (X86_FEATURE_XSAVE % 32)) |
+               (1 << (X86_FEATURE_OSXSAVE % 32));
 
-               if ((cr4 & X86_CR4_OSXSAVE) == 0)
-                       cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
-
-               clear_in_cr4(X86_CR4_OSXSAVE);
-       }
+       /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
+       if ((cx & xsave_mask) != xsave_mask)
+               cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
 }
 
 static void xen_set_debugreg(int reg, unsigned long val)
index c82df6c..a991b57 100644 (file)
@@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte)
        if (io_page &&
            (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
                other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
-               WARN(addr != other_addr,
+               WARN_ONCE(addr != other_addr,
                        "0x%lx is using VM_IO, but it is 0x%lx!\n",
                        (unsigned long)addr, (unsigned long)other_addr);
        } else {
                pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
                other_addr = (_pte.pte & PTE_PFN_MASK);
-               WARN((addr == other_addr) && (!io_page) && (!iomap_set),
+               WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
                        "0x%lx is missing VM_IO (and wasn't fixed)!\n",
                        (unsigned long)addr);
        }
index 8210405..7025593 100644 (file)
@@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int amba_pm_freeze(struct device *dev)
 {
@@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev)
        return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define amba_pm_freeze         NULL
 #define amba_pm_thaw           NULL
@@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev)
 #define amba_pm_poweroff_noirq NULL
 #define amba_pm_restore_noirq  NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM
 
index f051cff..9e0e4fc 100644 (file)
@@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev)
 
        of_device_node_put(&pa->pdev.dev);
        kfree(pa->pdev.dev.platform_data);
+       kfree(pa->pdev.mfd_cell);
        kfree(pa->pdev.resource);
        kfree(pa);
 }
@@ -771,7 +772,7 @@ int __weak platform_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int platform_pm_freeze(struct device *dev)
 {
@@ -909,7 +910,7 @@ static int platform_pm_restore_noirq(struct device *dev)
        return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define platform_pm_freeze             NULL
 #define platform_pm_thaw               NULL
@@ -920,7 +921,7 @@ static int platform_pm_restore_noirq(struct device *dev)
 #define platform_pm_poweroff_noirq     NULL
 #define platform_pm_restore_noirq      NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM_RUNTIME
 
index 052dc53..fbc5b6e 100644 (file)
@@ -233,7 +233,7 @@ static int pm_op(struct device *dev,
                }
                break;
 #endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
@@ -260,7 +260,7 @@ static int pm_op(struct device *dev,
                        suspend_report_result(ops->restore, error);
                }
                break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }
@@ -308,7 +308,7 @@ static int pm_noirq_op(struct device *dev,
                }
                break;
 #endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
@@ -335,7 +335,7 @@ static int pm_noirq_op(struct device *dev,
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }
index 6b39675..8a78154 100644 (file)
@@ -1448,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = {
        {}
 };
 
-static struct of_platform_driver fsldma_of_driver = {
+static struct platform_driver fsldma_of_driver = {
        .driver = {
                .name = "fsl-elo-dma",
                .owner = THIS_MODULE,
index 7f6f01a..0a775f7 100644 (file)
@@ -116,6 +116,7 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
                reg_val |= (1 << nr);
        else
                reg_val &= ~(1 << nr);
+       iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
 
        mutex_unlock(&chip->lock);
 
index 583e925..7630ab7 100644 (file)
@@ -558,7 +558,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
 
        ret = gpiochip_add(&chip->gpio_chip);
        if (ret)
-               goto out_failed;
+               goto out_failed_irq;
 
        if (pdata->setup) {
                ret = pdata->setup(client, chip->gpio_chip.base,
@@ -570,8 +570,9 @@ static int __devinit pca953x_probe(struct i2c_client *client,
        i2c_set_clientdata(client, chip);
        return 0;
 
-out_failed:
+out_failed_irq:
        pca953x_irq_teardown(chip);
+out_failed:
        kfree(chip->dyn_pdata);
        kfree(chip);
        return ret;
index 2c6af87..f970a5f 100644 (file)
@@ -105,6 +105,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
                reg_val |= (1 << nr);
        else
                reg_val &= ~(1 << nr);
+       iowrite32(reg_val, &chip->reg->po);
 
        mutex_unlock(&chip->lock);
 
index a6feb78..c58f691 100644 (file)
@@ -96,6 +96,7 @@ config DRM_I915
        # i915 depends on ACPI_VIDEO when ACPI is enabled
        # but for select to work, need to select ACPI_VIDEO's dependencies, ick
        select BACKLIGHT_CLASS_DEVICE if ACPI
+       select VIDEO_OUTPUT_CONTROL if ACPI
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        select ACPI_BUTTON if ACPI
index 8314a49..90aef64 100644 (file)
@@ -269,7 +269,7 @@ struct init_tbl_entry {
        int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
 };
 
-static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
+static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *);
 
 #define MACRO_INDEX_SIZE       2
 #define MACRO_SIZE             8
@@ -2010,6 +2010,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
        return 3;
 }
 
+static int
+init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+       /*
+        * INIT_JUMP   opcode: 0x5C ('\')
+        *
+        * offset      (8  bit): opcode
+        * offset + 1  (16 bit): offset (in bios)
+        *
+        * Continue execution of init table from 'offset'
+        */
+
+       uint16_t jmp_offset = ROM16(bios->data[offset + 1]);
+
+       if (!iexec->execute)
+               return 3;
+
+       BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset);
+       return jmp_offset - offset;
+}
+
 static int
 init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 {
@@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = {
        { "INIT_ZM_REG_SEQUENCE"              , 0x58, init_zm_reg_sequence            },
        /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
        { "INIT_SUB_DIRECT"                   , 0x5B, init_sub_direct                 },
+       { "INIT_JUMP"                         , 0x5C, init_jump                       },
        { "INIT_I2C_IF"                       , 0x5E, init_i2c_if                     },
        { "INIT_COPY_NV_REG"                  , 0x5F, init_copy_nv_reg                },
        { "INIT_ZM_INDEX_IO"                  , 0x62, init_zm_index_io                },
@@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = {
 #define MAX_TABLE_OPS 1000
 
 static int
-parse_init_table(struct nvbios *bios, unsigned int offset,
-                struct init_exec *iexec)
+parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 {
        /*
         * Parses all commands in an init table.
@@ -6333,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
                }
        }
 
+       /* XFX GT-240X-YA
+        *
+        * So many things wrong here, replace the entire encoder table..
+        */
+       if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) {
+               if (idx == 0) {
+                       *conn = 0x02001300; /* VGA, connector 1 */
+                       *conf = 0x00000028;
+               } else
+               if (idx == 1) {
+                       *conn = 0x01010312; /* DVI, connector 0 */
+                       *conf = 0x00020030;
+               } else
+               if (idx == 2) {
+                       *conn = 0x01010310; /* VGA, connector 0 */
+                       *conf = 0x00000028;
+               } else
+               if (idx == 3) {
+                       *conn = 0x02022362; /* HDMI, connector 2 */
+                       *conf = 0x00020010;
+               } else {
+                       *conn = 0x0000000e; /* EOL */
+                       *conf = 0x00000000;
+               }
+       }
+
        return true;
 }
 
index 57e5302..856d56a 100644 (file)
@@ -1190,7 +1190,7 @@ extern int  nv50_graph_load_context(struct nouveau_channel *);
 extern int  nv50_graph_unload_context(struct drm_device *);
 extern int  nv50_grctx_init(struct nouveau_grctx *);
 extern void nv50_graph_tlb_flush(struct drm_device *dev);
-extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern void nv84_graph_tlb_flush(struct drm_device *dev);
 extern struct nouveau_enum nv50_data_error_names[];
 
 /* nvc0_graph.c */
index 2683377..78f467f 100644 (file)
@@ -552,6 +552,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
        u8 tRC;         /* Byte 9 */
        u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
        u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+       u8 magic_number = 0; /* Yeah... sorry*/
        u8 *mem = NULL, *entry;
        int i, recordlen, entries;
 
@@ -596,6 +597,12 @@ nouveau_mem_timing_init(struct drm_device *dev)
        if (!memtimings->timing)
                return;
 
+       /* Get "some number" from the timing reg for NV_40
+        * Used in calculations later */
+       if(dev_priv->card_type == NV_40) {
+               magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24;
+       }
+
        entry = mem + mem[1];
        for (i = 0; i < entries; i++, entry += recordlen) {
                struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
@@ -635,36 +642,51 @@ nouveau_mem_timing_init(struct drm_device *dev)
 
                /* XXX: I don't trust the -1's and +1's... they must come
                 *      from somewhere! */
-               timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+               timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
                                      tUNK_18 << 16 |
-                                     (tUNK_1 + tUNK_19 + 1) << 8 |
-                                     (tUNK_2 - 1));
+                                     (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
+               if(dev_priv->chipset == 0xa8) {
+                       timing->reg_100224 |= (tUNK_2 - 1);
+               } else {
+                       timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
+               }
 
                timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
-               if(recordlen > 19) {
-                       timing->reg_100228 += (tUNK_19 - 1) << 24;
-               }/* I cannot back-up this else-statement right now
-                        else {
-                       timing->reg_100228 += tUNK_12 << 24;
-               }*/
-
-               /* XXX: reg_10022c */
-               timing->reg_10022c = tUNK_2 - 1;
-
-               timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
-                                     tUNK_13 << 8  | tUNK_13);
-
-               /* XXX: +6? */
-               timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
-               timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
-
-               /* XXX; reg_100238, reg_10023c
-                * reg: 0x00??????
-                * reg_10023c:
-                *      0 for pre-NV50 cards
-                *      0x????0202 for NV50+ cards (empirical evidence) */
-               if(dev_priv->card_type >= NV_50) {
+               if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) {
+                       timing->reg_100228 |= (tUNK_19 - 1) << 24;
+               }
+
+               if(dev_priv->card_type == NV_40) {
+                       /* NV40: don't know what the rest of the regs are..
+                        * And don't need to know either */
+                       timing->reg_100228 |= 0x20200000 | magic_number << 24;
+               } else if(dev_priv->card_type >= NV_50) {
+                       /* XXX: reg_10022c */
+                       timing->reg_10022c = tUNK_2 - 1;
+
+                       timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+                                                 tUNK_13 << 8  | tUNK_13);
+
+                       timing->reg_100234 = (tRAS << 24 | tRC);
+                       timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
+
+                       if(dev_priv->chipset < 0xa3) {
+                               timing->reg_100234 |= (tUNK_2 + 2) << 8;
+                       } else {
+                               /* XXX: +6? */
+                               timing->reg_100234 |= (tUNK_19 + 6) << 8;
+                       }
+
+                       /* XXX; reg_100238, reg_10023c
+                        * reg_100238: 0x00??????
+                        * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */
                        timing->reg_10023c = 0x202;
+                       if(dev_priv->chipset < 0xa3) {
+                               timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
+                       } else {
+                               /* currently unknown
+                                * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
+                       }
                }
 
                NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
@@ -675,7 +697,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
                         timing->reg_100238, timing->reg_10023c);
        }
 
-       memtimings->nr_timing  = entries;
+       memtimings->nr_timing = entries;
        memtimings->supported = true;
 }
 
index ac62a1b..670e3cb 100644 (file)
@@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev)
                case 0x13:
                case 0x15:
                        perflvl->fanspeed = entry[55];
-                       perflvl->voltage = entry[56];
+                       perflvl->voltage = (recordlen > 56) ? entry[56] : 0;
                        perflvl->core = ROM32(entry[1]) * 10;
                        perflvl->memory = ROM32(entry[5]) * 20;
                        break;
index 5bb2859..6e2b1a6 100644 (file)
@@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->graph.destroy_context   = nv50_graph_destroy_context;
                engine->graph.load_context      = nv50_graph_load_context;
                engine->graph.unload_context    = nv50_graph_unload_context;
-               if (dev_priv->chipset != 0x86)
+               if (dev_priv->chipset == 0x50 ||
+                   dev_priv->chipset == 0xac)
                        engine->graph.tlb_flush = nv50_graph_tlb_flush;
-               else {
-                       /* from what i can see nvidia do this on every
-                        * pre-NVA3 board except NVAC, but, we've only
-                        * ever seen problems on NV86
-                        */
-                       engine->graph.tlb_flush = nv86_graph_tlb_flush;
-               }
+               else
+                       engine->graph.tlb_flush = nv84_graph_tlb_flush;
                engine->fifo.channels           = 128;
                engine->fifo.init               = nv50_fifo_init;
                engine->fifo.takedown           = nv50_fifo_takedown;
index c82db37..12098bf 100644 (file)
@@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
        int head = nv_encoder->restore.head;
 
        if (nv_encoder->dcb->type == OUTPUT_LVDS) {
-               struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
-               if (native_mode)
-                       call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
-                                        native_mode->clock);
-               else
-                       NV_ERROR(dev, "Not restoring LVDS without native mode\n");
+               struct nouveau_connector *connector =
+                       nouveau_encoder_connector_get(nv_encoder);
+
+               if (connector && connector->native_mode)
+                       call_lvds_script(dev, nv_encoder->dcb, head,
+                                        LVDS_PANEL_ON,
+                                        connector->native_mode->clock);
 
        } else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
                int clock = nouveau_hw_pllvals_to_clk
index 2b99840..a19ccaa 100644 (file)
@@ -469,9 +469,6 @@ nv50_crtc_wait_complete(struct drm_crtc *crtc)
 
        start = ptimer->read(dev);
        do {
-               nv_wr32(dev, 0x61002c, 0x370);
-               nv_wr32(dev, 0x000140, 1);
-
                if (nv_ro32(disp->ntfy, 0x000))
                        return 0;
        } while (ptimer->read(dev) - start < 2000000000ULL);
index a2cfaa6..c8e83c1 100644 (file)
@@ -186,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
        nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
 
        evo->dma.max = (4096/4) - 2;
+       evo->dma.max &= ~7;
        evo->dma.put = 0;
        evo->dma.cur = evo->dma.put;
        evo->dma.free = evo->dma.max - evo->dma.cur;
index 8675b00..b02a5b1 100644 (file)
@@ -503,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev)
 }
 
 void
-nv86_graph_tlb_flush(struct drm_device *dev)
+nv84_graph_tlb_flush(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
index 69af0ba..a0a2a02 100644 (file)
@@ -104,20 +104,26 @@ nvc0_vm_flush(struct nouveau_vm *vm)
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct drm_device *dev = vm->dev;
        struct nouveau_vm_pgd *vpgd;
-       u32 r100c80, engine;
+       u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
 
        pinstmem->flush(vm->dev);
 
-       if (vm == dev_priv->chan_vm)
-               engine = 1;
-       else
-               engine = 5;
-
+       spin_lock(&dev_priv->ramin_lock);
        list_for_each_entry(vpgd, &vm->pgd_list, head) {
-               r100c80 = nv_rd32(dev, 0x100c80);
+               /* looks like maybe a "free flush slots" counter, the
+                * faster you write to 0x100cbc to more it decreases
+                */
+               if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
+                       NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
+                                nv_rd32(dev, 0x100c80), engine);
+               }
                nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
                nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
-               if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
-                       NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+               /* wait for flush to be queued? */
+               if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
+                       NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
+                                nv_rd32(dev, 0x100c80), engine);
+               }
        }
+       spin_unlock(&dev_priv->ramin_lock);
 }
index 258fa5e..d71d375 100644 (file)
@@ -32,6 +32,7 @@
 #include "atom.h"
 #include "atom-names.h"
 #include "atom-bits.h"
+#include "radeon.h"
 
 #define ATOM_COND_ABOVE                0
 #define ATOM_COND_ABOVEOREQUAL 1
@@ -101,7 +102,9 @@ static void debug_print_spaces(int n)
 static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
                                 uint32_t index, uint32_t data)
 {
+       struct radeon_device *rdev = ctx->card->dev->dev_private;
        uint32_t temp = 0xCDCDCDCD;
+
        while (1)
                switch (CU8(base)) {
                case ATOM_IIO_NOP:
@@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
                        base += 3;
                        break;
                case ATOM_IIO_WRITE:
-                       (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+                       if (rdev->family == CHIP_RV515)
+                               (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
                        ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
                        base += 3;
                        break;
index b41ec59..9d516a8 100644 (file)
@@ -531,6 +531,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                        pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
                else
                        pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+               if ((rdev->family == CHIP_R600) ||
+                   (rdev->family == CHIP_RV610) ||
+                   (rdev->family == CHIP_RV630) ||
+                   (rdev->family == CHIP_RV670))
+                       pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
        } else {
                pll->flags |= RADEON_PLL_LEGACY;
 
index 0b0cc74..3453910 100644 (file)
@@ -120,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev)
        struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
        struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
-       if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
-               if (voltage->voltage != rdev->pm.current_vddc) {
-                       radeon_atom_set_voltage(rdev, voltage->voltage);
+       if (voltage->type == VOLTAGE_SW) {
+               if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
+                       radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
                        rdev->pm.current_vddc = voltage->voltage;
-                       DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+                       DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+               }
+               if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
+                       radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
+                       rdev->pm.current_vddci = voltage->vddci;
+                       DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
                }
        }
 }
@@ -3036,9 +3041,6 @@ int evergreen_init(struct radeon_device *rdev)
 {
        int r;
 
-       r = radeon_dummy_page_init(rdev);
-       if (r)
-               return r;
        /* This don't do much */
        r = radeon_gem_init(rdev);
        if (r)
@@ -3150,7 +3152,6 @@ void evergreen_fini(struct radeon_device *rdev)
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
-       radeon_dummy_page_fini(rdev);
 }
 
 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
index be271c4..15d5829 100644 (file)
@@ -587,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev)
 
        if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
                if (voltage->voltage != rdev->pm.current_vddc) {
-                       radeon_atom_set_voltage(rdev, voltage->voltage);
+                       radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
                        rdev->pm.current_vddc = voltage->voltage;
                        DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
                }
@@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev)
 {
        int r;
 
-       r = radeon_dummy_page_init(rdev);
-       if (r)
-               return r;
        if (r600_debugfs_mc_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for mc !\n");
        }
@@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev)
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
-       radeon_dummy_page_fini(rdev);
 }
 
 
index 93f5365..ba643b5 100644 (file)
@@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
 void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
 void rs690_pm_info(struct radeon_device *rdev);
 extern int rv6xx_get_temp(struct radeon_device *rdev);
 extern int rv770_get_temp(struct radeon_device *rdev);
@@ -767,7 +767,9 @@ struct radeon_voltage {
        u8 vddci_id; /* index into vddci voltage table */
        bool vddci_enabled;
        /* r6xx+ sw */
-       u32 voltage;
+       u16 voltage;
+       /* evergreen+ vddci */
+       u16 vddci;
 };
 
 /* clock mode flags */
@@ -835,10 +837,12 @@ struct radeon_pm {
        int                     default_power_state_index;
        u32                     current_sclk;
        u32                     current_mclk;
-       u32                     current_vddc;
+       u16                     current_vddc;
+       u16                     current_vddci;
        u32                     default_sclk;
        u32                     default_mclk;
-       u32                     default_vddc;
+       u16                     default_vddc;
+       u16                     default_vddci;
        struct radeon_i2c_chan *i2c_bus;
        /* selected pm method */
        enum radeon_pm_method     pm_method;
index eb888ee..ca57619 100644 (file)
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
                rdev->mc_rreg = &rs600_mc_rreg;
                rdev->mc_wreg = &rs600_mc_wreg;
        }
-       if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
+       if (rdev->family >= CHIP_R600) {
                rdev->pciep_rreg = &r600_pciep_rreg;
                rdev->pciep_wreg = &r600_pciep_wreg;
        }
index 99768d9..f5d12fb 100644 (file)
@@ -2176,24 +2176,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
        }
 }
 
-static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+                                                u16 *vddc, u16 *vddci)
 {
        struct radeon_mode_info *mode_info = &rdev->mode_info;
        int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
        u8 frev, crev;
        u16 data_offset;
        union firmware_info *firmware_info;
-       u16 vddc = 0;
+
+       *vddc = 0;
+       *vddci = 0;
 
        if (atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset)) {
                firmware_info =
                        (union firmware_info *)(mode_info->atom_context->bios +
                                                data_offset);
-               vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+               *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+               if ((frev == 2) && (crev >= 2))
+                       *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
        }
-
-       return vddc;
 }
 
 static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
@@ -2203,7 +2206,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
        int j;
        u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
        u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
-       u16 vddc = radeon_atombios_get_default_vddc(rdev);
+       u16 vddc, vddci;
+
+       radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
 
        rdev->pm.power_state[state_index].misc = misc;
        rdev->pm.power_state[state_index].misc2 = misc2;
@@ -2244,6 +2249,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
                        rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
                        rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
                        rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+                       rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
                } else {
                        /* patch the table values with the default sclk/mclk from firmware info */
                        for (j = 0; j < mode_index; j++) {
@@ -2286,6 +2292,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
                        VOLTAGE_SW;
                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
                        le16_to_cpu(clock_info->evergreen.usVDDC);
+               rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+                       le16_to_cpu(clock_info->evergreen.usVDDCI);
        } else {
                sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
                sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2577,25 +2585,25 @@ union set_voltage {
        struct _SET_VOLTAGE_PARAMETERS_V2 v2;
 };
 
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
 {
        union set_voltage args;
        int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
-       u8 frev, crev, volt_index = level;
+       u8 frev, crev, volt_index = voltage_level;
 
        if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
                return;
 
        switch (crev) {
        case 1:
-               args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+               args.v1.ucVoltageType = voltage_type;
                args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
                args.v1.ucVoltageIndex = volt_index;
                break;
        case 2:
-               args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+               args.v2.ucVoltageType = voltage_type;
                args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
-               args.v2.usVoltageLevel = cpu_to_le16(level);
+               args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
                break;
        default:
                DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
index 9e59868..bbcd1dd 100644 (file)
@@ -79,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
                        scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
                else
                        scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               seq = rdev->wb.wb[scratch_index/4];
+               seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
        } else
                seq = RREG32(rdev->fence_drv.scratch_reg);
        if (seq != rdev->fence_drv.last_seq) {
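
The fence hunk wraps the writeback read in le32_to_cpu() because the GPU stores the sequence number little-endian regardless of host byte order. A small self-contained sketch of the conversion (plain C, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Decode a 32-bit little-endian value byte by byte so the result is
 * correct on both little- and big-endian hosts. */
static uint32_t le32_to_host(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        /* Pretend this is the DMA'd writeback slot holding sequence 0x12345678. */
        uint8_t wb_slot[4] = { 0x78, 0x56, 0x34, 0x12 };

        printf("seq = 0x%08lx\n", (unsigned long)le32_to_host(wb_slot));
        return 0;
}
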
index f0534ef..8a955bb 100644 (file)
@@ -285,4 +285,6 @@ void radeon_gart_fini(struct radeon_device *rdev)
        rdev->gart.pages = NULL;
        rdev->gart.pages_addr = NULL;
        rdev->gart.ttm_alloced = NULL;
+
+       radeon_dummy_page_fini(rdev);
 }
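
With this hunk the dummy-page teardown is tied to radeon_gart_fini(), which is why the per-ASIC evergreen/r600/rv770 paths elsewhere in this series drop their own radeon_dummy_page_init()/fini() calls. A generic sketch of the pairing, with invented names and sizes, assuming the owner of the allocation is also responsible for the release:

#include <stdlib.h>

/* Illustrative stand-ins only. */
struct dummy_page { void *addr; };

struct gart {
        struct dummy_page dummy;
        void *pages;
};

/* gart_init() owns the dummy page, gart_fini() releases it, so the
 * per-ASIC init/fini paths no longer need the extra calls. */
static int gart_init(struct gart *g)
{
        g->dummy.addr = malloc(4096);
        g->pages = malloc(1024);
        if (!g->dummy.addr || !g->pages) {
                free(g->dummy.addr);
                free(g->pages);
                return -1;
        }
        return 0;
}

static void gart_fini(struct gart *g)
{
        free(g->pages);
        g->pages = NULL;
        free(g->dummy.addr);        /* teardown paired with the init that created it */
        g->dummy.addr = NULL;
}

int main(void)
{
        struct gart g;

        if (gart_init(&g) == 0)
                gart_fini(&g);
        return 0;
}
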
index ded2a45..ccbabf7 100644 (file)
@@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
                *val = in_buf[0];
                DRM_DEBUG("val = 0x%02x\n", *val);
        } else {
-               DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+               DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
                          addr, *val);
        }
 }
@@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
        out_buf[1] = val;
 
        if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
-               DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+               DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
                          addr, val);
 }
 
index 5b54268..2f46e0c 100644 (file)
@@ -269,7 +269,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
        .disable = radeon_legacy_encoder_disable,
 };
 
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
 #define MAX_RADEON_LEVEL 0xFF
 
index 08de669..86eda1e 100644 (file)
@@ -23,6 +23,7 @@
 #include "drmP.h"
 #include "radeon.h"
 #include "avivod.h"
+#include "atom.h"
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
 #endif
@@ -535,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev)
        /* set up the default clocks if the MC ucode is loaded */
        if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
                if (rdev->pm.default_vddc)
-                       radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+                       radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+                                               SET_VOLTAGE_TYPE_ASIC_VDDC);
+               if (rdev->pm.default_vddci)
+                       radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+                                               SET_VOLTAGE_TYPE_ASIC_VDDCI);
                if (rdev->pm.default_sclk)
                        radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
                if (rdev->pm.default_mclk)
@@ -548,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
        rdev->pm.current_sclk = rdev->pm.default_sclk;
        rdev->pm.current_mclk = rdev->pm.default_mclk;
        rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+       rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
        if (rdev->pm.pm_method == PM_METHOD_DYNPM
            && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
                rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -585,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev)
                /* set up the default clocks if the MC ucode is loaded */
                if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
                        if (rdev->pm.default_vddc)
-                               radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+                               radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+                                                       SET_VOLTAGE_TYPE_ASIC_VDDC);
                        if (rdev->pm.default_sclk)
                                radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
                        if (rdev->pm.default_mclk)
index bbc9cd8..c6776e4 100644 (file)
@@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 void radeon_ring_free_size(struct radeon_device *rdev)
 {
        if (rdev->wb.enabled)
-               rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
+               rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
        else {
                if (rdev->family >= CHIP_R600)
                        rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
index 876cebc..6e3b11e 100644 (file)
@@ -114,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
                                udelay(voltage->delay);
                }
        } else if (voltage->type == VOLTAGE_VDDC)
-               radeon_atom_set_voltage(rdev, voltage->vddc_id);
+               radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
 
        dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
        dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
index b974ac7..ef8a5ba 100644 (file)
@@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
 
        if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
                if (voltage->voltage != rdev->pm.current_vddc) {
-                       radeon_atom_set_voltage(rdev, voltage->voltage);
+                       radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
                        rdev->pm.current_vddc = voltage->voltage;
                        DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
                }
@@ -1255,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev)
 {
        int r;
 
-       r = radeon_dummy_page_init(rdev);
-       if (r)
-               return r;
        /* This doesn't do much */
        r = radeon_gem_init(rdev);
        if (r)
@@ -1372,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev)
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
-       radeon_dummy_page_fini(rdev);
 }
 
 static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
index 737a2a2..9d9d929 100644 (file)
@@ -683,22 +683,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
                        gfp_flags |= GFP_HIGHUSER;
 
                for (r = 0; r < count; ++r) {
-                       if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
-                               void *addr;
-                               addr = dma_alloc_coherent(NULL, PAGE_SIZE,
-                                                         &dma_address[r],
-                                                         gfp_flags);
-                               if (addr == NULL)
-                                       return -ENOMEM;
-                               p = virt_to_page(addr);
-                       } else
-                               p = alloc_page(gfp_flags);
+                       p = alloc_page(gfp_flags);
                        if (!p) {
 
                                printk(KERN_ERR TTM_PFX
                                       "Unable to allocate page.");
                                return -ENOMEM;
                        }
+
                        list_add(&p->lru, pages);
                }
                return 0;
@@ -746,24 +738,12 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
        unsigned long irq_flags;
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p, *tmp;
-       unsigned r;
 
        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
 
-               r = page_count-1;
                list_for_each_entry_safe(p, tmp, pages, lru) {
-                       if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
-                               void *addr = page_address(p);
-                               WARN_ON(!addr || !dma_address[r]);
-                               if (addr)
-                                       dma_free_coherent(NULL, PAGE_SIZE,
-                                                         addr,
-                                                         dma_address[r]);
-                               dma_address[r] = 0;
-                       } else
-                               __free_page(p);
-                       r--;
+                       __free_page(p);
                }
                /* Make the pages list empty */
                INIT_LIST_HEAD(pages);
index 70e60a4..4199179 100644 (file)
@@ -5,6 +5,7 @@ config STUB_POULSBO
        # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
        # but for select to work, need to select ACPI_VIDEO's dependencies, ick
        select BACKLIGHT_CLASS_DEVICE if ACPI
+       select VIDEO_OUTPUT_CONTROL if ACPI
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        select THERMAL if ACPI
index 3790816..8497f56 100644 (file)
@@ -178,6 +178,10 @@ static int __devinit regulator_led_probe(struct platform_device *pdev)
        led->cdev.flags |= LED_CORE_SUSPENDRESUME;
        led->vcc = vcc;
 
+       /* to handle correctly an already enabled regulator */
+       if (regulator_is_enabled(led->vcc))
+               led->enabled = 1;
+
        mutex_init(&led->mutex);
        INIT_WORK(&led->work, led_work);
 
index d01574d..f4c8c84 100644 (file)
@@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev)
 }
 EXPORT_SYMBOL(mfd_cell_disable);
 
+static int mfd_platform_add_cell(struct platform_device *pdev,
+                                const struct mfd_cell *cell)
+{
+       if (!cell)
+               return 0;
+
+       pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
+       if (!pdev->mfd_cell)
+               return -ENOMEM;
+
+       return 0;
+}
+
 static int mfd_add_device(struct device *parent, int id,
                          const struct mfd_cell *cell,
                          struct resource *mem_base,
@@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id,
 
        pdev->dev.parent = parent;
 
-       ret = platform_device_add_data(pdev, cell, sizeof(*cell));
+       ret = mfd_platform_add_cell(pdev, cell);
        if (ret)
                goto fail_res;
 
@@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id,
 
        return 0;
 
-/*     platform_device_del(pdev); */
 fail_res:
        kfree(res);
 fail_device:
index 20e4e93..ecafa4b 100644 (file)
@@ -348,15 +348,15 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
 
 static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
 
-static void gru_noop(unsigned int irq)
+static void gru_noop(struct irq_data *d)
 {
 }
 
 static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
        [0 ... GRU_CHIPLETS_PER_BLADE - 1] {
-               .mask           = gru_noop,
-               .unmask         = gru_noop,
-               .ack            = gru_noop
+               .irq_mask       = gru_noop,
+               .irq_unmask     = gru_noop,
+               .irq_ack        = gru_noop
        }
 };
 
index f803c58..66823ed 100644 (file)
@@ -154,7 +154,7 @@ struct be_eq_obj {
        u16 min_eqd;            /* in usecs */
        u16 max_eqd;            /* in usecs */
        u16 cur_eqd;            /* in usecs */
-       u8  msix_vec_idx;
+       u8  eq_idx;
 
        struct napi_struct napi;
 };
@@ -291,7 +291,7 @@ struct be_adapter {
        u32 num_rx_qs;
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
 
-       u8 msix_vec_next_idx;
+       u8 eq_next_idx;
        struct be_drv_stats drv_stats;
 
        struct vlan_group *vlan_grp;
index 9a54c8b..7cb5a11 100644 (file)
@@ -1497,7 +1497,7 @@ static int be_tx_queues_create(struct be_adapter *adapter)
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
 
-       adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+       adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
 
 
        /* Alloc TX eth compl queue */
@@ -1590,7 +1590,7 @@ static int be_rx_queues_create(struct be_adapter *adapter)
                if (rc)
                        goto err;
 
-               rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+               rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
 
                /* CQ */
                cq = &rxo->cq;
@@ -1666,11 +1666,11 @@ static irqreturn_t be_intx(int irq, void *dev)
                if (!isr)
                        return IRQ_NONE;
 
-               if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+               if ((1 << adapter->tx_eq.eq_idx & isr))
                        event_handle(adapter, &adapter->tx_eq);
 
                for_all_rx_queues(adapter, rxo, i) {
-                       if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+                       if ((1 << rxo->rx_eq.eq_idx & isr))
                                event_handle(adapter, &rxo->rx_eq);
                }
        }
@@ -1951,7 +1951,7 @@ static void be_sriov_disable(struct be_adapter *adapter)
 static inline int be_msix_vec_get(struct be_adapter *adapter,
                                        struct be_eq_obj *eq_obj)
 {
-       return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
+       return adapter->msix_entries[eq_obj->eq_idx].vector;
 }
 
 static int be_request_irq(struct be_adapter *adapter,
@@ -2345,6 +2345,7 @@ static int be_clear(struct be_adapter *adapter)
        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);
+       adapter->eq_next_idx = 0;
 
        if (be_physfn(adapter) && adapter->sriov_enabled)
                for (vf = 0; vf < num_vfs; vf++)
@@ -3141,12 +3142,14 @@ static int be_resume(struct pci_dev *pdev)
 static void be_shutdown(struct pci_dev *pdev)
 {
        struct be_adapter *adapter = pci_get_drvdata(pdev);
-       struct net_device *netdev =  adapter->netdev;
 
-       if (netif_running(netdev))
+       if (!adapter)
+               return;
+
+       if (netif_running(adapter->netdev))
                cancel_delayed_work_sync(&adapter->work);
 
-       netif_device_detach(netdev);
+       netif_device_detach(adapter->netdev);
 
        be_cmd_reset_function(adapter);
 
index 34933cb..e3de0b8 100644 (file)
@@ -2219,13 +2219,9 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
 static void
 bfa_ioc_recover(struct bfa_ioc *ioc)
 {
-       u16 bdf;
-
-       bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
-                                       ioc->pcidev.device_id);
-
-       pr_crit("Firmware heartbeat failure at %d", bdf);
-       BUG_ON(1);
+       pr_crit("Heart Beat of IOC has failed\n");
+       bfa_ioc_stats(ioc, ioc_hbfails);
+       bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
 static void
index 7513c45..330140e 100644 (file)
@@ -931,7 +931,8 @@ static int mcp251x_open(struct net_device *net)
        priv->tx_len = 0;
 
        ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
-                         IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
+                 pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
+                 DEVICE_NAME, priv);
        if (ret) {
                dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
                if (pdata->transceiver_enable)
index cfd50bc..62dd21b 100644 (file)
@@ -345,6 +345,8 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
                err = mlx4_en_init_allocator(priv, ring);
                if (err) {
                        en_err(priv, "Failed initializing ring allocator\n");
+                       if (ring->stride <= TXBB_SIZE)
+                               ring->buf -= TXBB_SIZE;
                        ring_ind--;
                        goto err_allocator;
                }
@@ -369,6 +371,8 @@ err_buffers:
        ring_ind = priv->rx_ring_num - 1;
 err_allocator:
        while (ring_ind >= 0) {
+               if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
+                       priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
                mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
                ring_ind--;
        }
index 62fa7ee..3814fc9 100644 (file)
@@ -944,6 +944,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
        }
 
        for (port = 1; port <= dev->caps.num_ports; port++) {
+               enum mlx4_port_type port_type = 0;
+               mlx4_SENSE_PORT(dev, port, &port_type);
+               if (port_type)
+                       dev->caps.port_type[port] = port_type;
                ib_port_default_caps = 0;
                err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
                if (err)
@@ -958,6 +962,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                        goto err_mcg_table_free;
                }
        }
+       mlx4_set_port_mask(dev);
 
        return 0;
 
index c1e0e5f..dd7d745 100644 (file)
@@ -431,6 +431,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_handle_catas_err(struct mlx4_dev *dev);
 
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+                   enum mlx4_port_type *type);
 void mlx4_do_sense_ports(struct mlx4_dev *dev,
                         enum mlx4_port_type *stype,
                         enum mlx4_port_type *defaults);
index 015fbe7..e2337a7 100644 (file)
@@ -38,8 +38,8 @@
 
 #include "mlx4.h"
 
-static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
-                          enum mlx4_port_type *type)
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+                   enum mlx4_port_type *type)
 {
        u64 out_param;
        int err = 0;
index 693aaef..718879b 100644 (file)
@@ -317,7 +317,7 @@ static void pppoe_flush_dev(struct net_device *dev)
                        lock_sock(sk);
 
                        if (po->pppoe_dev == dev &&
-                           sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+                           sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
                                pppox_unbind_sock(sk);
                                sk->sk_state = PPPOX_ZOMBIE;
                                sk->sk_state_change(sk);
index c498b72..4b42ecc 100644 (file)
@@ -1818,6 +1818,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
        SMSC_TRACE(PROBE, "PHY will be autodetected.");
 
        spin_lock_init(&pdata->dev_lock);
+       spin_lock_init(&pdata->mac_lock);
 
        if (pdata->ioaddr == 0) {
                SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000");
@@ -1895,8 +1896,11 @@ static int __devinit smsc911x_init(struct net_device *dev)
        /* workaround for platforms without an eeprom, where the mac address
         * is stored elsewhere and set by the bootloader.  This saves the
         * mac address before resetting the device */
-       if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS)
+       if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
+               spin_lock_irq(&pdata->mac_lock);
                smsc911x_read_mac_address(dev);
+               spin_unlock_irq(&pdata->mac_lock);
+       }
 
        /* Reset the LAN911x */
        if (smsc911x_soft_reset(pdata))
@@ -2059,8 +2063,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
                SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name);
        }
 
-       spin_lock_init(&pdata->mac_lock);
-
        retval = smsc911x_mii_init(pdev, dev);
        if (retval) {
                SMSC_WARNING(PROBE,
index 727874d..47a6c87 100644 (file)
@@ -1313,6 +1313,21 @@ static const struct usb_device_id products[] = {
                USB_DEVICE(0x0424, 0x9909),
                .driver_info = (unsigned long) &smsc95xx_info,
        },
+       {
+               /* SMSC LAN9530 USB Ethernet Device */
+               USB_DEVICE(0x0424, 0x9530),
+               .driver_info = (unsigned long) &smsc95xx_info,
+       },
+       {
+               /* SMSC LAN9730 USB Ethernet Device */
+               USB_DEVICE(0x0424, 0x9730),
+               .driver_info = (unsigned long) &smsc95xx_info,
+       },
+       {
+               /* SMSC LAN89530 USB Ethernet Device */
+               USB_DEVICE(0x0424, 0x9E08),
+               .driver_info = (unsigned long) &smsc95xx_info,
+       },
        { },            /* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
index 338b075..1ec9bcd 100644 (file)
@@ -2546,6 +2546,7 @@ static struct {
        { AR_SREV_VERSION_9287,         "9287" },
        { AR_SREV_VERSION_9271,         "9271" },
        { AR_SREV_VERSION_9300,         "9300" },
+       { AR_SREV_VERSION_9485,         "9485" },
 };
 
 /* For devices with external radios */
index 3d5566e..ff0f5ba 100644 (file)
@@ -1536,7 +1536,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
                dmaaddr = meta->dmaaddr;
                goto drop_recycle_buffer;
        }
-       if (unlikely(len > ring->rx_buffersize)) {
+       if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
index a01c210..e8a80a1 100644 (file)
@@ -163,7 +163,7 @@ struct b43_dmadesc_generic {
 /* DMA engine tuning knobs */
 #define B43_TXRING_SLOTS               256
 #define B43_RXRING_SLOTS               64
-#define B43_DMA0_RX_BUFFERSIZE         IEEE80211_MAX_FRAME_LEN
+#define B43_DMA0_RX_BUFFERSIZE         (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN)
 
 /* Pointer poison */
 #define B43_DMA_PTR_POISON             ((void *)ERR_PTR(-ENOMEM))
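
Both b43 hunks account for the RX frame offset: the buffer is sized to hold the offset plus the maximum frame, and the overflow check compares offset + length rather than the length alone. A tiny sketch of that bounds check, with made-up constants:

#include <stdbool.h>
#include <stdio.h>

#define FRAME_OFFSET     30          /* illustrative header space before the frame */
#define MAX_FRAME_LEN    2352        /* illustrative maximum frame length */
#define RX_BUFFERSIZE    (FRAME_OFFSET + MAX_FRAME_LEN)

/* The frame is written FRAME_OFFSET bytes into the buffer, so the
 * check must compare offset + length, not the length alone. */
static bool frame_fits(unsigned int len)
{
        return len + FRAME_OFFSET <= RX_BUFFERSIZE;
}

int main(void)
{
        printf("%d\n", frame_fits(MAX_FRAME_LEN));      /* 1: exactly fits */
        printf("%d\n", frame_fits(MAX_FRAME_LEN + 1));  /* 0: would overflow */
        return 0;
}
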
index 98aa8af..20b6646 100644 (file)
@@ -241,7 +241,7 @@ struct iwl_eeprom_enhanced_txpwr {
 
 /* 6x00 Specific */
 #define EEPROM_6000_TX_POWER_VERSION    (4)
-#define EEPROM_6000_EEPROM_VERSION     (0x434)
+#define EEPROM_6000_EEPROM_VERSION     (0x423)
 
 /* 6x50 Specific */
 #define EEPROM_6050_TX_POWER_VERSION    (4)
index 9b344a9..e183587 100644 (file)
@@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
        {USB_DEVICE(0x0846, 0x4210)},   /* Netgear WG121 the second ? */
        {USB_DEVICE(0x0846, 0x4220)},   /* Netgear WG111 */
        {USB_DEVICE(0x09aa, 0x1000)},   /* Spinnaker Proto board */
+       {USB_DEVICE(0x0bf8, 0x1007)},   /* Fujitsu E-5400 USB */
        {USB_DEVICE(0x0cde, 0x0006)},   /* Medion 40900, Roper Europe */
        {USB_DEVICE(0x0db0, 0x6826)},   /* MSI UB54G (MS-6826) */
        {USB_DEVICE(0x107b, 0x55f2)},   /* Gateway WGU-210 (Gemtek) */
@@ -68,6 +69,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
        {USB_DEVICE(0x1915, 0x2235)},   /* Linksys WUSB54G Portable OEM */
        {USB_DEVICE(0x2001, 0x3701)},   /* DLink DWL-G120 Spinnaker */
        {USB_DEVICE(0x2001, 0x3703)},   /* DLink DWL-G122 */
+       {USB_DEVICE(0x2001, 0x3762)},   /* Conceptronic C54U */
        {USB_DEVICE(0x5041, 0x2234)},   /* Linksys WUSB54G */
        {USB_DEVICE(0x5041, 0x2235)},   /* Linksys WUSB54G Portable */
 
index 9de9dbe..84eb6ad 100644 (file)
@@ -1062,8 +1062,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
         * Stop all work.
         */
        cancel_work_sync(&rt2x00dev->intf_work);
-       cancel_work_sync(&rt2x00dev->rxdone_work);
-       cancel_work_sync(&rt2x00dev->txdone_work);
+       if (rt2x00_is_usb(rt2x00dev)) {
+               cancel_work_sync(&rt2x00dev->rxdone_work);
+               cancel_work_sync(&rt2x00dev->txdone_work);
+       }
        destroy_workqueue(rt2x00dev->workqueue);
 
        /*
index f74a870..590f14f 100644 (file)
@@ -685,7 +685,7 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
 
        u8 efuse_data, word_cnts = 0;
        u16 efuse_addr = 0;
-       u8 hworden;
+       u8 hworden = 0;
        u8 tmpdata[8];
 
        if (data == NULL)
index 5ef9137..28a6ce3 100644 (file)
@@ -303,7 +303,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
        u16 box_reg, box_extreg;
        u8 u1b_tmp;
        bool isfw_read = false;
-       u8 buf_index;
+       u8 buf_index = 0;
        bool bwrite_sucess = false;
        u8 wait_h2c_limmit = 100;
        u8 wait_writeh2c_limmit = 100;
index a4b2613..f5d8573 100644 (file)
@@ -246,7 +246,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
 
 static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_priv __maybe_unused *rtlpriv = rtl_priv(hw);
 
        mutex_destroy(&rtlpriv->io.bb_mutex);
 }
index 5b9dbea..b1c7d03 100644 (file)
@@ -340,7 +340,7 @@ module_init(wl1271_init);
 module_exit(wl1271_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
 MODULE_FIRMWARE(WL1271_FW_NAME);
 MODULE_FIRMWARE(WL1271_AP_FW_NAME);
index 18cf017..ffc745b 100644 (file)
@@ -487,7 +487,7 @@ module_init(wl1271_init);
 module_exit(wl1271_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
 MODULE_FIRMWARE(WL1271_FW_NAME);
 MODULE_FIRMWARE(WL1271_AP_FW_NAME);
index e64403b..6ec06a4 100644 (file)
@@ -204,7 +204,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
 
        kfree(wl->nvs);
 
-       wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+       if (len != sizeof(struct wl1271_nvs_file))
+               return -EINVAL;
+
+       wl->nvs = kzalloc(len, GFP_KERNEL);
        if (!wl->nvs) {
                wl1271_error("could not allocate memory for the nvs file");
                ret = -ENOMEM;
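
The testmode fix checks the supplied nvs length against the expected structure size before allocating, so a short or oversized blob is rejected up front instead of being copied into a wrongly sized buffer. A sketch of the validate-then-allocate pattern, with an invented structure size:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct nvs_file { uint8_t data[912]; };   /* illustrative size only */

/* Reject a caller-supplied blob whose length does not match the
 * structure it will be copied into, before allocating anything. */
static int store_nvs(struct nvs_file **out, const void *buf, size_t len)
{
        if (len != sizeof(struct nvs_file))
                return -EINVAL;

        *out = malloc(len);
        if (!*out)
                return -ENOMEM;

        memcpy(*out, buf, len);
        return 0;
}

int main(void)
{
        struct nvs_file blob = { { 0 } }, *copy = NULL;

        printf("good length: %d\n", store_nvs(&copy, &blob, sizeof(blob)));
        printf("bad length:  %d\n", store_nvs(&copy, &blob, 7));
        free(copy);
        return 0;
}
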
index 58236e6..ab607bb 100644 (file)
@@ -643,7 +643,7 @@ static void rx_urb_complete(struct urb *urb)
        usb = urb->context;
        rx = &usb->rx;
 
-       zd_usb_reset_rx_idle_timer(usb);
+       tasklet_schedule(&rx->reset_timer_tasklet);
 
        if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
                /* If there is an old first fragment, we don't care. */
@@ -812,6 +812,7 @@ void zd_usb_disable_rx(struct zd_usb *usb)
        __zd_usb_disable_rx(usb);
        mutex_unlock(&rx->setup_mutex);
 
+       tasklet_kill(&rx->reset_timer_tasklet);
        cancel_delayed_work_sync(&rx->idle_work);
 }
 
@@ -1106,6 +1107,13 @@ static void zd_rx_idle_timer_handler(struct work_struct *work)
        zd_usb_reset_rx(usb);
 }
 
+static void zd_usb_reset_rx_idle_timer_tasklet(unsigned long param)
+{
+       struct zd_usb *usb = (struct zd_usb *)param;
+
+       zd_usb_reset_rx_idle_timer(usb);
+}
+
 void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
 {
        struct zd_usb_rx *rx = &usb->rx;
@@ -1127,6 +1135,7 @@ static inline void init_usb_interrupt(struct zd_usb *usb)
 static inline void init_usb_rx(struct zd_usb *usb)
 {
        struct zd_usb_rx *rx = &usb->rx;
+
        spin_lock_init(&rx->lock);
        mutex_init(&rx->setup_mutex);
        if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
@@ -1136,11 +1145,14 @@ static inline void init_usb_rx(struct zd_usb *usb)
        }
        ZD_ASSERT(rx->fragment_length == 0);
        INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
+       rx->reset_timer_tasklet.func = zd_usb_reset_rx_idle_timer_tasklet;
+       rx->reset_timer_tasklet.data = (unsigned long)usb;
 }
 
 static inline void init_usb_tx(struct zd_usb *usb)
 {
        struct zd_usb_tx *tx = &usb->tx;
+
        spin_lock_init(&tx->lock);
        atomic_set(&tx->enabled, 0);
        tx->stopped = 0;
@@ -1671,6 +1683,10 @@ static void iowrite16v_urb_complete(struct urb *urb)
 
        if (urb->status && !usb->cmd_error)
                usb->cmd_error = urb->status;
+
+       if (!usb->cmd_error &&
+                       urb->actual_length != urb->transfer_buffer_length)
+               usb->cmd_error = -EIO;
 }
 
 static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
@@ -1805,7 +1821,7 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
        usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
                         req, req_len, iowrite16v_urb_complete, usb,
                         ep->desc.bInterval);
-       urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
+       urb->transfer_flags |= URB_FREE_BUFFER;
 
        /* Submit previous URB */
        r = zd_submit_waiting_urb(usb, false);
index b3df2c8..325d0f9 100644 (file)
@@ -183,6 +183,7 @@ struct zd_usb_rx {
        spinlock_t lock;
        struct mutex setup_mutex;
        struct delayed_work idle_work;
+       struct tasklet_struct reset_timer_tasklet;
        u8 fragment[2 * USB_MAX_RX_SIZE];
        unsigned int fragment_length;
        unsigned int usb_packet_size;
index d86ea8b..135df16 100644 (file)
@@ -781,7 +781,7 @@ static int pci_pm_resume(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int pci_pm_freeze(struct device *dev)
 {
@@ -970,7 +970,7 @@ static int pci_pm_restore(struct device *dev)
        return error;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define pci_pm_freeze          NULL
 #define pci_pm_freeze_noirq    NULL
@@ -981,7 +981,7 @@ static int pci_pm_restore(struct device *dev)
 #define pci_pm_restore         NULL
 #define pci_pm_restore_noirq   NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM_RUNTIME
 
index 89d0a6a..ebf51ad 100644 (file)
@@ -676,10 +676,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
                        min_align = align1 >> 1;
                align += aligns[order];
        }
-       size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), align);
+       size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
        size1 = !add_size ? size :
                calculate_memsize(size, min_size+add_size, 0,
-                               resource_size(b_res), align);
+                               resource_size(b_res), min_align);
        if (!size0 && !size1) {
                if (b_res->start || b_res->end)
                        dev_info(&bus->self->dev, "disabling bridge window "
index 2ee442c..0485e39 100644 (file)
@@ -187,7 +187,8 @@ config MSI_LAPTOP
        depends on ACPI
        depends on BACKLIGHT_CLASS_DEVICE
        depends on RFKILL
-       depends on SERIO_I8042
+       depends on INPUT && SERIO_I8042
+       select INPUT_SPARSEKMAP
        ---help---
          This is a driver for laptops built by MSI (MICRO-STAR
          INTERNATIONAL):
index 5ea6c34..ac4e7f8 100644 (file)
@@ -89,7 +89,7 @@ MODULE_LICENSE("GPL");
 #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026"
 
 MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
-MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3");
+MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3");
 MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
 
 enum acer_wmi_event_ids {
index efc776c..832a3fd 100644 (file)
@@ -201,8 +201,8 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
        if (!asus->inputdev)
                return -ENOMEM;
 
-       asus->inputdev->name = asus->driver->input_phys;
-       asus->inputdev->phys = asus->driver->input_name;
+       asus->inputdev->name = asus->driver->input_name;
+       asus->inputdev->phys = asus->driver->input_phys;
        asus->inputdev->id.bustype = BUS_HOST;
        asus->inputdev->dev.parent = &asus->platform_device->dev;
 
index 0ddc434..649dcad 100644 (file)
@@ -67,9 +67,11 @@ static const struct key_entry eeepc_wmi_keymap[] = {
        { KE_KEY, 0x82, { KEY_CAMERA } },
        { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } },
        { KE_KEY, 0x88, { KEY_WLAN } },
+       { KE_KEY, 0xbd, { KEY_CAMERA } },
        { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
        { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
        { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
+       { KE_KEY, 0xe8, { KEY_SCREENLOCK } },
        { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
        { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
        { KE_KEY, 0xec, { KEY_CAMERA_UP } },
index d653104..464bb3f 100644 (file)
@@ -74,6 +74,19 @@ struct pmic_gpio {
        u32                     trigger_type;
 };
 
+static void pmic_program_irqtype(int gpio, int type)
+{
+       if (type & IRQ_TYPE_EDGE_RISING)
+               intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
+       else
+               intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
+
+       if (type & IRQ_TYPE_EDGE_FALLING)
+               intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
+       else
+               intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
+};
+
 static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
        if (offset > 8) {
@@ -166,16 +179,38 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
        return pg->irq_base + offset;
 }
 
+static void pmic_bus_lock(struct irq_data *data)
+{
+       struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+
+       mutex_lock(&pg->buslock);
+}
+
+static void pmic_bus_sync_unlock(struct irq_data *data)
+{
+       struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+
+       if (pg->update_type) {
+               unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;
+
+               pmic_program_irqtype(gpio, pg->trigger_type);
+               pg->update_type = 0;
+       }
+       mutex_unlock(&pg->buslock);
+}
+
 /* the gpiointr register is read-clear, so just do nothing. */
 static void pmic_irq_unmask(struct irq_data *data) { }
 
 static void pmic_irq_mask(struct irq_data *data) { }
 
 static struct irq_chip pmic_irqchip = {
-       .name           = "PMIC-GPIO",
-       .irq_mask       = pmic_irq_mask,
-       .irq_unmask     = pmic_irq_unmask,
-       .irq_set_type   = pmic_irq_type,
+       .name                   = "PMIC-GPIO",
+       .irq_mask               = pmic_irq_mask,
+       .irq_unmask             = pmic_irq_unmask,
+       .irq_set_type           = pmic_irq_type,
+       .irq_bus_lock           = pmic_bus_lock,
+       .irq_bus_sync_unlock    = pmic_bus_sync_unlock,
 };
 
 static irqreturn_t pmic_irq_handler(int irq, void *data)
index de434c6..d347116 100644 (file)
@@ -570,6 +570,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                },
                .callback = dmi_check_cb,
        },
+       {
+               .ident = "R410 Plus",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR,
+                                       "SAMSUNG ELECTRONICS CO., LTD."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
+                       DMI_MATCH(DMI_BOARD_NAME, "R460"),
+               },
+               .callback = dmi_check_cb,
+       },
        {
                .ident = "R518",
                .matches = {
@@ -591,12 +601,12 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                .callback = dmi_check_cb,
        },
        {
-               .ident = "N150/N210/N220",
+               .ident = "N150/N210/N220/N230",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR,
                                        "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
+                       DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
                },
                .callback = dmi_check_cb,
        },
@@ -771,6 +781,7 @@ static int __init samsung_init(void)
 
        /* create a backlight device to talk to this one */
        memset(&props, 0, sizeof(struct backlight_properties));
+       props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = sabi_config->max_brightness;
        backlight_device = backlight_device_register("samsung", &sdev->dev,
                                                     NULL, &backlight_ops,
index e642f5f..8f709ae 100644 (file)
@@ -138,6 +138,8 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
                 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
                 "(default: 0)");
 
+static void sony_nc_kbd_backlight_resume(void);
+
 enum sony_nc_rfkill {
        SONY_WIFI,
        SONY_BLUETOOTH,
@@ -771,11 +773,6 @@ static int sony_nc_handles_setup(struct platform_device *pd)
        if (!handles)
                return -ENOMEM;
 
-       sysfs_attr_init(&handles->devattr.attr);
-       handles->devattr.attr.name = "handles";
-       handles->devattr.attr.mode = S_IRUGO;
-       handles->devattr.show = sony_nc_handles_show;
-
        for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
                if (!acpi_callsetfunc(sony_nc_acpi_handle,
                                        "SN00", i + 0x20, &result)) {
@@ -785,11 +782,18 @@ static int sony_nc_handles_setup(struct platform_device *pd)
                }
        }
 
-       /* allow reading capabilities via sysfs */
-       if (device_create_file(&pd->dev, &handles->devattr)) {
-               kfree(handles);
-               handles = NULL;
-               return -1;
+       if (debug) {
+               sysfs_attr_init(&handles->devattr.attr);
+               handles->devattr.attr.name = "handles";
+               handles->devattr.attr.mode = S_IRUGO;
+               handles->devattr.show = sony_nc_handles_show;
+
+               /* allow reading capabilities via sysfs */
+               if (device_create_file(&pd->dev, &handles->devattr)) {
+                       kfree(handles);
+                       handles = NULL;
+                       return -1;
+               }
        }
 
        return 0;
@@ -798,7 +802,8 @@ static int sony_nc_handles_setup(struct platform_device *pd)
 static int sony_nc_handles_cleanup(struct platform_device *pd)
 {
        if (handles) {
-               device_remove_file(&pd->dev, &handles->devattr);
+               if (debug)
+                       device_remove_file(&pd->dev, &handles->devattr);
                kfree(handles);
                handles = NULL;
        }
@@ -808,6 +813,11 @@ static int sony_nc_handles_cleanup(struct platform_device *pd)
 static int sony_find_snc_handle(int handle)
 {
        int i;
+
+       /* not initialized yet, return early */
+       if (!handles)
+               return -1;
+
        for (i = 0; i < 0x10; i++) {
                if (handles->cap[i] == handle) {
                        dprintk("found handle 0x%.4x (offset: 0x%.2x)\n",
@@ -1168,6 +1178,9 @@ static int sony_nc_resume(struct acpi_device *device)
        /* re-read rfkill state */
        sony_nc_rfkill_update();
 
+       /* restore kbd backlight states */
+       sony_nc_kbd_backlight_resume();
+
        return 0;
 }
 
@@ -1355,6 +1368,7 @@ out_no_enum:
 #define KBDBL_HANDLER  0x137
 #define KBDBL_PRESENT  0xB00
 #define        SET_MODE        0xC00
+#define SET_STATE      0xD00
 #define SET_TIMEOUT    0xE00
 
 struct kbd_backlight {
@@ -1377,6 +1391,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
                                (value << 0x10) | SET_MODE, &result))
                return -EIO;
 
+       /* Try to turn the light on/off immediately */
+       sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
+                       &result);
+
        kbdbl_handle->mode = value;
 
        return 0;
@@ -1458,7 +1476,7 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
 {
        int result;
 
-       if (sony_call_snc_handle(0x137, KBDBL_PRESENT, &result))
+       if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
                return 0;
        if (!(result & 0x02))
                return 0;
@@ -1501,13 +1519,36 @@ outkzalloc:
 static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
 {
        if (kbdbl_handle) {
+               int result;
+
                device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
                device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
+
+               /* restore the default hw behaviour */
+               sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
+               sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
+
                kfree(kbdbl_handle);
        }
        return 0;
 }
 
+static void sony_nc_kbd_backlight_resume(void)
+{
+       int ignore = 0;
+
+       if (!kbdbl_handle)
+               return;
+
+       if (kbdbl_handle->mode == 0)
+               sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);
+
+       if (kbdbl_handle->timeout != 0)
+               sony_call_snc_handle(KBDBL_HANDLER,
+                               (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
+                               &ignore);
+}
+
 static void sony_nc_backlight_setup(void)
 {
        acpi_handle unused;
index a08561f..efb3b6b 100644 (file)
@@ -8618,8 +8618,7 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
                tpacpi_is_fw_digit(s[1]) &&
                s[2] == t && s[3] == 'T' &&
                tpacpi_is_fw_digit(s[4]) &&
-               tpacpi_is_fw_digit(s[5]) &&
-               s[6] == 'W' && s[7] == 'W';
+               tpacpi_is_fw_digit(s[5]);
 }
 
 /* returns 0 - probe ok, or < 0 - probe error.
index c29719c..86c9a09 100644 (file)
@@ -1171,16 +1171,17 @@ static int rio_hdid_setup(char *str)
 
 __setup("riohdid=", rio_hdid_setup);
 
-void rio_register_mport(struct rio_mport *port)
+int rio_register_mport(struct rio_mport *port)
 {
        if (next_portid >= RIO_MAX_MPORTS) {
                pr_err("RIO: reached specified max number of mports\n");
-               return;
+               return 1;
        }
 
        port->id = next_portid++;
        port->host_deviceid = rio_get_hdid(port->id);
        list_add_tail(&port->node, &rio_mports);
+       return 0;
 }
 
 EXPORT_SYMBOL_GPL(rio_local_get_device_id);
index 095016a..ac2701b 100644 (file)
@@ -418,3 +418,4 @@ DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1432, idtg2_switch_init);
index c420064..c5ac037 100644 (file)
@@ -401,6 +401,7 @@ const struct platform_device_id mc13xxx_rtc_idtable[] = {
        }, {
                .name = "mc13892-rtc",
        },
+       { }
 };
 
 static struct platform_driver mc13xxx_rtc_driver = {
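
The one-line change adds an empty sentinel entry to mc13xxx_rtc_idtable so code that walks the table knows where it ends. A sketch of why such ID tables need the terminator, using stand-in types:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for struct platform_device_id. */
struct dev_id {
        const char *name;
};

/* Table terminated by an all-zero sentinel entry, mirroring the
 * appended "{ }" so iteration knows where the table ends. */
static const struct dev_id rtc_idtable[] = {
        { .name = "mc13783-rtc" },
        { .name = "mc13892-rtc" },
        { }                             /* sentinel: name == NULL */
};

static const struct dev_id *match_id(const struct dev_id *table, const char *name)
{
        for (; table->name; table++)    /* walks until the sentinel */
                if (strcmp(table->name, name) == 0)
                        return table;
        return NULL;
}

int main(void)
{
        printf("%s\n", match_id(rtc_idtable, "mc13892-rtc") ? "found" : "not found");
        printf("%s\n", match_id(rtc_idtable, "bogus-rtc") ? "found" : "not found");
        return 0;
}
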
index 5825370..08de58e 100644 (file)
@@ -1555,7 +1555,7 @@ static int stop_queue(struct pl022 *pl022)
         * A wait_queue on the pl022->busy could be used, but then the common
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead */
-       while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
+       while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) {
                spin_unlock_irqrestore(&pl022->queue_lock, flags);
                msleep(10);
                spin_lock_irqsave(&pl022->queue_lock, flags);
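
This and the following three SPI drivers (dw_spi, pxa2xx, bfin) apply the same fix: the drain loop must keep polling while the queue is non-empty or the controller is still busy; the old '&&' let stop_queue() return with work still in flight. A sketch of the corrected loop condition, with the locking and msleep() replaced by stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative queue state; in the drivers this is protected by a spinlock. */
static int queued_msgs = 3;
static bool controller_busy = true;

static void service_one_step(void)
{
        if (queued_msgs > 0)
                queued_msgs--;
        else
                controller_busy = false;
}

/* Keep waiting while there is queued work OR the controller is mid-transfer;
 * '&&' here would stop polling as soon as either condition cleared. */
static int stop_queue(void)
{
        int limit = 500;

        while ((queued_msgs > 0 || controller_busy) && limit--)
                service_one_step();     /* stands in for msleep(10) + re-check */

        return (queued_msgs > 0 || controller_busy) ? -1 : 0;
}

int main(void)
{
        printf("stop_queue() = %d\n", stop_queue());
        return 0;
}
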
index b1a4b9f..871e337 100644 (file)
@@ -821,7 +821,7 @@ static int stop_queue(struct dw_spi *dws)
 
        spin_lock_irqsave(&dws->lock, flags);
        dws->run = QUEUE_STOPPED;
-       while (!list_empty(&dws->queue) && dws->busy && limit--) {
+       while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
                spin_unlock_irqrestore(&dws->lock, flags);
                msleep(10);
                spin_lock_irqsave(&dws->lock, flags);
index 9c74aad..dc25bee 100644 (file)
@@ -1493,7 +1493,7 @@ static int stop_queue(struct driver_data *drv_data)
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead */
        drv_data->run = QUEUE_STOPPED;
-       while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+       while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                msleep(10);
                spin_lock_irqsave(&drv_data->lock, flags);
index bdb7289..f706dba 100644 (file)
@@ -1284,7 +1284,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
         * friends on every SPI message. Do this instead
         */
        drv_data->running = false;
-       while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+       while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                msleep(10);
                spin_lock_irqsave(&drv_data->lock, flags);
index dca4a0b..e3786f1 100644 (file)
@@ -131,8 +131,6 @@ source "drivers/staging/wlags49_h2/Kconfig"
 
 source "drivers/staging/wlags49_h25/Kconfig"
 
-source "drivers/staging/samsung-laptop/Kconfig"
-
 source "drivers/staging/sm7xx/Kconfig"
 
 source "drivers/staging/dt3155v4l/Kconfig"
index eb93012..f0d5c53 100644 (file)
@@ -48,7 +48,6 @@ obj-$(CONFIG_XVMALLOC)                += zram/
 obj-$(CONFIG_ZCACHE)           += zcache/
 obj-$(CONFIG_WLAGS49_H2)       += wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25)      += wlags49_h25/
-obj-$(CONFIG_SAMSUNG_LAPTOP)   += samsung-laptop/
 obj-$(CONFIG_FB_SM7XX)         += sm7xx/
 obj-$(CONFIG_VIDEO_DT3155)     += dt3155v4l/
 obj-$(CONFIG_CRYSTALHD)                += crystalhd/
diff --git a/drivers/staging/samsung-laptop/Kconfig b/drivers/staging/samsung-laptop/Kconfig
deleted file mode 100644 (file)
index f27c608..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-config SAMSUNG_LAPTOP
-       tristate "Samsung Laptop driver"
-       default n
-       depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86
-       help
-         This module implements a driver for the N128 Samsung Laptop
-         providing control over the Wireless LED and the LCD backlight
-
-         To compile this driver as a module, choose
-         M here: the module will be called samsung-laptop.
diff --git a/drivers/staging/samsung-laptop/Makefile b/drivers/staging/samsung-laptop/Makefile
deleted file mode 100644 (file)
index 3c6f420..0000000
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SAMSUNG_LAPTOP)   += samsung-laptop.o
diff --git a/drivers/staging/samsung-laptop/TODO b/drivers/staging/samsung-laptop/TODO
deleted file mode 100644 (file)
index f7a6d58..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-TODO:
-       - review from other developers
-       - figure out ACPI video issues
-
-Please send patches to Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
deleted file mode 100644 (file)
index 2529446..0000000
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * Samsung Laptop driver
- *
- * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de)
- * Copyright (C) 2009,2011 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/dmi.h>
-#include <linux/platform_device.h>
-#include <linux/rfkill.h>
-
-/*
- * This driver is needed because a number of Samsung laptops do not hook
- * their control settings through ACPI.  So we have to poke around in the
- * BIOS to do things like brightness values, and "special" key controls.
- */
-
-/*
- * We have 0 - 8 as valid brightness levels.  The specs say that level 0 should
- * be reserved by the BIOS (which really doesn't make much sense), we tell
- * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8
- */
-#define MAX_BRIGHT     0x07
-
-
-#define SABI_IFACE_MAIN                        0x00
-#define SABI_IFACE_SUB                 0x02
-#define SABI_IFACE_COMPLETE            0x04
-#define SABI_IFACE_DATA                        0x05
-
-/* Structure to get data back to the calling function */
-struct sabi_retval {
-       u8 retval[20];
-};
-
-struct sabi_header_offsets {
-       u8 port;
-       u8 re_mem;
-       u8 iface_func;
-       u8 en_mem;
-       u8 data_offset;
-       u8 data_segment;
-};
-
-struct sabi_commands {
-       /*
-        * Brightness is 0 - 8, as described above.
-        * Value 0 is for the BIOS to use
-        */
-       u8 get_brightness;
-       u8 set_brightness;
-
-       /*
-        * first byte:
-        * 0x00 - wireless is off
-        * 0x01 - wireless is on
-        * second byte:
-        * 0x02 - 3G is off
-        * 0x03 - 3G is on
-        * TODO, verify 3G is correct, that doesn't seem right...
-        */
-       u8 get_wireless_button;
-       u8 set_wireless_button;
-
-       /* 0 is off, 1 is on */
-       u8 get_backlight;
-       u8 set_backlight;
-
-       /*
-        * 0x80 or 0x00 - no action
-        * 0x81 - recovery key pressed
-        */
-       u8 get_recovery_mode;
-       u8 set_recovery_mode;
-
-       /*
-        * on seclinux: 0 is low, 1 is high,
-        * on swsmi: 0 is normal, 1 is silent, 2 is turbo
-        */
-       u8 get_performance_level;
-       u8 set_performance_level;
-
-       /*
-        * Tell the BIOS that Linux is running on this machine.
-        * 81 is on, 80 is off
-        */
-       u8 set_linux;
-};
-
-struct sabi_performance_level {
-       const char *name;
-       u8 value;
-};
-
-struct sabi_config {
-       const char *test_string;
-       u16 main_function;
-       const struct sabi_header_offsets header_offsets;
-       const struct sabi_commands commands;
-       const struct sabi_performance_level performance_levels[4];
-       u8 min_brightness;
-       u8 max_brightness;
-};
-
-static const struct sabi_config sabi_configs[] = {
-       {
-               .test_string = "SECLINUX",
-
-               .main_function = 0x4c49,
-
-               .header_offsets = {
-                       .port = 0x00,
-                       .re_mem = 0x02,
-                       .iface_func = 0x03,
-                       .en_mem = 0x04,
-                       .data_offset = 0x05,
-                       .data_segment = 0x07,
-               },
-
-               .commands = {
-                       .get_brightness = 0x00,
-                       .set_brightness = 0x01,
-
-                       .get_wireless_button = 0x02,
-                       .set_wireless_button = 0x03,
-
-                       .get_backlight = 0x04,
-                       .set_backlight = 0x05,
-
-                       .get_recovery_mode = 0x06,
-                       .set_recovery_mode = 0x07,
-
-                       .get_performance_level = 0x08,
-                       .set_performance_level = 0x09,
-
-                       .set_linux = 0x0a,
-               },
-
-               .performance_levels = {
-                       {
-                               .name = "silent",
-                               .value = 0,
-                       },
-                       {
-                               .name = "normal",
-                               .value = 1,
-                       },
-                       { },
-               },
-               .min_brightness = 1,
-               .max_brightness = 8,
-       },
-       {
-               .test_string = "SwSmi@",
-
-               .main_function = 0x5843,
-
-               .header_offsets = {
-                       .port = 0x00,
-                       .re_mem = 0x04,
-                       .iface_func = 0x02,
-                       .en_mem = 0x03,
-                       .data_offset = 0x05,
-                       .data_segment = 0x07,
-               },
-
-               .commands = {
-                       .get_brightness = 0x10,
-                       .set_brightness = 0x11,
-
-                       .get_wireless_button = 0x12,
-                       .set_wireless_button = 0x13,
-
-                       .get_backlight = 0x2d,
-                       .set_backlight = 0x2e,
-
-                       .get_recovery_mode = 0xff,
-                       .set_recovery_mode = 0xff,
-
-                       .get_performance_level = 0x31,
-                       .set_performance_level = 0x32,
-
-                       .set_linux = 0xff,
-               },
-
-               .performance_levels = {
-                       {
-                               .name = "normal",
-                               .value = 0,
-                       },
-                       {
-                               .name = "silent",
-                               .value = 1,
-                       },
-                       {
-                               .name = "overclock",
-                               .value = 2,
-                       },
-                       { },
-               },
-               .min_brightness = 0,
-               .max_brightness = 8,
-       },
-       { },
-};
-
-static const struct sabi_config *sabi_config;
-
-static void __iomem *sabi;
-static void __iomem *sabi_iface;
-static void __iomem *f0000_segment;
-static struct backlight_device *backlight_device;
-static struct mutex sabi_mutex;
-static struct platform_device *sdev;
-static struct rfkill *rfk;
-
-static int force;
-module_param(force, bool, 0);
-MODULE_PARM_DESC(force,
-               "Disable the DMI check and force the driver to be loaded");
-
-static int debug;
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-
-static int sabi_get_command(u8 command, struct sabi_retval *sretval)
-{
-       int retval = 0;
-       u16 port = readw(sabi + sabi_config->header_offsets.port);
-       u8 complete, iface_data;
-
-       mutex_lock(&sabi_mutex);
-
-       /* enable memory to be able to write to it */
-       outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
-
-       /* write out the command */
-       writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
-       writew(command, sabi_iface + SABI_IFACE_SUB);
-       writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
-       outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
-
-       /* write protect memory to make it safe */
-       outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
-
-       /* see if the command actually succeeded */
-       complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
-       iface_data = readb(sabi_iface + SABI_IFACE_DATA);
-       if (complete != 0xaa || iface_data == 0xff) {
-               pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
-                       command, complete, iface_data);
-               retval = -EINVAL;
-               goto exit;
-       }
-       /*
-        * Save off the data into a structure so the caller can use it.
-        * Right now we only want the first 4 bytes; there are commands that
-        * need more, but not for the ones we currently care about.
-        */
-       sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA);
-       sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1);
-       sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2);
-       sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3);
-
-exit:
-       mutex_unlock(&sabi_mutex);
-       return retval;
-
-}
-
-static int sabi_set_command(u8 command, u8 data)
-{
-       int retval = 0;
-       u16 port = readw(sabi + sabi_config->header_offsets.port);
-       u8 complete, iface_data;
-
-       mutex_lock(&sabi_mutex);
-
-       /* enable memory to be able to write to it */
-       outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
-
-       /* write out the command */
-       writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
-       writew(command, sabi_iface + SABI_IFACE_SUB);
-       writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
-       writeb(data, sabi_iface + SABI_IFACE_DATA);
-       outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
-
-       /* write protect memory to make it safe */
-       outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
-
-       /* see if the command actually succeeded */
-       complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
-       iface_data = readb(sabi_iface + SABI_IFACE_DATA);
-       if (complete != 0xaa || iface_data == 0xff) {
-               pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
-                      command, complete, iface_data);
-               retval = -EINVAL;
-       }
-
-       mutex_unlock(&sabi_mutex);
-       return retval;
-}
-
-static void test_backlight(void)
-{
-       struct sabi_retval sretval;
-
-       sabi_get_command(sabi_config->commands.get_backlight, &sretval);
-       printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-
-       sabi_set_command(sabi_config->commands.set_backlight, 0);
-       printk(KERN_DEBUG "backlight should be off\n");
-
-       sabi_get_command(sabi_config->commands.get_backlight, &sretval);
-       printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-
-       msleep(1000);
-
-       sabi_set_command(sabi_config->commands.set_backlight, 1);
-       printk(KERN_DEBUG "backlight should be on\n");
-
-       sabi_get_command(sabi_config->commands.get_backlight, &sretval);
-       printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-}
-
-static void test_wireless(void)
-{
-       struct sabi_retval sretval;
-
-       sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
-       printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-
-       sabi_set_command(sabi_config->commands.set_wireless_button, 0);
-       printk(KERN_DEBUG "wireless led should be off\n");
-
-       sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
-       printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-
-       msleep(1000);
-
-       sabi_set_command(sabi_config->commands.set_wireless_button, 1);
-       printk(KERN_DEBUG "wireless led should be on\n");
-
-       sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
-       printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-}
-
-static u8 read_brightness(void)
-{
-       struct sabi_retval sretval;
-       int user_brightness = 0;
-       int retval;
-
-       retval = sabi_get_command(sabi_config->commands.get_brightness,
-                                 &sretval);
-       if (!retval) {
-               user_brightness = sretval.retval[0];
-               if (user_brightness != 0)
-                       user_brightness -= sabi_config->min_brightness;
-       }
-       return user_brightness;
-}
-
-static void set_brightness(u8 user_brightness)
-{
-       u8 user_level = user_brightness - sabi_config->min_brightness;
-
-       sabi_set_command(sabi_config->commands.set_brightness, user_level);
-}
-
-static int get_brightness(struct backlight_device *bd)
-{
-       return (int)read_brightness();
-}
-
-static int update_status(struct backlight_device *bd)
-{
-       set_brightness(bd->props.brightness);
-
-       if (bd->props.power == FB_BLANK_UNBLANK)
-               sabi_set_command(sabi_config->commands.set_backlight, 1);
-       else
-               sabi_set_command(sabi_config->commands.set_backlight, 0);
-       return 0;
-}
-
-static const struct backlight_ops backlight_ops = {
-       .get_brightness = get_brightness,
-       .update_status  = update_status,
-};
-
-static int rfkill_set(void *data, bool blocked)
-{
-       /* Do something with blocked...*/
-       /*
-        * blocked == false is on
-        * blocked == true is off
-        */
-       if (blocked)
-               sabi_set_command(sabi_config->commands.set_wireless_button, 0);
-       else
-               sabi_set_command(sabi_config->commands.set_wireless_button, 1);
-
-       return 0;
-}
-
-static struct rfkill_ops rfkill_ops = {
-       .set_block = rfkill_set,
-};
-
-static int init_wireless(struct platform_device *sdev)
-{
-       int retval;
-
-       rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN,
-                          &rfkill_ops, NULL);
-       if (!rfk)
-               return -ENOMEM;
-
-       retval = rfkill_register(rfk);
-       if (retval) {
-               rfkill_destroy(rfk);
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static void destroy_wireless(void)
-{
-       rfkill_unregister(rfk);
-       rfkill_destroy(rfk);
-}
-
-static ssize_t get_performance_level(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
-{
-       struct sabi_retval sretval;
-       int retval;
-       int i;
-
-       /* Read the state */
-       retval = sabi_get_command(sabi_config->commands.get_performance_level,
-                                 &sretval);
-       if (retval)
-               return retval;
-
-       /* The logic is backwards, yeah, lots of fun... */
-       for (i = 0; sabi_config->performance_levels[i].name; ++i) {
-               if (sretval.retval[0] == sabi_config->performance_levels[i].value)
-                       return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name);
-       }
-       return sprintf(buf, "%s\n", "unknown");
-}
-
-static ssize_t set_performance_level(struct device *dev,
-                               struct device_attribute *attr, const char *buf,
-                               size_t count)
-{
-       if (count >= 1) {
-               int i;
-               for (i = 0; sabi_config->performance_levels[i].name; ++i) {
-                       const struct sabi_performance_level *level =
-                               &sabi_config->performance_levels[i];
-                       if (!strncasecmp(level->name, buf, strlen(level->name))) {
-                               sabi_set_command(sabi_config->commands.set_performance_level,
-                                                level->value);
-                               break;
-                       }
-               }
-               if (!sabi_config->performance_levels[i].name)
-                       return -EINVAL;
-       }
-       return count;
-}
-static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
-                  get_performance_level, set_performance_level);
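
For reference, the attribute created above is matched case-insensitively against the
names in sabi_config->performance_levels, so it can be driven from userspace through
sysfs.  A minimal userspace sketch, assuming the /sys/devices/platform/samsung path
implied by the platform device registered further down (illustrative only, not part
of the driver):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* hypothetical path, derived from platform_device_register_simple("samsung", ...) */
            const char *attr = "/sys/devices/platform/samsung/performance_level";
            int fd = open(attr, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* "silent" is matched case-insensitively against the level table */
            if (write(fd, "silent", 6) != 6)
                    perror("write");
            close(fd);
            return 0;
    }

Reading the same file back returns the name of the currently active level, or
"unknown" if the value reported by SABI is not in the table.
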
-
-
-static int __init dmi_check_cb(const struct dmi_system_id *id)
-{
-       pr_info("found laptop model '%s'\n",
-               id->ident);
-       return 0;
-}
-
-static struct dmi_system_id __initdata samsung_dmi_table[] = {
-       {
-               .ident = "N128",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N128"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N130",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N130"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "X125",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
-                       DMI_MATCH(DMI_BOARD_NAME, "X125"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "X120/X170",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
-                       DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "NC10",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
-                       DMI_MATCH(DMI_BOARD_NAME, "NC10"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "NP-Q45",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
-                       DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "X360",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
-                       DMI_MATCH(DMI_BOARD_NAME, "X360"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R410 Plus",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R460"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R518",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R518"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R519/R719",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N150/N210/N220/N230",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N150P/N210P/N220P",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R530/R730",
-               .matches = {
-                     DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                     DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
-                     DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "NF110/NF210/NF310",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
-                       DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N145P/N250P/N260P",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R70/R71",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "P460",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
-                       DMI_MATCH(DMI_BOARD_NAME, "P460"),
-               },
-               .callback = dmi_check_cb,
-       },
-       { },
-};
-MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
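
Support for additional models is added by extending the table above with one more
entry before the terminating { } sentinel; the fragment below shows that pattern with
a made-up product name ("EXAMPLE-MODEL" is hypothetical, purely for illustration):

    {
            .ident = "EXAMPLE-MODEL",
            .matches = {
                    DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
                    DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE-MODEL"),
                    DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE-MODEL"),
            },
            .callback = dmi_check_cb,
    },
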
-
-static int find_signature(void __iomem *memcheck, const char *testStr)
-{
-       int i = 0;
-       int loca;
-
-       for (loca = 0; loca < 0xffff; loca++) {
-               char temp = readb(memcheck + loca);
-
-               if (temp == testStr[i]) {
-                       if (i == strlen(testStr)-1)
-                               break;
-                       ++i;
-               } else {
-                       i = 0;
-               }
-       }
-       return loca;
-}
-
-static int __init samsung_init(void)
-{
-       struct backlight_properties props;
-       struct sabi_retval sretval;
-       unsigned int ifaceP;
-       int i;
-       int loca;
-       int retval;
-
-       mutex_init(&sabi_mutex);
-
-       if (!force && !dmi_check_system(samsung_dmi_table))
-               return -ENODEV;
-
-       f0000_segment = ioremap_nocache(0xf0000, 0xffff);
-       if (!f0000_segment) {
-               pr_err("Can't map the segment at 0xf0000\n");
-               return -EINVAL;
-       }
-
-       /* Try to find one of the signatures in memory to find the header */
-       for (i = 0; sabi_configs[i].test_string != 0; ++i) {
-               sabi_config = &sabi_configs[i];
-               loca = find_signature(f0000_segment, sabi_config->test_string);
-               if (loca != 0xffff)
-                       break;
-       }
-
-       if (loca == 0xffff) {
-               pr_err("This computer does not support SABI\n");
-               goto error_no_signature;
-       }
-
-       /* point to the SMI port Number */
-       loca += 1;
-       sabi = (f0000_segment + loca);
-
-       if (debug) {
-               printk(KERN_DEBUG "This computer supports SABI==%x\n",
-                       loca + 0xf0000 - 6);
-               printk(KERN_DEBUG "SABI header:\n");
-               printk(KERN_DEBUG " SMI Port Number = 0x%04x\n",
-                       readw(sabi + sabi_config->header_offsets.port));
-               printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n",
-                       readb(sabi + sabi_config->header_offsets.iface_func));
-               printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n",
-                       readb(sabi + sabi_config->header_offsets.en_mem));
-               printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n",
-                       readb(sabi + sabi_config->header_offsets.re_mem));
-               printk(KERN_DEBUG " SABI data offset = 0x%04x\n",
-                       readw(sabi + sabi_config->header_offsets.data_offset));
-               printk(KERN_DEBUG " SABI data segment = 0x%04x\n",
-                       readw(sabi + sabi_config->header_offsets.data_segment));
-       }
-
-       /* Get a pointer to the SABI Interface */
-       ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4;
-       ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff;
-       sabi_iface = ioremap_nocache(ifaceP, 16);
-       if (!sabi_iface) {
-               pr_err("Can't remap %x\n", ifaceP);
-               goto exit;
-       }
-       if (debug) {
-               printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
-               printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface);
-
-               test_backlight();
-               test_wireless();
-
-               retval = sabi_get_command(sabi_config->commands.get_brightness,
-                                         &sretval);
-               printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]);
-       }
-
-       /* Turn on "Linux" mode in the BIOS */
-       if (sabi_config->commands.set_linux != 0xff) {
-               retval = sabi_set_command(sabi_config->commands.set_linux,
-                                         0x81);
-               if (retval) {
-                       pr_warn("Linux mode was not set!\n");
-                       goto error_no_platform;
-               }
-       }
-
-       /* knock up a platform device to hang stuff off of */
-       sdev = platform_device_register_simple("samsung", -1, NULL, 0);
-       if (IS_ERR(sdev))
-               goto error_no_platform;
-
-       /* create a backlight device to talk to this one */
-       memset(&props, 0, sizeof(struct backlight_properties));
-       props.type = BACKLIGHT_PLATFORM;
-       props.max_brightness = sabi_config->max_brightness;
-       backlight_device = backlight_device_register("samsung", &sdev->dev,
-                                                    NULL, &backlight_ops,
-                                                    &props);
-       if (IS_ERR(backlight_device))
-               goto error_no_backlight;
-
-       backlight_device->props.brightness = read_brightness();
-       backlight_device->props.power = FB_BLANK_UNBLANK;
-       backlight_update_status(backlight_device);
-
-       retval = init_wireless(sdev);
-       if (retval)
-               goto error_no_rfk;
-
-       retval = device_create_file(&sdev->dev, &dev_attr_performance_level);
-       if (retval)
-               goto error_file_create;
-
-exit:
-       return 0;
-
-error_file_create:
-       destroy_wireless();
-
-error_no_rfk:
-       backlight_device_unregister(backlight_device);
-
-error_no_backlight:
-       platform_device_unregister(sdev);
-
-error_no_platform:
-       iounmap(sabi_iface);
-
-error_no_signature:
-       iounmap(f0000_segment);
-       return -EINVAL;
-}
-
-static void __exit samsung_exit(void)
-{
-       /* Turn off "Linux" mode in the BIOS */
-       if (sabi_config->commands.set_linux != 0xff)
-               sabi_set_command(sabi_config->commands.set_linux, 0x80);
-
-       device_remove_file(&sdev->dev, &dev_attr_performance_level);
-       backlight_device_unregister(backlight_device);
-       destroy_wireless();
-       iounmap(sabi_iface);
-       iounmap(f0000_segment);
-       platform_device_unregister(sdev);
-}
-
-module_init(samsung_init);
-module_exit(samsung_exit);
-
-MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
-MODULE_DESCRIPTION("Samsung Backlight driver");
-MODULE_LICENSE("GPL");
index 42d6c93..33167b4 100644 (file)
@@ -912,8 +912,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
 {
-       unsigned int irq;
-       int retval;
+       int irq, retval;
 
        irq = bind_evtchn_to_irq(evtchn);
        if (irq < 0)
@@ -955,8 +954,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
 {
-       unsigned int irq;
-       int retval;
+       int irq, retval;
 
        irq = bind_virq_to_irq(virq, cpu);
        if (irq < 0)
index 95143dd..1ac9412 100644 (file)
@@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled)
        xen_mm_unpin_all();
 }
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 static int xen_suspend(void *data)
 {
        struct suspend_info *si = data;
@@ -173,7 +173,7 @@ out:
 #endif
        shutting_down = SHUTDOWN_INVALID;
 }
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
 
 struct shutdown_handler {
        const char *command;
@@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch,
                { "poweroff",   do_poweroff },
                { "halt",       do_poweroff },
                { "reboot",     do_reboot   },
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
                { "suspend",    do_suspend  },
 #endif
                {NULL, NULL},
index f34078d..303983f 100644 (file)
@@ -941,9 +941,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        current->mm->start_stack = bprm->p;
 
 #ifdef arch_randomize_brk
-       if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
+       if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
                current->mm->brk = current->mm->start_brk =
                        arch_randomize_brk(current->mm);
+#ifdef CONFIG_COMPAT_BRK
+               current->brk_randomized = 1;
+#endif
+       }
 #endif
 
        if (current->personality & MMAP_PAGE_ZERO) {
index fe16835..74ab165 100644 (file)
@@ -685,22 +685,6 @@ LinuxExtensionsEnabled     If set to one then the client will attempt to
                        support and want to map the uid and gid fields 
                        to values supplied at mount (rather than the 
                        actual values, then set this to zero. (default 1)
-Experimental            When set to 1 used to enable certain experimental
-                       features (currently enables multipage writes
-                       when signing is enabled, the multipage write
-                       performance enhancement was disabled when
-                       signing turned on in case buffer was modified
-                       just before it was sent, also this flag will
-                       be used to use the new experimental directory change 
-                       notification code).  When set to 2 enables
-                       an additional experimental feature, "raw ntlmssp"
-                       session establishment support (which allows
-                       specifying "sec=ntlmssp" on mount). The Linux cifs
-                       module will use ntlmv2 authentication encapsulated
-                       in "raw ntlmssp" (not using SPNEGO) when
-                       "sec=ntlmssp" is specified on mount.
-                       This support also requires building cifs with
-                       the CONFIG_CIFS_EXPERIMENTAL configuration flag.
 
 These experimental features and tracing can be enabled by changing flags in 
 /proc/fs/cifs (after the cifs module has been installed or built into the 
index e654dfd..53d57a3 100644 (file)
@@ -50,7 +50,7 @@ void cifs_fscache_unregister(void)
  */
 struct cifs_server_key {
        uint16_t        family;         /* address family */
-       uint16_t        port;           /* IP port */
+       __be16          port;           /* IP port */
        union {
                struct in_addr  ipv4_addr;
                struct in6_addr ipv6_addr;
index 65829d3..30d01bc 100644 (file)
@@ -423,7 +423,6 @@ static const struct file_operations cifs_lookup_cache_proc_fops;
 static const struct file_operations traceSMB_proc_fops;
 static const struct file_operations cifs_multiuser_mount_proc_fops;
 static const struct file_operations cifs_security_flags_proc_fops;
-static const struct file_operations cifs_experimental_proc_fops;
 static const struct file_operations cifs_linux_ext_proc_fops;
 
 void
@@ -441,8 +440,6 @@ cifs_proc_init(void)
        proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
        proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
        proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
-       proc_create("Experimental", 0, proc_fs_cifs,
-                   &cifs_experimental_proc_fops);
        proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
                    &cifs_linux_ext_proc_fops);
        proc_create("MultiuserMount", 0, proc_fs_cifs,
@@ -469,7 +466,6 @@ cifs_proc_clean(void)
        remove_proc_entry("OplockEnabled", proc_fs_cifs);
        remove_proc_entry("SecurityFlags", proc_fs_cifs);
        remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
-       remove_proc_entry("Experimental", proc_fs_cifs);
        remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
        remove_proc_entry("fs/cifs", NULL);
 }
@@ -550,45 +546,6 @@ static const struct file_operations cifs_oplock_proc_fops = {
        .write          = cifs_oplock_proc_write,
 };
 
-static int cifs_experimental_proc_show(struct seq_file *m, void *v)
-{
-       seq_printf(m, "%d\n", experimEnabled);
-       return 0;
-}
-
-static int cifs_experimental_proc_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, cifs_experimental_proc_show, NULL);
-}
-
-static ssize_t cifs_experimental_proc_write(struct file *file,
-               const char __user *buffer, size_t count, loff_t *ppos)
-{
-       char c;
-       int rc;
-
-       rc = get_user(c, buffer);
-       if (rc)
-               return rc;
-       if (c == '0' || c == 'n' || c == 'N')
-               experimEnabled = 0;
-       else if (c == '1' || c == 'y' || c == 'Y')
-               experimEnabled = 1;
-       else if (c == '2')
-               experimEnabled = 2;
-
-       return count;
-}
-
-static const struct file_operations cifs_experimental_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = cifs_experimental_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = cifs_experimental_proc_write,
-};
-
 static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
 {
        seq_printf(m, "%d\n", linuxExtEnabled);
index 4dfba82..33d2213 100644 (file)
@@ -113,7 +113,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
                   MAX_MECH_STR_LEN +
                   UID_KEY_LEN + (sizeof(uid_t) * 2) +
                   CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
-                  USER_KEY_LEN + strlen(sesInfo->userName) +
+                  USER_KEY_LEN + strlen(sesInfo->user_name) +
                   PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
 
        spnego_key = ERR_PTR(-ENOMEM);
@@ -153,7 +153,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
        sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
 
        dp = description + strlen(description);
-       sprintf(dp, ";user=%s", sesInfo->userName);
+       sprintf(dp, ";user=%s", sesInfo->user_name);
 
        dp = description + strlen(description);
        sprintf(dp, ";pid=0x%x", current->pid);
index fc0fd4f..23d43cd 100644 (file)
@@ -90,7 +90,7 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
        case UNI_COLON:
                *target = ':';
                break;
-       case UNI_ASTERIK:
+       case UNI_ASTERISK:
                *target = '*';
                break;
        case UNI_QUESTION:
@@ -264,40 +264,40 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
  * names are little endian 16 bit Unicode on the wire
  */
 int
-cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+cifsConvertToUCS(__le16 *target, const char *source, int srclen,
                 const struct nls_table *cp, int mapChars)
 {
        int i, j, charlen;
-       int len_remaining = maxlen;
        char src_char;
-       __u16 temp;
+       __le16 dst_char;
+       wchar_t tmp;
 
        if (!mapChars)
                return cifs_strtoUCS(target, source, PATH_MAX, cp);
 
-       for (i = 0, j = 0; i < maxlen; j++) {
+       for (i = 0, j = 0; i < srclen; j++) {
                src_char = source[i];
                switch (src_char) {
                case 0:
-                       put_unaligned_le16(0, &target[j]);
+                       put_unaligned(0, &target[j]);
                        goto ctoUCS_out;
                case ':':
-                       temp = UNI_COLON;
+                       dst_char = cpu_to_le16(UNI_COLON);
                        break;
                case '*':
-                       temp = UNI_ASTERIK;
+                       dst_char = cpu_to_le16(UNI_ASTERISK);
                        break;
                case '?':
-                       temp = UNI_QUESTION;
+                       dst_char = cpu_to_le16(UNI_QUESTION);
                        break;
                case '<':
-                       temp = UNI_LESSTHAN;
+                       dst_char = cpu_to_le16(UNI_LESSTHAN);
                        break;
                case '>':
-                       temp = UNI_GRTRTHAN;
+                       dst_char = cpu_to_le16(UNI_GRTRTHAN);
                        break;
                case '|':
-                       temp = UNI_PIPE;
+                       dst_char = cpu_to_le16(UNI_PIPE);
                        break;
                /*
                 * FIXME: We can not handle remapping backslash (UNI_SLASH)
@@ -305,17 +305,17 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
                 * as they use backslash as separator.
                 */
                default:
-                       charlen = cp->char2uni(source+i, len_remaining,
-                                               &temp);
+                       charlen = cp->char2uni(source + i, srclen - i, &tmp);
+                       dst_char = cpu_to_le16(tmp);
+
                        /*
                         * if no match, use question mark, which at least in
                         * some cases serves as wild card
                         */
                        if (charlen < 1) {
-                               temp = 0x003f;
+                               dst_char = cpu_to_le16(0x003f);
                                charlen = 1;
                        }
-                       len_remaining -= charlen;
                        /*
                         * character may take more than one byte in the source
                         * string, but will take exactly two bytes in the
@@ -324,9 +324,8 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
                        i += charlen;
                        continue;
                }
-               put_unaligned_le16(temp, &target[j]);
+               put_unaligned(dst_char, &target[j]);
                i++; /* move to next char in source string */
-               len_remaining--;
        }
 
 ctoUCS_out:
index 7fe6b52..644dd88 100644 (file)
@@ -44,7 +44,7 @@
  * reserved symbols (along with \ and /), otherwise illegal to store
  * in filenames in NTFS
  */
-#define UNI_ASTERIK     (__u16) ('*' + 0xF000)
+#define UNI_ASTERISK    (__u16) ('*' + 0xF000)
 #define UNI_QUESTION    (__u16) ('?' + 0xF000)
 #define UNI_COLON       (__u16) (':' + 0xF000)
 #define UNI_GRTRTHAN    (__u16) ('>' + 0xF000)
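
Each of these remapped code points is just the reserved ASCII character offset by
0xF000, so the values can be checked by hand; cifsConvertToUCS then emits them
little-endian on the wire via cpu_to_le16, so 0xf03a leaves as the bytes 3a f0.
A standalone arithmetic sketch (illustrative only, plain userspace C):

    #include <stdio.h>

    int main(void)
    {
            unsigned int uni_colon    = ':' + 0xF000;   /* 0x003a + 0xf000 = 0xf03a */
            unsigned int uni_asterisk = '*' + 0xF000;   /* 0x002a + 0xf000 = 0xf02a */

            printf("UNI_COLON = 0x%04x, UNI_ASTERISK = 0x%04x\n",
                   uni_colon, uni_asterisk);
            return 0;
    }
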
index a51585f..d1a016b 100644 (file)
 #include <linux/ctype.h>
 #include <linux/random.h>
 
-/* Calculate and return the CIFS signature based on the mac key and SMB PDU */
-/* the 16 byte signature must be allocated by the caller  */
-/* Note we only use the 1st eight bytes */
-/* Note that the smb header signature field on input contains the
-       sequence number before this function is called */
-
+/*
+ * Calculate and return the CIFS signature based on the mac key and SMB PDU.
+ * The 16 byte signature must be allocated by the caller. Note we only use the
+ * 1st eight bytes and that the smb header signature field on input contains
+ * the sequence number before this function is called. Also, this function
+ * should be called with the server->srv_mutex held.
+ */
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
                                struct TCP_Server_Info *server, char *signature)
 {
@@ -209,8 +210,10 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
                                        cpu_to_le32(expected_sequence_number);
        cifs_pdu->Signature.Sequence.Reserved = 0;
 
+       mutex_lock(&server->srv_mutex);
        rc = cifs_calculate_signature(cifs_pdu, server,
                what_we_think_sig_should_be);
+       mutex_unlock(&server->srv_mutex);
 
        if (rc)
                return rc;
@@ -469,15 +472,15 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash,
                return rc;
        }
 
-       /* convert ses->userName to unicode and uppercase */
-       len = strlen(ses->userName);
+       /* convert ses->user_name to unicode and uppercase */
+       len = strlen(ses->user_name);
        user = kmalloc(2 + (len * 2), GFP_KERNEL);
        if (user == NULL) {
                cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
                rc = -ENOMEM;
                goto calc_exit_2;
        }
-       len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
+       len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
        UniStrupr(user);
 
        crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
index f297013..5c412b3 100644 (file)
@@ -53,7 +53,6 @@ int cifsFYI = 0;
 int cifsERROR = 1;
 int traceSMB = 0;
 unsigned int oplockEnabled = 1;
-unsigned int experimEnabled = 0;
 unsigned int linuxExtEnabled = 1;
 unsigned int lookupCacheEnabled = 1;
 unsigned int multiuser_mount = 0;
@@ -127,6 +126,7 @@ cifs_read_super(struct super_block *sb, void *data,
                kfree(cifs_sb);
                return rc;
        }
+       cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
        /* copy mount params to sb for use in submounts */
@@ -409,8 +409,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
                seq_printf(s, ",multiuser");
-       else if (tcon->ses->userName)
-               seq_printf(s, ",username=%s", tcon->ses->userName);
+       else if (tcon->ses->user_name)
+               seq_printf(s, ",username=%s", tcon->ses->user_name);
 
        if (tcon->ses->domainName)
                seq_printf(s, ",domain=%s", tcon->ses->domainName);
index 17afb0f..a5d1106 100644 (file)
 
 #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
 #define MAX_SERVER_SIZE 15
-#define MAX_SHARE_SIZE  64     /* used to be 20, this should still be enough */
-#define MAX_USERNAME_SIZE 32   /* 32 is to allow for 15 char names + null
-                                  termination then *2 for unicode versions */
-#define MAX_PASSWORD_SIZE 512  /* max for windows seems to be 256 wide chars */
+#define MAX_SHARE_SIZE 80
+#define MAX_USERNAME_SIZE 256  /* reasonable maximum for current servers */
+#define MAX_PASSWORD_SIZE 512  /* max for windows seems to be 256 wide chars */
 
 #define CIFS_MIN_RCV_POOL 4
 
@@ -92,7 +91,8 @@ enum statusEnum {
        CifsNew = 0,
        CifsGood,
        CifsExiting,
-       CifsNeedReconnect
+       CifsNeedReconnect,
+       CifsNeedNegotiate
 };
 
 enum securityEnum {
@@ -274,7 +274,7 @@ struct cifsSesInfo {
        int capabilities;
        char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
                                TCP names - will ipv6 and sctp addresses fit? */
-       char userName[MAX_USERNAME_SIZE + 1];
+       char *user_name;
        char *domainName;
        char *password;
        struct session_key auth_key;
@@ -817,7 +817,6 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
                                have the uid/password or Kerberos credential
                                or equivalent for current user */
 GLOBAL_EXTERN unsigned int oplockEnabled;
-GLOBAL_EXTERN unsigned int experimEnabled;
 GLOBAL_EXTERN unsigned int lookupCacheEnabled;
 GLOBAL_EXTERN unsigned int global_secflags;    /* if on, session setup sent
                                with more secure ntlmssp2 challenge/resp */
index 2644a5d..df959ba 100644 (file)
@@ -142,9 +142,9 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
         */
        while (server->tcpStatus == CifsNeedReconnect) {
                wait_event_interruptible_timeout(server->response_q,
-                       (server->tcpStatus == CifsGood), 10 * HZ);
+                       (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
 
-               /* is TCP session is reestablished now ?*/
+               /* are we still trying to reconnect? */
                if (server->tcpStatus != CifsNeedReconnect)
                        break;
 
@@ -729,7 +729,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
                return rc;
 
        /* set up echo request */
-       smb->hdr.Tid = cpu_to_le16(0xffff);
+       smb->hdr.Tid = 0xffff;
        smb->hdr.WordCount = 1;
        put_unaligned_le16(1, &smb->EchoCount);
        put_bcc_le(1, &smb->hdr);
@@ -1884,10 +1884,10 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
                                        __constant_cpu_to_le16(CIFS_WRLCK))
                                pLockData->fl_type = F_WRLCK;
 
-                       pLockData->fl_start = parm_data->start;
-                       pLockData->fl_end = parm_data->start +
-                                               parm_data->length - 1;
-                       pLockData->fl_pid = parm_data->pid;
+                       pLockData->fl_start = le64_to_cpu(parm_data->start);
+                       pLockData->fl_end = pLockData->fl_start +
+                                       le64_to_cpu(parm_data->length) - 1;
+                       pLockData->fl_pid = le32_to_cpu(parm_data->pid);
                }
        }
 
index 6e2b2ad..db9d55b 100644 (file)
@@ -199,8 +199,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
        }
        spin_unlock(&GlobalMid_Lock);
 
-       while ((server->tcpStatus != CifsExiting) &&
-              (server->tcpStatus != CifsGood)) {
+       while (server->tcpStatus == CifsNeedReconnect) {
                try_to_freeze();
 
                /* we should try only the port we connected to before */
@@ -212,7 +211,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                        atomic_inc(&tcpSesReconnectCount);
                        spin_lock(&GlobalMid_Lock);
                        if (server->tcpStatus != CifsExiting)
-                               server->tcpStatus = CifsGood;
+                               server->tcpStatus = CifsNeedNegotiate;
                        spin_unlock(&GlobalMid_Lock);
                }
        }
@@ -248,24 +247,24 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
        total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
        data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
 
-       remaining = total_data_size - data_in_this_rsp;
-
-       if (remaining == 0)
+       if (total_data_size == data_in_this_rsp)
                return 0;
-       else if (remaining < 0) {
+       else if (total_data_size < data_in_this_rsp) {
                cFYI(1, "total data %d smaller than data in frame %d",
                        total_data_size, data_in_this_rsp);
                return -EINVAL;
-       } else {
-               cFYI(1, "missing %d bytes from transact2, check next response",
-                       remaining);
-               if (total_data_size > maxBufSize) {
-                       cERROR(1, "TotalDataSize %d is over maximum buffer %d",
-                               total_data_size, maxBufSize);
-                       return -EINVAL;
-               }
-               return remaining;
        }
+
+       remaining = total_data_size - data_in_this_rsp;
+
+       cFYI(1, "missing %d bytes from transact2, check next response",
+               remaining);
+       if (total_data_size > maxBufSize) {
+               cERROR(1, "TotalDataSize %d is over maximum buffer %d",
+                       total_data_size, maxBufSize);
+               return -EINVAL;
+       }
+       return remaining;
 }
 
 static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
@@ -421,7 +420,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
                pdu_length = 4; /* enough to get RFC1001 header */
 
 incomplete_rcv:
-               if (echo_retries > 0 &&
+               if (echo_retries > 0 && server->tcpStatus == CifsGood &&
                    time_after(jiffies, server->lstrp +
                                        (echo_retries * SMB_ECHO_INTERVAL))) {
                        cERROR(1, "Server %s has not responded in %d seconds. "
@@ -881,7 +880,8 @@ cifs_parse_mount_options(char *options, const char *devname,
                                /* null user, ie anonymous, authentication */
                                vol->nullauth = 1;
                        }
-                       if (strnlen(value, 200) < 200) {
+                       if (strnlen(value, MAX_USERNAME_SIZE) <
+                                               MAX_USERNAME_SIZE) {
                                vol->username = value;
                        } else {
                                printk(KERN_WARNING "CIFS: username too long\n");
@@ -1472,7 +1472,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
 static bool
 match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
 {
-       unsigned short int port, *sport;
+       __be16 port, *sport;
 
        switch (addr->sa_family) {
        case AF_INET:
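
The switch from unsigned short to __be16 here (and in the connect paths below)
matters because a sockaddr already stores the port in network byte order; keeping the
local variable annotated as __be16 lets sparse flag any comparison or assignment that
is missing a byte-order conversion.  A minimal sketch of the idea, assuming the usual
kernel byte-order helpers (not code from this patch):

    #include <linux/in.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Compare a host-order port against the big-endian port held in a sockaddr_in. */
    static bool port_matches(const struct sockaddr_in *addr, unsigned short host_port)
    {
            __be16 wire_port = htons(host_port);    /* convert once, at the boundary */

            return addr->sin_port == wire_port;     /* both operands are __be16 */
    }
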
@@ -1765,6 +1765,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
                module_put(THIS_MODULE);
                goto out_err_crypto_release;
        }
+       tcp_ses->tcpStatus = CifsNeedNegotiate;
 
        /* thread spawned, put it on the list */
        spin_lock(&cifs_tcp_ses_lock);
@@ -1808,7 +1809,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
                        break;
                default:
                        /* anything else takes username/password */
-                       if (strncmp(ses->userName, vol->username,
+                       if (ses->user_name == NULL)
+                               continue;
+                       if (strncmp(ses->user_name, vol->username,
                                    MAX_USERNAME_SIZE))
                                continue;
                        if (strlen(vol->username) != 0 &&
@@ -1851,6 +1854,8 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
        cifs_put_tcp_session(server);
 }
 
+static bool warned_on_ntlm;  /* globals init to false automatically */
+
 static struct cifsSesInfo *
 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
 {
@@ -1906,9 +1911,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
        else
                sprintf(ses->serverName, "%pI4", &addr->sin_addr);
 
-       if (volume_info->username)
-               strncpy(ses->userName, volume_info->username,
-                       MAX_USERNAME_SIZE);
+       if (volume_info->username) {
+               ses->user_name = kstrdup(volume_info->username, GFP_KERNEL);
+               if (!ses->user_name)
+                       goto get_ses_fail;
+       }
 
        /* volume_info->password freed at unmount */
        if (volume_info->password) {
@@ -1923,6 +1930,15 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
        }
        ses->cred_uid = volume_info->cred_uid;
        ses->linux_uid = volume_info->linux_uid;
+
+       /* ntlmv2 is much stronger than ntlm security, and has been broadly
+       supported for many years, time to update default security mechanism */
+       if ((volume_info->secFlg == 0) && warned_on_ntlm == false) {
+               warned_on_ntlm = true;
+               cERROR(1, "default security mechanism requested.  The default "
+                       "security mechanism will be upgraded from ntlm to "
+                       "ntlmv2 in kernel release 2.6.41");
+       }
        ses->overrideSecFlg = volume_info->secFlg;
 
        mutex_lock(&ses->session_mutex);
@@ -2276,7 +2292,7 @@ static int
 generic_ip_connect(struct TCP_Server_Info *server)
 {
        int rc = 0;
-       unsigned short int sport;
+       __be16 sport;
        int slen, sfamily;
        struct socket *socket = server->ssocket;
        struct sockaddr *saddr;
@@ -2361,7 +2377,7 @@ generic_ip_connect(struct TCP_Server_Info *server)
 static int
 ip_connect(struct TCP_Server_Info *server)
 {
-       unsigned short int *sport;
+       __be16 *sport;
        struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
        struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
 
@@ -2826,7 +2842,7 @@ try_mount_again:
 
 remote_path_check:
        /* check if a whole path (including prepath) is not remote */
-       if (!rc && cifs_sb->prepathlen && tcon) {
+       if (!rc && tcon) {
                /* build_path_to_root works only when we have a valid tcon */
                full_path = cifs_build_path_to_root(cifs_sb, tcon);
                if (full_path == NULL) {
index c27d236..faf5952 100644 (file)
@@ -575,8 +575,10 @@ reopen_error_exit:
 
 int cifs_close(struct inode *inode, struct file *file)
 {
-       cifsFileInfo_put(file->private_data);
-       file->private_data = NULL;
+       if (file->private_data != NULL) {
+               cifsFileInfo_put(file->private_data);
+               file->private_data = NULL;
+       }
 
        /* return code from the ->release op is always ignored */
        return 0;
@@ -970,6 +972,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
             total_written += bytes_written) {
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
+                       struct kvec iov[2];
+                       unsigned int len;
+
                        if (open_file->invalidHandle) {
                                /* we could deadlock if we called
                                   filemap_fdatawait from here so tell
@@ -979,31 +984,14 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
                                if (rc != 0)
                                        break;
                        }
-                       if (experimEnabled || (pTcon->ses->server &&
-                               ((pTcon->ses->server->secMode &
-                               (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
-                               == 0))) {
-                               struct kvec iov[2];
-                               unsigned int len;
-
-                               len = min((size_t)cifs_sb->wsize,
-                                         write_size - total_written);
-                               /* iov[0] is reserved for smb header */
-                               iov[1].iov_base = (char *)write_data +
-                                                 total_written;
-                               iov[1].iov_len = len;
-                               rc = CIFSSMBWrite2(xid, pTcon,
-                                               open_file->netfid, len,
-                                               *poffset, &bytes_written,
-                                               iov, 1, 0);
-                       } else
-                               rc = CIFSSMBWrite(xid, pTcon,
-                                        open_file->netfid,
-                                        min_t(const int, cifs_sb->wsize,
-                                              write_size - total_written),
-                                        *poffset, &bytes_written,
-                                        write_data + total_written,
-                                        NULL, 0);
+
+                       len = min((size_t)cifs_sb->wsize,
+                                 write_size - total_written);
+                       /* iov[0] is reserved for smb header */
+                       iov[1].iov_base = (char *)write_data + total_written;
+                       iov[1].iov_len = len;
+                       rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
+                                          *poffset, &bytes_written, iov, 1, 0);
                }
                if (rc || (bytes_written == 0)) {
                        if (total_written)
@@ -1240,12 +1228,6 @@ static int cifs_writepages(struct address_space *mapping,
        }
 
        tcon = tlink_tcon(open_file->tlink);
-       if (!experimEnabled && tcon->ses->server->secMode &
-                       (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
-               cifsFileInfo_put(open_file);
-               kfree(iov);
-               return generic_writepages(mapping, wbc);
-       }
        cifsFileInfo_put(open_file);
 
        xid = GetXid();
@@ -1980,6 +1962,24 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
        return total_read;
 }
 
+/*
+ * If the page is mmap'ed into a process' page tables, then we need to make
+ * sure that it doesn't change while being written back.
+ */
+static int
+cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct page *page = vmf->page;
+
+       lock_page(page);
+       return VM_FAULT_LOCKED;
+}
+
+static struct vm_operations_struct cifs_file_vm_ops = {
+       .fault = filemap_fault,
+       .page_mkwrite = cifs_page_mkwrite,
+};
+
 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
 {
        int rc, xid;
@@ -1991,6 +1991,8 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
                cifs_invalidate_mapping(inode);
 
        rc = generic_file_mmap(file, vma);
+       if (rc == 0)
+               vma->vm_ops = &cifs_file_vm_ops;
        FreeXid(xid);
        return rc;
 }
@@ -2007,6 +2009,8 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
                return rc;
        }
        rc = generic_file_mmap(file, vma);
+       if (rc == 0)
+               vma->vm_ops = &cifs_file_vm_ops;
        FreeXid(xid);
        return rc;
 }
index e8804d3..ce417a9 100644 (file)
@@ -239,7 +239,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
        if (rc != 0)
                return rc;
 
-       if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+       if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
                CIFSSMBClose(xid, tcon, netfid);
                /* it's not a symlink */
                return -EINVAL;
@@ -316,7 +316,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
        if (rc != 0)
                goto out;
 
-       if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+       if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
                CIFSSMBClose(xid, pTcon, netfid);
                /* it's not a symlink */
                goto out;
index 2a930a7..0c684ae 100644 (file)
@@ -100,6 +100,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
                memset(buf_to_free->password, 0, strlen(buf_to_free->password));
                kfree(buf_to_free->password);
        }
+       kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kfree(buf_to_free);
 }
@@ -520,7 +521,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
                        (struct smb_com_transaction_change_notify_rsp *)buf;
                struct file_notify_information *pnotify;
                __u32 data_offset = 0;
-               if (pSMBr->ByteCount > sizeof(struct file_notify_information)) {
+               if (get_bcc_le(buf) > sizeof(struct file_notify_information)) {
                        data_offset = le32_to_cpu(pSMBr->DataOffset);
 
                        pnotify = (struct file_notify_information *)
index 1676570..f6728eb 100644 (file)
@@ -219,12 +219,12 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
                bcc_ptr++;
        } */
        /* copy user */
-       if (ses->userName == NULL) {
+       if (ses->user_name == NULL) {
                /* null user mount */
                *bcc_ptr = 0;
                *(bcc_ptr+1) = 0;
        } else {
-               bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
+               bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name,
                                          MAX_USERNAME_SIZE, nls_cp);
        }
        bcc_ptr += 2 * bytes_ret;
@@ -244,12 +244,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
        /* copy user */
        /* BB what about null user mounts - check that we do this BB */
        /* copy user */
-       if (ses->userName == NULL) {
-               /* BB what about null user mounts - check that we do this BB */
-       } else {
-               strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
-       }
-       bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
+       if (ses->user_name != NULL)
+               strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
+       /* else null user mount */
+
+       bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
        *bcc_ptr = 0;
        bcc_ptr++; /* account for null termination */
 
@@ -405,8 +404,8 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
        /* BB spec says that if AvId field of MsvAvTimestamp is populated then
                we must set the MIC field of the AUTHENTICATE_MESSAGE */
        ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
-       tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
-       tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
+       tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
+       tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
        if (tilen) {
                ses->auth_key.response = kmalloc(tilen, GFP_KERNEL);
                if (!ses->auth_key.response) {
@@ -523,14 +522,14 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                tmp += len;
        }
 
-       if (ses->userName == NULL) {
+       if (ses->user_name == NULL) {
                sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->UserName.Length = 0;
                sec_blob->UserName.MaximumLength = 0;
                tmp += 2;
        } else {
                int len;
-               len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
+               len = cifs_strtoUCS((__le16 *)tmp, ses->user_name,
                                    MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
                sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
index ad25c4c..129a357 100644 (file)
@@ -2131,7 +2131,7 @@ EXPORT_SYMBOL(d_rehash);
  */
 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
 {
-       BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
+       BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
        BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
 
        spin_lock(&dentry->d_lock);
index e25e99b..d0f5353 100644 (file)
@@ -86,8 +86,8 @@
 
 #ifdef CONFIG_QUOTA
 /* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
+ * allocated so we need to update only the data block */
+#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
 /* Amount of blocks needed for quota insert/delete - we do some block writes
  * but inode, sb and group updates are done only once */
 #define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
index 4673bc0..e9473cb 100644 (file)
@@ -125,9 +125,11 @@ extern int ext4_flush_completed_IO(struct inode *inode)
  * the parent directory's parent as well, and so on recursively, if
  * they are also freshly created.
  */
-static void ext4_sync_parent(struct inode *inode)
+static int ext4_sync_parent(struct inode *inode)
 {
+       struct writeback_control wbc;
        struct dentry *dentry = NULL;
+       int ret = 0;
 
        while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
                ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
@@ -136,8 +138,17 @@ static void ext4_sync_parent(struct inode *inode)
                if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
                        break;
                inode = dentry->d_parent->d_inode;
-               sync_mapping_buffers(inode->i_mapping);
+               ret = sync_mapping_buffers(inode->i_mapping);
+               if (ret)
+                       break;
+               memset(&wbc, 0, sizeof(wbc));
+               wbc.sync_mode = WB_SYNC_ALL;
+               wbc.nr_to_write = 0;         /* only write out the inode */
+               ret = sync_inode(inode, &wbc);
+               if (ret)
+                       break;
        }
+       return ret;
 }
 
 /*
@@ -176,7 +187,7 @@ int ext4_sync_file(struct file *file, int datasync)
        if (!journal) {
                ret = generic_file_fsync(file, datasync);
                if (!ret && !list_empty(&inode->i_dentry))
-                       ext4_sync_parent(inode);
+                       ret = ext4_sync_parent(inode);
                goto out;
        }
 
index ad8e303..f2fa5e8 100644 (file)
@@ -2502,6 +2502,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                 * for partial write.
                 */
                set_buffer_new(bh);
+               set_buffer_mapped(bh);
        }
        return 0;
 }
@@ -4429,8 +4430,8 @@ void ext4_truncate(struct inode *inode)
        Indirect chain[4];
        Indirect *partial;
        __le32 nr = 0;
-       int n;
-       ext4_lblk_t last_block;
+       int n = 0;
+       ext4_lblk_t last_block, max_block;
        unsigned blocksize = inode->i_sb->s_blocksize;
 
        trace_ext4_truncate_enter(inode);
@@ -4455,14 +4456,18 @@ void ext4_truncate(struct inode *inode)
 
        last_block = (inode->i_size + blocksize-1)
                                        >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+       max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+                                       >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
 
        if (inode->i_size & (blocksize - 1))
                if (ext4_block_truncate_page(handle, mapping, inode->i_size))
                        goto out_stop;
 
-       n = ext4_block_to_path(inode, last_block, offsets, NULL);
-       if (n == 0)
-               goto out_stop;  /* error */
+       if (last_block != max_block) {
+               n = ext4_block_to_path(inode, last_block, offsets, NULL);
+               if (n == 0)
+                       goto out_stop;  /* error */
+       }
 
        /*
         * OK.  This truncate is going to happen.  We add the inode to the
@@ -4493,7 +4498,13 @@ void ext4_truncate(struct inode *inode)
         */
        ei->i_disksize = inode->i_size;
 
-       if (n == 1) {           /* direct blocks */
+       if (last_block == max_block) {
+               /*
+                * It is unnecessary to free any data blocks if last_block is
+                * equal to the indirect block limit.
+                */
+               goto out_unlock;
+       } else if (n == 1) {            /* direct blocks */
                ext4_free_data(handle, inode, NULL, i_data+offsets[0],
                               i_data + EXT4_NDIR_BLOCKS);
                goto do_indirects;
@@ -4553,6 +4564,7 @@ do_indirects:
                ;
        }
 
+out_unlock:
        up_write(&ei->i_data_sem);
        inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
        ext4_mark_inode_dirty(handle, inode);
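In the truncate hunks above, last_block and max_block round the byte sizes up to whole filesystem blocks with an add-and-shift, and the indirect-tree walk is skipped entirely when the truncate point already sits at the s_bitmap_maxbytes limit. A hedged standalone check of that arithmetic with invented sizes:

    /* Sketch of the round-up-to-blocks arithmetic behind last_block and
     * max_block above; all sizes are invented. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int bits = 12;                        /* 4096-byte blocks */
        unsigned long long blocksize = 1ULL << bits;
        unsigned long long maxbytes = 16ULL << 30;     /* pretend indirect limit */
        unsigned long long sizes[] = { 10000, 16ULL << 30 };
        unsigned long long max_block = (maxbytes + blocksize - 1) >> bits;

        for (int i = 0; i < 2; i++) {
            /* (x + blocksize - 1) >> bits == ceil(x / blocksize) */
            unsigned long long last_block = (sizes[i] + blocksize - 1) >> bits;

            printf("i_size=%llu last_block=%llu max_block=%llu%s\n",
                   sizes[i], last_block, max_block,
                   last_block == max_block ? "  -> nothing to free" : "");
        }
        return 0;
    }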
@@ -5398,13 +5410,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
        /* if nrblocks are contiguous */
        if (chunk) {
                /*
-                * With N contiguous data blocks, it need at most
-                * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
-                * 2 dindirect blocks
-                * 1 tindirect block
+                * With N contiguous data blocks, we need at most
+                * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+                * 2 dindirect blocks, and 1 tindirect block
                 */
-               indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
-               return indirects + 3;
+               return DIV_ROUND_UP(nrblocks,
+                                   EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
        }
        /*
         * if nrblocks are not contiguous, worse case, each block touch
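The rewritten comment above gives the worst case for N contiguous data blocks: ceil(N / addresses-per-block) indirect blocks plus one more indirect, two double-indirect and one triple-indirect block, hence DIV_ROUND_UP(nrblocks, addr_per_block) + 4. A hedged standalone check of that formula (1024 addresses per block corresponds to 4K blocks with 4-byte entries):

    /* Sketch of the worst-case metadata-block count described above:
     * ceil(N / addrs_per_block) + 1 + 2 + 1. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long addr_per_block = 1024;
        unsigned long nrblocks;

        for (nrblocks = 1; nrblocks <= 4096; nrblocks *= 4)
            printf("%5lu contiguous blocks -> at most %lu metadata blocks\n",
                   nrblocks, DIV_ROUND_UP(nrblocks, addr_per_block) + 4);
        return 0;
    }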
index 056474b..8553dfb 100644 (file)
@@ -242,27 +242,44 @@ static void ext4_put_nojournal(handle_t *handle)
  * journal_end calls result in the superblock being marked dirty, so
  * that sync() will call the filesystem's write_super callback if
  * appropriate.
+ *
+ * To avoid holding j_barrier in userspace when a user calls freeze(),
+ * ext4 prevents a new handle from being started by s_frozen, which
+ * lives in an upper layer.
  */
 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
 {
        journal_t *journal;
+       handle_t  *handle;
 
        if (sb->s_flags & MS_RDONLY)
                return ERR_PTR(-EROFS);
 
-       vfs_check_frozen(sb, SB_FREEZE_TRANS);
-       /* Special case here: if the journal has aborted behind our
-        * backs (eg. EIO in the commit thread), then we still need to
-        * take the FS itself readonly cleanly. */
        journal = EXT4_SB(sb)->s_journal;
-       if (journal) {
-               if (is_journal_aborted(journal)) {
-                       ext4_abort(sb, "Detected aborted journal");
-                       return ERR_PTR(-EROFS);
-               }
-               return jbd2_journal_start(journal, nblocks);
+       handle = ext4_journal_current_handle();
+
+       /*
+        * If a handle has been started, it should be allowed to
+        * finish, otherwise deadlock could happen between freeze
+        * and others (e.g. truncate) due to the restart of the
+        * journal handle if the filesystem is frozen and active
+        * handles are not stopped.
+        */
+       if (!handle)
+               vfs_check_frozen(sb, SB_FREEZE_TRANS);
+
+       if (!journal)
+               return ext4_get_nojournal();
+       /*
+        * Special case here: if the journal has aborted behind our
+        * backs (eg. EIO in the commit thread), then we still need to
+        * take the FS itself readonly cleanly.
+        */
+       if (is_journal_aborted(journal)) {
+               ext4_abort(sb, "Detected aborted journal");
+               return ERR_PTR(-EROFS);
        }
-       return ext4_get_nojournal();
+       return jbd2_journal_start(journal, nblocks);
 }
 
 /*
@@ -2975,6 +2992,12 @@ static int ext4_register_li_request(struct super_block *sb,
        mutex_unlock(&ext4_li_info->li_list_mtx);
 
        sbi->s_li_request = elr;
+       /*
+        * set elr to NULL here since it has been inserted to
+        * the request_list and the removal and free of it is
+        * handled by ext4_clear_request_list from now on.
+        */
+       elr = NULL;
 
        if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
                ret = ext4_run_lazyinit_thread();
@@ -3385,6 +3408,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
+       init_timer(&sbi->s_err_report);
+       sbi->s_err_report.function = print_daily_error_info;
+       sbi->s_err_report.data = (unsigned long) sb;
+
        err = percpu_counter_init(&sbi->s_freeblocks_counter,
                        ext4_count_free_blocks(sb));
        if (!err) {
@@ -3646,9 +3673,6 @@ no_journal:
                 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
                 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
 
-       init_timer(&sbi->s_err_report);
-       sbi->s_err_report.function = print_daily_error_info;
-       sbi->s_err_report.data = (unsigned long) sb;
        if (es->s_error_count)
                mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
 
@@ -3672,6 +3696,7 @@ failed_mount_wq:
                sbi->s_journal = NULL;
        }
 failed_mount3:
+       del_timer(&sbi->s_err_report);
        if (sbi->s_flex_groups) {
                if (is_vmalloc_addr(sbi->s_flex_groups))
                        vfree(sbi->s_flex_groups);
@@ -4138,6 +4163,11 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
 /*
  * LVM calls this function before a (read-only) snapshot is created.  This
  * gives us a chance to flush the journal completely and mark the fs clean.
+ *
+ * Note that only this function cannot bring a filesystem to be in a clean
+ * state independently, because ext4 prevents a new handle from being started
+ * by @sb->s_frozen, which stays in an upper layer.  It thus needs help from
+ * the upper layer.
  */
 static int ext4_freeze(struct super_block *sb)
 {
@@ -4614,11 +4644,24 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
 
 static int ext4_quota_off(struct super_block *sb, int type)
 {
+       struct inode *inode = sb_dqopt(sb)->files[type];
+       handle_t *handle;
+
        /* Force all delayed allocation blocks to be allocated.
         * Caller already holds s_umount sem */
        if (test_opt(sb, DELALLOC))
                sync_filesystem(sb);
 
+       /* Update modification times of quota files when userspace can
+        * start looking at them */
+       handle = ext4_journal_start(inode, 1);
+       if (IS_ERR(handle))
+               goto out;
+       inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       ext4_mark_inode_dirty(handle, inode);
+       ext4_journal_stop(handle);
+
+out:
        return dquot_quota_off(sb, type);
 }
 
@@ -4714,9 +4757,8 @@ out:
        if (inode->i_size < off + len) {
                i_size_write(inode, off + len);
                EXT4_I(inode)->i_disksize = inode->i_size;
+               ext4_mark_inode_dirty(handle, inode);
        }
-       inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-       ext4_mark_inode_dirty(handle, inode);
        mutex_unlock(&inode->i_mutex);
        return len;
 }
index bf93ad2..6b08864 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/exportfs.h>
 #include <linux/fs_struct.h>
 #include <linux/fsnotify.h>
+#include <linux/personality.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
index 20af62f..6e28000 100644 (file)
@@ -105,6 +105,8 @@ static int journal_submit_commit_record(journal_t *journal,
        int ret;
        struct timespec now = current_kernel_time();
 
+       *cbh = NULL;
+
        if (is_journal_aborted(journal))
                return 0;
 
@@ -806,7 +808,7 @@ wait_for_iobuf:
                if (err)
                        __jbd2_journal_abort_hard(journal);
        }
-       if (!err && !is_journal_aborted(journal))
+       if (cbh)
                err = journal_wait_on_commit_record(journal, cbh);
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                                      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
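journal_submit_commit_record() now clears *cbh on entry, so the commit path above can decide whether to wait simply by testing cbh instead of re-deriving the journal state. A hedged standalone sketch of that initialise-the-out-parameter pattern; the types and names are invented:

    /* Sketch of the "initialise the out-parameter first" pattern used above. */
    #include <stdio.h>
    #include <stdlib.h>

    struct record { int id; };

    /* Returns 0 when nothing needed submitting; *out is always valid. */
    static int submit_record(int aborted, struct record **out)
    {
        *out = NULL;                    /* never leave the caller a stale pointer */

        if (aborted)
            return 0;                   /* nothing submitted, *out stays NULL */

        *out = malloc(sizeof(**out));
        if (!*out)
            return -1;
        (*out)->id = 42;
        return 0;
    }

    int main(void)
    {
        struct record *rec;

        submit_record(1, &rec);
        if (rec)                        /* caller only has to test the pointer */
            printf("wait on record %d\n", rec->id);
        else
            printf("no record submitted\n");
        free(rec);
        return 0;
    }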
index aba8eba..e0ec3db 100644 (file)
@@ -2413,10 +2413,12 @@ const char *jbd2_dev_to_name(dev_t device)
        new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
        if (!new_dev)
                return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
+       bd = bdget(device);
        spin_lock(&devname_cache_lock);
        if (devcache[i]) {
                if (devcache[i]->device == device) {
                        kfree(new_dev);
+                       bdput(bd);
                        ret = devcache[i]->devname;
                        spin_unlock(&devname_cache_lock);
                        return ret;
@@ -2425,7 +2427,6 @@ const char *jbd2_dev_to_name(dev_t device)
        }
        devcache[i] = new_dev;
        devcache[i]->device = device;
-       bd = bdget(device);
        if (bd) {
                bdevname(bd, devcache[i]->devname);
                bdput(bd);
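The jbd2_dev_to_name() hunks above pull bdget() in front of the devname_cache spinlock, so the potentially sleeping lookup happens unlocked and the reference is dropped again on the path that loses the race. A hedged userspace analogy (built with -pthread) of doing the blocking work before taking the lock; everything here is invented:

    /* Userspace analogy: allocate before taking the spinlock, and drop the
     * copy if another thread won the race.  Illustrative only. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_spinlock_t cache_lock;
    static char *cached_name;

    static const char *lookup_or_insert(const char *name)
    {
        char *new_entry = strdup(name);          /* may block: do it unlocked */

        if (!new_entry)
            return NULL;
        pthread_spin_lock(&cache_lock);
        if (cached_name) {                       /* someone beat us to it */
            pthread_spin_unlock(&cache_lock);
            free(new_entry);
            return cached_name;
        }
        cached_name = new_entry;
        pthread_spin_unlock(&cache_lock);
        return cached_name;
    }

    int main(void)
    {
        pthread_spin_init(&cache_lock, PTHREAD_PROCESS_PRIVATE);
        printf("%s\n", lookup_or_insert("sda1"));
        printf("%s\n", lookup_or_insert("sdb1"));    /* returns the cached "sda1" */
        free(cached_name);
        pthread_spin_destroy(&cache_lock);
        return 0;
    }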
index 7dba2ed..d99bcf5 100644 (file)
@@ -1030,18 +1030,6 @@ const struct seq_operations mounts_op = {
        .show   = show_vfsmnt
 };
 
-static int uuid_is_nil(u8 *uuid)
-{
-       int i;
-       u8  *cp = (u8 *)uuid;
-
-       for (i = 0; i < 16; i++) {
-               if (*cp++)
-                       return 0;
-       }
-       return 1;
-}
-
 static int show_mountinfo(struct seq_file *m, void *v)
 {
        struct proc_mounts *p = m->private;
@@ -1085,10 +1073,6 @@ static int show_mountinfo(struct seq_file *m, void *v)
        if (IS_MNT_UNBINDABLE(mnt))
                seq_puts(m, " unbindable");
 
-       if (!uuid_is_nil(mnt->mnt_sb->s_uuid))
-               /* print the uuid */
-               seq_printf(m, " uuid:%pU", mnt->mnt_sb->s_uuid);
-
        /* Filesystem specific data */
        seq_puts(m, " - ");
        show_type(m, sb);
index af0c627..e4cbc11 100644 (file)
@@ -542,11 +542,15 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u
        if (!nfs_need_commit(nfsi))
                return 0;
 
+       spin_lock(&inode->i_lock);
        ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
        if (ret > 0)
                nfsi->ncommit -= ret;
+       spin_unlock(&inode->i_lock);
+
        if (nfs_need_commit(NFS_I(inode)))
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
        return ret;
 }
 #else
@@ -1483,9 +1487,7 @@ int nfs_commit_inode(struct inode *inode, int how)
        res = nfs_commit_set_lock(NFS_I(inode), may_wait);
        if (res <= 0)
                goto out_mark_dirty;
-       spin_lock(&inode->i_lock);
        res = nfs_scan_commit(inode, &head, 0, 0);
-       spin_unlock(&inode->i_lock);
        if (res) {
                int error;
 
index 0c6d816..7c831a2 100644 (file)
@@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
        exp_readlock();
        nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp);
        fh_put(&fh);
-       rqstp->rq_client = NULL;
        exp_readunlock();
        /* We return nlm error codes as nlm doesn't know
         * about nfsd, but nfsd does know about nlm..
index 4b36ec3..aa309aa 100644 (file)
@@ -397,10 +397,13 @@ static void unhash_generic_stateid(struct nfs4_stateid *stp)
 
 static void free_generic_stateid(struct nfs4_stateid *stp)
 {
-       int oflag = nfs4_access_bmap_to_omode(stp);
+       int oflag;
 
-       nfs4_file_put_access(stp->st_file, oflag);
-       put_nfs4_file(stp->st_file);
+       if (stp->st_access_bmap) {
+               oflag = nfs4_access_bmap_to_omode(stp);
+               nfs4_file_put_access(stp->st_file, oflag);
+               put_nfs4_file(stp->st_file);
+       }
        kmem_cache_free(stateid_slab, stp);
 }
 
index b10e354..ce4f624 100644 (file)
@@ -1299,6 +1299,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
 
        BUG_ON (!data || !frags);
 
+       if (size < 2 * VBLK_SIZE_HEAD) {
+               ldm_error("Value of size is too small.");
+               return false;
+       }
+
        group = get_unaligned_be32(data + 0x08);
        rec   = get_unaligned_be16(data + 0x0C);
        num   = get_unaligned_be16(data + 0x0E);
@@ -1306,6 +1311,10 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
                ldm_error ("A VBLK claims to have %d parts.", num);
                return false;
        }
+       if (rec >= num) {
+               ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
+               return false;
+       }
 
        list_for_each (item, frags) {
                f = list_entry (item, struct frag, list);
@@ -1334,10 +1343,9 @@ found:
 
        f->map |= (1 << rec);
 
-       if (num > 0) {
-               data += VBLK_SIZE_HEAD;
-               size -= VBLK_SIZE_HEAD;
-       }
+       data += VBLK_SIZE_HEAD;
+       size -= VBLK_SIZE_HEAD;
+
        memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
 
        return true;
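ldm_frag_add() now rejects fragments whose size cannot even hold two VBLK headers and whose rec index is not below num before using them, so a corrupted or hostile partition table can no longer push the later memcpy() out of bounds. A hedged standalone sketch of validating untrusted big-endian on-disk fields; the header size and field offsets are invented:

    /* Sketch of the bounds checks added above: read big-endian fields from
     * an untrusted buffer and validate them before use. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HDR_SIZE 16

    static uint16_t get_be16(const uint8_t *p)
    {
        return (uint16_t)(p[0] << 8) | p[1];
    }

    static bool frag_ok(const uint8_t *data, int size)
    {
        uint16_t rec, num;

        if (size < 2 * HDR_SIZE) {               /* too small for header + payload */
            fprintf(stderr, "fragment too small (%d)\n", size);
            return false;
        }
        rec = get_be16(data + 0x0C);
        num = get_be16(data + 0x0E);
        if (num == 0 || num > 4) {
            fprintf(stderr, "bogus part count %u\n", num);
            return false;
        }
        if (rec >= num) {                        /* index must stay below the count */
            fprintf(stderr, "rec %u exceeds num %u\n", rec, num);
            return false;
        }
        return true;
    }

    int main(void)
    {
        uint8_t buf[64] = {0};

        buf[0x0C] = 0; buf[0x0D] = 3;            /* rec = 3 */
        buf[0x0E] = 0; buf[0x0F] = 2;            /* num = 2: must be rejected */
        printf("valid: %s\n", frag_ok(buf, sizeof(buf)) ? "yes" : "no");
        return 0;
    }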
index 9eead2c..fbb0b47 100644 (file)
@@ -112,6 +112,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
                SetPageDirty(page);
 
                unlock_page(page);
+               put_page(page);
        }
 
        return 0;
index 919f0de..e6493ca 100644 (file)
 #ifndef __UBIFS_DEBUG_H__
 #define __UBIFS_DEBUG_H__
 
+/* Checking helper functions */
+typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
+                                struct ubifs_zbranch *zbr, void *priv);
+typedef int (*dbg_znode_callback)(struct ubifs_info *c,
+                                 struct ubifs_znode *znode, void *priv);
+
 #ifdef CONFIG_UBIFS_FS_DEBUG
 
 /**
@@ -270,11 +276,6 @@ void dbg_dump_tnc(struct ubifs_info *c);
 void dbg_dump_index(struct ubifs_info *c);
 void dbg_dump_lpt_lebs(const struct ubifs_info *c);
 
-/* Checking helper functions */
-typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
-                                struct ubifs_zbranch *zbr, void *priv);
-typedef int (*dbg_znode_callback)(struct ubifs_info *c,
-                                 struct ubifs_znode *znode, void *priv);
 int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
                   dbg_znode_callback znode_cb, void *priv);
 
@@ -295,7 +296,6 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size);
 int dbg_check_filesystem(struct ubifs_info *c);
 void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
                    int add_pos);
-int dbg_check_lprops(struct ubifs_info *c);
 int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
                        int row, int col);
 int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
@@ -401,58 +401,94 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
 #define DBGKEY(key)  ((char *)(key))
 #define DBGKEY1(key) ((char *)(key))
 
-#define ubifs_debugging_init(c)                0
-#define ubifs_debugging_exit(c)                ({})
-
-#define dbg_ntype(type)                        ""
-#define dbg_cstate(cmt_state)                  ""
-#define dbg_jhead(jhead)                       ""
-#define dbg_get_key_dump(c, key)               ({})
-#define dbg_dump_inode(c, inode)               ({})
-#define dbg_dump_node(c, node)                 ({})
-#define dbg_dump_lpt_node(c, node, lnum, offs) ({})
-#define dbg_dump_budget_req(req)               ({})
-#define dbg_dump_lstats(lst)                   ({})
-#define dbg_dump_budg(c)                       ({})
-#define dbg_dump_lprop(c, lp)                  ({})
-#define dbg_dump_lprops(c)                     ({})
-#define dbg_dump_lpt_info(c)                   ({})
-#define dbg_dump_leb(c, lnum)                  ({})
-#define dbg_dump_znode(c, znode)               ({})
-#define dbg_dump_heap(c, heap, cat)            ({})
-#define dbg_dump_pnode(c, pnode, parent, iip)  ({})
-#define dbg_dump_tnc(c)                        ({})
-#define dbg_dump_index(c)                      ({})
-#define dbg_dump_lpt_lebs(c)                   ({})
-
-#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0
-#define dbg_old_index_check_init(c, zroot)         0
-#define dbg_save_space_info(c)                     ({})
-#define dbg_check_space_info(c)                    0
-#define dbg_check_old_index(c, zroot)              0
-#define dbg_check_cats(c)                          0
-#define dbg_check_ltab(c)                          0
-#define dbg_chk_lpt_free_spc(c)                    0
-#define dbg_chk_lpt_sz(c, action, len)             0
-#define dbg_check_synced_i_size(inode)             0
-#define dbg_check_dir_size(c, dir)                 0
-#define dbg_check_tnc(c, x)                        0
-#define dbg_check_idx_size(c, idx_size)            0
-#define dbg_check_filesystem(c)                    0
-#define dbg_check_heap(c, heap, cat, add_pos)      ({})
-#define dbg_check_lprops(c)                        0
-#define dbg_check_lpt_nodes(c, cnode, row, col)    0
-#define dbg_check_inode_size(c, inode, size)       0
-#define dbg_check_data_nodes_order(c, head)        0
-#define dbg_check_nondata_nodes_order(c, head)     0
-#define dbg_force_in_the_gaps_enabled              0
-#define dbg_force_in_the_gaps()                    0
-#define dbg_failure_mode                           0
-
-#define dbg_debugfs_init()                         0
-#define dbg_debugfs_exit()
-#define dbg_debugfs_init_fs(c)                     0
-#define dbg_debugfs_exit_fs(c)                     0
+static inline int ubifs_debugging_init(struct ubifs_info *c)      { return 0; }
+static inline void ubifs_debugging_exit(struct ubifs_info *c)     { return; }
+static inline const char *dbg_ntype(int type)                     { return ""; }
+static inline const char *dbg_cstate(int cmt_state)               { return ""; }
+static inline const char *dbg_jhead(int jhead)                    { return ""; }
+static inline const char *
+dbg_get_key_dump(const struct ubifs_info *c,
+                const union ubifs_key *key)                      { return ""; }
+static inline void dbg_dump_inode(const struct ubifs_info *c,
+                                 const struct inode *inode)      { return; }
+static inline void dbg_dump_node(const struct ubifs_info *c,
+                                const void *node)                { return; }
+static inline void dbg_dump_lpt_node(const struct ubifs_info *c,
+                                    void *node, int lnum,
+                                    int offs)                    { return; }
+static inline void
+dbg_dump_budget_req(const struct ubifs_budget_req *req)           { return; }
+static inline void
+dbg_dump_lstats(const struct ubifs_lp_stats *lst)                 { return; }
+static inline void dbg_dump_budg(struct ubifs_info *c)            { return; }
+static inline void dbg_dump_lprop(const struct ubifs_info *c,
+                                 const struct ubifs_lprops *lp)  { return; }
+static inline void dbg_dump_lprops(struct ubifs_info *c)          { return; }
+static inline void dbg_dump_lpt_info(struct ubifs_info *c)        { return; }
+static inline void dbg_dump_leb(const struct ubifs_info *c,
+                               int lnum)                         { return; }
+static inline void
+dbg_dump_znode(const struct ubifs_info *c,
+              const struct ubifs_znode *znode)                   { return; }
+static inline void dbg_dump_heap(struct ubifs_info *c,
+                                struct ubifs_lpt_heap *heap,
+                                int cat)                         { return; }
+static inline void dbg_dump_pnode(struct ubifs_info *c,
+                                 struct ubifs_pnode *pnode,
+                                 struct ubifs_nnode *parent,
+                                 int iip)                        { return; }
+static inline void dbg_dump_tnc(struct ubifs_info *c)             { return; }
+static inline void dbg_dump_index(struct ubifs_info *c)           { return; }
+static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c)  { return; }
+
+static inline int dbg_walk_index(struct ubifs_info *c,
+                                dbg_leaf_callback leaf_cb,
+                                dbg_znode_callback znode_cb,
+                                void *priv)                      { return 0; }
+static inline void dbg_save_space_info(struct ubifs_info *c)      { return; }
+static inline int dbg_check_space_info(struct ubifs_info *c)      { return 0; }
+static inline int dbg_check_lprops(struct ubifs_info *c)          { return 0; }
+static inline int
+dbg_old_index_check_init(struct ubifs_info *c,
+                        struct ubifs_zbranch *zroot)             { return 0; }
+static inline int
+dbg_check_old_index(struct ubifs_info *c,
+                   struct ubifs_zbranch *zroot)                  { return 0; }
+static inline int dbg_check_cats(struct ubifs_info *c)            { return 0; }
+static inline int dbg_check_ltab(struct ubifs_info *c)            { return 0; }
+static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c)      { return 0; }
+static inline int dbg_chk_lpt_sz(struct ubifs_info *c,
+                                int action, int len)             { return 0; }
+static inline int dbg_check_synced_i_size(struct inode *inode)    { return 0; }
+static inline int dbg_check_dir_size(struct ubifs_info *c,
+                                    const struct inode *dir)     { return 0; }
+static inline int dbg_check_tnc(struct ubifs_info *c, int extra)  { return 0; }
+static inline int dbg_check_idx_size(struct ubifs_info *c,
+                                    long long idx_size)          { return 0; }
+static inline int dbg_check_filesystem(struct ubifs_info *c)      { return 0; }
+static inline void dbg_check_heap(struct ubifs_info *c,
+                                 struct ubifs_lpt_heap *heap,
+                                 int cat, int add_pos)           { return; }
+static inline int dbg_check_lpt_nodes(struct ubifs_info *c,
+       struct ubifs_cnode *cnode, int row, int col)              { return 0; }
+static inline int dbg_check_inode_size(struct ubifs_info *c,
+                                      const struct inode *inode,
+                                      loff_t size)               { return 0; }
+static inline int
+dbg_check_data_nodes_order(struct ubifs_info *c,
+                          struct list_head *head)                { return 0; }
+static inline int
+dbg_check_nondata_nodes_order(struct ubifs_info *c,
+                             struct list_head *head)             { return 0; }
+
+static inline int dbg_force_in_the_gaps(void)                     { return 0; }
+#define dbg_force_in_the_gaps_enabled 0
+#define dbg_failure_mode              0
+
+static inline int dbg_debugfs_init(void)                          { return 0; }
+static inline void dbg_debugfs_exit(void)                         { return; }
+static inline int dbg_debugfs_init_fs(struct ubifs_info *c)       { return 0; }
+static inline int dbg_debugfs_exit_fs(struct ubifs_info *c)       { return 0; }
 
 #endif /* !CONFIG_UBIFS_FS_DEBUG */
 #endif /* !__UBIFS_DEBUG_H__ */
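The !CONFIG_UBIFS_FS_DEBUG stubs above change from empty macros to static inline functions, which still compiles to nothing but keeps the arguments type-checked in non-debug builds. A hedged standalone illustration of the difference; the struct and function names are invented:

    /* Sketch of why the stubs above became static inlines: a macro stub
     * silently accepts wrong arguments, an inline stub does not. */
    #include <stdio.h>

    struct ctx { int id; };

    #ifdef MY_DEBUG
    void dbg_dump(struct ctx *c) { printf("ctx %d\n", c->id); }
    #else
    /* Old style: the argument is discarded before the compiler sees it.
     *   #define dbg_dump(c) ({})
     * New style: same zero cost, but every call site is still type-checked. */
    static inline void dbg_dump(struct ctx *c) { (void)c; }
    #endif

    int main(void)
    {
        struct ctx c = { .id = 1 };

        dbg_dump(&c);          /* fine with either variant */
        /* dbg_dump("oops");      only the inline variant rejects this */
        return 0;
    }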
index 28be1e6..b286db7 100644 (file)
@@ -1312,6 +1312,9 @@ int ubifs_fsync(struct file *file, int datasync)
 
        dbg_gen("syncing inode %lu", inode->i_ino);
 
+       if (inode->i_sb->s_flags & MS_RDONLY)
+               return 0;
+
        /*
         * VFS has already synchronized dirty pages for this inode. Synchronize
         * the inode unless this is a 'datasync()' call.
index 5ea4020..9ef9ed2 100644 (file)
@@ -293,7 +293,6 @@ xfs_buf_allocate_memory(
        size_t                  nbytes, offset;
        gfp_t                   gfp_mask = xb_to_gfp(flags);
        unsigned short          page_count, i;
-       pgoff_t                 first;
        xfs_off_t               end;
        int                     error;
 
@@ -333,7 +332,6 @@ use_alloc_page:
                return error;
 
        offset = bp->b_offset;
-       first = bp->b_file_offset >> PAGE_SHIFT;
        bp->b_flags |= _XBF_PAGES;
 
        for (i = 0; i < bp->b_page_count; i++) {
@@ -657,8 +655,6 @@ xfs_buf_readahead(
        xfs_off_t               ioff,
        size_t                  isize)
 {
-       struct backing_dev_info *bdi;
-
        if (bdi_read_congested(target->bt_bdi))
                return;
 
@@ -919,8 +915,6 @@ xfs_buf_lock(
 
        if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
                xfs_log_force(bp->b_target->bt_mount, 0);
-       if (atomic_read(&bp->b_io_remaining))
-               blk_flush_plug(current);
        down(&bp->b_sema);
        XB_SET_OWNER(bp);
 
@@ -1309,8 +1303,6 @@ xfs_buf_iowait(
 {
        trace_xfs_buf_iowait(bp, _RET_IP_);
 
-       if (atomic_read(&bp->b_io_remaining))
-               blk_flush_plug(current);
        wait_for_completion(&bp->b_iowait);
 
        trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1747,8 +1739,8 @@ xfsbufd(
        do {
                long    age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
                long    tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
-               int     count = 0;
                struct list_head tmp;
+               struct blk_plug plug;
 
                if (unlikely(freezing(current))) {
                        set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
@@ -1764,16 +1756,15 @@ xfsbufd(
 
                xfs_buf_delwri_split(target, &tmp, age);
                list_sort(NULL, &tmp, xfs_buf_cmp);
+
+               blk_start_plug(&plug);
                while (!list_empty(&tmp)) {
                        struct xfs_buf *bp;
                        bp = list_first_entry(&tmp, struct xfs_buf, b_list);
                        list_del_init(&bp->b_list);
                        xfs_bdstrat_cb(bp);
-                       count++;
                }
-               if (count)
-                       blk_flush_plug(current);
-
+               blk_finish_plug(&plug);
        } while (!kthread_should_stop());
 
        return 0;
@@ -1793,6 +1784,7 @@ xfs_flush_buftarg(
        int             pincount = 0;
        LIST_HEAD(tmp_list);
        LIST_HEAD(wait_list);
+       struct blk_plug plug;
 
        xfs_buf_runall_queues(xfsconvertd_workqueue);
        xfs_buf_runall_queues(xfsdatad_workqueue);
@@ -1807,6 +1799,8 @@ xfs_flush_buftarg(
         * we do that after issuing all the IO.
         */
        list_sort(NULL, &tmp_list, xfs_buf_cmp);
+
+       blk_start_plug(&plug);
        while (!list_empty(&tmp_list)) {
                bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
                ASSERT(target == bp->b_target);
@@ -1817,10 +1811,10 @@ xfs_flush_buftarg(
                }
                xfs_bdstrat_cb(bp);
        }
+       blk_finish_plug(&plug);
 
        if (wait) {
-               /* Expedite and wait for IO to complete. */
-               blk_flush_plug(current);
+               /* Wait for IO to complete. */
                while (!list_empty(&wait_list)) {
                        bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
 
index 508e06f..3ca7956 100644 (file)
 /*
  * XFS logging functions
  */
-static int
+static void
 __xfs_printk(
        const char              *level,
        const struct xfs_mount  *mp,
        struct va_format        *vaf)
 {
        if (mp && mp->m_fsname)
-               return printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
-       return printk("%sXFS: %pV\n", level, vaf);
+               printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+       printk("%sXFS: %pV\n", level, vaf);
 }
 
-int xfs_printk(
+void xfs_printk(
        const char              *level,
        const struct xfs_mount  *mp,
        const char              *fmt, ...)
 {
        struct va_format        vaf;
        va_list                 args;
-       int                      r;
 
        va_start(args, fmt);
 
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       r = __xfs_printk(level, mp, &vaf);
+       __xfs_printk(level, mp, &vaf);
        va_end(args);
-
-       return r;
 }
 
 #define define_xfs_printk_level(func, kern_level)              \
-int func(const struct xfs_mount *mp, const char *fmt, ...)     \
+void func(const struct xfs_mount *mp, const char *fmt, ...)    \
 {                                                              \
        struct va_format        vaf;                            \
        va_list                 args;                           \
-       int                     r;                              \
                                                                \
        va_start(args, fmt);                                    \
                                                                \
        vaf.fmt = fmt;                                          \
        vaf.va = &args;                                         \
                                                                \
-       r = __xfs_printk(kern_level, mp, &vaf);                 \
+       __xfs_printk(kern_level, mp, &vaf);                     \
        va_end(args);                                           \
-                                                               \
-       return r;                                               \
 }                                                              \
 
 define_xfs_printk_level(xfs_emerg, KERN_EMERG);
@@ -88,7 +82,7 @@ define_xfs_printk_level(xfs_info, KERN_INFO);
 define_xfs_printk_level(xfs_debug, KERN_DEBUG);
 #endif
 
-int
+void
 xfs_alert_tag(
        const struct xfs_mount  *mp,
        int                     panic_tag,
@@ -97,7 +91,6 @@ xfs_alert_tag(
        struct va_format        vaf;
        va_list                 args;
        int                     do_panic = 0;
-       int                     r;
 
        if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
                xfs_printk(KERN_ALERT, mp,
@@ -110,12 +103,10 @@ xfs_alert_tag(
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       r = __xfs_printk(KERN_ALERT, mp, &vaf);
+       __xfs_printk(KERN_ALERT, mp, &vaf);
        va_end(args);
 
        BUG_ON(do_panic);
-
-       return r;
 }
 
 void
index e77ffa1..f1b3fc1 100644 (file)
@@ -3,32 +3,34 @@
 
 struct xfs_mount;
 
-extern int xfs_printk(const char *level, const struct xfs_mount *mp,
+extern void xfs_printk(const char *level, const struct xfs_mount *mp,
                       const char *fmt, ...)
         __attribute__ ((format (printf, 3, 4)));
-extern int xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_alert_tag(const struct xfs_mount *mp, int tag,
+extern void xfs_alert_tag(const struct xfs_mount *mp, int tag,
                         const char *fmt, ...)
         __attribute__ ((format (printf, 3, 4)));
-extern int xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
-extern int xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
 
 #ifdef DEBUG
-extern int xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
 #else
-#define xfs_debug(mp, fmt, ...)        (0)
+static inline void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+{
+}
 #endif
 
 extern void assfail(char *expr, char *f, int l);
index 1ba5c45..b38e58d 100644 (file)
@@ -816,75 +816,6 @@ xfs_setup_devices(
        return 0;
 }
 
-/*
- * XFS AIL push thread support
- */
-void
-xfsaild_wakeup(
-       struct xfs_ail          *ailp,
-       xfs_lsn_t               threshold_lsn)
-{
-       /* only ever move the target forwards */
-       if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
-               ailp->xa_target = threshold_lsn;
-               wake_up_process(ailp->xa_task);
-       }
-}
-
-STATIC int
-xfsaild(
-       void    *data)
-{
-       struct xfs_ail  *ailp = data;
-       xfs_lsn_t       last_pushed_lsn = 0;
-       long            tout = 0; /* milliseconds */
-
-       while (!kthread_should_stop()) {
-               /*
-                * for short sleeps indicating congestion, don't allow us to
-                * get woken early. Otherwise all we do is bang on the AIL lock
-                * without making progress.
-                */
-               if (tout && tout <= 20)
-                       __set_current_state(TASK_KILLABLE);
-               else
-                       __set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(tout ?
-                                msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
-
-               /* swsusp */
-               try_to_freeze();
-
-               ASSERT(ailp->xa_mount->m_log);
-               if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
-                       continue;
-
-               tout = xfsaild_push(ailp, &last_pushed_lsn);
-       }
-
-       return 0;
-}      /* xfsaild */
-
-int
-xfsaild_start(
-       struct xfs_ail  *ailp)
-{
-       ailp->xa_target = 0;
-       ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
-                                   ailp->xa_mount->m_fsname);
-       if (IS_ERR(ailp->xa_task))
-               return -PTR_ERR(ailp->xa_task);
-       return 0;
-}
-
-void
-xfsaild_stop(
-       struct xfs_ail  *ailp)
-{
-       kthread_stop(ailp->xa_task);
-}
-
-
 /* Catch misguided souls that try to use this interface on XFS */
 STATIC struct inode *
 xfs_fs_alloc_inode(
@@ -1191,22 +1122,12 @@ xfs_fs_sync_fs(
                return -error;
 
        if (laptop_mode) {
-               int     prev_sync_seq = mp->m_sync_seq;
-
                /*
                 * The disk must be active because we're syncing.
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
-               wake_up_process(mp->m_sync_task);
-               /*
-                * We have to wait for the sync iteration to complete.
-                * If we don't, the disk activity caused by the sync
-                * will come after the sync is completed, and that
-                * triggers another sync from laptop mode.
-                */
-               wait_event(mp->m_wait_single_sync_task,
-                               mp->m_sync_seq != prev_sync_seq);
+               flush_delayed_work_sync(&mp->m_sync_work);
        }
 
        return 0;
@@ -1490,9 +1411,6 @@ xfs_fs_fill_super(
        spin_lock_init(&mp->m_sb_lock);
        mutex_init(&mp->m_growlock);
        atomic_set(&mp->m_active_trans, 0);
-       INIT_LIST_HEAD(&mp->m_sync_list);
-       spin_lock_init(&mp->m_sync_lock);
-       init_waitqueue_head(&mp->m_wait_single_sync_task);
 
        mp->m_super = sb;
        sb->s_fs_info = mp;
@@ -1798,6 +1716,38 @@ xfs_destroy_zones(void)
 
 }
 
+STATIC int __init
+xfs_init_workqueues(void)
+{
+       /*
+        * max_active is set to 8 to give enough concurrency to allow
+        * multiple work operations on each CPU to run. This allows multiple
+        * filesystems to be running sync work concurrently, and scales with
+        * the number of CPUs in the system.
+        */
+       xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
+       if (!xfs_syncd_wq)
+               goto out;
+
+       xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
+       if (!xfs_ail_wq)
+               goto out_destroy_syncd;
+
+       return 0;
+
+out_destroy_syncd:
+       destroy_workqueue(xfs_syncd_wq);
+out:
+       return -ENOMEM;
+}
+
+STATIC void
+xfs_destroy_workqueues(void)
+{
+       destroy_workqueue(xfs_ail_wq);
+       destroy_workqueue(xfs_syncd_wq);
+}
+
 STATIC int __init
 init_xfs_fs(void)
 {
@@ -1813,10 +1763,14 @@ init_xfs_fs(void)
        if (error)
                goto out;
 
-       error = xfs_mru_cache_init();
+       error = xfs_init_workqueues();
        if (error)
                goto out_destroy_zones;
 
+       error = xfs_mru_cache_init();
+       if (error)
+               goto out_destroy_wq;
+
        error = xfs_filestream_init();
        if (error)
                goto out_mru_cache_uninit;
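Both the xfs_init_workqueues() hunk and the init_xfs_fs() error path above follow the usual goto-unwind style: each successfully initialised resource gets a matching label, and a later failure jumps to the point that tears down exactly what was already set up, in reverse order. A hedged standalone sketch of the pattern with invented resources:

    /* Sketch of the goto-unwind error handling used above; the "resources"
     * are invented stand-ins for the workqueues and caches. */
    #include <stdio.h>
    #include <stdlib.h>

    static int init_all(char **a, char **b, char **c)
    {
        *a = malloc(16);
        if (!*a)
            goto out;
        *b = malloc(16);
        if (!*b)
            goto out_free_a;
        *c = malloc(16);
        if (!*c)
            goto out_free_b;
        return 0;                   /* everything set up */

    out_free_b:                     /* unwind in reverse order */
        free(*b);
    out_free_a:
        free(*a);
    out:
        return -1;
    }

    int main(void)
    {
        char *a, *b, *c;

        if (init_all(&a, &b, &c))
            return 1;
        puts("all resources initialised");
        free(c); free(b); free(a);
        return 0;
    }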
@@ -1833,6 +1787,10 @@ init_xfs_fs(void)
        if (error)
                goto out_cleanup_procfs;
 
+       error = xfs_init_workqueues();
+       if (error)
+               goto out_sysctl_unregister;
+
        vfs_initquota();
 
        error = register_filesystem(&xfs_fs_type);
@@ -1850,6 +1808,8 @@ init_xfs_fs(void)
        xfs_filestream_uninit();
  out_mru_cache_uninit:
        xfs_mru_cache_uninit();
+ out_destroy_wq:
+       xfs_destroy_workqueues();
  out_destroy_zones:
        xfs_destroy_zones();
  out:
@@ -1866,6 +1826,7 @@ exit_xfs_fs(void)
        xfs_buf_terminate();
        xfs_filestream_uninit();
        xfs_mru_cache_uninit();
+       xfs_destroy_workqueues();
        xfs_destroy_zones();
 }
 
index 9cf35a6..e4f9c1b 100644 (file)
@@ -22,6 +22,7 @@
 #include "xfs_log.h"
 #include "xfs_inum.h"
 #include "xfs_trans.h"
+#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
@@ -39,6 +40,8 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 
+struct workqueue_struct        *xfs_syncd_wq;  /* sync workqueue */
+
 /*
  * The inode lookup is done in batches to keep the amount of lock traffic and
  * radix tree lookups to a minimum. The batch size is a trade off between
@@ -431,62 +434,12 @@ xfs_quiesce_attr(
        xfs_unmountfs_writesb(mp);
 }
 
-/*
- * Enqueue a work item to be picked up by the vfs xfssyncd thread.
- * Doing this has two advantages:
- * - It saves on stack space, which is tight in certain situations
- * - It can be used (with care) as a mechanism to avoid deadlocks.
- * Flushing while allocating in a full filesystem requires both.
- */
-STATIC void
-xfs_syncd_queue_work(
-       struct xfs_mount *mp,
-       void            *data,
-       void            (*syncer)(struct xfs_mount *, void *),
-       struct completion *completion)
-{
-       struct xfs_sync_work *work;
-
-       work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
-       INIT_LIST_HEAD(&work->w_list);
-       work->w_syncer = syncer;
-       work->w_data = data;
-       work->w_mount = mp;
-       work->w_completion = completion;
-       spin_lock(&mp->m_sync_lock);
-       list_add_tail(&work->w_list, &mp->m_sync_list);
-       spin_unlock(&mp->m_sync_lock);
-       wake_up_process(mp->m_sync_task);
-}
-
-/*
- * Flush delayed allocate data, attempting to free up reserved space
- * from existing allocations.  At this point a new allocation attempt
- * has failed with ENOSPC and we are in the process of scratching our
- * heads, looking about for more room...
- */
-STATIC void
-xfs_flush_inodes_work(
-       struct xfs_mount *mp,
-       void            *arg)
-{
-       struct inode    *inode = arg;
-       xfs_sync_data(mp, SYNC_TRYLOCK);
-       xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
-       iput(inode);
-}
-
-void
-xfs_flush_inodes(
-       xfs_inode_t     *ip)
+static void
+xfs_syncd_queue_sync(
+       struct xfs_mount        *mp)
 {
-       struct inode    *inode = VFS_I(ip);
-       DECLARE_COMPLETION_ONSTACK(completion);
-
-       igrab(inode);
-       xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
-       wait_for_completion(&completion);
-       xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
+       queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
+                               msecs_to_jiffies(xfs_syncd_centisecs * 10));
 }
 
 /*
@@ -496,9 +449,10 @@ xfs_flush_inodes(
  */
 STATIC void
 xfs_sync_worker(
-       struct xfs_mount *mp,
-       void            *unused)
+       struct work_struct *work)
 {
+       struct xfs_mount *mp = container_of(to_delayed_work(work),
+                                       struct xfs_mount, m_sync_work);
        int             error;
 
        if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
@@ -508,73 +462,106 @@ xfs_sync_worker(
                        error = xfs_fs_log_dummy(mp);
                else
                        xfs_log_force(mp, 0);
-               xfs_reclaim_inodes(mp, 0);
                error = xfs_qm_sync(mp, SYNC_TRYLOCK);
+
+               /* start pushing all the metadata that is currently dirty */
+               xfs_ail_push_all(mp->m_ail);
        }
-       mp->m_sync_seq++;
-       wake_up(&mp->m_wait_single_sync_task);
+
+       /* queue us up again */
+       xfs_syncd_queue_sync(mp);
 }
 
-STATIC int
-xfssyncd(
-       void                    *arg)
+/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs syncd work default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or too
+ * aggressive.
+ */
+static void
+xfs_syncd_queue_reclaim(
+       struct xfs_mount        *mp)
 {
-       struct xfs_mount        *mp = arg;
-       long                    timeleft;
-       xfs_sync_work_t         *work, *n;
-       LIST_HEAD               (tmp);
-
-       set_freezable();
-       timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
-       for (;;) {
-               if (list_empty(&mp->m_sync_list))
-                       timeleft = schedule_timeout_interruptible(timeleft);
-               /* swsusp */
-               try_to_freeze();
-               if (kthread_should_stop() && list_empty(&mp->m_sync_list))
-                       break;
 
-               spin_lock(&mp->m_sync_lock);
-               /*
-                * We can get woken by laptop mode, to do a sync -
-                * that's the (only!) case where the list would be
-                * empty with time remaining.
-                */
-               if (!timeleft || list_empty(&mp->m_sync_list)) {
-                       if (!timeleft)
-                               timeleft = xfs_syncd_centisecs *
-                                                       msecs_to_jiffies(10);
-                       INIT_LIST_HEAD(&mp->m_sync_work.w_list);
-                       list_add_tail(&mp->m_sync_work.w_list,
-                                       &mp->m_sync_list);
-               }
-               list_splice_init(&mp->m_sync_list, &tmp);
-               spin_unlock(&mp->m_sync_lock);
+       /*
+        * We can have inodes enter reclaim after we've shut down the syncd
+        * workqueue during unmount, so don't allow reclaim work to be queued
+        * during unmount.
+        */
+       if (!(mp->m_super->s_flags & MS_ACTIVE))
+               return;
 
-               list_for_each_entry_safe(work, n, &tmp, w_list) {
-                       (*work->w_syncer)(mp, work->w_data);
-                       list_del(&work->w_list);
-                       if (work == &mp->m_sync_work)
-                               continue;
-                       if (work->w_completion)
-                               complete(work->w_completion);
-                       kmem_free(work);
-               }
+       rcu_read_lock();
+       if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+               queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
+                       msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
        }
+       rcu_read_unlock();
+}
 
-       return 0;
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+STATIC void
+xfs_reclaim_worker(
+       struct work_struct *work)
+{
+       struct xfs_mount *mp = container_of(to_delayed_work(work),
+                                       struct xfs_mount, m_reclaim_work);
+
+       xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+       xfs_syncd_queue_reclaim(mp);
+}
+
+/*
+ * Flush delayed allocate data, attempting to free up reserved space
+ * from existing allocations.  At this point a new allocation attempt
+ * has failed with ENOSPC and we are in the process of scratching our
+ * heads, looking about for more room.
+ *
+ * Queue a new data flush if there isn't one already in progress and
+ * wait for completion of the flush. This means that we only ever have one
+ * inode flush in progress no matter how many ENOSPC events are occurring and
+ * so will prevent the system from bogging down due to every concurrent
+ * ENOSPC event scanning all the active inodes in the system for writeback.
+ */
+void
+xfs_flush_inodes(
+       struct xfs_inode        *ip)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+
+       queue_work(xfs_syncd_wq, &mp->m_flush_work);
+       flush_work_sync(&mp->m_flush_work);
+}
+
+STATIC void
+xfs_flush_worker(
+       struct work_struct *work)
+{
+       struct xfs_mount *mp = container_of(work,
+                                       struct xfs_mount, m_flush_work);
+
+       xfs_sync_data(mp, SYNC_TRYLOCK);
+       xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
 }
 
 int
 xfs_syncd_init(
        struct xfs_mount        *mp)
 {
-       mp->m_sync_work.w_syncer = xfs_sync_worker;
-       mp->m_sync_work.w_mount = mp;
-       mp->m_sync_work.w_completion = NULL;
-       mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
-       if (IS_ERR(mp->m_sync_task))
-               return -PTR_ERR(mp->m_sync_task);
+       INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
+       INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
+       INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
+
+       xfs_syncd_queue_sync(mp);
+       xfs_syncd_queue_reclaim(mp);
+
        return 0;
 }
 
@@ -582,7 +569,9 @@ void
 xfs_syncd_stop(
        struct xfs_mount        *mp)
 {
-       kthread_stop(mp->m_sync_task);
+       cancel_delayed_work_sync(&mp->m_sync_work);
+       cancel_delayed_work_sync(&mp->m_reclaim_work);
+       cancel_work_sync(&mp->m_flush_work);
 }
 
 void
@@ -601,6 +590,10 @@ __xfs_inode_set_reclaim_tag(
                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                XFS_ICI_RECLAIM_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
+
+               /* schedule periodic background inode reclaim */
+               xfs_syncd_queue_reclaim(ip->i_mount);
+
                trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
                                                        -1, _RET_IP_);
        }
@@ -1017,7 +1010,13 @@ xfs_reclaim_inodes(
 }
 
 /*
- * Shrinker infrastructure.
+ * Inode cache shrinker.
+ *
+ * When called we make sure that there is a background (fast) inode reclaim in
+ * progress, while we will throttle the speed of reclaim via doing synchronous
+ * reclaim of inodes. That means if we come across dirty inodes, we wait for
+ * them to be cleaned, which we hope will not be very long due to the
+ * background walker having already kicked the IO off on those dirty inodes.
  */
 static int
 xfs_reclaim_inode_shrink(
@@ -1032,10 +1031,15 @@ xfs_reclaim_inode_shrink(
 
        mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
        if (nr_to_scan) {
+               /* kick background reclaimer and push the AIL */
+               xfs_syncd_queue_reclaim(mp);
+               xfs_ail_push_all(mp->m_ail);
+
                if (!(gfp_mask & __GFP_FS))
                        return -1;
 
-               xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
+               xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
+                                       &nr_to_scan);
                /* terminate if we don't exhaust the scan */
                if (nr_to_scan > 0)
                        return -1;
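
Expressed as a generic shrinker, the policy in the comment above is: always kick the
cheap asynchronous reclaim so I/O gets started, refuse to do anything synchronous if
the caller's GFP mask forbids recursing into the filesystem, and otherwise throttle
the allocator by scanning synchronously until the requested count is used up. A
compressed sketch with hypothetical helpers and a foo_cache struct that embeds its
struct shrinker:

    static int foo_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
    {
            struct foo_cache *c = container_of(shrink, struct foo_cache, shrinker);

            if (nr_to_scan) {
                    foo_kick_background_reclaim(c);   /* async: start I/O early */
                    if (!(gfp_mask & __GFP_FS))
                            return -1;                /* must not recurse into the fs */
                    foo_reclaim_sync(c, &nr_to_scan); /* sync: wait for objects to clean */
                    if (nr_to_scan > 0)
                            return -1;                /* scan not exhausted, give up */
            }
            return foo_count_reclaimable(c);          /* tell the VM what is left */
    }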
index 32ba662..e3a6ad2 100644 (file)
@@ -32,6 +32,8 @@ typedef struct xfs_sync_work {
 #define SYNC_WAIT              0x0001  /* wait for i/o to complete */
 #define SYNC_TRYLOCK           0x0002  /* only try to lock inodes */
 
+extern struct workqueue_struct *xfs_syncd_wq;  /* sync workqueue */
+
 int xfs_syncd_init(struct xfs_mount *mp);
 void xfs_syncd_stop(struct xfs_mount *mp);
 
index 254ee06..69228aa 100644 (file)
@@ -461,12 +461,10 @@ xfs_qm_dqflush_all(
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        int                     recl;
        struct xfs_dquot        *dqp;
-       int                     niters;
        int                     error;
 
        if (!q)
                return 0;
-       niters = 0;
 again:
        mutex_lock(&q->qi_dqlist_lock);
        list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
@@ -1314,14 +1312,9 @@ xfs_qm_dqiter_bufs(
 {
        xfs_buf_t       *bp;
        int             error;
-       int             notcommitted;
-       int             incr;
        int             type;
 
        ASSERT(blkcnt > 0);
-       notcommitted = 0;
-       incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
-               XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
        type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
                (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
        error = 0;
index c9446f1..567b29b 100644 (file)
@@ -65,11 +65,6 @@ extern kmem_zone_t   *qm_dqtrxzone;
  * block in the dquot/xqm code.
  */
 #define XFS_DQUOT_CLUSTER_SIZE_FSB     (xfs_filblks_t)1
-/*
- * When doing a quotacheck, we log dquot clusters of this many FSBs at most
- * in a single transaction. We don't want to ask for too huge a log reservation.
- */
-#define XFS_QM_MAX_DQCLUSTER_LOGSZ     3
 
 typedef xfs_dqhash_t   xfs_dqlist_t;
 
index 0d62a07..2dadb15 100644 (file)
@@ -313,14 +313,12 @@ xfs_qm_scall_quotaon(
 {
        int             error;
        uint            qf;
-       uint            accflags;
        __int64_t       sbflags;
 
        flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
        /*
         * Switching on quota accounting must be done at mount time.
         */
-       accflags = flags & XFS_ALL_QUOTA_ACCT;
        flags &= ~(XFS_ALL_QUOTA_ACCT);
 
        sbflags = 0;
index 4bc3c64..27d64d7 100644 (file)
@@ -2395,17 +2395,33 @@ xfs_free_extent(
        memset(&args, 0, sizeof(xfs_alloc_arg_t));
        args.tp = tp;
        args.mp = tp->t_mountp;
+
+       /*
+        * validate that the block number is legal - this enables us to detect
+        * and handle a silent filesystem corruption rather than crashing.
+        */
        args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
-       ASSERT(args.agno < args.mp->m_sb.sb_agcount);
+       if (args.agno >= args.mp->m_sb.sb_agcount)
+               return EFSCORRUPTED;
+
        args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
+       if (args.agbno >= args.mp->m_sb.sb_agblocks)
+               return EFSCORRUPTED;
+
        args.pag = xfs_perag_get(args.mp, args.agno);
-       if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
+       ASSERT(args.pag);
+
+       error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
+       if (error)
                goto error0;
-#ifdef DEBUG
-       ASSERT(args.agbp != NULL);
-       ASSERT((args.agbno + len) <=
-               be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
-#endif
+
+       /* validate the extent size is legal now that we have the agf locked */
+       if (args.agbno + len >
+                       be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
+               error = EFSCORRUPTED;
+               goto error0;
+       }
+
        error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
 error0:
        xfs_perag_put(args.pag);
index 46cc401..576fdfe 100644 (file)
@@ -197,6 +197,41 @@ xfs_inode_item_size(
        return nvecs;
 }
 
+/*
+ * xfs_inode_item_format_extents - convert in-core extents to on-disk form
+ *
+ * For either the data or attr fork in extent format, we need to endian convert
+ * the in-core extent as we place them into the on-disk inode. In this case, we
+ * need to do this conversion before we write the extents into the log. Because
+ * we don't have the disk inode to write into here, we allocate a buffer and
+ * format the extents into it via xfs_iextents_copy(). We free the buffer in
+ * the unlock routine after the copy for the log has been made.
+ *
+ * In the case of the data fork, the in-core and on-disk fork sizes can be
+ * different due to delayed allocation extents. We only log on-disk extents
+ * here, so always use the physical fork size to determine the size of the
+ * buffer we need to allocate.
+ */
+STATIC void
+xfs_inode_item_format_extents(
+       struct xfs_inode        *ip,
+       struct xfs_log_iovec    *vecp,
+       int                     whichfork,
+       int                     type)
+{
+       xfs_bmbt_rec_t          *ext_buffer;
+
+       ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP);
+       if (whichfork == XFS_DATA_FORK)
+               ip->i_itemp->ili_extents_buf = ext_buffer;
+       else
+               ip->i_itemp->ili_aextents_buf = ext_buffer;
+
+       vecp->i_addr = ext_buffer;
+       vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork);
+       vecp->i_type = type;
+}
+
 /*
  * This is called to fill in the vector of log iovecs for the
  * given inode log item.  It fills the first item with an inode
@@ -213,7 +248,6 @@ xfs_inode_item_format(
        struct xfs_inode        *ip = iip->ili_inode;
        uint                    nvecs;
        size_t                  data_bytes;
-       xfs_bmbt_rec_t          *ext_buffer;
        xfs_mount_t             *mp;
 
        vecp->i_addr = &iip->ili_format;
@@ -320,22 +354,8 @@ xfs_inode_item_format(
                        } else
 #endif
                        {
-                               /*
-                                * There are delayed allocation extents
-                                * in the inode, or we need to convert
-                                * the extents to on disk format.
-                                * Use xfs_iextents_copy()
-                                * to copy only the real extents into
-                                * a separate buffer.  We'll free the
-                                * buffer in the unlock routine.
-                                */
-                               ext_buffer = kmem_alloc(ip->i_df.if_bytes,
-                                       KM_SLEEP);
-                               iip->ili_extents_buf = ext_buffer;
-                               vecp->i_addr = ext_buffer;
-                               vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
-                                               XFS_DATA_FORK);
-                               vecp->i_type = XLOG_REG_TYPE_IEXT;
+                               xfs_inode_item_format_extents(ip, vecp,
+                                       XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
                        }
                        ASSERT(vecp->i_len <= ip->i_df.if_bytes);
                        iip->ili_format.ilf_dsize = vecp->i_len;
@@ -445,19 +465,12 @@ xfs_inode_item_format(
                         */
                        vecp->i_addr = ip->i_afp->if_u1.if_extents;
                        vecp->i_len = ip->i_afp->if_bytes;
+                       vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
 #else
                        ASSERT(iip->ili_aextents_buf == NULL);
-                       /*
-                        * Need to endian flip before logging
-                        */
-                       ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
-                               KM_SLEEP);
-                       iip->ili_aextents_buf = ext_buffer;
-                       vecp->i_addr = ext_buffer;
-                       vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
-                                       XFS_ATTR_FORK);
+                       xfs_inode_item_format_extents(ip, vecp,
+                                       XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
 #endif
-                       vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
                        iip->ili_format.ilf_asize = vecp->i_len;
                        vecp++;
                        nvecs++;
index dc1882a..751e94f 100644 (file)
@@ -204,7 +204,6 @@ xfs_bulkstat(
        xfs_agi_t               *agi;   /* agi header data */
        xfs_agino_t             agino;  /* inode # in allocation group */
        xfs_agnumber_t          agno;   /* allocation group number */
-       xfs_daddr_t             bno;    /* inode cluster start daddr */
        int                     chunkidx; /* current index into inode chunk */
        int                     clustidx; /* current index into inode cluster */
        xfs_btree_cur_t         *cur;   /* btree cursor for ialloc btree */
@@ -463,7 +462,6 @@ xfs_bulkstat(
                                                 mp->m_sb.sb_inopblog);
                                }
                                ino = XFS_AGINO_TO_INO(mp, agno, agino);
-                               bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
                                /*
                                 * Skip if this inode is free.
                                 */
index 25efa9b..b612ce4 100644 (file)
@@ -761,7 +761,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
                break;
        case XLOG_STATE_COVER_NEED:
        case XLOG_STATE_COVER_NEED2:
-               if (!xfs_trans_ail_tail(log->l_ailp) &&
+               if (!xfs_ail_min_lsn(log->l_ailp) &&
                    xlog_iclogs_empty(log)) {
                        if (log->l_covered_state == XLOG_STATE_COVER_NEED)
                                log->l_covered_state = XLOG_STATE_COVER_DONE;
@@ -801,7 +801,7 @@ xlog_assign_tail_lsn(
        xfs_lsn_t               tail_lsn;
        struct log              *log = mp->m_log;
 
-       tail_lsn = xfs_trans_ail_tail(mp->m_ail);
+       tail_lsn = xfs_ail_min_lsn(mp->m_ail);
        if (!tail_lsn)
                tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
@@ -1239,7 +1239,7 @@ xlog_grant_push_ail(
         * the filesystem is shutting down.
         */
        if (!XLOG_FORCED_SHUTDOWN(log))
-               xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+               xfs_ail_push(log->l_ailp, threshold_lsn);
 }
 
 /*
@@ -3407,6 +3407,17 @@ xlog_verify_dest_ptr(
                xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
 }
 
+/*
+ * Check to make sure the grant write head didn't just overlap the tail.  If
+ * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
+ * the cycles differ by exactly one and check the byte count.
+ *
+ * This check is run unlocked, so can give false positives. Rather than assert
+ * on failures, use a warn-once flag and a panic tag to allow the admin to
+ * determine if they want to panic the machine when such an error occurs. For
+ * debug kernels this will have the same effect as using an assert but, unlike
+ * an assert, it can be turned off at runtime.
+ */
 STATIC void
 xlog_verify_grant_tail(
        struct log      *log)
@@ -3414,17 +3425,22 @@ xlog_verify_grant_tail(
        int             tail_cycle, tail_blocks;
        int             cycle, space;
 
-       /*
-        * Check to make sure the grant write head didn't just over lap the
-        * tail.  If the cycles are the same, we can't be overlapping.
-        * Otherwise, make sure that the cycles differ by exactly one and
-        * check the byte count.
-        */
        xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
        xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
        if (tail_cycle != cycle) {
-               ASSERT(cycle - 1 == tail_cycle);
-               ASSERT(space <= BBTOB(tail_blocks));
+               if (cycle - 1 != tail_cycle &&
+                   !(log->l_flags & XLOG_TAIL_WARN)) {
+                       xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+                               "%s: cycle - 1 != tail_cycle", __func__);
+                       log->l_flags |= XLOG_TAIL_WARN;
+               }
+
+               if (space > BBTOB(tail_blocks) &&
+                   !(log->l_flags & XLOG_TAIL_WARN)) {
+                       xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+                               "%s: space > BBTOB(tail_blocks)", __func__);
+                       log->l_flags |= XLOG_TAIL_WARN;
+               }
        }
 }
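
Since the check now runs without the AIL lock it can misfire, so the response must be
a warning rather than an assert. The essential shape is a latched, tagged alert: warn
on the first apparent violation, set a flag so it is never repeated, and leave it to
the XFS_PTAG_LOGRES error tag whether that warning escalates to a panic:

    if (violation_detected && !(log->l_flags & XLOG_TAIL_WARN)) {
            xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
                          "%s: grant head overlaps tail", __func__);
            log->l_flags |= XLOG_TAIL_WARN;      /* latch: warn only once */
    }

(violation_detected stands in for the cycle/byte-count comparisons above.)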
 
index ffae692..5864850 100644 (file)
@@ -144,6 +144,7 @@ static inline uint xlog_get_client_id(__be32 i)
 #define        XLOG_RECOVERY_NEEDED    0x4     /* log was recovered */
 #define XLOG_IO_ERROR          0x8     /* log hit an I/O error, and being
                                           shutdown */
+#define XLOG_TAIL_WARN         0x10    /* log tail verify warning issued */
 
 #ifdef __KERNEL__
 /*
index a62e897..19af0ab 100644 (file)
@@ -203,12 +203,9 @@ typedef struct xfs_mount {
        struct mutex            m_icsb_mutex;   /* balancer sync lock */
 #endif
        struct xfs_mru_cache    *m_filestream;  /* per-mount filestream data */
-       struct task_struct      *m_sync_task;   /* generalised sync thread */
-       xfs_sync_work_t         m_sync_work;    /* work item for VFS_SYNC */
-       struct list_head        m_sync_list;    /* sync thread work item list */
-       spinlock_t              m_sync_lock;    /* work item list lock */
-       int                     m_sync_seq;     /* sync thread generation no. */
-       wait_queue_head_t       m_wait_single_sync_task;
+       struct delayed_work     m_sync_work;    /* background sync work */
+       struct delayed_work     m_reclaim_work; /* background inode reclaim */
+       struct work_struct      m_flush_work;   /* background inode flush */
        __int64_t               m_update_flags; /* sb flags we need to update
                                                   on the next remount,rw */
        struct shrinker         m_inode_shrink; /* inode reclaim shrinker */
index 12aff95..acdb92f 100644 (file)
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
-STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
-STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
+struct workqueue_struct        *xfs_ail_wq;    /* AIL workqueue */
 
 #ifdef DEBUG
-STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *);
-#else
+/*
+ * Check that the list is sorted as it should be.
+ */
+STATIC void
+xfs_ail_check(
+       struct xfs_ail  *ailp,
+       xfs_log_item_t  *lip)
+{
+       xfs_log_item_t  *prev_lip;
+
+       if (list_empty(&ailp->xa_ail))
+               return;
+
+       /*
+        * Check the next and previous entries are valid.
+        */
+       ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+       prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
+       if (&prev_lip->li_ail != &ailp->xa_ail)
+               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+
+       prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
+       if (&prev_lip->li_ail != &ailp->xa_ail)
+               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
+
+
+#ifdef XFS_TRANS_DEBUG
+       /*
+        * Walk the list checking lsn ordering, and that every entry has the
+        * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
+        * when specifically debugging the transaction subsystem.
+        */
+       prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+       list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
+               if (&prev_lip->li_ail != &ailp->xa_ail)
+                       ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+               ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+               prev_lip = lip;
+       }
+#endif /* XFS_TRANS_DEBUG */
+}
+#else /* !DEBUG */
 #define        xfs_ail_check(a,l)
 #endif /* DEBUG */
 
+/*
+ * Return a pointer to the first item in the AIL.  If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_min(
+       struct xfs_ail  *ailp)
+{
+       if (list_empty(&ailp->xa_ail))
+               return NULL;
+
+       return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+}
+
+/*
+ * Return a pointer to the last item in the AIL.  If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_max(
+       struct xfs_ail  *ailp)
+{
+       if (list_empty(&ailp->xa_ail))
+               return NULL;
+
+       return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
+}
+
+/*
+ * Return a pointer to the item which follows the given item in the AIL.  If
+ * the given item is the last item in the list, then return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_next(
+       struct xfs_ail  *ailp,
+       xfs_log_item_t  *lip)
+{
+       if (lip->li_ail.next == &ailp->xa_ail)
+               return NULL;
+
+       return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
+}
 
 /*
- * This is called by the log manager code to determine the LSN
- * of the tail of the log.  This is exactly the LSN of the first
- * item in the AIL.  If the AIL is empty, then this function
- * returns 0.
+ * This is called by the log manager code to determine the LSN of the tail of
+ * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
+ * is empty, then this function returns 0.
  *
- * We need the AIL lock in order to get a coherent read of the
- * lsn of the last item in the AIL.
+ * We need the AIL lock in order to get a coherent read of the lsn of the last
+ * item in the AIL.
  */
 xfs_lsn_t
-xfs_trans_ail_tail(
+xfs_ail_min_lsn(
        struct xfs_ail  *ailp)
 {
-       xfs_lsn_t       lsn;
+       xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;
 
        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_min(ailp);
-       if (lip == NULL) {
-               lsn = (xfs_lsn_t)0;
-       } else {
+       if (lip)
                lsn = lip->li_lsn;
-       }
        spin_unlock(&ailp->xa_lock);
 
        return lsn;
 }
 
 /*
- * xfs_trans_push_ail
- *
- * This routine is called to move the tail of the AIL forward.  It does this by
- * trying to flush items in the AIL whose lsns are below the given
- * threshold_lsn.
- *
- * the push is run asynchronously in a separate thread, so we return the tail
- * of the log right now instead of the tail after the push. This means we will
- * either continue right away, or we will sleep waiting on the async thread to
- * do its work.
- *
- * We do this unlocked - we only need to know whether there is anything in the
- * AIL at the time we are called. We don't need to access the contents of
- * any of the objects, so the lock is not needed.
+ * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
  */
-void
-xfs_trans_ail_push(
-       struct xfs_ail  *ailp,
-       xfs_lsn_t       threshold_lsn)
+static xfs_lsn_t
+xfs_ail_max_lsn(
+       struct xfs_ail  *ailp)
 {
-       xfs_log_item_t  *lip;
+       xfs_lsn_t       lsn = 0;
+       xfs_log_item_t  *lip;
 
-       lip = xfs_ail_min(ailp);
-       if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
-               if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0)
-                       xfsaild_wakeup(ailp, threshold_lsn);
-       }
+       spin_lock(&ailp->xa_lock);
+       lip = xfs_ail_max(ailp);
+       if (lip)
+               lsn = lip->li_lsn;
+       spin_unlock(&ailp->xa_lock);
+
+       return lsn;
 }
 
 /*
@@ -236,16 +300,57 @@ out:
 }
 
 /*
- * xfsaild_push does the work of pushing on the AIL.  Returning a timeout of
- * zero indicates that the caller should sleep until woken.
+ * splice the log item list into the AIL at the given LSN.
  */
-long
-xfsaild_push(
-       struct xfs_ail  *ailp,
-       xfs_lsn_t       *last_lsn)
+static void
+xfs_ail_splice(
+       struct xfs_ail  *ailp,
+       struct list_head *list,
+       xfs_lsn_t       lsn)
 {
-       long            tout = 0;
-       xfs_lsn_t       last_pushed_lsn = *last_lsn;
+       xfs_log_item_t  *next_lip;
+
+       /* If the list is empty, just splice in the new list. */
+       if (list_empty(&ailp->xa_ail)) {
+               list_splice(list, &ailp->xa_ail);
+               return;
+       }
+
+       list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+               if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+                       break;
+       }
+
+       ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+              XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
+
+       list_splice_init(list, &next_lip->li_ail);
+}
+
+/*
+ * Delete the given item from the AIL.
+ */
+static void
+xfs_ail_delete(
+       struct xfs_ail  *ailp,
+       xfs_log_item_t  *lip)
+{
+       xfs_ail_check(ailp, lip);
+       list_del(&lip->li_ail);
+       xfs_trans_ail_cursor_clear(ailp, lip);
+}
+
+/*
+ * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
+ * to run at a later time if there is more work to do to complete the push.
+ */
+STATIC void
+xfs_ail_worker(
+       struct work_struct *work)
+{
+       struct xfs_ail  *ailp = container_of(to_delayed_work(work),
+                                       struct xfs_ail, xa_work);
+       long            tout;
        xfs_lsn_t       target =  ailp->xa_target;
        xfs_lsn_t       lsn;
        xfs_log_item_t  *lip;
@@ -256,15 +361,15 @@ xfsaild_push(
 
        spin_lock(&ailp->xa_lock);
        xfs_trans_ail_cursor_init(ailp, cur);
-       lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn);
+       lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
        if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
                /*
                 * AIL is empty or our push has reached the end.
                 */
                xfs_trans_ail_cursor_done(ailp, cur);
                spin_unlock(&ailp->xa_lock);
-               *last_lsn = 0;
-               return tout;
+               ailp->xa_last_pushed_lsn = 0;
+               return;
        }
 
        XFS_STATS_INC(xs_push_ail);
@@ -301,13 +406,13 @@ xfsaild_push(
                case XFS_ITEM_SUCCESS:
                        XFS_STATS_INC(xs_push_ail_success);
                        IOP_PUSH(lip);
-                       last_pushed_lsn = lsn;
+                       ailp->xa_last_pushed_lsn = lsn;
                        break;
 
                case XFS_ITEM_PUSHBUF:
                        XFS_STATS_INC(xs_push_ail_pushbuf);
                        IOP_PUSHBUF(lip);
-                       last_pushed_lsn = lsn;
+                       ailp->xa_last_pushed_lsn = lsn;
                        push_xfsbufd = 1;
                        break;
 
@@ -319,7 +424,7 @@ xfsaild_push(
 
                case XFS_ITEM_LOCKED:
                        XFS_STATS_INC(xs_push_ail_locked);
-                       last_pushed_lsn = lsn;
+                       ailp->xa_last_pushed_lsn = lsn;
                        stuck++;
                        break;
 
@@ -374,9 +479,23 @@ xfsaild_push(
                wake_up_process(mp->m_ddev_targp->bt_task);
        }
 
+       /* assume we have more work to do in a short while */
+       tout = 10;
        if (!count) {
                /* We're past our target or empty, so idle */
-               last_pushed_lsn = 0;
+               ailp->xa_last_pushed_lsn = 0;
+
+               /*
+                * Check for an updated push target before clearing the
+                * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
+                * work to do. Wait a bit longer before starting that work.
+                */
+               smp_rmb();
+               if (ailp->xa_target == target) {
+                       clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+                       return;
+               }
+               tout = 50;
        } else if (XFS_LSN_CMP(lsn, target) >= 0) {
                /*
                 * We reached the target so wait a bit longer for I/O to
@@ -384,7 +503,7 @@ xfsaild_push(
                 * start the next scan from the start of the AIL.
                 */
                tout = 50;
-               last_pushed_lsn = 0;
+               ailp->xa_last_pushed_lsn = 0;
        } else if ((stuck * 100) / count > 90) {
                /*
                 * Either there is a lot of contention on the AIL or we
@@ -396,14 +515,61 @@ xfsaild_push(
                 * continuing from where we were.
                 */
                tout = 20;
-       } else {
-               /* more to do, but wait a short while before continuing */
-               tout = 10;
        }
-       *last_lsn = last_pushed_lsn;
-       return tout;
+
+       /* There is more to do, requeue us.  */
+       queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
+                                       msecs_to_jiffies(tout));
+}
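
The old xfsaild thread slept for the timeout returned by xfsaild_push(); the worker
above achieves the same cadence by re-queueing its own delayed work with the computed
delay, and by simply not re-queueing when the AIL is empty. The self-rearming shape,
reduced to a sketch with hypothetical names (struct foo, foo_do_some_work,
foo_is_congested) and a work item set up via INIT_DELAYED_WORK():

    struct foo {
            struct delayed_work     work;
            /* ... state being worked on ... */
    };

    static void foo_worker(struct work_struct *work)
    {
            struct foo *f = container_of(to_delayed_work(work), struct foo, work);
            long delay_ms = 10;                     /* assume more work very soon */

            if (foo_do_some_work(f) == 0)
                    return;                         /* nothing left: do not rearm */

            if (foo_is_congested(f))
                    delay_ms = 20;                  /* back off under contention */

            queue_delayed_work(system_wq, &f->work, msecs_to_jiffies(delay_ms));
    }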
+
+/*
+ * This routine is called to move the tail of the AIL forward.  It does this by
+ * trying to flush items in the AIL whose lsns are below the given
+ * threshold_lsn.
+ *
+ * The push is run asynchronously in a workqueue, which means the caller needs
+ * to handle waiting on the async flush for space to become available.
+ * We don't want to interrupt any push that is in progress, hence we only queue
+ * new work if we successfully set the pushing bit.
+ *
+ * We do this unlocked - we only need to know whether there is anything in the
+ * AIL at the time we are called. We don't need to access the contents of
+ * any of the objects, so the lock is not needed.
+ */
+void
+xfs_ail_push(
+       struct xfs_ail  *ailp,
+       xfs_lsn_t       threshold_lsn)
+{
+       xfs_log_item_t  *lip;
+
+       lip = xfs_ail_min(ailp);
+       if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
+           XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
+               return;
+
+       /*
+        * Ensure that the new target is noticed in push code before it clears
+        * the XFS_AIL_PUSHING_BIT.
+        */
+       smp_wmb();
+       ailp->xa_target = threshold_lsn;
+       if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+               queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
 }
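
Pulling the two halves of the target/pushing-bit handshake out of the surrounding
code makes the pairing easier to see. The pusher publishes the new target and queues
work only if it was the one to set the bit; the worker re-reads the target before
clearing the bit, so an update that races with the end of a push is never lost:

    /* pusher: publish a new target, queue the work at most once */
    smp_wmb();                              /* pairs with smp_rmb() in the worker */
    ailp->xa_target = threshold_lsn;
    if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
            queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);

    /* worker, once it believes it is done: re-check before going idle */
    smp_rmb();
    if (ailp->xa_target == target) {
            clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
            return;
    }
    /* the target moved while we were finishing, so keep pushing */

These fragments are lifted from the hunks above rather than being new code; they are
shown together only to make the barrier pairing explicit.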
 
+/*
+ * Push out all items in the AIL immediately
+ */
+void
+xfs_ail_push_all(
+       struct xfs_ail  *ailp)
+{
+       xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);
+
+       if (threshold_lsn)
+               xfs_ail_push(ailp, threshold_lsn);
+}
 
 /*
  * This is to be called when an item is unlocked that may have
@@ -615,7 +781,6 @@ xfs_trans_ail_init(
        xfs_mount_t     *mp)
 {
        struct xfs_ail  *ailp;
-       int             error;
 
        ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
        if (!ailp)
@@ -624,15 +789,9 @@ xfs_trans_ail_init(
        ailp->xa_mount = mp;
        INIT_LIST_HEAD(&ailp->xa_ail);
        spin_lock_init(&ailp->xa_lock);
-       error = xfsaild_start(ailp);
-       if (error)
-               goto out_free_ailp;
+       INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
        mp->m_ail = ailp;
        return 0;
-
-out_free_ailp:
-       kmem_free(ailp);
-       return error;
 }
 
 void
@@ -641,124 +800,6 @@ xfs_trans_ail_destroy(
 {
        struct xfs_ail  *ailp = mp->m_ail;
 
-       xfsaild_stop(ailp);
+       cancel_delayed_work_sync(&ailp->xa_work);
        kmem_free(ailp);
 }
-
-/*
- * splice the log item list into the AIL at the given LSN.
- */
-STATIC void
-xfs_ail_splice(
-       struct xfs_ail  *ailp,
-       struct list_head *list,
-       xfs_lsn_t       lsn)
-{
-       xfs_log_item_t  *next_lip;
-
-       /*
-        * If the list is empty, just insert the item.
-        */
-       if (list_empty(&ailp->xa_ail)) {
-               list_splice(list, &ailp->xa_ail);
-               return;
-       }
-
-       list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
-               if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
-                       break;
-       }
-
-       ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
-              (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
-
-       list_splice_init(list, &next_lip->li_ail);
-       return;
-}
-
-/*
- * Delete the given item from the AIL.  Return a pointer to the item.
- */
-STATIC void
-xfs_ail_delete(
-       struct xfs_ail  *ailp,
-       xfs_log_item_t  *lip)
-{
-       xfs_ail_check(ailp, lip);
-       list_del(&lip->li_ail);
-       xfs_trans_ail_cursor_clear(ailp, lip);
-}
-
-/*
- * Return a pointer to the first item in the AIL.
- * If the AIL is empty, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_min(
-       struct xfs_ail  *ailp)
-{
-       if (list_empty(&ailp->xa_ail))
-               return NULL;
-
-       return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-}
-
-/*
- * Return a pointer to the item which follows
- * the given item in the AIL.  If the given item
- * is the last item in the list, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_next(
-       struct xfs_ail  *ailp,
-       xfs_log_item_t  *lip)
-{
-       if (lip->li_ail.next == &ailp->xa_ail)
-               return NULL;
-
-       return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
-}
-
-#ifdef DEBUG
-/*
- * Check that the list is sorted as it should be.
- */
-STATIC void
-xfs_ail_check(
-       struct xfs_ail  *ailp,
-       xfs_log_item_t  *lip)
-{
-       xfs_log_item_t  *prev_lip;
-
-       if (list_empty(&ailp->xa_ail))
-               return;
-
-       /*
-        * Check the next and previous entries are valid.
-        */
-       ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
-       prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
-       if (&prev_lip->li_ail != &ailp->xa_ail)
-               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-
-       prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
-       if (&prev_lip->li_ail != &ailp->xa_ail)
-               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
-
-
-#ifdef XFS_TRANS_DEBUG
-       /*
-        * Walk the list checking lsn ordering, and that every entry has the
-        * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
-        * when specifically debugging the transaction subsystem.
-        */
-       prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-       list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
-               if (&prev_lip->li_ail != &ailp->xa_ail)
-                       ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-               ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
-               prev_lip = lip;
-       }
-#endif /* XFS_TRANS_DEBUG */
-}
-#endif /* DEBUG */
index 35162c2..6b164e9 100644 (file)
@@ -65,16 +65,22 @@ struct xfs_ail_cursor {
 struct xfs_ail {
        struct xfs_mount        *xa_mount;
        struct list_head        xa_ail;
-       uint                    xa_gen;
-       struct task_struct      *xa_task;
        xfs_lsn_t               xa_target;
        struct xfs_ail_cursor   xa_cursors;
        spinlock_t              xa_lock;
+       struct delayed_work     xa_work;
+       xfs_lsn_t               xa_last_pushed_lsn;
+       unsigned long           xa_flags;
 };
 
+#define XFS_AIL_PUSHING_BIT    0
+
 /*
  * From xfs_trans_ail.c
  */
+
+extern struct workqueue_struct *xfs_ail_wq;    /* AIL workqueue */
+
 void   xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
                                struct xfs_log_item **log_items, int nr_items,
                                xfs_lsn_t lsn) __releases(ailp->xa_lock);
@@ -98,12 +104,13 @@ xfs_trans_ail_delete(
        xfs_trans_ail_delete_bulk(ailp, &lip, 1);
 }
 
-void                   xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
+void                   xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
+void                   xfs_ail_push_all(struct xfs_ail *);
+xfs_lsn_t              xfs_ail_min_lsn(struct xfs_ail *ailp);
+
 void                   xfs_trans_unlocked_item(struct xfs_ail *,
                                        xfs_log_item_t *);
 
-xfs_lsn_t              xfs_trans_ail_tail(struct xfs_ail *ailp);
-
 struct xfs_log_item    *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
                                        struct xfs_ail_cursor *cur,
                                        xfs_lsn_t lsn);
@@ -112,11 +119,6 @@ struct xfs_log_item        *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
 void                   xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
                                        struct xfs_ail_cursor *cur);
 
-long   xfsaild_push(struct xfs_ail *, xfs_lsn_t *);
-void   xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t);
-int    xfsaild_start(struct xfs_ail *);
-void   xfsaild_stop(struct xfs_ail *);
-
 #if BITS_PER_LONG != 64
 static inline void
 xfs_trans_ail_copy_lsn(
index 8e20540..089fe43 100644 (file)
@@ -12,6 +12,7 @@
 /**
  * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
  * @oscillator_frequency:       - oscillator frequency in Hz
+ * @irq_flags:                  - IRQF configuration flags
  * @board_specific_setup:       - called before probing the chip (power,reset)
  * @transceiver_enable:         - called to power on/off the transceiver
  * @power_enable:               - called to power on/off the mcp *and* the
@@ -24,6 +25,7 @@
 
 struct mcp251x_platform_data {
        unsigned long oscillator_frequency;
+       unsigned long irq_flags;
        int (*board_specific_setup)(struct spi_device *spi);
        int (*transceiver_enable)(int enable);
        int (*power_enable) (int enable);
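
With the new irq_flags field a board file can describe how the controller's INT line
is wired alongside the rest of the chip description. A hedged example of the usual
SPI board-info registration (all values here are made-up, board-specific choices):

    #include <linux/interrupt.h>
    #include <linux/spi/spi.h>
    #include <linux/can/platform/mcp251x.h>

    static struct mcp251x_platform_data mcp251x_info = {
            .oscillator_frequency   = 16000000,             /* 16 MHz crystal */
            .irq_flags              = IRQF_TRIGGER_FALLING, /* INT line is active low */
    };

    static struct spi_board_info board_spi_devices[] __initdata = {
            {
                    .modalias       = "mcp2515",
                    .platform_data  = &mcp251x_info,
                    .max_speed_hz   = 10 * 1000 * 1000,
                    .bus_num        = 1,
                    .chip_select    = 0,
            },
    };

registered from the board's init code with spi_register_board_info(board_spi_devices,
ARRAY_SIZE(board_spi_devices)).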
index 5a5ce70..5e9840f 100644 (file)
@@ -216,7 +216,7 @@ static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
        return ;
 }
 
-static inline inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
+static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
 {
        return ;
 }
index ad1b19a..aef2330 100644 (file)
@@ -86,16 +86,25 @@ extern int mfd_clone_cell(const char *cell, const char **clones,
  */
 static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
 {
-       return pdev->dev.platform_data;
+       return pdev->mfd_cell;
 }
 
 /*
  * Given a platform device that's been created by mfd_add_devices(), fetch
  * the .mfd_data entry from the mfd_cell that created it.
+ * Otherwise just return the platform_data pointer.
+ * This maintains compatibility with platform drivers whose devices aren't
+ * created by the mfd layer, and expect platform_data to contain what would've
+ * otherwise been in mfd_data.
  */
 static inline void *mfd_get_data(struct platform_device *pdev)
 {
-       return mfd_get_cell(pdev)->mfd_data;
+       const struct mfd_cell *cell = mfd_get_cell(pdev);
+
+       if (cell)
+               return cell->mfd_data;
+       else
+               return pdev->dev.platform_data;
 }
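
For client drivers the fallback makes the call site uniform: whether the platform
device came from mfd_add_devices() or was registered directly with platform data, the
same accessor returns the right pointer. A hedged sketch of a probe routine using it
(the driver and its foo_pdata type are hypothetical):

    static int foo_probe(struct platform_device *pdev)
    {
            /* mfd_data if MFD-created, dev.platform_data otherwise */
            struct foo_pdata *pdata = mfd_get_data(pdev);

            if (!pdata)
                    return -ENODEV;

            /* ... set the device up from pdata ... */
            return 0;
    }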
 
 extern int mfd_add_devices(struct device *parent, int id,
index eeec00a..7fa95df 100644 (file)
@@ -270,7 +270,8 @@ struct nf_afinfo {
                                            unsigned int dataoff,
                                            unsigned int len,
                                            u_int8_t protocol);
-       int             (*route)(struct dst_entry **dst, struct flowi *fl);
+       int             (*route)(struct net *net, struct dst_entry **dst,
+                                struct flowi *fl, bool strict);
        void            (*saveroute)(const struct sk_buff *skb,
                                     struct nf_queue_entry *entry);
        int             (*reroute)(struct sk_buff *skb,
index ec333d8..5a262e3 100644 (file)
@@ -293,7 +293,7 @@ struct ip_set {
        /* Lock protecting the set data */
        rwlock_t lock;
        /* References to the set */
-       atomic_t ref;
+       u32 ref;
        /* The core set type */
        struct ip_set_type *type;
        /* The type variant doing the real job */
index ec9d9be..a0196ac 100644 (file)
@@ -515,8 +515,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
        if (h->netmask != HOST_MASK)
                NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
 #endif
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
        NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
        if (with_timeout(h->timeout))
                NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
index d96db98..744942c 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 
+struct mfd_cell;
+
 struct platform_device {
        const char      * name;
        int             id;
@@ -23,6 +25,9 @@ struct platform_device {
 
        const struct platform_device_id *id_entry;
 
+       /* MFD cell pointer */
+       struct mfd_cell *mfd_cell;
+
        /* arch specific additions */
        struct pdev_archdata    archdata;
 };
index 4e37a7c..4d50611 100644 (file)
@@ -396,7 +396,7 @@ union rio_pw_msg {
 };
 
 /* Architecture and hardware-specific functions */
-extern void rio_register_mport(struct rio_mport *);
+extern int rio_register_mport(struct rio_mport *);
 extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
 extern void rio_close_inb_mbox(struct rio_mport *, int);
 extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int);
index 7410d33..0cee015 100644 (file)
@@ -35,6 +35,7 @@
 #define RIO_DID_IDTCPS6Q               0x035f
 #define RIO_DID_IDTCPS10Q              0x035e
 #define RIO_DID_IDTCPS1848             0x0374
+#define RIO_DID_IDTCPS1432             0x0375
 #define RIO_DID_IDTCPS1616             0x0379
 #define RIO_DID_IDTVPS1616             0x0377
 #define RIO_DID_IDTSPS1616             0x0378
index 4ec2c02..18d63ce 100644 (file)
@@ -1254,6 +1254,9 @@ struct task_struct {
 #endif
 
        struct mm_struct *mm, *active_mm;
+#ifdef CONFIG_COMPAT_BRK
+       unsigned brk_randomized:1;
+#endif
 #if defined(SPLIT_RSS_COUNTING)
        struct task_rss_stat    rss_stat;
 #endif
index 5a89e36..083ffea 100644 (file)
@@ -249,6 +249,8 @@ extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
 extern bool system_entering_hibernation(void);
 #else /* CONFIG_HIBERNATION */
+static inline void register_nosave_region(unsigned long b, unsigned long e) {}
+static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
 static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
 static inline void swsusp_set_page_free(struct page *p) {}
 static inline void swsusp_unset_page_free(struct page *p) {}
@@ -297,14 +299,7 @@ static inline bool pm_wakeup_pending(void) { return false; }
 
 extern struct mutex pm_mutex;
 
-#ifndef CONFIG_HIBERNATION
-static inline void register_nosave_region(unsigned long b, unsigned long e)
-{
-}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e)
-{
-}
-
+#ifndef CONFIG_HIBERNATE_CALLBACKS
 static inline void lock_system_sleep(void) {}
 static inline void unlock_system_sleep(void) {}
 
index 461c011..2b3831b 100644 (file)
@@ -58,6 +58,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
                UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
                UNEVICTABLE_MLOCKFREED,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               THP_FAULT_ALLOC,
+               THP_FAULT_FALLBACK,
+               THP_COLLAPSE_ALLOC,
+               THP_COLLAPSE_ALLOC_FAILED,
+               THP_SPLIT,
+#endif
                NR_VM_EVENT_ITEMS
 };
 
index 814b434..d516f00 100644 (file)
@@ -52,7 +52,7 @@ static inline struct net *skb_net(const struct sk_buff *skb)
         */
        if (likely(skb->dev && skb->dev->nd_net))
                return dev_net(skb->dev);
-       if (skb_dst(skb)->dev)
+       if (skb_dst(skb) && skb_dst(skb)->dev)
                return dev_net(skb_dst(skb)->dev);
        WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
                      __func__, __LINE__);
index cb13239..025d4cc 100644 (file)
@@ -1753,8 +1753,19 @@ enum ieee80211_ampdu_mlme_action {
  *     that TX/RX_STOP can pass NULL for this parameter.
  *     The @buf_size parameter is only valid when the action is set to
  *     %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
- *     buffer size (number of subframes) for this session -- aggregates
- *     containing more subframes than this may not be transmitted to the peer.
+ *     buffer size (number of subframes) for this session -- the driver
+ *     may neither send aggregates containing more subframes than this
+ *     nor send aggregates in a way that lost frames would exceed the
+ *     buffer size. If just limiting the aggregate size, this would be
+ *     possible with a buf_size of 8:
+ *      - TX: 1.....7
+ *      - RX:  2....7 (lost frame #1)
+ *      - TX:        8..1...
+ *     which is invalid since #1 was now re-transmitted well past the
+ *     buffer size of 8. Correct ways to retransmit #1 would be:
+ *      - TX:       1 or 18 or 81
+ *     Even "189" would be wrong since 1 could be lost again.
+ *
  *     Returns a negative error code on failure.
  *     The callback can sleep.
  *
index f88429c..8fce062 100644 (file)
@@ -64,6 +64,7 @@ struct rtable {
 
        __be32                  rt_dst; /* Path destination     */
        __be32                  rt_src; /* Path source          */
+       int                     rt_route_iif;
        int                     rt_iif;
        int                     rt_oif;
        __u32                   rt_mark;
@@ -80,12 +81,12 @@ struct rtable {
 
 static inline bool rt_is_input_route(struct rtable *rt)
 {
-       return rt->rt_iif != 0;
+       return rt->rt_route_iif != 0;
 }
 
 static inline bool rt_is_output_route(struct rtable *rt)
 {
-       return rt->rt_iif == 0;
+       return rt->rt_route_iif == 0;
 }
 
 struct ip_rt_acct {
index 4603f08..6de9a8f 100644 (file)
@@ -18,9 +18,13 @@ config SUSPEND_FREEZER
 
          Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HIBERNATE_CALLBACKS
+       bool
+
 config HIBERNATION
        bool "Hibernation (aka 'suspend to disk')"
        depends on SWAP && ARCH_HIBERNATION_POSSIBLE
+       select HIBERNATE_CALLBACKS
        select LZO_COMPRESS
        select LZO_DECOMPRESS
        ---help---
@@ -85,7 +89,7 @@ config PM_STD_PARTITION
 
 config PM_SLEEP
        def_bool y
-       depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
+       depends on SUSPEND || HIBERNATE_CALLBACKS
 
 config PM_SLEEP_SMP
        def_bool y
index 4801363..a187c3f 100644 (file)
@@ -4111,20 +4111,20 @@ need_resched:
                                        try_to_wake_up_local(to_wakeup);
                        }
                        deactivate_task(rq, prev, DEQUEUE_SLEEP);
+
+                       /*
+                        * If we are going to sleep and we have plugged IO queued, make
+                        * sure to submit it to avoid deadlocks.
+                        */
+                       if (blk_needs_flush_plug(prev)) {
+                               raw_spin_unlock(&rq->lock);
+                               blk_flush_plug(prev);
+                               raw_spin_lock(&rq->lock);
+                       }
                }
                switch_count = &prev->nvcsw;
        }
 
-       /*
-        * If we are going to sleep and we have plugged IO queued, make
-        * sure to submit it to avoid deadlocks.
-        */
-       if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
-               raw_spin_unlock(&rq->lock);
-               blk_flush_plug(prev);
-               raw_spin_lock(&rq->lock);
-       }
-
        pre_schedule(rq, prev);
 
        if (unlikely(!rq->nr_running))
index 05672e8..a235f3c 100644 (file)
@@ -49,12 +49,9 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
                        val = *s - '0';
                else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
                        val = _tolower(*s) - 'a' + 10;
-               else if (*s == '\n') {
-                       if (*(s + 1) == '\0')
-                               break;
-                       else
-                               return -EINVAL;
-               } else
+               else if (*s == '\n' && *(s + 1) == '\0')
+                       break;
+               else
                        return -EINVAL;
 
                if (val >= base)
index 325c2f9..d55769d 100644 (file)
@@ -315,12 +315,12 @@ static void __init test_kstrtou64_ok(void)
                {"65537",       10,     65537},
                {"2147483646",  10,     2147483646},
                {"2147483647",  10,     2147483647},
-               {"2147483648",  10,     2147483648},
-               {"2147483649",  10,     2147483649},
-               {"4294967294",  10,     4294967294},
-               {"4294967295",  10,     4294967295},
-               {"4294967296",  10,     4294967296},
-               {"4294967297",  10,     4294967297},
+               {"2147483648",  10,     2147483648ULL},
+               {"2147483649",  10,     2147483649ULL},
+               {"4294967294",  10,     4294967294ULL},
+               {"4294967295",  10,     4294967295ULL},
+               {"4294967296",  10,     4294967296ULL},
+               {"4294967297",  10,     4294967297ULL},
                {"9223372036854775806", 10,     9223372036854775806ULL},
                {"9223372036854775807", 10,     9223372036854775807ULL},
                {"9223372036854775808", 10,     9223372036854775808ULL},
@@ -369,12 +369,12 @@ static void __init test_kstrtos64_ok(void)
                {"65537",       10,     65537},
                {"2147483646",  10,     2147483646},
                {"2147483647",  10,     2147483647},
-               {"2147483648",  10,     2147483648},
-               {"2147483649",  10,     2147483649},
-               {"4294967294",  10,     4294967294},
-               {"4294967295",  10,     4294967295},
-               {"4294967296",  10,     4294967296},
-               {"4294967297",  10,     4294967297},
+               {"2147483648",  10,     2147483648LL},
+               {"2147483649",  10,     2147483649LL},
+               {"4294967294",  10,     4294967294LL},
+               {"4294967295",  10,     4294967295LL},
+               {"4294967296",  10,     4294967296LL},
+               {"4294967297",  10,     4294967297LL},
                {"9223372036854775806", 10,     9223372036854775806LL},
                {"9223372036854775807", 10,     9223372036854775807LL},
        };
@@ -418,10 +418,10 @@ static void __init test_kstrtou32_ok(void)
                {"65537",       10,     65537},
                {"2147483646",  10,     2147483646},
                {"2147483647",  10,     2147483647},
-               {"2147483648",  10,     2147483648},
-               {"2147483649",  10,     2147483649},
-               {"4294967294",  10,     4294967294},
-               {"4294967295",  10,     4294967295},
+               {"2147483648",  10,     2147483648U},
+               {"2147483649",  10,     2147483649U},
+               {"4294967294",  10,     4294967294U},
+               {"4294967295",  10,     4294967295U},
        };
        TEST_OK(kstrtou32, u32, "%u", test_u32_ok);
 }
index 0a619e0..470dcda 100644 (file)
@@ -244,24 +244,28 @@ static ssize_t single_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag flag)
 {
-       if (test_bit(flag, &transparent_hugepage_flags))
-               return sprintf(buf, "[yes] no\n");
-       else
-               return sprintf(buf, "yes [no]\n");
+       return sprintf(buf, "%d\n",
+                      !!test_bit(flag, &transparent_hugepage_flags));
 }
+
 static ssize_t single_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag flag)
 {
-       if (!memcmp("yes", buf,
-                   min(sizeof("yes")-1, count))) {
+       unsigned long value;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &value);
+       if (ret < 0)
+               return ret;
+       if (value > 1)
+               return -EINVAL;
+
+       if (value)
                set_bit(flag, &transparent_hugepage_flags);
-       } else if (!memcmp("no", buf,
-                          min(sizeof("no")-1, count))) {
+       else
                clear_bit(flag, &transparent_hugepage_flags);
-       } else
-               return -EINVAL;
 
        return count;
 }
@@ -680,8 +684,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        return VM_FAULT_OOM;
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                                          vma, haddr, numa_node_id(), 0);
-               if (unlikely(!page))
+               if (unlikely(!page)) {
+                       count_vm_event(THP_FAULT_FALLBACK);
                        goto out;
+               }
+               count_vm_event(THP_FAULT_ALLOC);
                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
                        put_page(page);
                        goto out;
@@ -909,11 +916,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                new_page = NULL;
 
        if (unlikely(!new_page)) {
+               count_vm_event(THP_FAULT_FALLBACK);
                ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                                   pmd, orig_pmd, page, haddr);
                put_page(page);
                goto out;
        }
+       count_vm_event(THP_FAULT_ALLOC);
 
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
@@ -1390,6 +1399,7 @@ int split_huge_page(struct page *page)
 
        BUG_ON(!PageSwapBacked(page));
        __split_huge_page(page, anon_vma);
+       count_vm_event(THP_SPLIT);
 
        BUG_ON(PageCompound(page));
 out_unlock:
@@ -1784,9 +1794,11 @@ static void collapse_huge_page(struct mm_struct *mm,
                                      node, __GFP_OTHER_NODE);
        if (unlikely(!new_page)) {
                up_read(&mm->mmap_sem);
+               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                *hpage = ERR_PTR(-ENOMEM);
                return;
        }
+       count_vm_event(THP_COLLAPSE_ALLOC);
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                up_read(&mm->mmap_sem);
                put_page(new_page);
@@ -2151,8 +2163,11 @@ static void khugepaged_do_scan(struct page **hpage)
 #ifndef CONFIG_NUMA
                if (!*hpage) {
                        *hpage = alloc_hugepage(khugepaged_defrag());
-                       if (unlikely(!*hpage))
+                       if (unlikely(!*hpage)) {
+                               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                                break;
+                       }
+                       count_vm_event(THP_COLLAPSE_ALLOC);
                }
 #else
                if (IS_ERR(*hpage))
@@ -2192,8 +2207,11 @@ static struct page *khugepaged_alloc_hugepage(void)
 
        do {
                hpage = alloc_hugepage(khugepaged_defrag());
-               if (!hpage)
+               if (!hpage) {
+                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                        khugepaged_alloc_sleep();
+               } else
+                       count_vm_event(THP_COLLAPSE_ALLOC);
        } while (unlikely(!hpage) &&
                 likely(khugepaged_enabled()));
        return hpage;
@@ -2210,8 +2228,11 @@ static void khugepaged_loop(void)
        while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
                hpage = khugepaged_alloc_hugepage();
-               if (unlikely(!hpage))
+               if (unlikely(!hpage)) {
+                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                        break;
+               }
+               count_vm_event(THP_COLLAPSE_ALLOC);
 #else
                if (IS_ERR(hpage)) {
                        khugepaged_alloc_sleep();
index 9da8cab..ce22a25 100644 (file)
@@ -1410,6 +1410,13 @@ no_page_table:
        return page;
 }
 
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+       return (vma->vm_flags & VM_GROWSDOWN) &&
+               (vma->vm_start == addr) &&
+               !vma_stack_continue(vma->vm_prev, addr);
+}
+
 /**
  * __get_user_pages() - pin user pages in memory
  * @tsk:       task_struct of target task
@@ -1488,7 +1495,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                vma = find_extend_vma(mm, start);
                if (!vma && in_gate_area(mm, start)) {
                        unsigned long pg = start & PAGE_MASK;
-                       struct vm_area_struct *gate_vma = get_gate_vma(mm);
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;
@@ -1513,10 +1519,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                pte_unmap(pte);
                                return i ? : -EFAULT;
                        }
+                       vma = get_gate_vma(mm);
                        if (pages) {
                                struct page *page;
 
-                               page = vm_normal_page(gate_vma, start, *pte);
+                               page = vm_normal_page(vma, start, *pte);
                                if (!page) {
                                        if (!(gup_flags & FOLL_DUMP) &&
                                             is_zero_pfn(pte_pfn(*pte)))
@@ -1530,12 +1537,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                get_page(page);
                        }
                        pte_unmap(pte);
-                       if (vmas)
-                               vmas[i] = gate_vma;
-                       i++;
-                       start += PAGE_SIZE;
-                       nr_pages--;
-                       continue;
+                       goto next_page;
                }
 
                if (!vma ||
@@ -1549,6 +1551,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        continue;
                }
 
+               /*
+                * If we don't actually want the page itself,
+                * and it's the stack guard page, just skip it.
+                */
+               if (!pages && stack_guard_page(vma, start))
+                       goto next_page;
+
                do {
                        struct page *page;
                        unsigned int foll_flags = gup_flags;
@@ -1631,6 +1640,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                flush_anon_page(vma, page, start);
                                flush_dcache_page(page);
                        }
+next_page:
                        if (vmas)
                                vmas[i] = vma;
                        i++;
@@ -3678,7 +3688,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
                         */
 #ifdef CONFIG_HAVE_IOREMAP_PROT
                        vma = find_vma(mm, addr);
-                       if (!vma)
+                       if (!vma || vma->vm_start > addr)
                                break;
                        if (vma->vm_ops && vma->vm_ops->access)
                                ret = vma->vm_ops->access(vma, addr, buf,
index a2acaf8..9ca1d60 100644 (file)
@@ -375,7 +375,7 @@ void online_page(struct page *page)
 #endif
 
 #ifdef CONFIG_FLATMEM
-       max_mapnr = max(page_to_pfn(page), max_mapnr);
+       max_mapnr = max(pfn, max_mapnr);
 #endif
 
        ClearPageReserved(page);
index 2689a08..6b55e3e 100644 (file)
@@ -135,13 +135,6 @@ void munlock_vma_page(struct page *page)
        }
 }
 
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
-{
-       return (vma->vm_flags & VM_GROWSDOWN) &&
-               (vma->vm_start == addr) &&
-               !vma_stack_continue(vma->vm_prev, addr);
-}
-
 /**
  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
  * @vma:   target vma
@@ -188,12 +181,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        if (vma->vm_flags & VM_LOCKED)
                gup_flags |= FOLL_MLOCK;
 
-       /* We don't try to access the guard page of a stack vma */
-       if (stack_guard_page(vma, start)) {
-               addr += PAGE_SIZE;
-               nr_pages--;
-       }
-
        return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
                                NULL, NULL, nonblocking);
 }
index 2ec8eb5..e27e0cf 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -259,7 +259,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
         * randomize_va_space to 2, which will still cause mm->start_brk
         * to be arbitrarily shifted
         */
-       if (mm->start_brk > PAGE_ALIGN(mm->end_data))
+       if (current->brk_randomized)
                min_brk = mm->start_brk;
        else
                min_brk = mm->end_data;
@@ -1814,11 +1814,14 @@ static int expand_downwards(struct vm_area_struct *vma,
                size = vma->vm_end - address;
                grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
-               error = acct_stack_growth(vma, size, grow);
-               if (!error) {
-                       vma->vm_start = address;
-                       vma->vm_pgoff -= grow;
-                       perf_event_mmap(vma);
+               error = -ENOMEM;
+               if (grow <= vma->vm_pgoff) {
+                       error = acct_stack_growth(vma, size, grow);
+                       if (!error) {
+                               vma->vm_start = address;
+                               vma->vm_pgoff -= grow;
+                               perf_event_mmap(vma);
+                       }
                }
        }
        vma_unlock_anon_vma(vma);
index 6a819d1..83fb72c 100644 (file)
@@ -83,24 +83,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
 }
 #endif /* CONFIG_NUMA */
 
-/*
- * If this is a system OOM (not a memcg OOM) and the task selected to be
- * killed is not already running at high (RT) priorities, speed up the
- * recovery by boosting the dying task to the lowest FIFO priority.
- * That helps with the recovery and avoids interfering with RT tasks.
- */
-static void boost_dying_task_prio(struct task_struct *p,
-                                 struct mem_cgroup *mem)
-{
-       struct sched_param param = { .sched_priority = 1 };
-
-       if (mem)
-               return;
-
-       if (!rt_task(p))
-               sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-}
-
 /*
  * The process p may have detached its own ->mm while exiting or through
  * use_mm(), but one or more of its subthreads may still have a valid
@@ -452,13 +434,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
        set_tsk_thread_flag(p, TIF_MEMDIE);
        force_sig(SIGKILL, p);
 
-       /*
-        * We give our sacrificial lamb high priority and access to
-        * all the memory it needs. That way it should be able to
-        * exit() and clear out its resources quickly...
-        */
-       boost_dying_task_prio(p, mem);
-
        return 0;
 }
 #undef K
@@ -482,7 +457,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         */
        if (p->flags & PF_EXITING) {
                set_tsk_thread_flag(p, TIF_MEMDIE);
-               boost_dying_task_prio(p, mem);
                return 0;
        }
 
@@ -556,7 +530,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
         */
        if (fatal_signal_pending(current)) {
                set_thread_flag(TIF_MEMDIE);
-               boost_dying_task_prio(current, NULL);
                return;
        }
 
@@ -712,7 +685,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
         */
        if (fatal_signal_pending(current)) {
                set_thread_flag(TIF_MEMDIE);
-               boost_dying_task_prio(current, NULL);
                return;
        }
 
index 2747f5e..9f8a97b 100644 (file)
@@ -3176,7 +3176,7 @@ static __init_refok int __build_all_zonelists(void *data)
  * Called with zonelists_mutex held always
  * unless system_state == SYSTEM_BOOTING.
  */
-void build_all_zonelists(void *data)
+void __ref build_all_zonelists(void *data)
 {
        set_zonelist_order();
 
index 58da7c1..8fa27e4 100644 (file)
@@ -421,7 +421,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
                 * a waste to allocate index if we cannot allocate data.
                 */
                if (sbinfo->max_blocks) {
-                       if (percpu_counter_compare(&sbinfo->used_blocks, (sbinfo->max_blocks - 1)) > 0)
+                       if (percpu_counter_compare(&sbinfo->used_blocks,
+                                               sbinfo->max_blocks - 1) >= 0)
                                return ERR_PTR(-ENOSPC);
                        percpu_counter_inc(&sbinfo->used_blocks);
                        spin_lock(&inode->i_lock);
@@ -1397,7 +1398,8 @@ repeat:
                shmem_swp_unmap(entry);
                sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks) {
-                       if ((percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) > 0) ||
+                       if (percpu_counter_compare(&sbinfo->used_blocks,
+                                               sbinfo->max_blocks) >= 0 ||
                            shmem_acct_block(info->flags)) {
                                spin_unlock(&info->lock);
                                error = -ENOSPC;
index c7f5a6d..f6b435c 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
+#include <linux/oom.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -1988,17 +1989,12 @@ static bool zone_reclaimable(struct zone *zone)
        return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
 }
 
-/*
- * As hibernation is going on, kswapd is freezed so that it can't mark
- * the zone into all_unreclaimable. It can't handle OOM during hibernation.
- * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
- */
+/* All zones in zonelist are unreclaimable? */
 static bool all_unreclaimable(struct zonelist *zonelist,
                struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
-       bool all_unreclaimable = true;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2006,13 +2002,11 @@ static bool all_unreclaimable(struct zonelist *zonelist,
                        continue;
                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
-               if (zone_reclaimable(zone)) {
-                       all_unreclaimable = false;
-                       break;
-               }
+               if (!zone->all_unreclaimable)
+                       return false;
        }
 
-       return all_unreclaimable;
+       return true;
 }
 
 /*
@@ -2108,6 +2102,14 @@ out:
        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;
 
+       /*
+        * As hibernation is going on, kswapd is freezed so that it can't mark
+        * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
+        * check.
+        */
+       if (oom_killer_disabled)
+               return 0;
+
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
                return 1;
index 772b39b..897ea9e 100644 (file)
@@ -321,9 +321,12 @@ static inline void mod_state(struct zone *zone,
                /*
                 * The fetching of the stat_threshold is racy. We may apply
                 * a counter threshold to the wrong the cpu if we get
-                * rescheduled while executing here. However, the following
-                * will apply the threshold again and therefore bring the
-                * counter under the threshold.
+                * rescheduled while executing here. However, the next
+                * counter update will apply the threshold again and
+                * therefore bring the counter under the threshold again.
+                *
+                * Most of the time the thresholds are the same anyways
+                * for all cpus in a zone.
                 */
                t = this_cpu_read(pcp->stat_threshold);
 
@@ -945,7 +948,16 @@ static const char * const vmstat_text[] = {
        "unevictable_pgs_cleared",
        "unevictable_pgs_stranded",
        "unevictable_pgs_mlockfreed",
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       "thp_fault_alloc",
+       "thp_fault_fallback",
+       "thp_collapse_alloc",
+       "thp_collapse_alloc_failed",
+       "thp_split",
 #endif
+
+#endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
 
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
index 50af027..5a80f41 100644 (file)
@@ -579,9 +579,15 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
 
        list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
                                 r_linger_osd) {
-               __unregister_linger_request(osdc, req);
+               /*
+                * reregister request prior to unregistering linger so
+                * that r_osd is preserved.
+                */
+               BUG_ON(!list_empty(&req->r_req_lru_item));
                __register_request(osdc, req);
-               list_move(&req->r_req_lru_item, &osdc->req_unsent);
+               list_add(&req->r_req_lru_item, &osdc->req_unsent);
+               list_add(&req->r_osd_item, &req->r_osd->o_requests);
+               __unregister_linger_request(osdc, req);
                dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
                     osd->o_osd);
        }
@@ -798,7 +804,7 @@ static void __register_request(struct ceph_osd_client *osdc,
        req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
        INIT_LIST_HEAD(&req->r_req_lru_item);
 
-       dout("register_request %p tid %lld\n", req, req->r_tid);
+       dout("__register_request %p tid %lld\n", req, req->r_tid);
        __insert_request(osdc, req);
        ceph_osdc_get_request(req);
        osdc->num_requests++;
index d951f93..3da4188 100644 (file)
@@ -14,6 +14,13 @@
 #include "dsa_priv.h"
 #include "mv88e6xxx.h"
 
+/*
+ * Switch product IDs
+ */
+#define ID_6085                0x04a0
+#define ID_6095                0x0950
+#define ID_6131                0x1060
+
 static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
 {
        int ret;
@@ -21,9 +28,11 @@ static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
        ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
        if (ret >= 0) {
                ret &= 0xfff0;
-               if (ret == 0x0950)
+               if (ret == ID_6085)
+                       return "Marvell 88E6085";
+               if (ret == ID_6095)
                        return "Marvell 88E6095/88E6095F";
-               if (ret == 0x1060)
+               if (ret == ID_6131)
                        return "Marvell 88E6131";
        }
 
@@ -164,6 +173,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
 
 static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 {
+       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
        int addr = REG_PORT(p);
        u16 val;
 
@@ -171,10 +181,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
         * MAC Forcing register: don't force link, speed, duplex
         * or flow control state to any particular values on physical
         * ports, but force the CPU port and all DSA ports to 1000 Mb/s
-        * full duplex.
+        * (100 Mb/s on 6085) full duplex.
         */
        if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-               REG_WRITE(addr, 0x01, 0x003e);
+               if (ps->id == ID_6085)
+                       REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
+               else
+                       REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
        else
                REG_WRITE(addr, 0x01, 0x0003);
 
@@ -286,6 +299,8 @@ static int mv88e6131_setup(struct dsa_switch *ds)
        mv88e6xxx_ppu_state_init(ds);
        mutex_init(&ps->stats_mutex);
 
+       ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
        ret = mv88e6131_switch_reset(ds);
        if (ret < 0)
                return ret;
index eb0e0aa..61156ca 100644 (file)
@@ -39,6 +39,8 @@ struct mv88e6xxx_priv_state {
         * Hold this mutex over snapshot + dump sequences.
         */
        struct mutex    stats_mutex;
+
+       int             id; /* switch product id */
 };
 
 struct mv88e6xxx_hw_stat {
index f3c0b54..4614bab 100644 (file)
@@ -221,9 +221,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
        return csum;
 }
 
-static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip_route(struct net *net, struct dst_entry **dst,
+                      struct flowi *fl, bool strict __always_unused)
 {
-       struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4);
+       struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
        *dst = &rt->dst;
index ea10751..c1acf69 100644 (file)
@@ -1891,6 +1891,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 #ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
 #endif
+       rth->rt_route_iif = dev->ifindex;
        rth->rt_iif     = dev->ifindex;
        rth->dst.dev    = init_net.loopback_dev;
        dev_hold(rth->dst.dev);
@@ -2026,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb,
        rth->rt_key_src = saddr;
        rth->rt_src     = saddr;
        rth->rt_gateway = daddr;
+       rth->rt_route_iif = in_dev->dev->ifindex;
        rth->rt_iif     = in_dev->dev->ifindex;
        rth->dst.dev    = (out_dev)->dev;
        dev_hold(rth->dst.dev);
@@ -2202,6 +2204,7 @@ local_input:
 #ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
 #endif
+       rth->rt_route_iif = dev->ifindex;
        rth->rt_iif     = dev->ifindex;
        rth->dst.dev    = net->loopback_dev;
        dev_hold(rth->dst.dev);
@@ -2401,7 +2404,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
        rth->rt_mark    = oldflp4->flowi4_mark;
        rth->rt_dst     = fl4->daddr;
        rth->rt_src     = fl4->saddr;
-       rth->rt_iif     = 0;
+       rth->rt_route_iif = 0;
+       rth->rt_iif     = oldflp4->flowi4_oif ? : dev_out->ifindex;
        /* get references to the devices that are to be hold by the routing
           cache entry */
        rth->dst.dev    = dev_out;
@@ -2716,6 +2720,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
                rt->rt_key_dst = ort->rt_key_dst;
                rt->rt_key_src = ort->rt_key_src;
                rt->rt_tos = ort->rt_tos;
+               rt->rt_route_iif = ort->rt_route_iif;
                rt->rt_iif = ort->rt_iif;
                rt->rt_oif = ort->rt_oif;
                rt->rt_mark = ort->rt_mark;
@@ -2725,7 +2730,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
                rt->rt_type = ort->rt_type;
                rt->rt_dst = ort->rt_dst;
                rt->rt_src = ort->rt_src;
-               rt->rt_iif = ort->rt_iif;
                rt->rt_gateway = ort->rt_gateway;
                rt->rt_spec_dst = ort->rt_spec_dst;
                rt->peer = ort->peer;
index 13e0e7f..d20a05e 100644 (file)
@@ -74,6 +74,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
        rt->rt_key_dst = fl4->daddr;
        rt->rt_key_src = fl4->saddr;
        rt->rt_tos = fl4->flowi4_tos;
+       rt->rt_route_iif = fl4->flowi4_iif;
        rt->rt_iif = fl4->flowi4_iif;
        rt->rt_oif = fl4->flowi4_oif;
        rt->rt_mark = fl4->flowi4_mark;
index 39aaca2..28bc1f6 100644 (file)
@@ -90,9 +90,18 @@ static int nf_ip6_reroute(struct sk_buff *skb,
        return 0;
 }
 
-static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip6_route(struct net *net, struct dst_entry **dst,
+                       struct flowi *fl, bool strict)
 {
-       *dst = ip6_route_output(&init_net, NULL, &fl->u.ip6);
+       static const struct ipv6_pinfo fake_pinfo;
+       static const struct inet_sock fake_sk = {
+               /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
+               .sk.sk_bound_dev_if = 1,
+               .pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
+       };
+       const void *sk = strict ? &fake_sk : NULL;
+
+       *dst = ip6_route_output(net, sk, &fl->u.ip6);
        return (*dst)->error;
 }
 
index 56fa125..4f49e5d 100644 (file)
@@ -1622,6 +1622,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                opt_skb = skb_clone(skb, GFP_ATOMIC);
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+               sock_rps_save_rxhash(sk, skb->rxhash);
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
                        goto reset;
                if (opt_skb)
@@ -1649,7 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                                __kfree_skb(opt_skb);
                        return 0;
                }
-       }
+       } else
+               sock_rps_save_rxhash(sk, skb->rxhash);
 
        if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
                goto reset;
index d7037c0..15c3774 100644 (file)
@@ -505,6 +505,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
        int rc;
        int is_udplite = IS_UDPLITE(sk);
 
+       if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+               sock_rps_save_rxhash(sk, skb->rxhash);
+
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto drop;
 
index 9d192d6..c5d4530 100644 (file)
@@ -2541,7 +2541,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
                 * same TID from the same station
                 */
                rx->skb = skb;
-               rx->flags = 0;
 
                CALL_RXH(ieee80211_rx_h_decrypt)
                CALL_RXH(ieee80211_rx_h_check_more_data)
@@ -2612,6 +2611,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
                .sdata = sta->sdata,
                .local = sta->local,
                .queue = tid,
+               .flags = 0,
        };
        struct tid_ampdu_rx *tid_agg_rx;
 
index c3f988a..32bff6d 100644 (file)
@@ -652,7 +652,6 @@ comment "Xtables matches"
 config NETFILTER_XT_MATCH_ADDRTYPE
        tristate '"addrtype" address type match support'
        depends on NETFILTER_ADVANCED
-       depends on (IPV6 || IPV6=n)
        ---help---
          This option allows you to match what routing thinks of an address,
          eg. UNICAST, LOCAL, BROADCAST, ...
index bca9699..a113ff0 100644 (file)
@@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
        NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
        if (map->netmask != 32)
                NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
        NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
                      htonl(sizeof(*map) + map->memsize));
        if (with_timeout(map->timeout))
index 5e79017..00a3324 100644 (file)
@@ -434,8 +434,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
                goto nla_put_failure;
        NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
        NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
        NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
                      htonl(sizeof(*map)
                            + (map->last_ip - map->first_ip + 1) * map->dsize));
index 165f09b..6b38eb8 100644 (file)
@@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
                goto nla_put_failure;
        NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
        NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
        NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
                      htonl(sizeof(*map) + map->memsize));
        if (with_timeout(map->timeout))
index 253326e..9152e69 100644 (file)
@@ -26,6 +26,7 @@
 
 static LIST_HEAD(ip_set_type_list);            /* all registered set types */
 static DEFINE_MUTEX(ip_set_type_mutex);                /* protects ip_set_type_list */
+static DEFINE_RWLOCK(ip_set_ref_lock);         /* protects the set refs */
 
 static struct ip_set **ip_set_list;            /* all individual sets */
 static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
@@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 static inline void
 __ip_set_get(ip_set_id_t index)
 {
-       atomic_inc(&ip_set_list[index]->ref);
+       write_lock_bh(&ip_set_ref_lock);
+       ip_set_list[index]->ref++;
+       write_unlock_bh(&ip_set_ref_lock);
 }
 
 static inline void
 __ip_set_put(ip_set_id_t index)
 {
-       atomic_dec(&ip_set_list[index]->ref);
+       write_lock_bh(&ip_set_ref_lock);
+       BUG_ON(ip_set_list[index]->ref == 0);
+       ip_set_list[index]->ref--;
+       write_unlock_bh(&ip_set_ref_lock);
 }
 
 /*
@@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
        struct ip_set *set = ip_set_list[index];
        int ret = 0;
 
-       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       BUG_ON(set == NULL);
        pr_debug("set %s, index %u\n", set->name, index);
 
        if (dim < set->type->dimension ||
@@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
        struct ip_set *set = ip_set_list[index];
        int ret;
 
-       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       BUG_ON(set == NULL);
        pr_debug("set %s, index %u\n", set->name, index);
 
        if (dim < set->type->dimension ||
@@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
        struct ip_set *set = ip_set_list[index];
        int ret = 0;
 
-       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       BUG_ON(set == NULL);
        pr_debug("set %s, index %u\n", set->name, index);
 
        if (dim < set->type->dimension ||
@@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del);
  * Find set by name, reference it once. The reference makes sure the
  * thing pointed to, does not go away under our feet.
  *
- * The nfnl mutex must already be activated.
  */
 ip_set_id_t
 ip_set_get_byname(const char *name, struct ip_set **set)
@@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
  * reference count by 1. The caller shall not assume the index
  * to be valid, after calling this function.
  *
- * The nfnl mutex must already be activated.
  */
 void
 ip_set_put_byindex(ip_set_id_t index)
 {
-       if (ip_set_list[index] != NULL) {
-               BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+       if (ip_set_list[index] != NULL)
                __ip_set_put(index);
-       }
 }
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
@@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
  * can't be destroyed. The set cannot be renamed due to
  * the referencing either.
  *
- * The nfnl mutex must already be activated.
  */
 const char *
 ip_set_name_byindex(ip_set_id_t index)
@@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index)
        const struct ip_set *set = ip_set_list[index];
 
        BUG_ON(set == NULL);
-       BUG_ON(atomic_read(&set->ref) == 0);
+       BUG_ON(set->ref == 0);
 
        /* Referenced, so it's safe */
        return set->name;
@@ -515,10 +516,7 @@ void
 ip_set_nfnl_put(ip_set_id_t index)
 {
        nfnl_lock();
-       if (ip_set_list[index] != NULL) {
-               BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
-               __ip_set_put(index);
-       }
+       ip_set_put_byindex(index);
        nfnl_unlock();
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
 /*
  * Communication protocol with userspace over netlink.
  *
- * We already locked by nfnl_lock.
+ * The commands are serialized by the nfnl mutex.
  */
 
 static inline bool
@@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
                return -ENOMEM;
        rwlock_init(&set->lock);
        strlcpy(set->name, name, IPSET_MAXNAMELEN);
-       atomic_set(&set->ref, 0);
        set->family = family;
 
        /*
@@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 
        /*
         * Here, we have a valid, constructed set and we are protected
-        * by nfnl_lock. Find the first free index in ip_set_list and
-        * check clashing.
+        * by the nfnl mutex. Find the first free index in ip_set_list
+        * and check clashing.
         */
        if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
                /* If this is the same set and requested, ignore error */
@@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
               const struct nlattr * const attr[])
 {
        ip_set_id_t i;
+       int ret = 0;
 
        if (unlikely(protocol_failed(attr)))
                return -IPSET_ERR_PROTOCOL;
 
-       /* References are protected by the nfnl mutex */
+       /* Commands are serialized and references are
+        * protected by the ip_set_ref_lock.
+        * External systems (i.e. xt_set) must call
+        * ip_set_put|get_nfnl_* functions, that way we
+        * can safely check references here.
+        *
+        * list:set timer can only decrement the reference
+        * counter, so if it's already zero, we can proceed
+        * without holding the lock.
+        */
+       read_lock_bh(&ip_set_ref_lock);
        if (!attr[IPSET_ATTR_SETNAME]) {
                for (i = 0; i < ip_set_max; i++) {
-                       if (ip_set_list[i] != NULL &&
-                           (atomic_read(&ip_set_list[i]->ref)))
-                               return -IPSET_ERR_BUSY;
+                       if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
+                               ret = IPSET_ERR_BUSY;
+                               goto out;
+                       }
                }
+               read_unlock_bh(&ip_set_ref_lock);
                for (i = 0; i < ip_set_max; i++) {
                        if (ip_set_list[i] != NULL)
                                ip_set_destroy_set(i);
                }
        } else {
                i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
-               if (i == IPSET_INVALID_ID)
-                       return -ENOENT;
-               else if (atomic_read(&ip_set_list[i]->ref))
-                       return -IPSET_ERR_BUSY;
+               if (i == IPSET_INVALID_ID) {
+                       ret = -ENOENT;
+                       goto out;
+               } else if (ip_set_list[i]->ref) {
+                       ret = -IPSET_ERR_BUSY;
+                       goto out;
+               }
+               read_unlock_bh(&ip_set_ref_lock);
 
                ip_set_destroy_set(i);
        }
        return 0;
+out:
+       read_unlock_bh(&ip_set_ref_lock);
+       return ret;
 }
 
 /* Flush sets */
@@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
        struct ip_set *set;
        const char *name2;
        ip_set_id_t i;
+       int ret = 0;
 
        if (unlikely(protocol_failed(attr) ||
                     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
        set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
-       if (atomic_read(&set->ref) != 0)
-               return -IPSET_ERR_REFERENCED;
+
+       read_lock_bh(&ip_set_ref_lock);
+       if (set->ref != 0) {
+               ret = -IPSET_ERR_REFERENCED;
+               goto out;
+       }
 
        name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
        for (i = 0; i < ip_set_max; i++) {
                if (ip_set_list[i] != NULL &&
-                   STREQ(ip_set_list[i]->name, name2))
-                       return -IPSET_ERR_EXIST_SETNAME2;
+                   STREQ(ip_set_list[i]->name, name2)) {
+                       ret = -IPSET_ERR_EXIST_SETNAME2;
+                       goto out;
+               }
        }
        strncpy(set->name, name2, IPSET_MAXNAMELEN);
 
-       return 0;
+out:
+       read_unlock_bh(&ip_set_ref_lock);
+       return ret;
 }
 
 /* Swap two sets so that name/index points to the other.
  * References and set names are also swapped.
  *
- * We are protected by the nfnl mutex and references are
- * manipulated only by holding the mutex. The kernel interfaces
+ * The commands are serialized by the nfnl mutex and references are
+ * protected by the ip_set_ref_lock. The kernel interfaces
  * do not hold the mutex but the pointer settings are atomic
  * so the ip_set_list always contains valid pointers to the sets.
  */
@@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
        struct ip_set *from, *to;
        ip_set_id_t from_id, to_id;
        char from_name[IPSET_MAXNAMELEN];
-       u32 from_ref;
 
        if (unlikely(protocol_failed(attr) ||
                     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -899,17 +924,15 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
              from->type->family == to->type->family))
                return -IPSET_ERR_TYPE_MISMATCH;
 
-       /* No magic here: ref munging protected by the nfnl_lock */
        strncpy(from_name, from->name, IPSET_MAXNAMELEN);
-       from_ref = atomic_read(&from->ref);
-
        strncpy(from->name, to->name, IPSET_MAXNAMELEN);
-       atomic_set(&from->ref, atomic_read(&to->ref));
        strncpy(to->name, from_name, IPSET_MAXNAMELEN);
-       atomic_set(&to->ref, from_ref);
 
+       write_lock_bh(&ip_set_ref_lock);
+       swap(from->ref, to->ref);
        ip_set_list[from_id] = to;
        ip_set_list[to_id] = from;
+       write_unlock_bh(&ip_set_ref_lock);
 
        return 0;
 }
@@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb)
 {
        if (cb->args[2]) {
                pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
-               __ip_set_put((ip_set_id_t) cb->args[1]);
+               ip_set_put_byindex((ip_set_id_t) cb->args[1]);
        }
        return 0;
 }
@@ -1068,7 +1091,7 @@ release_refcount:
        /* If there was an error or set is done, release set */
        if (ret || !cb->args[2]) {
                pr_debug("release set %s\n", ip_set_list[index]->name);
-               __ip_set_put(index);
+               ip_set_put_byindex(index);
        }
 
        /* If we dump all sets, continue with dumping last ones */
index a47c329..e9159e9 100644 (file)
@@ -43,14 +43,19 @@ struct list_set {
 static inline struct set_elem *
 list_set_elem(const struct list_set *map, u32 id)
 {
-       return (struct set_elem *)((char *)map->members + id * map->dsize);
+       return (struct set_elem *)((void *)map->members + id * map->dsize);
+}
+
+static inline struct set_telem *
+list_set_telem(const struct list_set *map, u32 id)
+{
+       return (struct set_telem *)((void *)map->members + id * map->dsize);
 }
 
 static inline bool
 list_set_timeout(const struct list_set *map, u32 id)
 {
-       const struct set_telem *elem =
-               (const struct set_telem *) list_set_elem(map, id);
+       const struct set_telem *elem = list_set_telem(map, id);
 
        return ip_set_timeout_test(elem->timeout);
 }
@@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id)
 static inline bool
 list_set_expired(const struct list_set *map, u32 id)
 {
-       const struct set_telem *elem =
-               (const struct set_telem *) list_set_elem(map, id);
+       const struct set_telem *elem = list_set_telem(map, id);
 
        return ip_set_timeout_expired(elem->timeout);
 }
 
-static inline int
-list_set_exist(const struct set_telem *elem)
-{
-       return elem->id != IPSET_INVALID_ID &&
-              !ip_set_timeout_expired(elem->timeout);
-}
-
 /* Set list without and with timeout */
 
 static int
@@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
        struct set_telem *e;
 
        for (; i < map->size; i++) {
-               e = (struct set_telem *)list_set_elem(map, i);
+               e = list_set_telem(map, i);
                swap(e->id, id);
+               swap(e->timeout, timeout);
                if (e->id == IPSET_INVALID_ID)
                        break;
-               swap(e->timeout, timeout);
        }
 }
 
@@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
                /* Last element replaced: e.g. add new,before,last */
                ip_set_put_byindex(e->id);
        if (with_timeout(map->timeout))
-               list_elem_tadd(map, i, id, timeout);
+               list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
        else
                list_elem_add(map, i, id);
 
@@ -172,11 +169,11 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 }
 
 static int
-list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+list_set_del(struct list_set *map, u32 i)
 {
        struct set_elem *a = list_set_elem(map, i), *b;
 
-       ip_set_put_byindex(id);
+       ip_set_put_byindex(a->id);
 
        for (; i < map->size - 1; i++) {
                b = list_set_elem(map, i + 1);
@@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
                                 (before == 0 ||
                                  (before > 0 &&
                                   next_id_eq(map, i, refid))))
-                               ret = list_set_del(map, id, i);
+                               ret = list_set_del(map, i);
                        else if (before < 0 &&
                                 elem->id == refid &&
                                 next_id_eq(map, i, id))
-                               ret = list_set_del(map, id, i + 1);
+                               ret = list_set_del(map, i + 1);
                }
                break;
        default:
@@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
        NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
        if (with_timeout(map->timeout))
                NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
        NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
                      htonl(sizeof(*map) + map->size * map->dsize));
        ipset_nest_end(skb, nested);
@@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set)
        struct set_telem *e;
        u32 i;
 
-       /* We run parallel with other readers (test element)
-        * but adding/deleting new entries is locked out */
-       read_lock_bh(&set->lock);
-       for (i = map->size - 1; i >= 0; i--) {
-               e = (struct set_telem *) list_set_elem(map, i);
-               if (e->id != IPSET_INVALID_ID &&
-                   list_set_expired(map, i))
-                       list_set_del(map, e->id, i);
+       write_lock_bh(&set->lock);
+       for (i = 0; i < map->size; i++) {
+               e = list_set_telem(map, i);
+               if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
+                       list_set_del(map, i);
        }
-       read_unlock_bh(&set->lock);
+       write_unlock_bh(&set->lock);
 
        map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
        add_timer(&map->gc);
index 33733c8..ae47090 100644 (file)
@@ -3120,7 +3120,7 @@ nla_put_failure:
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
                                   struct netlink_callback *cb)
 {
-       struct net *net = skb_net(skb);
+       struct net *net = skb_sknet(skb);
        struct netns_ipvs *ipvs = net_ipvs(net);
 
        mutex_lock(&__ip_vs_mutex);
index 8678823..bcd5ed6 100644 (file)
@@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f,
                CHECK_BOUND(bs, 2);
                count = *bs->cur++;
                count <<= 8;
-               count = *bs->cur++;
+               count += *bs->cur++;
                break;
        case SEMI:
                BYTE_ALIGN(bs);
index 533a183..18b2ce5 100644 (file)
@@ -731,10 +731,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 
                memset(&fl2, 0, sizeof(fl2));
                fl2.daddr = dst->ip;
-               if (!afinfo->route((struct dst_entry **)&rt1,
-                                  flowi4_to_flowi(&fl1))) {
-                       if (!afinfo->route((struct dst_entry **)&rt2,
-                                          flowi4_to_flowi(&fl2))) {
+               if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+                                  flowi4_to_flowi(&fl1), false)) {
+                       if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+                                          flowi4_to_flowi(&fl2), false)) {
                                if (rt1->rt_gateway == rt2->rt_gateway &&
                                    rt1->dst.dev  == rt2->dst.dev)
                                        ret = 1;
@@ -755,10 +755,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 
                memset(&fl2, 0, sizeof(fl2));
                ipv6_addr_copy(&fl2.daddr, &dst->in6);
-               if (!afinfo->route((struct dst_entry **)&rt1,
-                                  flowi6_to_flowi(&fl1))) {
-                       if (!afinfo->route((struct dst_entry **)&rt2,
-                                          flowi6_to_flowi(&fl2))) {
+               if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+                                  flowi6_to_flowi(&fl1), false)) {
+                       if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+                                          flowi6_to_flowi(&fl2), false)) {
                                if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
                                            sizeof(rt1->rt6i_gateway)) &&
                                    rt1->dst.dev == rt2->dst.dev)
index 6e6b46c..9e63b43 100644 (file)
@@ -166,7 +166,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
        rcu_read_lock();
        ai = nf_get_afinfo(family);
        if (ai != NULL)
-               ai->route((struct dst_entry **)&rt, &fl);
+               ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
        rcu_read_unlock();
 
        if (rt != NULL) {
index 2220b85..b77d383 100644 (file)
@@ -32,11 +32,32 @@ MODULE_ALIAS("ipt_addrtype");
 MODULE_ALIAS("ip6t_addrtype");
 
 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
+static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
+                           const struct in6_addr *addr)
 {
+       const struct nf_afinfo *afinfo;
+       struct flowi6 flow;
+       struct rt6_info *rt;
        u32 ret;
+       int route_err;
 
-       if (!rt)
+       memset(&flow, 0, sizeof(flow));
+       ipv6_addr_copy(&flow.daddr, addr);
+       if (dev)
+               flow.flowi6_oif = dev->ifindex;
+
+       rcu_read_lock();
+
+       afinfo = nf_get_afinfo(NFPROTO_IPV6);
+       if (afinfo != NULL)
+               route_err = afinfo->route(net, (struct dst_entry **)&rt,
+                                       flowi6_to_flowi(&flow), !!dev);
+       else
+               route_err = 1;
+
+       rcu_read_unlock();
+
+       if (route_err)
                return XT_ADDRTYPE_UNREACHABLE;
 
        if (rt->rt6i_flags & RTF_REJECT)
@@ -48,6 +69,9 @@ static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
                ret |= XT_ADDRTYPE_LOCAL;
        if (rt->rt6i_flags & RTF_ANYCAST)
                ret |= XT_ADDRTYPE_ANYCAST;
+
+
+       dst_release(&rt->dst);
        return ret;
 }
 
@@ -65,18 +89,8 @@ static bool match_type6(struct net *net, const struct net_device *dev,
                return false;
 
        if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
-            XT_ADDRTYPE_UNREACHABLE) & mask) {
-               struct rt6_info *rt;
-               u32 type;
-               int ifindex = dev ? dev->ifindex : 0;
-
-               rt = rt6_lookup(net, addr, NULL, ifindex, !!dev);
-
-               type = xt_addrtype_rt6_to_type(rt);
-
-               dst_release(&rt->dst);
-               return !!(mask & type);
-       }
+            XT_ADDRTYPE_UNREACHABLE) & mask)
+               return !!(mask & match_lookup_rt6(net, dev, addr));
        return true;
 }
 
index 2c0086a..481a86f 100644 (file)
@@ -195,7 +195,7 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
                return info->match_flags & XT_CONNTRACK_STATE;
        if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
            (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
-           !!(info->invert_flags & XT_CONNTRACK_DIRECTION))
+           !(info->invert_flags & XT_CONNTRACK_DIRECTION))
                return false;
 
        if (info->match_flags & XT_CONNTRACK_ORIGSRC)