Merge git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 17 Jul 2011 19:49:55 +0000 (12:49 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 17 Jul 2011 19:49:55 +0000 (12:49 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6:
  [CIFS] update cifs to version 1.74
  [CIFS] update limit for snprintf in cifs_construct_tcon
  cifs: Fix signing failure when server mandates signing for NTLMSSP

111 files changed:
Documentation/filesystems/nilfs2.txt
Documentation/networking/ip-sysctl.txt
arch/arm/mach-davinci/irq.c
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-mmp/pxa168.c
arch/arm/mach-mmp/pxa910.c
arch/arm/mach-pxa/mfp-pxa2xx.c
arch/arm/mach-pxa/raumfeld.c
arch/arm/mach-s3c64xx/dma.c
arch/arm/plat-orion/gpio.c
arch/arm/plat-pxa/gpio.c
arch/arm/plat-s3c24xx/dma.c
arch/arm/plat-s5p/irq-gpioint.c
arch/arm/plat-samsung/dma.c
arch/arm/plat-samsung/include/plat/dma.h
arch/arm/plat-samsung/irq-uart.c
arch/arm/plat-samsung/irq-vic-timer.c
arch/mips/kernel/i8259.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/sparc/include/asm/irqflags_32.h
arch/sparc/include/asm/irqflags_64.h
arch/sparc/kernel/entry.S
arch/sparc/mm/leon_mm.c
arch/x86/mm/init_64.c
drivers/acpi/apei/hest.c
drivers/acpi/osl.c
drivers/base/memory.c
drivers/char/agp/intel-agp.h
drivers/gpio/wm831x-gpio.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_blit_kms.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_reg.h
drivers/gpu/drm/radeon/rs600.c
drivers/hwmon/adm1275.c
drivers/hwmon/pmbus_core.c
drivers/media/dvb/dvb-core/dvb_frontend.c
drivers/media/radio/Kconfig
drivers/media/rc/mceusb.c
drivers/media/rc/nuvoton-cir.c
drivers/media/video/cx23885/cx23885-core.c
drivers/media/video/tuner-core.c
drivers/mmc/core/mmc.c
drivers/net/bonding/bond_main.c
drivers/net/gianfar.c
drivers/net/gianfar.h
drivers/net/natsemi.c
drivers/net/r6040.c
drivers/net/slip.c
drivers/net/usb/hso.c
drivers/net/wireless/ath/ath5k/pci.c
drivers/net/wireless/ath/ath5k/sysfs.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/pcmcia/pxa2xx_vpac270.c
drivers/ssb/driver_pcicore.c
drivers/watchdog/Kconfig
fs/dcache.c
fs/gfs2/aops.c
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/log.c
fs/gfs2/ops_fstype.c
fs/gfs2/super.c
fs/gfs2/sys.c
fs/namei.c
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4xdr.c
fs/nfs/write.c
include/acpi/acpi_bus.h
include/acpi/acpiosxf.h
include/acpi/platform/aclinux.h
include/drm/drm_pciids.h
include/linux/irq.h
include/linux/memory.h
include/linux/mmc/card.h
include/linux/netdevice.h
include/net/sctp/command.h
include/net/sctp/ulpevent.h
kernel/irq/generic-chip.c
kernel/rcutree.c
kernel/rcutree_plugin.h
kernel/sched.c
net/8021q/vlan_dev.c
net/bluetooth/hci_conn.c
net/bluetooth/hidp/core.c
net/bluetooth/hidp/hidp.h
net/bluetooth/l2cap_core.c
net/mac80211/scan.c
net/mac80211/wpa.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/sctp/socket.c
net/sctp/ulpevent.c
net/sunrpc/rpcb_clnt.c
net/sunrpc/sched.c
net/wireless/core.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/scan.c
net/xfrm/xfrm_state.c
scripts/depmod.sh

diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index d5c0cef..873a2ab 100644
@@ -40,7 +40,6 @@ Features which NILFS2 does not support yet:
        - POSIX ACLs
        - quotas
        - fsck
-       - resize
        - defragmentation
 
 Mount options
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d3d653a..bfe9242 100644
@@ -346,7 +346,7 @@ tcp_orphan_retries - INTEGER
        when RTO retransmissions remain unacknowledged.
        See tcp_retries2 for more details.
 
-       The default value is 7.
+       The default value is 8.
        If your machine is a loaded WEB server,
        you should think about lowering this value, such sockets
        may consume significant resources. Cf. tcp_max_orphans.
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
index bfe68ec..d8c1af0 100644
@@ -53,7 +53,7 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
 
        gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
        ct = gc->chip_types;
-       ct->chip.irq_ack = irq_gc_ack;
+       ct->chip.irq_ack = irq_gc_ack_set_bit;
        ct->chip.irq_mask = irq_gc_mask_clr_bit;
        ct->chip.irq_unmask = irq_gc_mask_set_bit;
 
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 74ed81a..0777257 100644
@@ -419,14 +419,20 @@ static void notrace ixp4xx_update_sched_clock(void)
 /*
  * clocksource
  */
+
+static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
+{
+       return *IXP4XX_OSTS;
+}
+
 unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
 EXPORT_SYMBOL(ixp4xx_timer_freq);
 static void __init ixp4xx_clocksource_init(void)
 {
        init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
 
-       clocksource_mmio_init(&IXP4XX_OSTS, "OSTS", ixp4xx_timer_freq, 200, 32,
-                       clocksource_mmio_readl_up);
+       clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
+                       ixp4xx_clocksource_read);
 }
 
 /*
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 72b4e76..ab9f999 100644
@@ -79,7 +79,7 @@ static APBC_CLK(ssp4, PXA168_SSP4, 4, 0);
 static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
 static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
 
-static APMU_CLK(nand, NAND, 0x01db, 208000000);
+static APMU_CLK(nand, NAND, 0x19b, 156000000);
 static APMU_CLK(lcd, LCD, 0x7f, 312000000);
 
 /* device and clock bindings */
diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
index 8f92ccd..1464607 100644
@@ -110,7 +110,7 @@ static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
 static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
 static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
 
-static APMU_CLK(nand, NAND, 0x01db, 208000000);
+static APMU_CLK(nand, NAND, 0x19b, 156000000);
 static APMU_CLK(u2o, USB, 0x1b, 480000000);
 
 /* device and clock bindings */
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index 87ae312..b27544b 100644
@@ -347,9 +347,9 @@ static int pxa2xx_mfp_suspend(void)
                if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
                    (GPDR(i) & GPIO_bit(i))) {
                        if (GPLR(i) & GPIO_bit(i))
-                               PGSR(i) |= GPIO_bit(i);
+                               PGSR(gpio_to_bank(i)) |= GPIO_bit(i);
                        else
-                               PGSR(i) &= ~GPIO_bit(i);
+                               PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i);
                }
        }
 
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index d130f77..2f37d43 100644
@@ -573,10 +573,10 @@ static struct pxafb_mode_info sharp_lq043t3dx02_mode = {
        .xres           = 480,
        .yres           = 272,
        .bpp            = 16,
-       .hsync_len      = 4,
+       .hsync_len      = 41,
        .left_margin    = 2,
        .right_margin   = 1,
-       .vsync_len      = 1,
+       .vsync_len      = 10,
        .upper_margin   = 3,
        .lower_margin   = 1,
        .sync           = 0,
@@ -596,29 +596,31 @@ static void __init raumfeld_lcd_init(void)
 {
        int ret;
 
-       pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
-
-       /* Earlier devices had the backlight regulator controlled
-        * via PWM, later versions use another controller for that */
-       if ((system_rev & 0xff) < 2) {
-               mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
-               pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
-               platform_device_register(&raumfeld_pwm_backlight_device);
-       } else
-               platform_device_register(&raumfeld_lt3593_device);
-
        ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable");
        if (ret < 0)
                pr_warning("Unable to request GPIO_TFT_VA_EN\n");
        else
                gpio_direction_output(GPIO_TFT_VA_EN, 1);
 
+       msleep(100);
+
        ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable");
        if (ret < 0)
                pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n");
        else
                gpio_direction_output(GPIO_DISPLAY_ENABLE, 1);
 
+       /* Hardware revision 2 has the backlight regulator controlled
+        * by an LT3593, earlier and later devices use PWM for that. */
+       if ((system_rev & 0xff) == 2) {
+               platform_device_register(&raumfeld_lt3593_device);
+       } else {
+               mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
+               pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
+               platform_device_register(&raumfeld_pwm_backlight_device);
+       }
+
+       pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
        platform_device_register(&pxa3xx_device_gcu);
 }
 
@@ -657,10 +659,10 @@ static struct lis3lv02d_platform_data lis3_pdata = {
 
 #define SPI_AK4104     \
 {                      \
-       .modalias       = "ak4104",     \
-       .max_speed_hz   = 10000,        \
-       .bus_num        = 0,            \
-       .chip_select    = 0,            \
+       .modalias       = "ak4104-codec",       \
+       .max_speed_hz   = 10000,                \
+       .bus_num        = 0,                    \
+       .chip_select    = 0,                    \
        .controller_data = (void *) GPIO_SPDIF_CS,      \
 }
 
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index b197171..204bfaf 100644
@@ -113,7 +113,7 @@ found:
        return chan;
 }
 
-int s3c2410_dma_config(unsigned int channel, int xferunit)
+int s3c2410_dma_config(enum dma_ch channel, int xferunit)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -297,7 +297,7 @@ static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
        return 0;
 }
 
-int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
+int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -331,7 +331,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
  *
  */
 
-int s3c2410_dma_enqueue(unsigned int channel, void *id,
+int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
                        dma_addr_t data, int size)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -415,7 +415,7 @@ err_buff:
 EXPORT_SYMBOL(s3c2410_dma_enqueue);
 
 
-int s3c2410_dma_devconfig(unsigned int channel,
+int s3c2410_dma_devconfig(enum dma_ch channel,
                          enum s3c2410_dmasrc source,
                          unsigned long devaddr)
 {
@@ -463,7 +463,7 @@ int s3c2410_dma_devconfig(unsigned int channel,
 EXPORT_SYMBOL(s3c2410_dma_devconfig);
 
 
-int s3c2410_dma_getposition(unsigned int channel,
+int s3c2410_dma_getposition(enum dma_ch channel,
                            dma_addr_t *src, dma_addr_t *dst)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -487,7 +487,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
  * get control of an dma channel
 */
 
-int s3c2410_dma_request(unsigned int channel,
+int s3c2410_dma_request(enum dma_ch channel,
                        struct s3c2410_dma_client *client,
                        void *dev)
 {
@@ -533,7 +533,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
  * allowed to go through.
 */
 
-int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
+int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
        unsigned long flags;
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 5b4fffa..41ab97e 100644
@@ -432,7 +432,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
        ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF;
        ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
        ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
-       ct->chip.irq_ack = irq_gc_ack;
+       ct->chip.irq_ack = irq_gc_ack_clr_bit;
        ct->chip.irq_mask = irq_gc_mask_clr_bit;
        ct->chip.irq_unmask = irq_gc_mask_set_bit;
        ct->chip.irq_set_type = gpio_irq_set_type;
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c
index 48ebb94..a11dc36 100644
@@ -50,7 +50,7 @@ static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
        return container_of(c, struct pxa_gpio_chip, chip)->regbase;
 }
 
-static inline struct pxa_gpio_chip *gpio_to_chip(unsigned gpio)
+static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
 {
        return &pxa_gpio_chips[gpio_to_bank(gpio)];
 }
@@ -161,7 +161,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
        int gpio = irq_to_gpio(d->irq);
        unsigned long gpdr, mask = GPIO_bit(gpio);
 
-       c = gpio_to_chip(gpio);
+       c = gpio_to_pxachip(gpio);
 
        if (type == IRQ_TYPE_PROBE) {
                /* Don't mess with enabled GPIOs using preconfigured edges or
@@ -230,7 +230,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
 static void pxa_ack_muxed_gpio(struct irq_data *d)
 {
        int gpio = irq_to_gpio(d->irq);
-       struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+       struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
 
        __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
 }
@@ -238,7 +238,7 @@ static void pxa_ack_muxed_gpio(struct irq_data *d)
 static void pxa_mask_muxed_gpio(struct irq_data *d)
 {
        int gpio = irq_to_gpio(d->irq);
-       struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+       struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
        uint32_t grer, gfer;
 
        c->irq_mask &= ~GPIO_bit(gpio);
@@ -252,7 +252,7 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
 static void pxa_unmask_muxed_gpio(struct irq_data *d)
 {
        int gpio = irq_to_gpio(d->irq);
-       struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+       struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
 
        c->irq_mask |= GPIO_bit(gpio);
        update_edge_detect(c);
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index a79a8cc..539bd0e 100644
@@ -712,7 +712,7 @@ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel);
  * get control of an dma channel
 */
 
-int s3c2410_dma_request(unsigned int channel,
+int s3c2410_dma_request(enum dma_ch channel,
                        struct s3c2410_dma_client *client,
                        void *dev)
 {
@@ -783,7 +783,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
  * allowed to go through.
 */
 
-int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
+int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
        unsigned long flags;
@@ -974,7 +974,7 @@ static int s3c2410_dma_started(struct s3c2410_dma_chan *chan)
 }
 
 int
-s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
+s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -1021,7 +1021,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
  * xfersize:     size of unit in bytes (1,2,4)
 */
 
-int s3c2410_dma_config(unsigned int channel,
+int s3c2410_dma_config(enum dma_ch channel,
                       int xferunit)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -1100,7 +1100,7 @@ EXPORT_SYMBOL(s3c2410_dma_config);
  * devaddr:   physical address of the source
 */
 
-int s3c2410_dma_devconfig(unsigned int channel,
+int s3c2410_dma_devconfig(enum dma_ch channel,
                          enum s3c2410_dmasrc source,
                          unsigned long devaddr)
 {
@@ -1173,7 +1173,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig);
  * returns the current transfer points for the dma source and destination
 */
 
-int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst)
+int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index 135abda..327ab9f 100644
@@ -152,7 +152,7 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
        if (!gc)
                return -ENOMEM;
        ct = gc->chip_types;
-       ct->chip.irq_ack = irq_gc_ack;
+       ct->chip.irq_ack = irq_gc_ack_set_bit;
        ct->chip.irq_mask = irq_gc_mask_set_bit;
        ct->chip.irq_unmask = irq_gc_mask_clr_bit;
        ct->chip.irq_set_type = s5p_gpioint_set_type,
diff --git a/arch/arm/plat-samsung/dma.c b/arch/arm/plat-samsung/dma.c
index cb459dd..6143aa1 100644
@@ -41,7 +41,7 @@ struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel)
  * irq?
 */
 
-int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
+int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -56,7 +56,7 @@ int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
 }
 EXPORT_SYMBOL(s3c2410_dma_set_opfn);
 
-int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
+int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -71,7 +71,7 @@ int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
 }
 EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
 
-int s3c2410_dma_setflags(unsigned int channel, unsigned int flags)
+int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h
index 2e8f8c6..8c273b7 100644
@@ -42,6 +42,7 @@ struct s3c2410_dma_client {
 };
 
 struct s3c2410_dma_chan;
+enum dma_ch;
 
 /* s3c2410_dma_cbfn_t
  *
@@ -62,7 +63,7 @@ typedef int  (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *,
  * request a dma channel exclusivley
 */
 
-extern int s3c2410_dma_request(unsigned int channel,
+extern int s3c2410_dma_request(enum dma_ch channel,
                               struct s3c2410_dma_client *, void *dev);
 
 
@@ -71,14 +72,14 @@ extern int s3c2410_dma_request(unsigned int channel,
  * change the state of the dma channel
 */
 
-extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op);
+extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op);
 
 /* s3c2410_dma_setflags
  *
  * set the channel's flags to a given state
 */
 
-extern int s3c2410_dma_setflags(unsigned int channel,
+extern int s3c2410_dma_setflags(enum dma_ch channel,
                                unsigned int flags);
 
 /* s3c2410_dma_free
@@ -86,7 +87,7 @@ extern int s3c2410_dma_setflags(unsigned int channel,
  * free the dma channel (will also abort any outstanding operations)
 */
 
-extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
+extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *);
 
 /* s3c2410_dma_enqueue
  *
@@ -95,7 +96,7 @@ extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
  * drained before the buffer is given to the DMA system.
 */
 
-extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
+extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
                               dma_addr_t data, int size);
 
 /* s3c2410_dma_config
@@ -103,14 +104,14 @@ extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
  * configure the dma channel
 */
 
-extern int s3c2410_dma_config(unsigned int channel, int xferunit);
+extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
 
 /* s3c2410_dma_devconfig
  *
  * configure the device we're talking to
 */
 
-extern int s3c2410_dma_devconfig(unsigned int channel,
+extern int s3c2410_dma_devconfig(enum dma_ch channel,
                enum s3c2410_dmasrc source, unsigned long devaddr);
 
 /* s3c2410_dma_getposition
@@ -118,10 +119,10 @@ extern int s3c2410_dma_devconfig(unsigned int channel,
  * get the position that the dma transfer is currently at
 */
 
-extern int s3c2410_dma_getposition(unsigned int channel,
+extern int s3c2410_dma_getposition(enum dma_ch channel,
                                   dma_addr_t *src, dma_addr_t *dest);
 
-extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn);
-extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn);
+extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
+extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
 
 
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c
index 32582c0..657405c 100644
@@ -54,8 +54,15 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
 
        gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
                                    handle_level_irq);
+
+       if (!gc) {
+               pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
+                      __func__, uirq->base_irq);
+               return;
+       }
+
        ct = gc->chip_types;
-       ct->chip.irq_ack = irq_gc_ack;
+       ct->chip.irq_ack = irq_gc_ack_set_bit;
        ct->chip.irq_mask = irq_gc_mask_set_bit;
        ct->chip.irq_unmask = irq_gc_mask_clr_bit;
        ct->regs.ack = S3C64XX_UINTP;
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
index a607546..f714d06 100644
@@ -54,6 +54,13 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq)
 
        s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq,
                                         S3C64XX_TINT_CSTAT, handle_level_irq);
+
+       if (!s3c_tgc) {
+               pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n",
+                      __func__, timer_irq);
+               return;
+       }
+
        ct = s3c_tgc->chip_types;
        ct->chip.irq_mask = irq_gc_mask_clr_bit;
        ct->chip.irq_unmask = irq_gc_mask_set_bit;
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index c018696..5c74eb7 100644
@@ -14,7 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/irq.h>
 
 #include <asm/i8259.h>
@@ -215,14 +215,13 @@ spurious_8259A_irq:
        }
 }
 
-static int i8259A_resume(struct sys_device *dev)
+static void i8259A_resume(void)
 {
        if (i8259A_auto_eoi >= 0)
                init_8259A(i8259A_auto_eoi);
-       return 0;
 }
 
-static int i8259A_shutdown(struct sys_device *dev)
+static void i8259A_shutdown(void)
 {
        /* Put the i8259A into a quiescent state that
         * the kernel initialization code can get it
@@ -232,26 +231,17 @@ static int i8259A_shutdown(struct sys_device *dev)
                outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */
                outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-1 */
        }
-       return 0;
 }
 
-static struct sysdev_class i8259_sysdev_class = {
-       .name = "i8259",
+static struct syscore_ops i8259_syscore_ops = {
        .resume = i8259A_resume,
        .shutdown = i8259A_shutdown,
 };
 
-static struct sys_device device_i8259A = {
-       .id     = 0,
-       .cls    = &i8259_sysdev_class,
-};
-
 static int __init i8259A_init_sysfs(void)
 {
-       int error = sysdev_class_register(&i8259_sysdev_class);
-       if (!error)
-               error = sysdev_register(&device_i8259A);
-       return error;
+       register_syscore_ops(&i8259_syscore_ops);
+       return 0;
 }
 
 device_initcall(i8259A_init_sysfs);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 33867ec..9d6a8ef 100644
@@ -12,6 +12,8 @@
 #include <linux/of.h>
 #include <linux/memblock.h>
 #include <linux/vmalloc.h>
+#include <linux/memory.h>
+
 #include <asm/firmware.h>
 #include <asm/machdep.h>
 #include <asm/pSeries_reconfig.h>
 static unsigned long get_memblock_size(void)
 {
        struct device_node *np;
-       unsigned int memblock_size = 0;
+       unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
+       struct resource r;
 
        np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (np) {
-               const unsigned long *size;
+               const __be64 *size;
 
                size = of_get_property(np, "ibm,lmb-size", NULL);
-               memblock_size = size ? *size : 0;
-
+               if (size)
+                       memblock_size = be64_to_cpup(size);
                of_node_put(np);
-       } else {
+       } else  if (machine_is(pseries)) {
+               /* This fallback really only applies to pseries */
                unsigned int memzero_size = 0;
-               const unsigned int *regs;
 
                np = of_find_node_by_path("/memory@0");
                if (np) {
-                       regs = of_get_property(np, "reg", NULL);
-                       memzero_size = regs ? regs[3] : 0;
+                       if (!of_address_to_resource(np, 0, &r))
+                               memzero_size = resource_size(&r);
                        of_node_put(np);
                }
 
@@ -50,16 +53,21 @@ static unsigned long get_memblock_size(void)
                        sprintf(buf, "/memory@%x", memzero_size);
                        np = of_find_node_by_path(buf);
                        if (np) {
-                               regs = of_get_property(np, "reg", NULL);
-                               memblock_size = regs ? regs[3] : 0;
+                               if (!of_address_to_resource(np, 0, &r))
+                                       memblock_size = resource_size(&r);
                                of_node_put(np);
                        }
                }
        }
-
        return memblock_size;
 }
 
+/* WARNING: This is going to override the generic definition whenever
+ * pseries is built-in regardless of what platform is active at boot
+ * time. This is fine for now as this is the only "option" and it
+ * should work everywhere. If not, we'll have to turn this into a
+ * ppc_md. callback
+ */
 unsigned long memory_block_size_bytes(void)
 {
        return get_memblock_size();
diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h
index d4d0711..1484890 100644
@@ -18,7 +18,7 @@ extern void arch_local_irq_restore(unsigned long);
 extern unsigned long arch_local_irq_save(void);
 extern void arch_local_irq_enable(void);
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
        unsigned long flags;
 
@@ -26,17 +26,17 @@ static inline unsigned long arch_local_save_flags(void)
        return flags;
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
        arch_local_irq_save();
 }
 
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
        return (flags & PSR_PIL) != 0;
 }
 
-static inline bool arch_irqs_disabled(void)
+static inline notrace bool arch_irqs_disabled(void)
 {
        return arch_irqs_disabled_flags(arch_local_save_flags());
 }
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index aab969c..23cd27f 100644
@@ -14,7 +14,7 @@
 
 #ifndef __ASSEMBLY__
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
        unsigned long flags;
 
@@ -26,7 +26,7 @@ static inline unsigned long arch_local_save_flags(void)
        return flags;
 }
 
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
        __asm__ __volatile__(
                "wrpr   %0, %%pil"
@@ -36,7 +36,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
        );
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
        __asm__ __volatile__(
                "wrpr   %0, %%pil"
@@ -46,7 +46,7 @@ static inline void arch_local_irq_disable(void)
        );
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
        __asm__ __volatile__(
                "wrpr   0, %%pil"
@@ -56,17 +56,17 @@ static inline void arch_local_irq_enable(void)
        );
 }
 
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
 {
        return (flags > 0);
 }
 
-static inline int arch_irqs_disabled(void)
+static inline notrace int arch_irqs_disabled(void)
 {
        return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
        unsigned long flags, tmp;
 
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 9fe08a1..f445e98 100644
@@ -293,7 +293,7 @@ maybe_smp4m_msg:
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
-       sll     %o3, 28, %o2            ! shift for simpler checks below
+       srl     %o3, 28, %o2            ! shift for simpler checks below
 maybe_smp4m_msg_check_single:
        andcc   %o2, 0x1, %g0
        beq,a   maybe_smp4m_msg_check_mask
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index c0e0129..e485a68 100644
@@ -226,7 +226,7 @@ void leon3_getCacheRegs(struct leon3_cacheregs *regs)
  * Leon2 and Leon3 differ in their way of telling cache information
  *
  */
-int leon_flush_needed(void)
+int __init leon_flush_needed(void)
 {
        int flush_needed = -1;
        unsigned int ssize, sets;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d865c4a..bbaaa00 100644
@@ -28,6 +28,7 @@
 #include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/memory.h>
 #include <linux/memory_hotplug.h>
 #include <linux/nmi.h>
 #include <linux/gfp.h>
@@ -895,8 +896,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_X86_UV
-#define MIN_MEMORY_BLOCK_SIZE   (1 << SECTION_SIZE_BITS)
-
 unsigned long memory_block_size_bytes(void)
 {
        if (is_uv_system()) {
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index abda378..181bc2f 100644
@@ -139,13 +139,23 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
 {
        struct platform_device *ghes_dev;
        struct ghes_arr *ghes_arr = data;
-       int rc;
+       int rc, i;
 
        if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
                return 0;
 
        if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
                return 0;
+       for (i = 0; i < ghes_arr->count; i++) {
+               struct acpi_hest_header *hdr;
+               ghes_dev = ghes_arr->ghes_devs[i];
+               hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data;
+               if (hdr->source_id == hest_hdr->source_id) {
+                       pr_warning(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n",
+                                  hdr->source_id);
+                       return -EIO;
+               }
+       }
        ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
        if (!ghes_dev)
                return -ENOMEM;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 52ca964..372f9b7 100644
@@ -1332,23 +1332,6 @@ int acpi_resources_are_enforced(void)
 }
 EXPORT_SYMBOL(acpi_resources_are_enforced);
 
-/*
- * Create and initialize a spinlock.
- */
-acpi_status
-acpi_os_create_lock(acpi_spinlock *out_handle)
-{
-       spinlock_t *lock;
-
-       lock = ACPI_ALLOCATE(sizeof(spinlock_t));
-       if (!lock)
-               return AE_NO_MEMORY;
-       spin_lock_init(lock);
-       *out_handle = lock;
-
-       return AE_OK;
-}
-
 /*
  * Deallocate the memory for a spinlock.
  */
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9f9b235..45d7c8f 100644
@@ -30,7 +30,6 @@
 static DEFINE_MUTEX(mem_sysfs_mutex);
 
 #define MEMORY_CLASS_NAME      "memory"
-#define MIN_MEMORY_BLOCK_SIZE  (1 << SECTION_SIZE_BITS)
 
 static int sections_per_block;
 
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 999803c..5da67f1 100644
 #define G4x_GMCH_SIZE_MASK     (0xf << 8)
 #define G4x_GMCH_SIZE_1M       (0x1 << 8)
 #define G4x_GMCH_SIZE_2M       (0x3 << 8)
-#define G4x_GMCH_SIZE_VT_1M    (0x9 << 8)
-#define G4x_GMCH_SIZE_VT_1_5M  (0xa << 8)
-#define G4x_GMCH_SIZE_VT_2M    (0xc << 8)
+#define G4x_GMCH_SIZE_VT_EN    (0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M    (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M  ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M    (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
 #define GFX_FLSH_CNTL          0x2170 /* 915+ */
 
diff --git a/drivers/gpio/wm831x-gpio.c b/drivers/gpio/wm831x-gpio.c
index 309644c..2bcfb0b 100644
@@ -180,6 +180,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
                        break;
                case WM831X_GPIO_PULL_UP:
                        pull = "pullup";
+                       break;
                default:
                        pull = "INVALID PULL";
                        break;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e178702..296fbd6 100644
@@ -1943,7 +1943,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (!dev_priv->mm.gtt) {
                DRM_ERROR("Failed to initialize GTT\n");
                ret = -ENODEV;
-               goto out_iomapfree;
+               goto out_rmmap;
        }
 
        agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
@@ -1987,7 +1987,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (dev_priv->wq == NULL) {
                DRM_ERROR("Failed to create our workqueue.\n");
                ret = -ENOMEM;
-               goto out_iomapfree;
+               goto out_mtrrfree;
        }
 
        /* enable GEM by default */
@@ -2074,13 +2074,21 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        return 0;
 
 out_gem_unload:
+       if (dev_priv->mm.inactive_shrinker.shrink)
+               unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);
 
        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
        destroy_workqueue(dev_priv->wq);
-out_iomapfree:
+out_mtrrfree:
+       if (dev_priv->mm.gtt_mtrr >= 0) {
+               mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
+                        dev->agp->agp_info.aper_size * 1024 * 1024);
+               dev_priv->mm.gtt_mtrr = -1;
+       }
        io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
        pci_iounmap(dev->pdev, dev_priv->regs);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 013d304..eb91e2d 100644
@@ -52,7 +52,7 @@ module_param_named(powersave, i915_powersave, int, 0600);
 unsigned int i915_semaphores = 0;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 
-unsigned int i915_enable_rc6 = 1;
+unsigned int i915_enable_rc6 = 0;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 
 unsigned int i915_enable_fbc = 0;
@@ -577,6 +577,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
        if (get_seconds() - dev_priv->last_gpu_reset < 5) {
                DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
        } else switch (INTEL_INFO(dev)->gen) {
+       case 7:
        case 6:
                ret = gen6_do_reset(dev, flags);
                /* If reset with a user forcewake, try to restore */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 391b55f..e2aced6 100644
@@ -50,7 +50,6 @@ struct intel_dp {
        bool has_audio;
        int force_audio;
        uint32_t color_range;
-       int dpms_mode;
        uint8_t link_bw;
        uint8_t lane_count;
        uint8_t dpcd[4];
@@ -138,8 +137,8 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
 {
        int max_lane_count = 4;
 
-       if (intel_dp->dpcd[0] >= 0x11) {
-               max_lane_count = intel_dp->dpcd[2] & 0x1f;
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+               max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
                switch (max_lane_count) {
                case 1: case 2: case 4:
                        break;
@@ -153,7 +152,7 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
 static int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
-       int max_link_bw = intel_dp->dpcd[1];
+       int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
 
        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
@@ -774,7 +773,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        /*
         * Check for DPCD version > 1.1 and enhanced framing support
         */
-       if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+           (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
                intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
                intel_dp->DP |= DP_ENHANCED_FRAMING;
        }
@@ -942,11 +942,44 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
        udelay(200);
 }
 
+/* If the sink supports it, try to set the power state appropriately */
+static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+       int ret, i;
+
+       /* Should have a valid DPCD by this point */
+       if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+               return;
+
+       if (mode != DRM_MODE_DPMS_ON) {
+               ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+                                                 DP_SET_POWER_D3);
+               if (ret != 1)
+                       DRM_DEBUG_DRIVER("failed to write sink power state\n");
+       } else {
+               /*
+                * When turning on, we need to retry for 1ms to give the sink
+                * time to wake up.
+                */
+               for (i = 0; i < 3; i++) {
+                       ret = intel_dp_aux_native_write_1(intel_dp,
+                                                         DP_SET_POWER,
+                                                         DP_SET_POWER_D0);
+                       if (ret == 1)
+                               break;
+                       msleep(1);
+               }
+       }
+}
+
 static void intel_dp_prepare(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_device *dev = encoder->dev;
 
+       /* Wake up the sink first */
+       intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
        if (is_edp(intel_dp)) {
                ironlake_edp_backlight_off(dev);
                ironlake_edp_panel_off(dev);
@@ -990,6 +1023,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        if (mode != DRM_MODE_DPMS_ON) {
                if (is_edp(intel_dp))
                        ironlake_edp_backlight_off(dev);
+               intel_dp_sink_dpms(intel_dp, mode);
                intel_dp_link_down(intel_dp);
                if (is_edp(intel_dp))
                        ironlake_edp_panel_off(dev);
@@ -998,6 +1032,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        } else {
                if (is_edp(intel_dp))
                        ironlake_edp_panel_vdd_on(intel_dp);
+               intel_dp_sink_dpms(intel_dp, mode);
                if (!(dp_reg & DP_PORT_EN)) {
                        intel_dp_start_link_train(intel_dp);
                        if (is_edp(intel_dp)) {
@@ -1009,7 +1044,31 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
                if (is_edp(intel_dp))
                        ironlake_edp_backlight_on(dev);
        }
-       intel_dp->dpms_mode = mode;
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+                              uint8_t *recv, int recv_bytes)
+{
+       int ret, i;
+
+       /*
+        * Sinks are *supposed* to come up within 1ms from an off state,
+        * but we're also supposed to retry 3 times per the spec.
+        */
+       for (i = 0; i < 3; i++) {
+               ret = intel_dp_aux_native_read(intel_dp, address, recv,
+                                              recv_bytes);
+               if (ret == recv_bytes)
+                       return true;
+               msleep(1);
+       }
+
+       return false;
 }
 
 /*
@@ -1019,14 +1078,10 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 static bool
 intel_dp_get_link_status(struct intel_dp *intel_dp)
 {
-       int ret;
-
-       ret = intel_dp_aux_native_read(intel_dp,
-                                      DP_LANE0_1_STATUS,
-                                      intel_dp->link_status, DP_LINK_STATUS_SIZE);
-       if (ret != DP_LINK_STATUS_SIZE)
-               return false;
-       return true;
+       return intel_dp_aux_native_read_retry(intel_dp,
+                                             DP_LANE0_1_STATUS,
+                                             intel_dp->link_status,
+                                             DP_LINK_STATUS_SIZE);
 }
 
 static uint8_t
@@ -1515,6 +1570,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 static void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
+       int ret;
+
        if (!intel_dp->base.base.crtc)
                return;
 
@@ -1523,6 +1580,15 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                return;
        }
 
+       /* Try to read receiver status if the link appears to be up */
+       ret = intel_dp_aux_native_read(intel_dp,
+                                      0x000, intel_dp->dpcd,
+                                      sizeof (intel_dp->dpcd));
+       if (ret != sizeof(intel_dp->dpcd)) {
+               intel_dp_link_down(intel_dp);
+               return;
+       }
+
        if (!intel_channel_eq_ok(intel_dp)) {
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
@@ -1533,6 +1599,7 @@ static enum drm_connector_status
 ironlake_dp_detect(struct intel_dp *intel_dp)
 {
        enum drm_connector_status status;
+       bool ret;
 
        /* Can't disconnect eDP, but you can close the lid... */
        if (is_edp(intel_dp)) {
@@ -1543,13 +1610,11 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
        }
 
        status = connector_status_disconnected;
-       if (intel_dp_aux_native_read(intel_dp,
-                                    0x000, intel_dp->dpcd,
-                                    sizeof (intel_dp->dpcd))
-           == sizeof(intel_dp->dpcd)) {
-               if (intel_dp->dpcd[0] != 0)
-                       status = connector_status_connected;
-       }
+       ret = intel_dp_aux_native_read_retry(intel_dp,
+                                            0x000, intel_dp->dpcd,
+                                            sizeof (intel_dp->dpcd));
+       if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
+               status = connector_status_connected;
        DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
                      intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
        return status;
@@ -1586,7 +1651,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
        if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
                                     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
        {
-               if (intel_dp->dpcd[0] != 0)
+               if (intel_dp->dpcd[DP_DPCD_REV] != 0)
                        status = connector_status_connected;
        }
 
@@ -1790,8 +1855,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
        struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
 
-       if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
-               intel_dp_check_link_status(intel_dp);
+       intel_dp_check_link_status(intel_dp);
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
@@ -1859,7 +1923,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                return;
 
        intel_dp->output_reg = output_reg;
-       intel_dp->dpms_mode = -1;
 
        intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
        if (!intel_connector) {
@@ -1954,8 +2017,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                                               sizeof(intel_dp->dpcd));
                ironlake_edp_panel_vdd_off(intel_dp);
                if (ret == sizeof(intel_dp->dpcd)) {
-                       if (intel_dp->dpcd[0] >= 0x11)
-                               dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+                       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+                               dev_priv->no_aux_handshake =
+                                       intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                        DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
                } else {
                        /* if this fails, presume the device is a ghost */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c0e0ee6..39ac2b6 100644
@@ -165,7 +165,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
 static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
 {
-       return intel_wait_ring_buffer(ring, ring->space - 8);
+       return intel_wait_ring_buffer(ring, ring->size - 8);
 }
 
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 660f964..15bd047 100644
@@ -2000,7 +2000,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                        gb_backend_map = 0x66442200;
                        break;
                case CHIP_JUNIPER:
-                       gb_backend_map = 0x00006420;
+                       gb_backend_map = 0x00002200;
                        break;
                default:
                        gb_backend_map =
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 57f3bc1..2eb2518 100644
@@ -252,7 +252,7 @@ draw_auto(struct radeon_device *rdev)
 
 }
 
-/* emits 36 */
+/* emits 39 */
 static void
 set_default_state(struct radeon_device *rdev)
 {
@@ -531,6 +531,11 @@ set_default_state(struct radeon_device *rdev)
                radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
                radeon_ring_write(rdev, 0);
 
+               /* setup LDS */
+               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(rdev, 0x10001000);
+
                /* SQ config */
                radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
                radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
@@ -773,7 +778,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
        /* calculate number of loops correctly */
        ring_size = num_loops * dwords_per_loop;
        /* set default  + shaders */
-       ring_size += 52; /* shaders + def state */
+       ring_size += 55; /* shaders + def state */
        ring_size += 10; /* fence emit for VB IB */
        ring_size += 5; /* done copy */
        ring_size += 10; /* fence emit for done copy */
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 3fc5fa1..229a20f 100644
@@ -331,7 +331,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
 
        seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
        viph_control = RREG32(RADEON_VIPH_CONTROL);
-       bus_cntl = RREG32(RADEON_BUS_CNTL);
+       bus_cntl = RREG32(RV370_BUS_CNTL);
        d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
        d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
        vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -350,7 +350,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
        WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 
        /* enable the rom */
-       WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+       WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
 
        /* Disable VGA mode */
        WREG32(AVIVO_D1VGA_CONTROL,
@@ -367,7 +367,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
        /* restore regs */
        WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
        WREG32(RADEON_VIPH_CONTROL, viph_control);
-       WREG32(RADEON_BUS_CNTL, bus_cntl);
+       WREG32(RV370_BUS_CNTL, bus_cntl);
        WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
        WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
        WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -390,7 +390,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 
        seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
        viph_control = RREG32(RADEON_VIPH_CONTROL);
-       bus_cntl = RREG32(RADEON_BUS_CNTL);
+       if (rdev->flags & RADEON_IS_PCIE)
+               bus_cntl = RREG32(RV370_BUS_CNTL);
+       else
+               bus_cntl = RREG32(RADEON_BUS_CNTL);
        crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
        crtc2_gen_cntl = 0;
        crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
@@ -412,7 +415,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
        WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 
        /* enable the rom */
-       WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+       if (rdev->flags & RADEON_IS_PCIE)
+               WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+       else
+               WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
 
        /* Turn off mem requests and CRTC for both controllers */
        WREG32(RADEON_CRTC_GEN_CNTL,
@@ -439,7 +445,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
        /* restore regs */
        WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
        WREG32(RADEON_VIPH_CONTROL, viph_control);
-       WREG32(RADEON_BUS_CNTL, bus_cntl);
+       if (rdev->flags & RADEON_IS_PCIE)
+               WREG32(RV370_BUS_CNTL, bus_cntl);
+       else
+               WREG32(RADEON_BUS_CNTL, bus_cntl);
        WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
        if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
                WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cbfca3a..9792d4f 100644
@@ -52,6 +52,12 @@ void radeon_connector_hotplug(struct drm_connector *connector)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
+       /* bail if the connector does not have hpd pin, e.g.,
+        * VGA, TV, etc.
+        */
+       if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+               return;
+
        radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 
        /* powering up/down the eDP panel generates hpd events which
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index ec93a75..bc44a3d 100644
 #       define RADEON_BUS_READ_BURST         (1 << 30)
 #define RADEON_BUS_CNTL1                    0x0034
 #       define RADEON_BUS_WAIT_ON_LOCK_EN    (1 << 4)
+#define RV370_BUS_CNTL                      0x004c
+#       define RV370_BUS_BIOS_DIS_ROM        (1 << 2)
 /* rv370/rv380, rv410, r423/r430/r480, r5xx */
 #define RADEON_MSI_REARM_EN                0x0160
 #      define RV370_MSI_REARM_EN            (1 << 0)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6e3b11e..1f5850e 100644
@@ -426,7 +426,7 @@ int rs600_gart_init(struct radeon_device *rdev)
        return radeon_gart_table_vram_alloc(rdev);
 }
 
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
 {
        u32 tmp;
        int r, i;
@@ -440,8 +440,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
                return r;
        radeon_gart_restore(rdev);
        /* Enable bus master */
-       tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
-       WREG32(R_00004C_BUS_CNTL, tmp);
+       tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+       WREG32(RADEON_BUS_CNTL, tmp);
        /* FIXME: setup default page */
        WREG32_MC(R_000100_MC_PT0_CNTL,
                  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
diff --git a/drivers/hwmon/adm1275.c b/drivers/hwmon/adm1275.c
index b9b7caf..8bc1bd6 100644
@@ -53,23 +53,23 @@ static int adm1275_probe(struct i2c_client *client,
        info->direct[PSC_VOLTAGE_IN] = true;
        info->direct[PSC_VOLTAGE_OUT] = true;
        info->direct[PSC_CURRENT_OUT] = true;
-       info->m[PSC_CURRENT_OUT] = 800;
+       info->m[PSC_CURRENT_OUT] = 807;
        info->b[PSC_CURRENT_OUT] = 20475;
        info->R[PSC_CURRENT_OUT] = -1;
        info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
 
        if (config & ADM1275_VRANGE) {
-               info->m[PSC_VOLTAGE_IN] = 19045;
+               info->m[PSC_VOLTAGE_IN] = 19199;
                info->b[PSC_VOLTAGE_IN] = 0;
                info->R[PSC_VOLTAGE_IN] = -2;
-               info->m[PSC_VOLTAGE_OUT] = 19045;
+               info->m[PSC_VOLTAGE_OUT] = 19199;
                info->b[PSC_VOLTAGE_OUT] = 0;
                info->R[PSC_VOLTAGE_OUT] = -2;
        } else {
-               info->m[PSC_VOLTAGE_IN] = 6666;
+               info->m[PSC_VOLTAGE_IN] = 6720;
                info->b[PSC_VOLTAGE_IN] = 0;
                info->R[PSC_VOLTAGE_IN] = -1;
-               info->m[PSC_VOLTAGE_OUT] = 6666;
+               info->m[PSC_VOLTAGE_OUT] = 6720;
                info->b[PSC_VOLTAGE_OUT] = 0;
                info->R[PSC_VOLTAGE_OUT] = -1;
        }
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index 744672c..8e31a8e 100644
@@ -362,8 +362,8 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
  * Convert linear sensor values to milli- or micro-units
  * depending on sensor type.
  */
-static int pmbus_reg2data_linear(struct pmbus_data *data,
-                                struct pmbus_sensor *sensor)
+static long pmbus_reg2data_linear(struct pmbus_data *data,
+                                 struct pmbus_sensor *sensor)
 {
        s16 exponent;
        s32 mantissa;
@@ -397,15 +397,15 @@ static int pmbus_reg2data_linear(struct pmbus_data *data,
        else
                val >>= -exponent;
 
-       return (int)val;
+       return val;
 }
 
 /*
  * Convert direct sensor values to milli- or micro-units
  * depending on sensor type.
  */
-static int pmbus_reg2data_direct(struct pmbus_data *data,
-                                struct pmbus_sensor *sensor)
+static long pmbus_reg2data_direct(struct pmbus_data *data,
+                                 struct pmbus_sensor *sensor)
 {
        long val = (s16) sensor->data;
        long m, b, R;
@@ -440,12 +440,12 @@ static int pmbus_reg2data_direct(struct pmbus_data *data,
                R++;
        }
 
-       return (int)((val - b) / m);
+       return (val - b) / m;
 }
 
-static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
 {
-       int val;
+       long val;
 
        if (data->info->direct[sensor->class])
                val = pmbus_reg2data_direct(data, sensor);
@@ -619,7 +619,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
        if (!s1 && !s2)
                *val = !!regval;
        else {
-               int v1, v2;
+               long v1, v2;
                struct pmbus_sensor *sensor1, *sensor2;
 
                sensor1 = &data->sensors[s1];
@@ -661,7 +661,7 @@ static ssize_t pmbus_show_sensor(struct device *dev,
        if (sensor->data < 0)
                return sensor->data;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+       return snprintf(buf, PAGE_SIZE, "%ld\n", pmbus_reg2data(data, sensor));
 }
 
 static ssize_t pmbus_set_sensor(struct device *dev,
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 9827804..5b6b451 100644
@@ -1988,6 +1988,14 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
        if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) {
                if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0)
                        goto err0;
+
+               /* If we took control of the bus, we need to force
+                  reinitialization.  This is because many ts_bus_ctrl()
+                  functions strobe the RESET pin on the demod, and if the
+                  frontend thread already exists then the dvb_init() routine
+                  won't get called (which is what usually does initial
+                  register configuration). */
+               fepriv->reinitialise = 1;
        }
 
        if ((ret = dvb_generic_open (inode, file)) < 0)
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index e4c97fd..52798a1 100644
@@ -168,7 +168,7 @@ config RADIO_MAXIRADIO
 
 config RADIO_MIROPCM20
        tristate "miroSOUND PCM20 radio"
-       depends on ISA && VIDEO_V4L2 && SND
+       depends on ISA && ISA_DMA_API && VIDEO_V4L2 && SND
        select SND_ISA
        select SND_MIRO
        ---help---
@@ -201,7 +201,7 @@ config RADIO_SF16FMI
 
 config RADIO_SF16FMR2
        tristate "SF16FMR2 Radio"
-       depends on ISA && VIDEO_V4L2
+       depends on ISA && VIDEO_V4L2 && SND
        ---help---
          Choose Y here if you have one of these FM radio cards.
 
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 06dfe09..ec972dc 100644
@@ -558,9 +558,10 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
                                 inout, data1);
                        break;
                case MCE_CMD_S_TIMEOUT:
-                       /* value is in units of 50us, so x*50/100 or x/2 ms */
+                       /* value is in units of 50us, so x*50/1000 ms */
                        dev_info(dev, "%s receive timeout of %d ms\n",
-                                inout, ((data1 << 8) | data2) / 2);
+                                inout,
+                                ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000);
                        break;
                case MCE_CMD_G_TIMEOUT:
                        dev_info(dev, "Get receive timeout\n");
@@ -847,7 +848,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
        switch (ir->buf_in[index]) {
        /* 2-byte return value commands */
        case MCE_CMD_S_TIMEOUT:
-               ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
+               ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
                break;
 
        /* 1-byte return value commands */
@@ -1078,7 +1079,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
        rc->priv = ir;
        rc->driver_type = RC_DRIVER_IR_RAW;
        rc->allowed_protos = RC_TYPE_ALL;
-       rc->timeout = US_TO_NS(1000);
+       rc->timeout = MS_TO_NS(100);
        if (!ir->flags.no_tx) {
                rc->s_tx_mask = mceusb_set_tx_mask;
                rc->s_tx_carrier = mceusb_set_tx_carrier;
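
The timeout fixes above reinterpret the two-byte hardware value as counts of the 50us sample period instead of halving it. A quick userspace check of the arithmetic, assuming MCE_TIME_UNIT is that 50us unit and US_TO_NS simply multiplies by 1000 (both assumptions, not shown in this hunk), ties the conversion back to the new 100 ms default used in the probe paths.

#include <stdio.h>

/* Assumed values: MCE_TIME_UNIT is the 50us sample period named in the
 * comment above, and US_TO_NS converts microseconds to nanoseconds. */
#define MCE_TIME_UNIT   50
#define US_TO_NS(us)    ((us) * 1000)

int main(void)
{
        /* Hypothetical two-byte timeout returned by the receiver. */
        unsigned char hi = 0x07, lo = 0xd0;     /* 2000 counts */
        unsigned int counts = (hi << 8) | lo;

        /* 2000 counts * 50us = 100000us = 100ms, matching the new
         * MS_TO_NS(100) defaults set in the probe code above. */
        printf("%u counts -> %u ms, %u ns\n", counts,
               counts * MCE_TIME_UNIT / 1000,
               US_TO_NS(counts * MCE_TIME_UNIT));
        return 0;
}
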
index 565f24c..ce595f9 100644 (file)
@@ -1110,7 +1110,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
        rdev->dev.parent = &pdev->dev;
        rdev->driver_name = NVT_DRIVER_NAME;
        rdev->map_name = RC_MAP_RC6_MCE;
-       rdev->timeout = US_TO_NS(1000);
+       rdev->timeout = MS_TO_NS(100);
        /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
        rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
 #if 0
index 64d9b21..419777a 100644 (file)
@@ -2060,12 +2060,8 @@ static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
                goto fail_irq;
        }
 
-       if (!pci_enable_msi(pci_dev))
-               err = request_irq(pci_dev->irq, cx23885_irq,
-                                 IRQF_DISABLED, dev->name, dev);
-       else
-               err = request_irq(pci_dev->irq, cx23885_irq,
-                                 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+       err = request_irq(pci_dev->irq, cx23885_irq,
+                         IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
        if (err < 0) {
                printk(KERN_ERR "%s: can't get IRQ %d\n",
                       dev->name, pci_dev->irq);
@@ -2114,7 +2110,6 @@ static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
 
        /* unregister stuff */
        free_irq(pci_dev->irq, dev);
-       pci_disable_msi(pci_dev);
 
        cx23885_dev_unregister(dev);
        v4l2_device_unregister(v4l2_dev);
index cfa9f7e..a03945a 100644 (file)
@@ -714,10 +714,19 @@ static int tuner_remove(struct i2c_client *client)
  * returns 0.
  * This function is needed for boards that have a separate tuner for
  * radio (like devices with tea5767).
+ * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
+ *       select a TV frequency. So, t_mode = T_ANALOG_TV could actually
+ *      be used to represent a Digital TV too.
  */
 static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
 {
-       if ((1 << mode & t->mode_mask) == 0)
+       int t_mode;
+       if (mode == V4L2_TUNER_RADIO)
+               t_mode = T_RADIO;
+       else
+               t_mode = T_ANALOG_TV;
+
+       if ((t_mode & t->mode_mask) == 0)
                return -EINVAL;
 
        return 0;
@@ -984,7 +993,7 @@ static void tuner_status(struct dvb_frontend *fe)
        case V4L2_TUNER_RADIO:
                p = "radio";
                break;
-       case V4L2_TUNER_DIGITAL_TV:
+       case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
                p = "digital TV";
                break;
        case V4L2_TUNER_ANALOG_TV:
@@ -1135,9 +1144,8 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
                return 0;
        if (vt->type == t->mode && analog_ops->get_afc)
                vt->afc = analog_ops->get_afc(&t->fe);
-       if (vt->type == V4L2_TUNER_ANALOG_TV)
+       if (t->mode != V4L2_TUNER_RADIO) {
                vt->capability |= V4L2_TUNER_CAP_NORM;
-       if (vt->type != V4L2_TUNER_RADIO) {
                vt->rangelow = tv_range[0] * 16;
                vt->rangehigh = tv_range[1] * 16;
                return 0;
index 2a7e43b..aa7d1d7 100644 (file)
@@ -247,12 +247,12 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                return 0;
 
        /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+       card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
        if (card->csd.structure == 3) {
-               int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
-               if (ext_csd_struct > 2) {
+               if (card->ext_csd.raw_ext_csd_structure > 2) {
                        printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
                                "version %d\n", mmc_hostname(card->host),
-                                       ext_csd_struct);
+                                       card->ext_csd.raw_ext_csd_structure);
                        err = -EINVAL;
                        goto out;
                }
@@ -266,6 +266,10 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                goto out;
        }
 
+       card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
+       card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
+       card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
+       card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
        if (card->ext_csd.rev >= 2) {
                card->ext_csd.sectors =
                        ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
@@ -277,7 +281,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
                        mmc_card_set_blockaddr(card);
        }
-
+       card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
        switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
        case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
             EXT_CSD_CARD_TYPE_26:
@@ -307,6 +311,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                        mmc_hostname(card->host));
        }
 
+       card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
+       card->ext_csd.raw_erase_timeout_mult =
+               ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+       card->ext_csd.raw_hc_erase_grp_size =
+               ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
        if (card->ext_csd.rev >= 3) {
                u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
                card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
@@ -334,6 +343,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
        }
 
+       card->ext_csd.raw_hc_erase_gap_size =
+               ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
+       card->ext_csd.raw_sec_trim_mult =
+               ext_csd[EXT_CSD_SEC_TRIM_MULT];
+       card->ext_csd.raw_sec_erase_mult =
+               ext_csd[EXT_CSD_SEC_ERASE_MULT];
+       card->ext_csd.raw_sec_feature_support =
+               ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+       card->ext_csd.raw_trim_mult =
+               ext_csd[EXT_CSD_TRIM_MULT];
        if (card->ext_csd.rev >= 4) {
                /*
                 * Enhanced area feature support -- check whether the eMMC
@@ -341,7 +360,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                 * area offset and size to user by adding sysfs interface.
                 */
                if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
-                               (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+                   (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
                        u8 hc_erase_grp_sz =
                                ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
                        u8 hc_wp_grp_sz =
@@ -401,17 +420,17 @@ static inline void mmc_free_ext_csd(u8 *ext_csd)
 }
 
 
-static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
-                       unsigned bus_width)
+static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
 {
        u8 *bw_ext_csd;
        int err;
 
+       if (bus_width == MMC_BUS_WIDTH_1)
+               return 0;
+
        err = mmc_get_ext_csd(card, &bw_ext_csd);
-       if (err)
-               return err;
 
-       if ((ext_csd == NULL || bw_ext_csd == NULL)) {
+       if (err || bw_ext_csd == NULL) {
                if (bus_width != MMC_BUS_WIDTH_1)
                        err = -EINVAL;
                goto out;
@@ -421,35 +440,40 @@ static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
                goto out;
 
        /* only compare read only fields */
-       err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
+       err = (!(card->ext_csd.raw_partition_support ==
                        bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
-               (ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
+               (card->ext_csd.raw_erased_mem_count ==
                        bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
-               (ext_csd[EXT_CSD_REV] ==
+               (card->ext_csd.rev ==
                        bw_ext_csd[EXT_CSD_REV]) &&
-               (ext_csd[EXT_CSD_STRUCTURE] ==
+               (card->ext_csd.raw_ext_csd_structure ==
                        bw_ext_csd[EXT_CSD_STRUCTURE]) &&
-               (ext_csd[EXT_CSD_CARD_TYPE] ==
+               (card->ext_csd.raw_card_type ==
                        bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
-               (ext_csd[EXT_CSD_S_A_TIMEOUT] ==
+               (card->ext_csd.raw_s_a_timeout ==
                        bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
-               (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
+               (card->ext_csd.raw_hc_erase_gap_size ==
                        bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
-               (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
+               (card->ext_csd.raw_erase_timeout_mult ==
                        bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
-               (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
+               (card->ext_csd.raw_hc_erase_grp_size ==
                        bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
-               (ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
+               (card->ext_csd.raw_sec_trim_mult ==
                        bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
-               (ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
+               (card->ext_csd.raw_sec_erase_mult ==
                        bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
-               (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
+               (card->ext_csd.raw_sec_feature_support ==
                        bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
-               (ext_csd[EXT_CSD_TRIM_MULT] ==
+               (card->ext_csd.raw_trim_mult ==
                        bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
-               memcmp(&ext_csd[EXT_CSD_SEC_CNT],
-                      &bw_ext_csd[EXT_CSD_SEC_CNT],
-                      4) != 0);
+               (card->ext_csd.raw_sectors[0] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
+               (card->ext_csd.raw_sectors[1] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
+               (card->ext_csd.raw_sectors[2] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
+               (card->ext_csd.raw_sectors[3] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
        if (err)
                err = -EINVAL;
 
@@ -770,7 +794,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                                 */
                                if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
                                        err = mmc_compare_ext_csds(card,
-                                               ext_csd,
                                                bus_width);
                                else
                                        err = mmc_bus_test(card, bus_width);
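
The raw_sectors[] bytes cached above are the same four EXT_CSD bytes the existing code folds into card->ext_csd.sectors. A standalone sketch of that little-endian assembly, assuming EXT_CSD_SEC_CNT is the byte-212 field noted next to raw_sectors[4] in the mmc/card.h hunk further down, also shows the 2 GiB threshold that flips the card to block addressing; the sector count used here is a made-up example.

#include <stdio.h>

/* Assumed offset: the EXT_CSD sector-count field starts at byte 212. */
#define EXT_CSD_SEC_CNT 212

int main(void)
{
        unsigned char ext_csd[512] = { 0 };
        unsigned int sectors;

        /* Hypothetical high-capacity part: 62521344 sectors of 512 bytes. */
        ext_csd[EXT_CSD_SEC_CNT + 0] = 0x00;
        ext_csd[EXT_CSD_SEC_CNT + 1] = 0x00;
        ext_csd[EXT_CSD_SEC_CNT + 2] = 0xba;
        ext_csd[EXT_CSD_SEC_CNT + 3] = 0x03;

        /* Same little-endian assembly as mmc_read_ext_csd() above. */
        sectors = ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
                  ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
                  ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
                  ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

        /* Cards with more than 2 GiB of data (2^31 bytes / 512) switch
         * to block addressing, as in the hunk above. */
        printf("%u sectors, block-addressed: %s\n", sectors,
               sectors > (2u * 1024 * 1024 * 1024) / 512 ? "yes" : "no");
        return 0;
}
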
index eafe44a..63c22b0 100644 (file)
@@ -1428,9 +1428,9 @@ out:
        return features;
 }
 
-#define BOND_VLAN_FEATURES     (NETIF_F_ALL_TX_OFFLOADS | \
-                                NETIF_F_SOFT_FEATURES | \
-                                NETIF_F_LRO)
+#define BOND_VLAN_FEATURES     (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+                                NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+                                NETIF_F_HIGHDMA | NETIF_F_LRO)
 
 static void bond_compute_features(struct bonding *bond)
 {
index 2dfcc80..dfa55f9 100644 (file)
@@ -2289,6 +2289,23 @@ static int gfar_set_mac_address(struct net_device *dev)
        return 0;
 }
 
+/* Check if rx parser should be activated */
+void gfar_check_rx_parser_mode(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs;
+       u32 tempval;
+
+       regs = priv->gfargrp[0].regs;
+
+       tempval = gfar_read(&regs->rctrl);
+       /* If parse is no longer required, then disable parser */
+       if (tempval & RCTRL_REQ_PARSER)
+               tempval |= RCTRL_PRSDEP_INIT;
+       else
+               tempval &= ~RCTRL_PRSDEP_INIT;
+       gfar_write(&regs->rctrl, tempval);
+}
+
 
 /* Enables and disables VLAN insertion/extraction */
 static void gfar_vlan_rx_register(struct net_device *dev,
@@ -2325,12 +2342,9 @@ static void gfar_vlan_rx_register(struct net_device *dev,
                /* Disable VLAN tag extraction */
                tempval = gfar_read(&regs->rctrl);
                tempval &= ~RCTRL_VLEX;
-               /* If parse is no longer required, then disable parser */
-               if (tempval & RCTRL_REQ_PARSER)
-                       tempval |= RCTRL_PRSDEP_INIT;
-               else
-                       tempval &= ~RCTRL_PRSDEP_INIT;
                gfar_write(&regs->rctrl, tempval);
+
+               gfar_check_rx_parser_mode(priv);
        }
 
        gfar_change_mtu(dev, dev->mtu);
index ba36dc7..440e69d 100644 (file)
@@ -274,7 +274,7 @@ extern const char gfar_driver_version[];
 #define RCTRL_PROM             0x00000008
 #define RCTRL_EMEN             0x00000002
 #define RCTRL_REQ_PARSER       (RCTRL_VLEX | RCTRL_IPCSEN | \
-                                RCTRL_TUCSEN)
+                                RCTRL_TUCSEN | RCTRL_FILREN)
 #define RCTRL_CHECKSUMMING     (RCTRL_IPCSEN | RCTRL_TUCSEN | \
                                RCTRL_PRSDEP_INIT)
 #define RCTRL_EXTHASH          (RCTRL_GHTX)
@@ -1156,6 +1156,7 @@ extern void gfar_configure_coalescing(struct gfar_private *priv,
                unsigned long tx_mask, unsigned long rx_mask);
 void gfar_init_sysfs(struct net_device *dev);
 int gfar_set_features(struct net_device *dev, u32 features);
+extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
index 8f8b65a..60f46bc 100644 (file)
@@ -140,7 +140,7 @@ MODULE_LICENSE("GPL");
 module_param(mtu, int, 0);
 module_param(debug, int, 0);
 module_param(rx_copybreak, int, 0);
-module_param(dspcfg_workaround, int, 1);
+module_param(dspcfg_workaround, int, 0);
 module_param_array(options, int, NULL, 0);
 module_param_array(full_duplex, int, NULL, 0);
 MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
@@ -2028,8 +2028,8 @@ static void drain_rx(struct net_device *dev)
                np->rx_ring[i].cmd_status = 0;
                np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (np->rx_skbuff[i]) {
-                       pci_unmap_single(np->pci_dev,
-                               np->rx_dma[i], buflen,
+                       pci_unmap_single(np->pci_dev, np->rx_dma[i],
+                               buflen + NATSEMI_PADDING,
                                PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(np->rx_skbuff[i]);
                }
index 200a363..0ffec46 100644 (file)
@@ -677,9 +677,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
                if (status & RX_FIFO_FULL)
                        dev->stats.rx_fifo_errors++;
 
-               /* Mask off RX interrupt */
-               misr &= ~RX_INTS;
-               napi_schedule(&lp->napi);
+               if (likely(napi_schedule_prep(&lp->napi))) {
+                       /* Mask off RX interrupt */
+                       misr &= ~RX_INTS;
+                       __napi_schedule(&lp->napi);
+               }
        }
 
        /* TX interrupt request */
index 8ec1a9a..2f110fb 100644 (file)
@@ -182,10 +182,10 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
 #ifdef SL_INCLUDE_CSLIP
        cbuff = xchg(&sl->cbuff, cbuff);
        slcomp = xchg(&sl->slcomp, slcomp);
+#endif
 #ifdef CONFIG_SLIP_MODE_SLIP6
        sl->xdata    = 0;
        sl->xbits    = 0;
-#endif
 #endif
        spin_unlock_bh(&sl->lock);
        err = 0;
index 387ca43..304fe78 100644 (file)
@@ -2421,10 +2421,8 @@ static void hso_free_net_device(struct hso_device *hso_dev)
 
        remove_net_device(hso_net->parent);
 
-       if (hso_net->net) {
+       if (hso_net->net)
                unregister_netdev(hso_net->net);
-               free_netdev(hso_net->net);
-       }
 
        /* start freeing */
        for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
@@ -2436,6 +2434,9 @@ static void hso_free_net_device(struct hso_device *hso_dev)
        kfree(hso_net->mux_bulk_tx_buf);
        hso_net->mux_bulk_tx_buf = NULL;
 
+       if (hso_net->net)
+               free_netdev(hso_net->net);
+
        kfree(hso_dev);
 }
 
index 296c316..f2c0c23 100644 (file)
@@ -297,7 +297,9 @@ ath5k_pci_remove(struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int ath5k_pci_suspend(struct device *dev)
 {
-       struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+       struct ath5k_softc *sc = hw->priv;
 
        ath5k_led_off(sc);
        return 0;
@@ -306,7 +308,8 @@ static int ath5k_pci_suspend(struct device *dev)
 static int ath5k_pci_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct ath5k_softc *sc = pci_get_drvdata(pdev);
+       struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+       struct ath5k_softc *sc = hw->priv;
 
        /*
         * Suspend/Resume resets the PCI configuration space, so we have to
index 929c68c..a073cdc 100644 (file)
@@ -10,7 +10,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev,             \
                        struct device_attribute *attr,                  \
                        char *buf)                                      \
 {                                                                      \
-       struct ath5k_softc *sc = dev_get_drvdata(dev);                  \
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);                 \
+       struct ath5k_softc *sc = hw->priv;                              \
        return snprintf(buf, PAGE_SIZE, "%d\n", get);                   \
 }                                                                      \
                                                                        \
@@ -18,7 +19,8 @@ static ssize_t ath5k_attr_store_##name(struct device *dev,            \
                        struct device_attribute *attr,                  \
                        const char *buf, size_t count)                  \
 {                                                                      \
-       struct ath5k_softc *sc = dev_get_drvdata(dev);                  \
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);                 \
+       struct ath5k_softc *sc = hw->priv;                              \
        int val;                                                        \
                                                                        \
        val = (int)simple_strtoul(buf, NULL, 10);                       \
@@ -33,7 +35,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev,             \
                        struct device_attribute *attr,                  \
                        char *buf)                                      \
 {                                                                      \
-       struct ath5k_softc *sc = dev_get_drvdata(dev);                  \
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);                 \
+       struct ath5k_softc *sc = hw->priv;                              \
        return snprintf(buf, PAGE_SIZE, "%d\n", get);                   \
 }                                                                      \
 static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
index 3779b89..33443bc 100644 (file)
@@ -671,7 +671,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
         * TODO - this could be improved to be dependent on the rate.
         *      The hardware can keep up at lower rates, but not higher rates
         */
-       if (fi->keyix != ATH9K_TXKEYIX_INVALID)
+       if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+           !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
                ndelim += ATH_AGGR_ENCRYPTDELIM;
 
        /*
index 2fb53d0..333b69e 100644 (file)
@@ -112,6 +112,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
        { USB_DEVICE(0x04bb, 0x093f) },
        /* NEC WL300NU-G */
        { USB_DEVICE(0x0409, 0x0249) },
+       /* NEC WL300NU-AG */
+       { USB_DEVICE(0x0409, 0x02b4) },
        /* AVM FRITZ!WLAN USB Stick N */
        { USB_DEVICE(0x057c, 0x8401) },
        /* AVM FRITZ!WLAN USB Stick N 2.4 */
index 092e342..942f7a3 100644 (file)
@@ -298,6 +298,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+       {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
        {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
        /* HP - Lite-On ,8188CUS Slim Combo */
index 712baab..e956f65 100644 (file)
@@ -76,10 +76,10 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
 {
        if (skt->nr == 0)
-               gpio_request_array(vpac270_pcmcia_gpios,
+               gpio_free_array(vpac270_pcmcia_gpios,
                                        ARRAY_SIZE(vpac270_pcmcia_gpios));
        else
-               gpio_request_array(vpac270_cf_gpios,
+               gpio_free_array(vpac270_cf_gpios,
                                        ARRAY_SIZE(vpac270_cf_gpios));
 }
 
index 2a20dab..d6620ad 100644 (file)
@@ -516,8 +516,17 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
 
 static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
+       ssb_pcicore_fix_sprom_core_index(pc);
+
        /* Disable PCI interrupts. */
        ssb_write32(pc->dev, SSB_INTVEC, 0);
+
+       /* Additional PCIe always once-executed workarounds */
+       if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+               ssb_pcicore_serdes_workaround(pc);
+               /* TODO: ASPM */
+               /* TODO: Clock Request Update */
+       }
 }
 
 void ssb_pcicore_init(struct ssb_pcicore *pc)
@@ -529,8 +538,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
        if (!ssb_device_is_enabled(dev))
                ssb_device_enable(dev, 0);
 
-       ssb_pcicore_fix_sprom_core_index(pc);
-
 #ifdef CONFIG_SSB_PCICORE_HOSTMODE
        pc->hostmode = pcicore_is_in_hostmode(pc);
        if (pc->hostmode)
@@ -538,13 +545,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
 #endif /* CONFIG_SSB_PCICORE_HOSTMODE */
        if (!pc->hostmode)
                ssb_pcicore_init_clientmode(pc);
-
-       /* Additional PCIe always once-executed workarounds */
-       if (dev->id.coreid == SSB_DEV_PCIE) {
-               ssb_pcicore_serdes_workaround(pc);
-               /* TODO: ASPM */
-               /* TODO: Clock Request Update */
-       }
 }
 
 static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
index 9536d38..21d816e 100644 (file)
@@ -599,8 +599,7 @@ config IT87_WDT
 
 config HP_WATCHDOG
        tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
-       depends on X86
-       default m
+       depends on X86 && PCI
        help
          A software monitoring watchdog and NMI sourcing driver. This driver
          will detect lockups and provide a stack trace. This is a driver that
index 37f72ee..6e4ea6d 100644 (file)
@@ -2213,14 +2213,15 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry,
  * The hash value has to match the hash queue that the dentry is on..
  */
 /*
- * d_move - move a dentry
+ * __d_move - move a dentry
  * @dentry: entry to move
  * @target: new dentry
  *
  * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
+ * dcache entries should not be moved in this way.  Caller must
+ * hold rename_lock.
  */
-void d_move(struct dentry * dentry, struct dentry * target)
+static void __d_move(struct dentry * dentry, struct dentry * target)
 {
        if (!dentry->d_inode)
                printk(KERN_WARNING "VFS: moving negative dcache entry\n");
@@ -2228,8 +2229,6 @@ void d_move(struct dentry * dentry, struct dentry * target)
        BUG_ON(d_ancestor(dentry, target));
        BUG_ON(d_ancestor(target, dentry));
 
-       write_seqlock(&rename_lock);
-
        dentry_lock_for_move(dentry, target);
 
        write_seqcount_begin(&dentry->d_seq);
@@ -2275,6 +2274,20 @@ void d_move(struct dentry * dentry, struct dentry * target)
        spin_unlock(&target->d_lock);
        fsnotify_d_move(dentry);
        spin_unlock(&dentry->d_lock);
+}
+
+/*
+ * d_move - move a dentry
+ * @dentry: entry to move
+ * @target: new dentry
+ *
+ * Update the dcache to reflect the move of a file name. Negative
+ * dcache entries should not be moved in this way.
+ */
+void d_move(struct dentry *dentry, struct dentry *target)
+{
+       write_seqlock(&rename_lock);
+       __d_move(dentry, target);
        write_sequnlock(&rename_lock);
 }
 EXPORT_SYMBOL(d_move);
@@ -2302,7 +2315,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
  * This helper attempts to cope with remotely renamed directories
  *
  * It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
+ * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
  *
  * Note: If ever the locking in lock_rename() changes, then please
  * remember to update this too...
@@ -2317,11 +2330,6 @@ static struct dentry *__d_unalias(struct inode *inode,
        if (alias->d_parent == dentry->d_parent)
                goto out_unalias;
 
-       /* Check for loops */
-       ret = ERR_PTR(-ELOOP);
-       if (d_ancestor(alias, dentry))
-               goto out_err;
-
        /* See lock_rename() */
        ret = ERR_PTR(-EBUSY);
        if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
@@ -2331,7 +2339,7 @@ static struct dentry *__d_unalias(struct inode *inode,
                goto out_err;
        m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-       d_move(alias, dentry);
+       __d_move(alias, dentry);
        ret = alias;
 out_err:
        spin_unlock(&inode->i_lock);
@@ -2416,15 +2424,24 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                alias = __d_find_alias(inode, 0);
                if (alias) {
                        actual = alias;
-                       /* Is this an anonymous mountpoint that we could splice
-                        * into our tree? */
-                       if (IS_ROOT(alias)) {
+                       write_seqlock(&rename_lock);
+
+                       if (d_ancestor(alias, dentry)) {
+                               /* Check for loops */
+                               actual = ERR_PTR(-ELOOP);
+                       } else if (IS_ROOT(alias)) {
+                               /* Is this an anonymous mountpoint that we
+                                * could splice into our tree? */
                                __d_materialise_dentry(dentry, alias);
+                               write_sequnlock(&rename_lock);
                                __d_drop(alias);
                                goto found;
+                       } else {
+                               /* Nope, but we must(!) avoid directory
+                                * aliasing */
+                               actual = __d_unalias(inode, dentry, alias);
                        }
-                       /* Nope, but we must(!) avoid directory aliasing */
-                       actual = __d_unalias(inode, dentry, alias);
+                       write_sequnlock(&rename_lock);
                        if (IS_ERR(actual))
                                dput(alias);
                        goto out_nolock;
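
The dcache changes above are the usual locked/unlocked split: the exported d_move() takes rename_lock itself, while __d_move() assumes the caller already holds it, so d_materialise_unique() can do its loop check and the move (or __d_unalias()) inside one write_seqlock section. A minimal userspace sketch of the same idiom, with a plain pthread mutex standing in for the rename_lock seqlock and invented names throughout:

#include <pthread.h>
#include <stdio.h>

/* Sketch of the __d_move()/d_move() split: the double-underscore helper
 * assumes the caller already holds the lock, so a caller that needs to
 * combine checks and the move in one critical section can do so.  The
 * data here is illustrative, not the dcache structures. */
static pthread_mutex_t rename_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *current_name = "old";

static void __move(const char *target)  /* caller holds rename_lock */
{
        current_name = target;
}

static void move(const char *target)    /* public entry point */
{
        pthread_mutex_lock(&rename_lock);
        __move(target);
        pthread_mutex_unlock(&rename_lock);
}

static void materialise(const char *target)
{
        pthread_mutex_lock(&rename_lock);
        /* ...checks that must not race with a concurrent move... */
        __move(target);
        pthread_mutex_unlock(&rename_lock);
}

int main(void)
{
        move("renamed");
        materialise("spliced");
        printf("%s\n", current_name);
        return 0;
}
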
index 802ac5e..f9fbbe9 100644 (file)
@@ -1069,6 +1069,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
                return 0;
 
        gfs2_log_lock(sdp);
+       spin_lock(&sdp->sd_ail_lock);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
@@ -1080,6 +1081,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
                        goto not_possible;
                bh = bh->b_this_page;
        } while(bh != head);
+       spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
 
        head = bh = page_buffers(page);
@@ -1112,6 +1114,7 @@ not_possible: /* Should never happen */
        WARN_ON(buffer_dirty(bh));
        WARN_ON(buffer_pinned(bh));
 cannot_release:
+       spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        return 0;
 }
index 8ef70f4..2cca293 100644 (file)
@@ -47,10 +47,10 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl)
                                bd_ail_gl_list);
                bh = bd->bd_bh;
                gfs2_remove_from_ail(bd);
-               spin_unlock(&sdp->sd_ail_lock);
-
                bd->bd_bh = NULL;
                bh->b_private = NULL;
+               spin_unlock(&sdp->sd_ail_lock);
+
                bd->bd_blkno = bh->b_blocknr;
                gfs2_log_lock(sdp);
                gfs2_assert_withdraw(sdp, !buffer_busy(bh));
@@ -221,8 +221,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
                }
        }
 
-       if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
+       if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
+               gfs2_log_flush(gl->gl_sbd, NULL);
                gl->gl_sbd->sd_rindex_uptodate = 0;
+       }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);
 }
index 0a064e9..81206e7 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/buffer_head.h>
 #include <linux/rcupdate.h>
 #include <linux/rculist_bl.h>
+#include <linux/completion.h>
 
 #define DIO_WAIT       0x00000010
 #define DIO_METADATA   0x00000020
@@ -546,6 +547,7 @@ struct gfs2_sbd {
        struct gfs2_glock *sd_trans_gl;
        wait_queue_head_t sd_glock_wait;
        atomic_t sd_glock_disposal;
+       struct completion sd_locking_init;
 
        /* Inode Stuff */
 
index 903115f..85c6292 100644 (file)
@@ -903,6 +903,7 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
                if (gfs2_ail1_empty(sdp))
                        break;
        }
+       gfs2_log_flush(sdp, NULL);
 }
 
 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
index 8ac9ae1..2a77071 100644 (file)
@@ -72,6 +72,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
        init_waitqueue_head(&sdp->sd_glock_wait);
        atomic_set(&sdp->sd_glock_disposal, 0);
+       init_completion(&sdp->sd_locking_init);
        spin_lock_init(&sdp->sd_statfs_spin);
 
        spin_lock_init(&sdp->sd_rindex_spin);
@@ -1017,11 +1018,13 @@ hostdata_error:
                fsname++;
        if (lm->lm_mount == NULL) {
                fs_info(sdp, "Now mounting FS...\n");
+               complete(&sdp->sd_locking_init);
                return 0;
        }
        ret = lm->lm_mount(sdp, fsname);
        if (ret == 0)
                fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+       complete(&sdp->sd_locking_init);
        return ret;
 }
 
index ed540e7..fb0edf7 100644 (file)
@@ -757,13 +757,17 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
        struct timespec atime;
        struct gfs2_dinode *di;
        int ret = -EAGAIN;
+       int unlock_required = 0;
 
        /* Skip timestamp update, if this is from a memalloc */
        if (current->flags & PF_MEMALLOC)
                goto do_flush;
-       ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
-       if (ret)
-               goto do_flush;
+       if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+               ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+               if (ret)
+                       goto do_flush;
+               unlock_required = 1;
+       }
        ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (ret)
                goto do_unlock;
@@ -780,7 +784,8 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
        }
        gfs2_trans_end(sdp);
 do_unlock:
-       gfs2_glock_dq_uninit(&gh);
+       if (unlock_required)
+               gfs2_glock_dq_uninit(&gh);
 do_flush:
        if (wbc->sync_mode == WB_SYNC_ALL)
                gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
@@ -1427,7 +1432,20 @@ out:
        return error;
 }
 
-/*
+/**
+ * gfs2_evict_inode - Remove an inode from cache
+ * @inode: The inode to evict
+ *
+ * There are three cases to consider:
+ * 1. i_nlink == 0, we are the final opener (and must deallocate)
+ * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
+ * 3. i_nlink > 0
+ *
+ * If the fs is read only, then we have to treat all cases as per #3
+ * since we are unable to do any deallocation. The inode will be
+ * deallocated by the next read/write node to attempt an allocation
+ * in the same resource group
+ *
  * We have to (at the moment) hold the inodes main lock to cover
  * the gap between unlocking the shared lock on the iopen lock and
  * taking the exclusive lock. I'd rather do a shared -> exclusive
@@ -1470,6 +1488,8 @@ static void gfs2_evict_inode(struct inode *inode)
        if (error)
                goto out_truncate;
 
+       /* Case 1 starts here */
+
        if (S_ISDIR(inode->i_mode) &&
            (ip->i_diskflags & GFS2_DIF_EXHASH)) {
                error = gfs2_dir_exhash_dealloc(ip);
@@ -1493,13 +1513,16 @@ static void gfs2_evict_inode(struct inode *inode)
        goto out_unlock;
 
 out_truncate:
+       /* Case 2 starts here */
        error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
        if (error)
                goto out_unlock;
-       gfs2_final_release_pages(ip);
+       /* Needs to be done before glock release & also in a transaction */
+       truncate_inode_pages(&inode->i_data, 0);
        gfs2_trans_end(sdp);
 
 out_unlock:
+       /* Error path for case 1 */
        if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
                gfs2_glock_dq(&ip->i_iopen_gh);
        gfs2_holder_uninit(&ip->i_iopen_gh);
@@ -1507,6 +1530,7 @@ out_unlock:
        if (error && error != GLR_TRYFAILED && error != -EROFS)
                fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
 out:
+       /* Case 3 starts here */
        truncate_inode_pages(&inode->i_data, 0);
        end_writeback(inode);
 
index e20eab3..443cabc 100644 (file)
@@ -338,6 +338,9 @@ static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
        rv = sscanf(buf, "%u", &first);
        if (rv != 1 || first > 1)
                return -EINVAL;
+       rv = wait_for_completion_killable(&sdp->sd_locking_init);
+       if (rv)
+               return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EBUSY;
        if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
@@ -414,7 +417,9 @@ static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
        rv = sscanf(buf, "%d", &jid);
        if (rv != 1)
                return -EINVAL;
-
+       rv = wait_for_completion_killable(&sdp->sd_locking_init);
+       if (rv)
+               return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EINVAL;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
index 0223c41..5c867dd 100644 (file)
@@ -433,6 +433,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
                        goto err_parent;
                BUG_ON(nd->inode != parent->d_inode);
        } else {
+               if (dentry->d_parent != parent)
+                       goto err_parent;
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
                if (!__d_rcu_to_refcount(dentry, nd->seq))
                        goto err_child;
index 0bafcc9..f9d03ab 100644 (file)
@@ -398,7 +398,6 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
         * this offset and save the original offset.
         */
        data->args.offset = filelayout_get_dserver_offset(lseg, offset);
-       data->mds_offset = offset;
 
        /* Perform an asynchronous write */
        status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
index 6870bc6..e6e8f3b 100644 (file)
@@ -91,7 +91,7 @@ static int nfs4_stat_to_errno(int);
 #define encode_getfh_maxsz      (op_encode_hdr_maxsz)
 #define decode_getfh_maxsz      (op_decode_hdr_maxsz + 1 + \
                                ((3+NFS4_FHSIZE) >> 2))
-#define nfs4_fattr_bitmap_maxsz 3
+#define nfs4_fattr_bitmap_maxsz 4
 #define encode_getattr_maxsz    (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
 #define nfs4_name_maxsz                (1 + ((3 + NFS4_MAXNAMLEN) >> 2))
 #define nfs4_path_maxsz                (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
index e268e3b..7271680 100644 (file)
@@ -864,6 +864,8 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
 
        data->args.fh     = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
+       /* pnfs_set_layoutcommit needs this */
+       data->mds_offset = data->args.offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages  = data->pagevec;
        data->args.count  = count;
index 3a10ef5..6cd5b64 100644 (file)
@@ -210,7 +210,7 @@ struct acpi_device_power_state {
 struct acpi_device_power {
        int state;              /* Current state */
        struct acpi_device_power_flags flags;
-       struct acpi_device_power_state states[4];       /* Power states (D0-D3) */
+       struct acpi_device_power_state states[ACPI_D_STATE_COUNT];      /* Power states (D0-D3Cold) */
 };
 
 /* Performance Management */
index a756bc8..4543b6f 100644 (file)
@@ -98,8 +98,11 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
 /*
  * Spinlock primitives
  */
+
+#ifndef acpi_os_create_lock
 acpi_status
 acpi_os_create_lock(acpi_spinlock *out_handle);
+#endif
 
 void acpi_os_delete_lock(acpi_spinlock handle);
 
index 5d2a5e9..2ce1be9 100644 (file)
@@ -159,6 +159,24 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
        } while (0)
 #endif
 
+/*
+ * When lockdep is enabled, the spin_lock_init() macro stringifies its
+ * argument and uses that as a name for the lock in debugging.
+ * By executing spin_lock_init() in a macro, the key changes from "lock" for
+ * all locks to the name of the argument of acpi_os_create_lock(), which
+ * prevents lockdep from reporting false positives for ACPICA locks.
+ */
+#define acpi_os_create_lock(__handle)                          \
+({                                                             \
+       spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock));        \
+                                                               \
+       if (lock) {                                             \
+               *(__handle) = lock;                             \
+               spin_lock_init(*(__handle));                    \
+       }                                                       \
+       lock ? AE_OK : AE_NO_MEMORY;                            \
+})
+
 #endif /* __KERNEL__ */
 
 #endif /* __ACLINUX_H__ */
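
The comment above relies on a property of macro expansion that is easy to show outside the kernel: every textual expansion site gets its own static object and its own stringified name, whereas a single helper function would give all ACPICA locks one shared key. A userspace analogue, with invented structure and macro names rather than lockdep's:

#include <stdio.h>

/* Userspace analogue of why acpi_os_create_lock() is now a macro: each
 * expansion of INIT_NAMED gets its own static "class" with the argument
 * stringified as its name, much as spin_lock_init() does under lockdep.
 * A plain function would register every lock under one shared name. */
struct lock_class {
        const char *name;
};

#define INIT_NAMED(lockp)                                               \
do {                                                                    \
        static struct lock_class key = { .name = #lockp };             \
        (lockp)->class = &key;                                          \
} while (0)

struct demo_lock {
        struct lock_class *class;
};

int main(void)
{
        struct demo_lock acpi_gbl_gpe_lock, acpi_gbl_hardware_lock;

        INIT_NAMED(&acpi_gbl_gpe_lock);
        INIT_NAMED(&acpi_gbl_hardware_lock);

        /* Two distinct classes with distinct names, not one "lock". */
        printf("%s / %s\n", acpi_gbl_gpe_lock.class->name,
               acpi_gbl_hardware_lock.class->name);
        return 0;
}
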
index e08f344..3d53efd 100644 (file)
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
index 8b45384..baa397e 100644 (file)
@@ -676,7 +676,8 @@ void irq_gc_mask_disable_reg(struct irq_data *d);
 void irq_gc_mask_set_bit(struct irq_data *d);
 void irq_gc_mask_clr_bit(struct irq_data *d);
 void irq_gc_unmask_enable_reg(struct irq_data *d);
-void irq_gc_ack(struct irq_data *d);
+void irq_gc_ack_set_bit(struct irq_data *d);
+void irq_gc_ack_clr_bit(struct irq_data *d);
 void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
 void irq_gc_eoi(struct irq_data *d);
 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
index e1e3b2b..935699b 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/compiler.h>
 #include <linux/mutex.h>
 
+#define MIN_MEMORY_BLOCK_SIZE     (1 << SECTION_SIZE_BITS)
+
 struct memory_block {
        unsigned long start_section_nr;
        unsigned long end_section_nr;
index c6927a4..6ad4355 100644 (file)
@@ -64,6 +64,19 @@ struct mmc_ext_csd {
        unsigned long long      enhanced_area_offset;   /* Units: Byte */
        unsigned int            enhanced_area_size;     /* Units: KB */
        unsigned int            boot_size;              /* in bytes */
+       u8                      raw_partition_support;  /* 160 */
+       u8                      raw_erased_mem_count;   /* 181 */
+       u8                      raw_ext_csd_structure;  /* 194 */
+       u8                      raw_card_type;          /* 196 */
+       u8                      raw_s_a_timeout;                /* 217 */
+       u8                      raw_hc_erase_gap_size;  /* 221 */
+       u8                      raw_erase_timeout_mult; /* 223 */
+       u8                      raw_hc_erase_grp_size;  /* 224 */
+       u8                      raw_sec_trim_mult;      /* 229 */
+       u8                      raw_sec_erase_mult;     /* 230 */
+       u8                      raw_sec_feature_support;/* 231 */
+       u8                      raw_trim_mult;          /* 232 */
+       u8                      raw_sectors[4];         /* 212 - 4 bytes */
 };
 
 struct sd_scr {
index 54b8b4d..9e19477 100644 (file)
@@ -1097,12 +1097,6 @@ struct net_device {
 #define NETIF_F_ALL_FCOE       (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
                                 NETIF_F_FSO)
 
-#define NETIF_F_ALL_TX_OFFLOADS        (NETIF_F_ALL_CSUM | NETIF_F_SG | \
-                                NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
-                                NETIF_F_HIGHDMA | \
-                                NETIF_F_SCTP_CSUM | \
-                                NETIF_F_ALL_FCOE)
-
        /*
         * If one device supports one of these features, then enable them
         * for all in netdev_increment_features.
index dd6847e..6506458 100644 (file)
@@ -63,6 +63,7 @@ typedef enum {
        SCTP_CMD_ECN_ECNE,      /* Do delayed ECNE processing. */
        SCTP_CMD_ECN_CWR,       /* Do delayed CWR processing.  */
        SCTP_CMD_TIMER_START,   /* Start a timer.  */
+       SCTP_CMD_TIMER_START_ONCE, /* Start a timer once */
        SCTP_CMD_TIMER_RESTART, /* Restart a timer. */
        SCTP_CMD_TIMER_STOP,    /* Stop a timer. */
        SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */
index 99b027b..ca4693b 100644 (file)
@@ -80,7 +80,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
 
 void sctp_ulpevent_free(struct sctp_ulpevent *);
 int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list);
 
 struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
        const struct sctp_association *asoc,
index 31a9db7..3a2cab4 100644 (file)
@@ -101,10 +101,10 @@ void irq_gc_unmask_enable_reg(struct irq_data *d)
 }
 
 /**
- * irq_gc_ack - Ack pending interrupt
+ * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
  * @d: irq_data
  */
-void irq_gc_ack(struct irq_data *d)
+void irq_gc_ack_set_bit(struct irq_data *d)
 {
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        u32 mask = 1 << (d->irq - gc->irq_base);
@@ -114,6 +114,20 @@ void irq_gc_ack(struct irq_data *d)
        irq_gc_unlock(gc);
 }
 
+/**
+ * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
+ * @d: irq_data
+ */
+void irq_gc_ack_clr_bit(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       u32 mask = ~(1 << (d->irq - gc->irq_base));
+
+       irq_gc_lock(gc);
+       irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+       irq_gc_unlock(gc);
+}
+
 /**
  * irq_gc_mask_disable_reg_and_ack- Mask and ack pending interrupt
  * @d: irq_data
index 7e59ffb..ba06207 100644 (file)
@@ -84,9 +84,32 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
 
+/*
+ * The rcu_scheduler_active variable transitions from zero to one just
+ * before the first task is spawned.  So when this variable is zero, RCU
+ * can assume that there is but one task, allowing RCU to (for example)
+ * optimize synchronize_sched() to a simple barrier().  When this variable
+ * is one, RCU must actually do all the hard work required to detect real
+ * grace periods.  This variable is also used to suppress boot-time false
+ * positives from lockdep-RCU error checking.
+ */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+/*
+ * The rcu_scheduler_fully_active variable transitions from zero to one
+ * during the early_initcall() processing, which is after the scheduler
+ * is capable of creating new tasks.  So RCU processing (for example,
+ * creating tasks for RCU priority boosting) must be delayed until after
+ * rcu_scheduler_fully_active transitions from zero to one.  We also
+ * currently delay invocation of any RCU callbacks until after this point.
+ *
+ * It might later prove better for people registering RCU callbacks during
+ * early boot to take responsibility for these callbacks, but one step at
+ * a time.
+ */
+static int rcu_scheduler_fully_active __read_mostly;
+
 #ifdef CONFIG_RCU_BOOST
 
 /*
@@ -98,7 +121,6 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
-static char rcu_kthreads_spawnable;
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
@@ -1467,6 +1489,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
+       if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+               return;
        if (likely(!rsp->boost)) {
                rcu_do_batch(rsp, rdp);
                return;
index 14dc7dd..75113cb 100644 (file)
@@ -1532,7 +1532,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
        struct sched_param sp;
        struct task_struct *t;
 
-       if (!rcu_kthreads_spawnable ||
+       if (!rcu_scheduler_fully_active ||
            per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
                return 0;
        t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
@@ -1639,7 +1639,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
        struct sched_param sp;
        struct task_struct *t;
 
-       if (!rcu_kthreads_spawnable ||
+       if (!rcu_scheduler_fully_active ||
            rnp->qsmaskinit == 0)
                return 0;
        if (rnp->node_kthread_task == NULL) {
@@ -1665,7 +1665,7 @@ static int __init rcu_spawn_kthreads(void)
        int cpu;
        struct rcu_node *rnp;
 
-       rcu_kthreads_spawnable = 1;
+       rcu_scheduler_fully_active = 1;
        for_each_possible_cpu(cpu) {
                per_cpu(rcu_cpu_has_work, cpu) = 0;
                if (cpu_online(cpu))
@@ -1687,7 +1687,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
        struct rcu_node *rnp = rdp->mynode;
 
        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-       if (rcu_kthreads_spawnable) {
+       if (rcu_scheduler_fully_active) {
                (void)rcu_spawn_one_cpu_kthread(cpu);
                if (rnp->node_kthread_task == NULL)
                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
@@ -1726,6 +1726,13 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
 }
 
+static int __init rcu_scheduler_really_started(void)
+{
+       rcu_scheduler_fully_active = 1;
+       return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
 static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
index 9769c75..3dc716f 100644 (file)
@@ -7757,6 +7757,9 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 #endif
 #endif
        cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+       cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 
 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
index 86bff9b..6e82148 100644 (file)
@@ -528,7 +528,11 @@ static int vlan_dev_init(struct net_device *dev)
                                          (1<<__LINK_STATE_DORMANT))) |
                      (1<<__LINK_STATE_PRESENT);
 
-       dev->hw_features = NETIF_F_ALL_TX_OFFLOADS;
+       dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
+                          NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
+                          NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
+                          NETIF_F_ALL_FCOE;
+
        dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
        dev->gso_max_size = real_dev->gso_max_size;
 
index d3a05b9..bcd158f 100644 (file)
@@ -393,6 +393,9 @@ int hci_conn_del(struct hci_conn *conn)
 
        hci_dev_put(hdev);
 
+       if (conn->handle == 0)
+               kfree(conn);
+
        return 0;
 }
 
index c405a95..43b4c2d 100644 (file)
@@ -464,7 +464,8 @@ static void hidp_idle_timeout(unsigned long arg)
 {
        struct hidp_session *session = (struct hidp_session *) arg;
 
-       kthread_stop(session->task);
+       atomic_inc(&session->terminate);
+       wake_up_process(session->task);
 }
 
 static void hidp_set_timer(struct hidp_session *session)
@@ -535,7 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session,
                skb_queue_purge(&session->ctrl_transmit);
                skb_queue_purge(&session->intr_transmit);
 
-               kthread_stop(session->task);
+               atomic_inc(&session->terminate);
+               wake_up_process(current);
        }
 }
 
@@ -706,9 +708,8 @@ static int hidp_session(void *arg)
        add_wait_queue(sk_sleep(intr_sk), &intr_wait);
        session->waiting_for_startup = 0;
        wake_up_interruptible(&session->startup_queue);
-       while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
+       set_current_state(TASK_INTERRUPTIBLE);
+       while (!atomic_read(&session->terminate)) {
                if (ctrl_sk->sk_state != BT_CONNECTED ||
                                intr_sk->sk_state != BT_CONNECTED)
                        break;
@@ -726,6 +727,7 @@ static int hidp_session(void *arg)
                hidp_process_transmit(session);
 
                schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
@@ -1060,7 +1062,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 err_add_device:
        hid_destroy_device(session->hid);
        session->hid = NULL;
-       kthread_stop(session->task);
+       atomic_inc(&session->terminate);
+       wake_up_process(session->task);
 
 unlink:
        hidp_del_timer(session);
@@ -1111,7 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
                        skb_queue_purge(&session->ctrl_transmit);
                        skb_queue_purge(&session->intr_transmit);
 
-                       kthread_stop(session->task);
+                       atomic_inc(&session->terminate);
+                       wake_up_process(session->task);
                }
        } else
                err = -ENOENT;
index 19e9500..af1bcc8 100644 (file)
@@ -142,6 +142,7 @@ struct hidp_session {
        uint ctrl_mtu;
        uint intr_mtu;
 
+       atomic_t terminate;
        struct task_struct *task;
 
        unsigned char keys[8];
index 56fdd91..7705e26 100644 (file)
@@ -620,7 +620,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
                                        struct sock *parent = bt_sk(sk)->parent;
                                        rsp.result = cpu_to_le16(L2CAP_CR_PEND);
                                        rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
-                                       parent->sk_data_ready(parent, 0);
+                                       if (parent)
+                                               parent->sk_data_ready(parent, 0);
 
                                } else {
                                        sk->sk_state = BT_CONFIG;
@@ -2323,7 +2324,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
        sk = chan->sk;
 
-       if (sk->sk_state != BT_CONFIG) {
+       if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) {
                struct l2cap_cmd_rej rej;
 
                rej.reason = cpu_to_le16(0x0002);
@@ -2334,7 +2335,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
        /* Reject if config buffer is too small. */
        len = cmd_len - sizeof(*req);
-       if (chan->conf_len + len > sizeof(chan->conf_req)) {
+       if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
                l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
                                l2cap_build_conf_rsp(chan, rsp,
                                        L2CAP_CONF_REJECT, flags), rsp);
@@ -4009,7 +4010,8 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                                        struct sock *parent = bt_sk(sk)->parent;
                                        res = L2CAP_CR_PEND;
                                        stat = L2CAP_CS_AUTHOR_PEND;
-                                       parent->sk_data_ready(parent, 0);
+                                       if (parent)
+                                               parent->sk_data_ready(parent, 0);
                                } else {
                                        sk->sk_state = BT_CONFIG;
                                        res = L2CAP_CR_SUCCESS;
index 58ffa7d..669d2e3 100644 (file)
@@ -877,7 +877,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
                local->sched_scan_ies.ie[i] = kzalloc(2 +
                                                      IEEE80211_MAX_SSID_LEN +
-                                                     local->scan_ies_len,
+                                                     local->scan_ies_len +
+                                                     req->ie_len,
                                                      GFP_KERNEL);
                if (!local->sched_scan_ies.ie[i]) {
                        ret = -ENOMEM;
index d91c1a2..8f6a302 100644 (file)
@@ -86,6 +86,11 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int queue = rx->queue;
+
+       /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+       if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+               queue = 0;
 
        /*
         * it makes no sense to check for MIC errors on anything other
@@ -148,8 +153,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 
 update_iv:
        /* update IV in key information to be able to detect replays */
-       rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
-       rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
+       rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
+       rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
 
        return RX_CONTINUE;
 
@@ -241,6 +246,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
        struct ieee80211_key *key = rx->key;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       int queue = rx->queue;
+
+       /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+       if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+               queue = 0;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
@@ -261,7 +271,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
        res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
                                          key, skb->data + hdrlen,
                                          skb->len - hdrlen, rx->sta->sta.addr,
-                                         hdr->addr1, hwaccel, rx->queue,
+                                         hdr->addr1, hwaccel, queue,
                                          &rx->tkip_iv32,
                                          &rx->tkip_iv16);
        if (res != TKIP_DECRYPT_OK)
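
Both wpa.c hunks map the last RX queue slot (the non-QoS data "queue", rx->queue == NUM_RX_DATA_QUEUES - 1) onto queue 0 before indexing the TKIP replay state, so that non-QoS frames and TID 0 share a single IV counter; the comment added by the patch notes that keeping them separate would allow TID 0 versus non-QoS replays. The remap itself is just the following, shown here as a hypothetical helper equivalent to the open-coded version above:

    /* fold the non-QoS slot onto TID 0 so both share one replay counter */
    static inline int tkip_replay_queue(int rx_queue, int num_rx_data_queues)
    {
            return rx_queue == num_rx_data_queues - 1 ? 0 : rx_queue;
    }
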
index b4f3cf0..08b3cea 100644 (file)
@@ -500,23 +500,20 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         * Note: Adler-32 is no longer applicable, as it has been replaced
         * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
         */
-       if (!sctp_checksum_disable &&
-           !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) {
-               __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
+       if (!sctp_checksum_disable) {
+               if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
+                       __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
 
-               /* 3) Put the resultant value into the checksum field in the
-                *    common header, and leave the rest of the bits unchanged.
-                */
-               sh->checksum = sctp_end_cksum(crc32);
-       } else {
-               if (dst->dev->features & NETIF_F_SCTP_CSUM) {
+                       /* 3) Put the resultant value into the checksum field in the
+                        *    common header, and leave the rest of the bits unchanged.
+                        */
+                       sh->checksum = sctp_end_cksum(crc32);
+               } else {
                        /* no need to seed pseudo checksum for SCTP */
                        nskb->ip_summed = CHECKSUM_PARTIAL;
                        nskb->csum_start = (skb_transport_header(nskb) -
                                            nskb->head);
                        nskb->csum_offset = offsetof(struct sctphdr, checksum);
-               } else {
-                       nskb->ip_summed = CHECKSUM_UNNECESSARY;
                }
        }
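
After this restructuring the SCTP transmit path either hands the checksum to hardware (NETIF_F_SCTP_CSUM, via CHECKSUM_PARTIAL) or computes CRC32-C in software; the old branch that marked packets CHECKSUM_UNNECESSARY on NETIF_F_NO_CSUM devices is gone, so a checksum is always produced whenever checksumming is enabled. For reference, a small standalone CRC32-C (Castagnoli) routine, the polynomial SCTP mandates; the kernel uses an optimized implementation, this bitwise version is only for illustration:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    static uint32_t crc32c(const uint8_t *buf, size_t len)
    {
            uint32_t crc = ~0u;

            for (size_t i = 0; i < len; i++) {
                    crc ^= buf[i];
                    for (int k = 0; k < 8; k++)
                            crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78u
                                            : crc >> 1;
            }
            return ~crc;
    }

    int main(void)
    {
            /* standard check value: crc32c("123456789") == 0xE3069283 */
            printf("%08X\n", (unsigned)crc32c((const uint8_t *)"123456789", 9));
            return 0;
    }
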
 
index 1c88c89..d036821 100644 (file)
@@ -1582,6 +1582,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 #endif /* SCTP_DEBUG */
        if (transport) {
                if (bytes_acked) {
+                       struct sctp_association *asoc = transport->asoc;
+
                        /* We may have counted DATA that was migrated
                         * to this transport due to DEL-IP operation.
                         * Subtract those bytes, since they were never
@@ -1600,6 +1602,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                        transport->error_count = 0;
                        transport->asoc->overall_error_count = 0;
 
+                       /*
+                        * While in SHUTDOWN PENDING, we may have started
+                        * the T5 shutdown guard timer after reaching the
+                        * retransmission limit. Stop that timer as soon
+                        * as the receiver acknowledged any data.
+                        */
+                       if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+                           del_timer(&asoc->timers
+                               [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+                                       sctp_association_put(asoc);
+
                        /* Mark the destination transport address as
                         * active if it is not so marked.
                         */
@@ -1629,10 +1642,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                         * A sender is doing zero window probing when the
                         * receiver's advertised window is zero, and there is
                         * only one data chunk in flight to the receiver.
+                        *
+                        * Allow the association to timeout while in SHUTDOWN
+                        * PENDING or SHUTDOWN RECEIVED in case the receiver
+                        * stays in zero window mode forever.
                         */
                        if (!q->asoc->peer.rwnd &&
                            !list_empty(&tlist) &&
-                           (sack_ctsn+2 == q->asoc->next_tsn)) {
+                           (sack_ctsn+2 == q->asoc->next_tsn) &&
+                           q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
                                SCTP_DEBUG_PRINTK("%s: SACK received for zero "
                                                  "window probe: %u\n",
                                                  __func__, sack_ctsn);
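
The new block in sctp_check_transmitted() stops the T5 shutdown guard timer as soon as the peer acknowledges data while the association is in SHUTDOWN PENDING. The del_timer()/sctp_association_put() pairing matters here: the pending timer holds a reference on the association, and del_timer() returns nonzero only if the timer was still pending, so the reference is dropped exactly once whether the timer fires or is cancelled. The same pairing, wrapped in a hypothetical helper:

    /* hypothetical wrapper around the pattern used above */
    static void stop_t5_guard(struct sctp_association *asoc)
    {
            struct timer_list *t =
                    &asoc->timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD];

            if (del_timer(t))                       /* was still pending */
                    sctp_association_put(asoc);     /* drop the timer's ref */
    }
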
index 534c2e5..6e0f882 100644 (file)
@@ -670,10 +670,19 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
        /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
         * HEARTBEAT should clear the error counter of the destination
         * transport address to which the HEARTBEAT was sent.
-        * The association's overall error count is also cleared.
         */
        t->error_count = 0;
-       t->asoc->overall_error_count = 0;
+
+       /*
+        * Although RFC4960 specifies that the overall error count must
+        * be cleared when a HEARTBEAT ACK is received, we make an
+        * exception while in SHUTDOWN PENDING. If the peer keeps its
+        * window shut forever, we may never be able to transmit our
+        * outstanding data and rely on the retransmission limit being reached
+        * to shut down the association.
+        */
+       if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+               t->asoc->overall_error_count = 0;
 
        /* Clear the hb_sent flag to signal that we had a good
         * acknowledgement.
@@ -1437,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                        sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
                        break;
 
+               case SCTP_CMD_TIMER_START_ONCE:
+                       timer = &asoc->timers[cmd->obj.to];
+
+                       if (timer_pending(timer))
+                               break;
+                       /* fall through */
+
                case SCTP_CMD_TIMER_START:
                        timer = &asoc->timers[cmd->obj.to];
                        timeout = asoc->timeouts[cmd->obj.to];
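
SCTP_CMD_TIMER_START_ONCE is implemented as a guard in front of the existing SCTP_CMD_TIMER_START case: if the timer is already pending it is left alone, otherwise execution falls through and the timer is armed as usual (the START case also takes a reference on the association). In isolation the semantics look like this, assuming the ordinary kernel timer API:

    /* start-once: never push out an expiry that is already ticking;
     * contrast with mod_timer(), which would restart it every time */
    static void timer_start_once(struct timer_list *timer, unsigned long timeout)
    {
            if (timer_pending(timer))
                    return;
            timer->expires = jiffies + timeout;
            add_timer(timer);
    }
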
index a297283..2461171 100644 (file)
@@ -5154,7 +5154,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
         * The sender of the SHUTDOWN MAY also start an overall guard timer
         * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
         */
-       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
        if (asoc->autoclose)
@@ -5299,14 +5299,28 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
        SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
 
        if (asoc->overall_error_count >= asoc->max_retrans) {
-               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-                               SCTP_ERROR(ETIMEDOUT));
-               /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-               sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-                               SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-               return SCTP_DISPOSITION_DELETE_TCB;
+               if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+                       /*
+                        * We are here likely because the receiver had its rwnd
+                        * closed for a while and we have not been able to
+                        * transmit the locally queued data within the maximum
+                        * retransmission attempts limit.  Start the T5
+                        * shutdown guard timer to give the receiver one last
+                        * chance and some additional time to recover before
+                        * aborting.
+                        */
+                       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
+                               SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+               } else {
+                       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                                       SCTP_ERROR(ETIMEDOUT));
+                       /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+                       sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+                                       SCTP_PERR(SCTP_ERROR_NO_ERROR));
+                       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+                       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+                       return SCTP_DISPOSITION_DELETE_TCB;
+               }
        }
 
        /* E1) For the destination address for which the timer
index 0338dc6..7c211a7 100644 (file)
@@ -827,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
        /* SCTP_STATE_ESTABLISHED */ \
        TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
        /* SCTP_STATE_SHUTDOWN_PENDING */ \
-       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
        /* SCTP_STATE_SHUTDOWN_SENT */ \
        TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
        /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
index 08c6238..d3ccf79 100644 (file)
@@ -1384,6 +1384,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
        struct sctp_endpoint *ep;
        struct sctp_association *asoc;
        struct list_head *pos, *temp;
+       unsigned int data_was_unread;
 
        SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
 
@@ -1393,6 +1394,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
        ep = sctp_sk(sk)->ep;
 
+       /* Clean up any skbs sitting on the receive queue.  */
+       data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+       data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
        /* Walk all associations on an endpoint.  */
        list_for_each_safe(pos, temp, &ep->asocs) {
                asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1410,7 +1415,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
                        }
                }
 
-               if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+               if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+                   !skb_queue_empty(&asoc->ulpq.reasm) ||
+                   (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
                        struct sctp_chunk *chunk;
 
                        chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1420,10 +1427,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
                        sctp_primitive_SHUTDOWN(asoc, NULL);
        }
 
-       /* Clean up any skbs sitting on the receive queue.  */
-       sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
-       sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
        /* On a TCP-style socket, block for at most linger_time if set. */
        if (sctp_style(sk, TCP) && timeout)
                sctp_wait_for_close(sk, timeout);
index e70e5fc..8a84017 100644 (file)
@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
 }
 
 /* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
 {
        struct sk_buff *skb;
-       while ((skb = skb_dequeue(list)) != NULL)
-               sctp_ulpevent_free(sctp_skb2event(skb));
+       unsigned int data_unread = 0;
+
+       while ((skb = skb_dequeue(list)) != NULL) {
+               struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+               if (!sctp_ulpevent_is_notification(event))
+                       data_unread += skb->len;
+
+               sctp_ulpevent_free(event);
+       }
+
+       return data_unread;
 }
index 9a80a92..e45d2fb 100644 (file)
@@ -597,7 +597,7 @@ void rpcb_getport_async(struct rpc_task *task)
        u32 bind_version;
        struct rpc_xprt *xprt;
        struct rpc_clnt *rpcb_clnt;
-       static struct rpcbind_args *map;
+       struct rpcbind_args *map;
        struct rpc_task *child;
        struct sockaddr_storage addr;
        struct sockaddr *sap = (struct sockaddr *)&addr;
index a27406b..4814e24 100644 (file)
@@ -616,30 +616,25 @@ static void __rpc_execute(struct rpc_task *task)
        BUG_ON(RPC_IS_QUEUED(task));
 
        for (;;) {
+               void (*do_action)(struct rpc_task *);
 
                /*
-                * Execute any pending callback.
+                * Execute any pending callback first.
                 */
-               if (task->tk_callback) {
-                       void (*save_callback)(struct rpc_task *);
-
-                       /*
-                        * We set tk_callback to NULL before calling it,
-                        * in case it sets the tk_callback field itself:
-                        */
-                       save_callback = task->tk_callback;
-                       task->tk_callback = NULL;
-                       save_callback(task);
-               } else {
+               do_action = task->tk_callback;
+               task->tk_callback = NULL;
+               if (do_action == NULL) {
                        /*
                         * Perform the next FSM step.
-                        * tk_action may be NULL when the task has been killed
-                        * by someone else.
+                        * tk_action may be NULL if the task has been killed.
+                        * In particular, note that rpc_killall_tasks may
+                        * do this at any time, so beware when dereferencing.
                         */
-                       if (task->tk_action == NULL)
+                       do_action = task->tk_action;
+                       if (do_action == NULL)
                                break;
-                       task->tk_action(task);
                }
+               do_action(task);
 
                /*
                 * Lockless check for whether task is sleeping or not.
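
The __rpc_execute() rewrite folds the "pending one-shot callback" and "next FSM action" branches into a single function-pointer dispatch, clearing tk_callback before the call so a callback can safely re-arm itself. A tiny userspace analogue of the loop, with a hypothetical task structure rather than the RPC types:

    #include <stdio.h>

    struct task {
            void (*callback)(struct task *);  /* one-shot override */
            void (*action)(struct task *);    /* regular FSM step */
            int steps_left;
    };

    static void step(struct task *t)
    {
            printf("step %d\n", t->steps_left);
            if (--t->steps_left == 0)
                    t->action = NULL;         /* done: stop the loop */
    }

    static void run(struct task *t)
    {
            for (;;) {
                    void (*do_action)(struct task *) = t->callback;

                    t->callback = NULL;       /* clear before calling, in case
                                               * the callback re-arms itself */
                    if (do_action == NULL) {
                            do_action = t->action;
                            if (do_action == NULL)
                                    break;    /* nothing left to run */
                    }
                    do_action(t);
            }
    }

    int main(void)
    {
            struct task t = { .callback = NULL, .action = step, .steps_left = 3 };
            run(&t);
            return 0;
    }
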
index c22ef34..880dbe2 100644 (file)
@@ -366,6 +366,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
 
        mutex_init(&rdev->mtx);
        mutex_init(&rdev->devlist_mtx);
+       mutex_init(&rdev->sched_scan_mtx);
        INIT_LIST_HEAD(&rdev->netdev_list);
        spin_lock_init(&rdev->bss_lock);
        INIT_LIST_HEAD(&rdev->bss_list);
@@ -701,6 +702,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
        rfkill_destroy(rdev->rfkill);
        mutex_destroy(&rdev->mtx);
        mutex_destroy(&rdev->devlist_mtx);
+       mutex_destroy(&rdev->sched_scan_mtx);
        list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
                cfg80211_put_bss(&scan->pub);
        cfg80211_rdev_free_wowlan(rdev);
@@ -737,12 +739,16 @@ static void wdev_cleanup_work(struct work_struct *work)
                ___cfg80211_scan_done(rdev, true);
        }
 
+       cfg80211_unlock_rdev(rdev);
+
+       mutex_lock(&rdev->sched_scan_mtx);
+
        if (WARN_ON(rdev->sched_scan_req &&
                    rdev->sched_scan_req->dev == wdev->netdev)) {
                __cfg80211_stop_sched_scan(rdev, false);
        }
 
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 
        mutex_lock(&rdev->devlist_mtx);
        rdev->opencount--;
@@ -830,9 +836,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
                        break;
                case NL80211_IFTYPE_P2P_CLIENT:
                case NL80211_IFTYPE_STATION:
-                       cfg80211_lock_rdev(rdev);
+                       mutex_lock(&rdev->sched_scan_mtx);
                        __cfg80211_stop_sched_scan(rdev, false);
-                       cfg80211_unlock_rdev(rdev);
+                       mutex_unlock(&rdev->sched_scan_mtx);
 
                        wdev_lock(wdev);
 #ifdef CONFIG_CFG80211_WEXT
index 3dce1f1..a570ff9 100644 (file)
@@ -65,6 +65,8 @@ struct cfg80211_registered_device {
        struct work_struct scan_done_wk;
        struct work_struct sched_scan_results_wk;
 
+       struct mutex sched_scan_mtx;
+
 #ifdef CONFIG_NL80211_TESTMODE
        struct genl_info *testmode_info;
 #endif
index f07602d..cea3381 100644 (file)
@@ -3461,9 +3461,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
                return -EINVAL;
 
-       if (rdev->sched_scan_req)
-               return -EINPROGRESS;
-
        if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
                return -EINVAL;
 
@@ -3502,12 +3499,21 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        if (ie_len > wiphy->max_scan_ie_len)
                return -EINVAL;
 
+       mutex_lock(&rdev->sched_scan_mtx);
+
+       if (rdev->sched_scan_req) {
+               err = -EINPROGRESS;
+               goto out;
+       }
+
        request = kzalloc(sizeof(*request)
                        + sizeof(*request->ssids) * n_ssids
                        + sizeof(*request->channels) * n_channels
                        + ie_len, GFP_KERNEL);
-       if (!request)
-               return -ENOMEM;
+       if (!request) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        if (n_ssids)
                request->ssids = (void *)&request->channels[n_channels];
@@ -3605,6 +3611,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 out_free:
        kfree(request);
 out:
+       mutex_unlock(&rdev->sched_scan_mtx);
        return err;
 }
 
@@ -3612,12 +3619,17 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
                                   struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       int err;
 
        if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
            !rdev->ops->sched_scan_stop)
                return -EOPNOTSUPP;
 
-       return __cfg80211_stop_sched_scan(rdev, false);
+       mutex_lock(&rdev->sched_scan_mtx);
+       err = __cfg80211_stop_sched_scan(rdev, false);
+       mutex_unlock(&rdev->sched_scan_mtx);
+
+       return err;
 }
 
 static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
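
With the new rdev->sched_scan_mtx taken early in nl80211_start_sched_scan(), the former early returns for -EINPROGRESS and -ENOMEM become goto out so the unlock can never be skipped, and the stop handler gains the same lock/unlock bracket. The general shape, with hypothetical names:

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct scan_req { /* ... */ };

    struct my_dev {
            struct mutex scan_mtx;
            struct scan_req *active_req;
    };

    static int start_scan(struct my_dev *dev, size_t req_size)
    {
            struct scan_req *req;
            int err = 0;

            mutex_lock(&dev->scan_mtx);

            if (dev->active_req) {          /* only one scheduled scan at a time */
                    err = -EINPROGRESS;
                    goto out;
            }

            req = kzalloc(req_size, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out;
            }

            dev->active_req = req;          /* success path also falls through */
    out:
            mutex_unlock(&dev->scan_mtx);
            return err;
    }
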
index 7a6c676..ae0c225 100644 (file)
@@ -100,14 +100,14 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
        rdev = container_of(wk, struct cfg80211_registered_device,
                            sched_scan_results_wk);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
 
        /* we don't have sched_scan_req anymore if the scan is stopping */
        if (rdev->sched_scan_req)
                nl80211_send_sched_scan_results(rdev,
                                                rdev->sched_scan_req->dev);
 
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_sched_scan_results(struct wiphy *wiphy)
@@ -123,9 +123,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
        __cfg80211_stop_sched_scan(rdev, true);
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
 
@@ -135,7 +135,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
        int err;
        struct net_device *dev;
 
-       ASSERT_RDEV_LOCK(rdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        if (!rdev->sched_scan_req)
                return 0;
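
Replacing ASSERT_RDEV_LOCK(rdev) with lockdep_assert_held(&rdev->sched_scan_mtx) both documents and, with CONFIG_PROVE_LOCKING enabled, enforces the new rule that scheduled-scan state is protected by sched_scan_mtx rather than the rdev mutex. A sketch of how an internal helper can state such a locking contract, again with hypothetical names:

    /* caller must hold dev->scan_mtx; lockdep verifies this on debug builds */
    static int __stop_sched_scan(struct my_dev *dev)
    {
            lockdep_assert_held(&dev->scan_mtx);

            if (!dev->active_req)
                    return 0;
            /* ... tell the driver to stop, then clear dev->active_req ... */
            return 0;
    }
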
index d70f85e..9414b9c 100644 (file)
@@ -1345,6 +1345,8 @@ out:
                        xfrm_state_check_expire(x1);
 
                err = 0;
+               x->km.state = XFRM_STATE_DEAD;
+               __xfrm_state_put(x);
        }
        spin_unlock_bh(&x1->lock);
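
The two added lines appear to close a reference leak in the state-update path: once the caller's temporary state x has been folded into the entry x1 already in the table, x is marked dead and its reference dropped instead of being left dangling. A generic kref-style sketch of that "merge, then release the temporary" shape, using illustrative types rather than the xfrm structures:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct tmp_state {
            struct kref ref;
            /* ... payload ... */
    };

    static void tmp_state_release(struct kref *ref)
    {
            kfree(container_of(ref, struct tmp_state, ref));
    }

    /* fold the caller's temporary into the existing entry, then make sure
     * the temporary's reference is dropped instead of leaking */
    static void update_existing(struct tmp_state *existing, struct tmp_state *tmp)
    {
            /* ... copy the updated fields from tmp into existing ... */
            kref_put(&tmp->ref, tmp_state_release);
    }
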
 
index 3b029cb..a272356 100755 (executable)
@@ -21,13 +21,15 @@ fi
 # older versions of depmod require the version string to start with three
 # numbers, so we cheat with a symlink here
 depmod_hack_needed=true
-mkdir -p .tmp_depmod/lib/modules/$KERNELRELEASE
-if "$DEPMOD" -b .tmp_depmod $KERNELRELEASE 2>/dev/null; then
-       if test -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep -o \
-               -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep.bin; then
+tmp_dir=$(mktemp -d ${TMPDIR:-/tmp}/depmod.XXXXXX)
+mkdir -p "$tmp_dir/lib/modules/$KERNELRELEASE"
+if "$DEPMOD" -b "$tmp_dir" $KERNELRELEASE 2>/dev/null; then
+       if test -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep" -o \
+               -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep.bin"; then
                depmod_hack_needed=false
        fi
 fi
+rm -rf "$tmp_dir"
 if $depmod_hack_needed; then
        symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE"
        ln -s "$KERNELRELEASE" "$symlink"