Merge branch 'modsplit-Oct31_2011' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Nov 2011 03:44:47 +0000 (19:44 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Nov 2011 03:44:47 +0000 (19:44 -0800)
* 'modsplit-Oct31_2011' of git://git.kernel.org/pub/scm/linux/kernel/git/paulg/linux: (230 commits)
  Revert "tracing: Include module.h in define_trace.h"
  irq: don't put module.h into irq.h for tracking irqgen modules.
  bluetooth: macroize two small inlines to avoid module.h
  ip_vs.h: fix implicit use of module_get/module_put from module.h
  nf_conntrack.h: fix up fallout from implicit moduleparam.h presence
  include: replace linux/module.h with "struct module" wherever possible
  include: convert various register fcns to macros to avoid include chaining
  crypto.h: remove unused crypto_tfm_alg_modname() inline
  uwb.h: fix implicit use of asm/page.h for PAGE_SIZE
  pm_runtime.h: explicitly requires notifier.h
  linux/dmaengine.h: fix implicit use of bitmap.h and asm/page.h
  miscdevice.h: fix up implicit use of lists and types
  stop_machine.h: fix implicit use of smp.h for smp_processor_id
  of: fix implicit use of errno.h in include/linux/of.h
  of_platform.h: delete needless include <linux/module.h>
  acpi: remove module.h include from platform/aclinux.h
  miscdevice.h: delete unnecessary inclusion of module.h
  device_cgroup.h: delete needless include <linux/module.h>
  net: sch_generic remove redundant use of <linux/module.h>
  net: inet_timewait_sock doesnt need <linux/module.h>
  ...

Fix up trivial conflicts (other header files, and removal of the ab3550 mfd driver) in
 - drivers/media/dvb/frontends/dibx000_common.c
 - drivers/media/video/{mt9m111.c,ov6650.c}
 - drivers/mfd/ab3550-core.c
 - include/linux/dmaengine.h

225 files changed:
1  2 
arch/arm/mach-exynos/dev-sysmmu.c
arch/arm/mach-imx/mach-mx31lilly.c
arch/arm/mach-imx/mach-mx31lite.c
arch/arm/mach-imx/mach-mx31moboard.c
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/board-sx1.c
arch/arm/mach-omap1/board-voiceblue.c
arch/arm/mach-omap2/board-omap3evm.c
arch/arm/mach-omap2/clockdomain.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/prcm.c
arch/arm/mach-omap2/voltage.c
arch/arm/mach-s3c2410/mach-h1940.c
arch/arm/mach-tegra/pcie.c
arch/arm/plat-samsung/dev-backlight.c
arch/arm/plat-samsung/platformdata.c
arch/microblaze/kernel/dma.c
arch/mips/kernel/cpu-probe.c
arch/powerpc/include/asm/machdep.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/dma.c
arch/powerpc/kernel/ibmebus.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/vio.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/mmu_context_hash64.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/tlb_nohash.c
arch/powerpc/platforms/ps3/system-bus.c
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/sysdev/cpm_common.c
arch/powerpc/sysdev/fsl_rio.c
arch/powerpc/xmon/xmon.c
arch/sh/kernel/topology.c
arch/sh/mm/init.c
arch/x86/crypto/aes_glue.c
arch/x86/kernel/cpu/mcheck/mce.c
block/ioctl.c
drivers/base/power/runtime.c
drivers/block/aoe/aoeblk.c
drivers/block/ps3vram.c
drivers/block/virtio_blk.c
drivers/char/ttyprintk.c
drivers/char/virtio_console.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/intel_mid_dma.c
drivers/dma/ipu/ipu_idmac.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-mxs.c
drivers/hid/hid-roccat.c
drivers/i2c/busses/i2c-sh7760.c
drivers/i2c/busses/i2c-tegra.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/qib/qib_driver.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/qib/qib_iba7220.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_fs.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/leds/leds-asic3.c
drivers/leds/leds-gpio.c
drivers/leds/leds-lm3530.c
drivers/md/dm-log-userspace-base.c
drivers/md/dm-raid.c
drivers/md/faulty.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/saa7146_core.c
drivers/media/common/saa7146_fops.c
drivers/media/common/saa7146_hlp.c
drivers/media/common/saa7146_video.c
drivers/media/dvb/frontends/dibx000_common.c
drivers/media/radio/radio-wl1273.c
drivers/media/radio/wl128x/fmdrv_v4l2.c
drivers/media/rc/ir-lirc-codec.c
drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
drivers/media/rc/rc-main.c
drivers/media/video/adp1653.c
drivers/media/video/cx25840/cx25840-ir.c
drivers/media/video/hexium_gemini.c
drivers/media/video/hexium_orion.c
drivers/media/video/imx074.c
drivers/media/video/m5mols/m5mols_core.c
drivers/media/video/mt9m001.c
drivers/media/video/mt9m111.c
drivers/media/video/mt9t031.c
drivers/media/video/mt9v022.c
drivers/media/video/mxb.c
drivers/media/video/noon010pc30.c
drivers/media/video/ov6650.c
drivers/media/video/pvrusb2/pvrusb2-hdw.c
drivers/media/video/pvrusb2/pvrusb2-v4l2.c
drivers/media/video/rj54n1cb0c.c
drivers/media/video/sh_mobile_csi2.c
drivers/media/video/sr030pc30.c
drivers/media/video/tvp7002.c
drivers/media/video/v4l2-ctrls.c
drivers/media/video/v4l2-device.c
drivers/media/video/v4l2-subdev.c
drivers/mfd/ab3100-core.c
drivers/mfd/asic3.c
drivers/mfd/max8997.c
drivers/mfd/twl-core.c
drivers/mfd/twl6030-irq.c
drivers/net/bonding/bond_procfs.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/pd.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/srq.c
drivers/platform/x86/wmi.c
drivers/regulator/88pm8607.c
drivers/regulator/core.c
drivers/regulator/db8500-prcmu.c
drivers/regulator/mc13783-regulator.c
drivers/regulator/tps6586x-regulator.c
drivers/s390/char/vmur.c
drivers/s390/cio/qdio_debug.c
drivers/s390/kvm/kvm_virtio.c
drivers/scsi/device_handler/scsi_dh.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/scsi_lib.c
drivers/staging/media/solo6x10/g723.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_alua.c
drivers/target/target_core_device.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/usb/core/driver.c
drivers/video/backlight/l4f00242t03.c
drivers/w1/w1_int.c
drivers/xen/xenbus/xenbus_client.c
fs/cifs/connect.c
fs/logfs/super.c
fs/nfs/nfs4filelayout.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/write.c
fs/nfsd/nfssvc.c
include/drm/drmP.h
include/linux/blkdev.h
include/linux/crypto.h
include/linux/device.h
include/linux/dmaengine.h
include/linux/gameport.h
include/linux/of.h
include/media/saa7146.h
include/net/inet_timewait_sock.h
include/net/ip_vs.h
include/net/sock.h
include/sound/core.h
include/xen/xenbus.h
kernel/cpu.c
kernel/cpuset.c
kernel/crash_dump.c
kernel/events/core.c
kernel/freezer.c
kernel/module.c
kernel/power/qos.c
kernel/stop_machine.c
kernel/sys.c
kernel/utsname_sysctl.c
mm/bounce.c
mm/highmem.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/mlock.c
mm/mmap.c
mm/oom_kill.c
mm/page-writeback.c
mm/rmap.c
mm/shmem.c
mm/swap.c
mm/swapfile.c
net/8021q/vlan_core.c
net/ipv4/udplite.c
net/ipv6/addrconf.c
net/ipv6/netfilter.c
net/ipv6/route.c
net/ipv6/udplite.c
sound/core/control.c
sound/core/hwdep.c
sound/pci/hda/hda_hwdep.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/patch_via.c
sound/pci/intel8x0.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/soc/samsung/ac97.c
sound/soc/samsung/dma.c

@@@ -12,6 -12,7 +12,7 @@@
  
  #include <linux/platform_device.h>
  #include <linux/dma-mapping.h>
+ #include <linux/export.h>
  
  #include <mach/map.h>
  #include <mach/irqs.h>
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/delay.h>
  #include <linux/platform_device.h>
  #include <linux/interrupt.h>
+ #include <linux/moduleparam.h>
  #include <linux/smsc911x.h>
  #include <linux/mtd/physmap.h>
  #include <linux/spi/spi.h>
@@@ -299,7 -300,6 +300,7 @@@ MACHINE_START(LILLY1131, "INCO startec 
        .map_io = mx31_map_io,
        .init_early = imx31_init_early,
        .init_irq = mx31_init_irq,
 +      .handle_irq = imx31_handle_irq,
        .timer = &mx31lilly_timer,
        .init_machine = mx31lilly_board_init,
  MACHINE_END
@@@ -21,6 -21,7 +21,7 @@@
  #include <linux/memory.h>
  #include <linux/platform_device.h>
  #include <linux/gpio.h>
+ #include <linux/moduleparam.h>
  #include <linux/smsc911x.h>
  #include <linux/mfd/mc13783.h>
  #include <linux/spi/spi.h>
@@@ -112,7 -113,8 +113,7 @@@ static const struct spi_imx_master spi1
  };
  
  static struct mc13xxx_platform_data mc13783_pdata __initdata = {
 -      .flags  = MC13XXX_USE_RTC |
 -                MC13XXX_USE_REGULATOR,
 +      .flags = MC13XXX_USE_RTC,
  };
  
  static struct spi_board_info mc13783_spi_dev __initdata = {
@@@ -283,7 -285,6 +284,7 @@@ MACHINE_START(MX31LITE, "LogicPD i.MX3
        .map_io = mx31lite_map_io,
        .init_early = imx31_init_early,
        .init_irq = mx31_init_irq,
 +      .handle_irq = imx31_handle_irq,
        .timer = &mx31lite_timer,
        .init_machine = mx31lite_init,
  MACHINE_END
@@@ -18,6 -18,7 +18,7 @@@
  #include <linux/gpio.h>
  #include <linux/init.h>
  #include <linux/interrupt.h>
+ #include <linux/moduleparam.h>
  #include <linux/leds.h>
  #include <linux/memory.h>
  #include <linux/mtd/physmap.h>
  #include <linux/spi/spi.h>
  #include <linux/types.h>
  #include <linux/memblock.h>
 +#include <linux/clk.h>
 +#include <linux/io.h>
 +#include <linux/err.h>
 +#include <linux/input.h>
  
  #include <linux/usb/otg.h>
  #include <linux/usb/ulpi.h>
@@@ -226,7 -223,7 +227,7 @@@ static struct mc13xxx_regulator_init_da
        },
  };
  
 -static struct mc13783_led_platform_data moboard_led[] = {
 +static struct mc13xxx_led_platform_data moboard_led[] = {
        {
                .id = MC13783_LED_R1,
                .name = "coreboard-led-4:red",
        },
  };
  
 -static struct mc13783_leds_platform_data moboard_leds = {
 +static struct mc13xxx_leds_platform_data moboard_leds = {
        .num_leds = ARRAY_SIZE(moboard_led),
        .led = moboard_led,
        .flags = MC13783_LED_SLEWLIMTC,
        .tc2_period = MC13783_LED_PERIOD_10MS,
  };
  
 +static struct mc13xxx_buttons_platform_data moboard_buttons = {
 +      .b1on_flags = MC13783_BUTTON_DBNC_750MS | MC13783_BUTTON_ENABLE |
 +                      MC13783_BUTTON_POL_INVERT,
 +      .b1on_key = KEY_POWER,
 +};
 +
  static struct mc13xxx_platform_data moboard_pmic = {
        .regulators = {
                .regulators = moboard_regulators,
                .num_regulators = ARRAY_SIZE(moboard_regulators),
        },
        .leds = &moboard_leds,
 -      .flags = MC13XXX_USE_REGULATOR | MC13XXX_USE_RTC |
 -              MC13XXX_USE_ADC | MC13XXX_USE_LED,
 +      .buttons = &moboard_buttons,
 +      .flags = MC13XXX_USE_RTC | MC13XXX_USE_ADC,
  };
  
  static struct spi_board_info moboard_spi_board_info[] __initdata = {
@@@ -500,18 -491,6 +501,18 @@@ err
  
  }
  
 +static void mx31moboard_poweroff(void)
 +{
 +      struct clk *clk = clk_get_sys("imx2-wdt.0", NULL);
 +
 +      if (!IS_ERR(clk))
 +              clk_enable(clk);
 +
 +      mxc_iomux_mode(MX31_PIN_WATCHDOG_RST__WATCHDOG_RST);
 +
 +      __raw_writew(1 << 6 | 1 << 2, MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
 +}
 +
  static int mx31moboard_baseboard;
  core_param(mx31moboard_baseboard, mx31moboard_baseboard, int, 0444);
  
@@@ -550,8 -529,6 +551,8 @@@ static void __init mx31moboard_init(voi
  
        moboard_usbh2_init();
  
 +      pm_power_off = mx31moboard_poweroff;
 +
        switch (mx31moboard_baseboard) {
        case MX31NOBOARD:
                break;
@@@ -596,7 -573,6 +597,7 @@@ MACHINE_START(MX31MOBOARD, "EPFL Mobot
        .map_io = mx31_map_io,
        .init_early = imx31_init_early,
        .init_irq = mx31_init_irq,
 +      .handle_irq = imx31_handle_irq,
        .timer = &mx31moboard_timer,
        .init_machine = mx31moboard_init,
  MACHINE_END
@@@ -19,6 -19,7 +19,7 @@@
  #include <linux/leds.h>
  #include <linux/platform_device.h>
  #include <linux/serial_8250.h>
+ #include <linux/export.h>
  
  #include <media/soc_camera.h>
  
@@@ -134,6 -135,12 +135,6 @@@ void ams_delta_latch2_write(u16 mask, u
        *(volatile __u16 *) AMS_DELTA_LATCH2_VIRT = ams_delta_latch2_reg;
  }
  
 -static void __init ams_delta_init_irq(void)
 -{
 -      omap1_init_common_hw();
 -      omap1_init_irq();
 -}
 -
  static struct map_desc ams_delta_io_desc[] __initdata = {
        /* AMS_DELTA_LATCH1 */
        {
@@@ -372,13 -379,17 +373,13 @@@ static int __init ams_delta_modem_init(
  }
  arch_initcall(ams_delta_modem_init);
  
 -static void __init ams_delta_map_io(void)
 -{
 -      omap1_map_common_io();
 -}
 -
  MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
        /* Maintainer: Jonathan McDowell <noodles@earth.li> */
        .atag_offset    = 0x100,
 -      .map_io         = ams_delta_map_io,
 +      .map_io         = omap15xx_map_io,
 +      .init_early     = omap1_init_early,
        .reserve        = omap_reserve,
 -      .init_irq       = ams_delta_init_irq,
 +      .init_irq       = omap1_init_irq,
        .init_machine   = ams_delta_init,
        .timer          = &omap1_timer,
  MACHINE_END
@@@ -26,6 -26,7 +26,7 @@@
  #include <linux/types.h>
  #include <linux/i2c.h>
  #include <linux/errno.h>
+ #include <linux/export.h>
  
  #include <mach/hardware.h>
  #include <asm/mach-types.h>
@@@ -406,13 -407,24 +407,13 @@@ static void __init omap_sx1_init(void
        gpio_direction_output(11, 0);   /*A_SWITCH = 0 */
        gpio_direction_output(15, 0);   /*A_USB_ON = 0 */
  }
 -/*----------------------------------------*/
 -static void __init omap_sx1_init_irq(void)
 -{
 -      omap1_init_common_hw();
 -      omap1_init_irq();
 -}
 -/*----------------------------------------*/
 -
 -static void __init omap_sx1_map_io(void)
 -{
 -      omap1_map_common_io();
 -}
  
  MACHINE_START(SX1, "OMAP310 based Siemens SX1")
        .atag_offset    = 0x100,
 -      .map_io         = omap_sx1_map_io,
 +      .map_io         = omap15xx_map_io,
 +      .init_early     = omap1_init_early,
        .reserve        = omap_reserve,
 -      .init_irq       = omap_sx1_init_irq,
 +      .init_irq       = omap1_init_irq,
        .init_machine   = omap_sx1_init,
        .timer          = &omap1_timer,
  MACHINE_END
@@@ -25,6 -25,7 +25,7 @@@
  #include <linux/serial_8250.h>
  #include <linux/serial_reg.h>
  #include <linux/smc91x.h>
+ #include <linux/export.h>
  
  #include <mach/hardware.h>
  #include <mach/system.h>
@@@ -159,6 -160,17 +160,6 @@@ static struct omap_usb_config voiceblue
  static struct omap_board_config_kernel voiceblue_config[] = {
  };
  
 -static void __init voiceblue_init_irq(void)
 -{
 -      omap1_init_common_hw();
 -      omap1_init_irq();
 -}
 -
 -static void __init voiceblue_map_io(void)
 -{
 -      omap1_map_common_io();
 -}
 -
  #define MACHINE_PANICED               1
  #define MACHINE_REBOOTING     2
  #define MACHINE_REBOOT                4
@@@ -291,10 -303,9 +292,10 @@@ static void __init voiceblue_init(void
  MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
        /* Maintainer: Ladislav Michl <michl@2n.cz> */
        .atag_offset    = 0x100,
 -      .map_io         = voiceblue_map_io,
 +      .map_io         = omap15xx_map_io,
 +      .init_early     = omap1_init_early,
        .reserve        = omap_reserve,
 -      .init_irq       = voiceblue_init_irq,
 +      .init_irq       = omap1_init_irq,
        .init_machine   = voiceblue_init,
        .timer          = &omap1_timer,
  MACHINE_END
@@@ -34,6 -34,7 +34,7 @@@
  #include <linux/regulator/fixed.h>
  #include <linux/regulator/machine.h>
  #include <linux/mmc/host.h>
+ #include <linux/export.h>
  
  #include <mach/hardware.h>
  #include <asm/mach-types.h>
@@@ -519,6 -520,12 +520,6 @@@ static int __init omap3_evm_i2c_init(vo
  static struct omap_board_config_kernel omap3_evm_config[] __initdata = {
  };
  
 -static void __init omap3_evm_init_early(void)
 -{
 -      omap2_init_common_infrastructure();
 -      omap2_init_common_devices(mt46h32m32lf6_sdrc_params, NULL);
 -}
 -
  static struct usbhs_omap_board_data usbhs_bdata __initdata = {
  
        .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
@@@ -633,7 -640,6 +634,7 @@@ static void __init omap3_evm_init(void
        omap_display_init(&omap3_evm_dss_data);
  
        omap_serial_init();
 +      omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL);
  
        /* OMAP3EVM uses ISP1504 phy and so register nop transceiver */
        usb_nop_xceiv_register();
@@@ -678,7 -684,7 +679,7 @@@ MACHINE_START(OMAP3EVM, "OMAP3 EVM"
        .atag_offset    = 0x100,
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
 -      .init_early     = omap3_evm_init_early,
 +      .init_early     = omap35xx_init_early,
        .init_irq       = omap3_init_irq,
        .init_machine   = omap3_evm_init,
        .timer          = &omap3_timer,
@@@ -17,6 -17,7 +17,7 @@@
  #include <linux/device.h>
  #include <linux/list.h>
  #include <linux/errno.h>
+ #include <linux/string.h>
  #include <linux/delay.h>
  #include <linux/clk.h>
  #include <linux/limits.h>
@@@ -73,6 -74,9 +74,6 @@@ static int _clkdm_register(struct clock
        if (!clkdm || !clkdm->name)
                return -EINVAL;
  
 -      if (!omap_chip_is(clkdm->omap_chip))
 -              return -EINVAL;
 -
        pwrdm = pwrdm_lookup(clkdm->pwrdm.name);
        if (!pwrdm) {
                pr_err("clockdomain: %s: powerdomain %s does not exist\n",
@@@ -102,10 -106,13 +103,10 @@@ static struct clkdm_dep *_clkdm_deps_lo
  {
        struct clkdm_dep *cd;
  
 -      if (!clkdm || !deps || !omap_chip_is(clkdm->omap_chip))
 +      if (!clkdm || !deps)
                return ERR_PTR(-EINVAL);
  
        for (cd = deps; cd->clkdm_name; cd++) {
 -              if (!omap_chip_is(cd->omap_chip))
 -                      continue;
 -
                if (!cd->clkdm && cd->clkdm_name)
                        cd->clkdm = _clkdm_lookup(cd->clkdm_name);
  
@@@ -142,6 -149,9 +143,6 @@@ static void _autodep_lookup(struct clkd
        if (!autodep)
                return;
  
 -      if (!omap_chip_is(autodep->omap_chip))
 -              return;
 -
        clkdm = clkdm_lookup(autodep->clkdm.name);
        if (!clkdm) {
                pr_err("clockdomain: autodeps: clockdomain %s does not exist\n",
@@@ -173,6 -183,9 +174,6 @@@ void _clkdm_add_autodeps(struct clockdo
                if (IS_ERR(autodep->clkdm.ptr))
                        continue;
  
 -              if (!omap_chip_is(autodep->omap_chip))
 -                      continue;
 -
                pr_debug("clockdomain: adding %s sleepdep/wkdep for "
                         "clkdm %s\n", autodep->clkdm.ptr->name,
                         clkdm->name);
@@@ -204,6 -217,9 +205,6 @@@ void _clkdm_del_autodeps(struct clockdo
                if (IS_ERR(autodep->clkdm.ptr))
                        continue;
  
 -              if (!omap_chip_is(autodep->omap_chip))
 -                      continue;
 -
                pr_debug("clockdomain: removing %s sleepdep/wkdep for "
                         "clkdm %s\n", autodep->clkdm.ptr->name,
                         clkdm->name);
@@@ -228,6 -244,8 +229,6 @@@ static void _resolve_clkdm_deps(struct 
        struct clkdm_dep *cd;
  
        for (cd = clkdm_deps; cd && cd->clkdm_name; cd++) {
 -              if (!omap_chip_is(cd->omap_chip))
 -                      continue;
                if (cd->clkdm)
                        continue;
                cd->clkdm = _clkdm_lookup(cd->clkdm_name);
  /* Public functions */
  
  /**
 - * clkdm_init - set up the clockdomain layer
 - * @clkdms: optional pointer to an array of clockdomains to register
 - * @init_autodeps: optional pointer to an array of autodeps to register
 - * @custom_funcs: func pointers for arch specific implementations
 + * clkdm_register_platform_funcs - register clockdomain implementation fns
 + * @co: func pointers for arch specific implementations
   *
 - * Set up internal state.  If a pointer to an array of clockdomains
 - * @clkdms was supplied, loop through the list of clockdomains,
 - * register all that are available on the current platform. Similarly,
 - * if a pointer to an array of clockdomain autodependencies
 - * @init_autodeps was provided, register those.  No return value.
 + * Register the list of function pointers used to implement the
 + * clockdomain functions on different OMAP SoCs.  Should be called
 + * before any other clkdm_register*() function.  Returns -EINVAL if
 + * @co is null, -EEXIST if platform functions have already been
 + * registered, or 0 upon success.
   */
 -void clkdm_init(struct clockdomain **clkdms,
 -              struct clkdm_autodep *init_autodeps,
 -              struct clkdm_ops *custom_funcs)
 +int clkdm_register_platform_funcs(struct clkdm_ops *co)
 +{
 +      if (!co)
 +              return -EINVAL;
 +
 +      if (arch_clkdm)
 +              return -EEXIST;
 +
 +      arch_clkdm = co;
 +
 +      return 0;
 +};
 +
 +/**
 + * clkdm_register_clkdms - register SoC clockdomains
 + * @cs: pointer to an array of struct clockdomain to register
 + *
 + * Register the clockdomains available on a particular OMAP SoC.  Must
 + * be called after clkdm_register_platform_funcs().  May be called
 + * multiple times.  Returns -EACCES if called before
 + * clkdm_register_platform_funcs(); -EINVAL if the argument @cs is
 + * null; or 0 upon success.
 + */
 +int clkdm_register_clkdms(struct clockdomain **cs)
  {
        struct clockdomain **c = NULL;
 -      struct clockdomain *clkdm;
 -      struct clkdm_autodep *autodep = NULL;
  
 -      if (!custom_funcs)
 -              WARN(1, "No custom clkdm functions registered\n");
 -      else
 -              arch_clkdm = custom_funcs;
 +      if (!arch_clkdm)
 +              return -EACCES;
 +
 +      if (!cs)
 +              return -EINVAL;
  
 -      if (clkdms)
 -              for (c = clkdms; *c; c++)
 -                      _clkdm_register(*c);
 +      for (c = cs; *c; c++)
 +              _clkdm_register(*c);
 +
 +      return 0;
 +}
 +
 +/**
 + * clkdm_register_autodeps - register autodeps (if required)
 + * @ia: pointer to a static array of struct clkdm_autodep to register
 + *
 + * Register clockdomain "automatic dependencies."  These are
 + * clockdomain wakeup and sleep dependencies that are automatically
 + * added whenever the first clock inside a clockdomain is enabled, and
 + * removed whenever the last clock inside a clockdomain is disabled.
 + * These are currently only used on OMAP3 devices, and are deprecated,
 + * since they waste energy.  However, until the OMAP2/3 IP block
 + * enable/disable sequence can be converted to match the OMAP4
 + * sequence, they are needed.
 + *
 + * Must be called only after all of the SoC clockdomains are
 + * registered, since the function will resolve autodep clockdomain
 + * names into clockdomain pointers.
 + *
 + * The struct clkdm_autodep @ia array must be static, as this function
 + * does not copy the array elements.
 + *
 + * Returns -EACCES if called before any clockdomains have been
 + * registered, -EINVAL if called with a null @ia argument, -EEXIST if
 + * autodeps have already been registered, or 0 upon success.
 + */
 +int clkdm_register_autodeps(struct clkdm_autodep *ia)
 +{
 +      struct clkdm_autodep *a = NULL;
 +
 +      if (list_empty(&clkdm_list))
 +              return -EACCES;
 +
 +      if (!ia)
 +              return -EINVAL;
  
 -      autodeps = init_autodeps;
        if (autodeps)
 -              for (autodep = autodeps; autodep->clkdm.ptr; autodep++)
 -                      _autodep_lookup(autodep);
 +              return -EEXIST;
 +
 +      autodeps = ia;
 +      for (a = autodeps; a->clkdm.ptr; a++)
 +              _autodep_lookup(a);
 +
 +      return 0;
 +}
 +
 +/**
 + * clkdm_complete_init - set up the clockdomain layer
 + *
 + * Put all clockdomains into software-supervised mode; PM code should
 + * later enable hardware-supervised mode as appropriate.  Must be
 + * called after clkdm_register_clkdms().  Returns -EACCES if called
 + * before clkdm_register_clkdms(), or 0 upon success.
 + */
 +int clkdm_complete_init(void)
 +{
 +      struct clockdomain *clkdm;
 +
 +      if (list_empty(&clkdm_list))
 +              return -EACCES;
  
 -      /*
 -       * Put all clockdomains into software-supervised mode; PM code
 -       * should later enable hardware-supervised mode as appropriate
 -       */
        list_for_each_entry(clkdm, &clkdm_list, node) {
                if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
                        clkdm_wakeup(clkdm);
                _resolve_clkdm_deps(clkdm, clkdm->sleepdep_srcs);
                clkdm_clear_all_sleepdeps(clkdm);
        }
 +
 +      return 0;
  }
  
  /**
@@@ -15,6 -15,7 +15,7 @@@
   * GNU General Public License for more details.
   */
  
+ #include <linux/string.h>
  #include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/platform_device.h>
@@@ -37,6 -38,14 +38,6 @@@ static struct platform_device omap_disp
        },
  };
  
 -static struct omap_device_pm_latency omap_dss_latency[] = {
 -      [0] = {
 -              .deactivate_func        = omap_device_idle_hwmods,
 -              .activate_func          = omap_device_enable_hwmods,
 -              .flags                  = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
 -      },
 -};
 -
  struct omap_dss_hwmod_data {
        const char *oh_name;
        const char *dev_name;
@@@ -119,7 -128,7 +120,7 @@@ int __init omap_display_init(struct oma
  {
        int r = 0;
        struct omap_hwmod *oh;
 -      struct omap_device *od;
 +      struct platform_device *pdev;
        int i, oh_count;
        struct omap_display_platform_data pdata;
        const struct omap_dss_hwmod_data *curr_dss_hwmod;
                        return -ENODEV;
                }
  
 -              od = omap_device_build(curr_dss_hwmod[i].dev_name,
 +              pdev = omap_device_build(curr_dss_hwmod[i].dev_name,
                                curr_dss_hwmod[i].id, oh, &pdata,
                                sizeof(struct omap_display_platform_data),
 -                              omap_dss_latency,
 -                              ARRAY_SIZE(omap_dss_latency), 0);
 +                              NULL, 0, 0);
  
 -              if (WARN((IS_ERR(od)), "Could not build omap_device for %s\n",
 +              if (WARN((IS_ERR(pdev)), "Could not build omap_device for %s\n",
                                curr_dss_hwmod[i].oh_name))
                        return -ENODEV;
        }
diff --combined arch/arm/mach-omap2/pm.c
@@@ -14,6 -14,7 +14,7 @@@
  #include <linux/io.h>
  #include <linux/err.h>
  #include <linux/opp.h>
+ #include <linux/export.h>
  
  #include <plat/omap-pm.h>
  #include <plat/omap_device.h>
  
  static struct omap_device_pm_latency *pm_lats;
  
 -static struct device *mpu_dev;
 -static struct device *iva_dev;
 -static struct device *l3_dev;
 -static struct device *dsp_dev;
 -
 -struct device *omap2_get_mpuss_device(void)
 -{
 -      WARN_ON_ONCE(!mpu_dev);
 -      return mpu_dev;
 -}
 -
 -struct device *omap2_get_iva_device(void)
 -{
 -      WARN_ON_ONCE(!iva_dev);
 -      return iva_dev;
 -}
 -
 -struct device *omap2_get_l3_device(void)
 -{
 -      WARN_ON_ONCE(!l3_dev);
 -      return l3_dev;
 -}
 -
 -struct device *omap4_get_dsp_device(void)
 -{
 -      WARN_ON_ONCE(!dsp_dev);
 -      return dsp_dev;
 -}
 -EXPORT_SYMBOL(omap4_get_dsp_device);
 -
 -/* static int _init_omap_device(struct omap_hwmod *oh, void *user) */
 -static int _init_omap_device(char *name, struct device **new_dev)
 +static int _init_omap_device(char *name)
  {
        struct omap_hwmod *oh;
 -      struct omap_device *od;
 +      struct platform_device *pdev;
  
        oh = omap_hwmod_lookup(name);
        if (WARN(!oh, "%s: could not find omap_hwmod for %s\n",
                 __func__, name))
                return -ENODEV;
  
 -      od = omap_device_build(oh->name, 0, oh, NULL, 0, pm_lats, 0, false);
 -      if (WARN(IS_ERR(od), "%s: could not build omap_device for %s\n",
 +      pdev = omap_device_build(oh->name, 0, oh, NULL, 0, pm_lats, 0, false);
 +      if (WARN(IS_ERR(pdev), "%s: could not build omap_device for %s\n",
                 __func__, name))
                return -ENODEV;
  
 -      *new_dev = &od->pdev.dev;
 -
        return 0;
  }
  
   */
  static void omap2_init_processor_devices(void)
  {
 -      _init_omap_device("mpu", &mpu_dev);
 +      _init_omap_device("mpu");
        if (omap3_has_iva())
 -              _init_omap_device("iva", &iva_dev);
 +              _init_omap_device("iva");
  
        if (cpu_is_omap44xx()) {
 -              _init_omap_device("l3_main_1", &l3_dev);
 -              _init_omap_device("dsp", &dsp_dev);
 -              _init_omap_device("iva", &iva_dev);
 +              _init_omap_device("l3_main_1");
 +              _init_omap_device("dsp");
 +              _init_omap_device("iva");
        } else {
 -              _init_omap_device("l3_main", &l3_dev);
 +              _init_omap_device("l3_main");
        }
  }
  
@@@ -103,8 -137,8 +104,8 @@@ int omap_set_pwrdm_state(struct powerdo
  
        ret = pwrdm_set_next_pwrst(pwrdm, state);
        if (ret) {
 -              printk(KERN_ERR "Unable to set state of powerdomain: %s\n",
 -                     pwrdm->name);
 +              pr_err("%s: unable to set state of powerdomain: %s\n",
 +                     __func__, pwrdm->name);
                goto err;
        }
  
@@@ -128,44 -162,37 +129,44 @@@ err
  }
  
  /*
 - * This API is to be called during init to put the various voltage
 + * This API is to be called during init to set the various voltage
   * domains to the voltage as per the opp table. Typically we boot up
   * at the nominal voltage. So this function finds out the rate of
   * the clock associated with the voltage domain, finds out the correct
 - * opp entry and puts the voltage domain to the voltage specifies
 + * opp entry and sets the voltage domain to the voltage specified
   * in the opp entry
   */
  static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
 -                                              struct device *dev)
 +                                       const char *oh_name)
  {
        struct voltagedomain *voltdm;
        struct clk *clk;
        struct opp *opp;
        unsigned long freq, bootup_volt;
 +      struct device *dev;
 +
 +      if (!vdd_name || !clk_name || !oh_name) {
 +              pr_err("%s: invalid parameters\n", __func__);
 +              goto exit;
 +      }
  
 -      if (!vdd_name || !clk_name || !dev) {
 -              printk(KERN_ERR "%s: Invalid parameters!\n", __func__);
 +      dev = omap_device_get_by_hwmod_name(oh_name);
 +      if (IS_ERR(dev)) {
 +              pr_err("%s: Unable to get dev pointer for hwmod %s\n",
 +                      __func__, oh_name);
                goto exit;
        }
  
 -      voltdm = omap_voltage_domain_lookup(vdd_name);
 +      voltdm = voltdm_lookup(vdd_name);
        if (IS_ERR(voltdm)) {
 -              printk(KERN_ERR "%s: Unable to get vdd pointer for vdd_%s\n",
 +              pr_err("%s: unable to get vdd pointer for vdd_%s\n",
                        __func__, vdd_name);
                goto exit;
        }
  
        clk =  clk_get(NULL, clk_name);
        if (IS_ERR(clk)) {
 -              printk(KERN_ERR "%s: unable to get clk %s\n",
 -                      __func__, clk_name);
 +              pr_err("%s: unable to get clk %s\n", __func__, clk_name);
                goto exit;
        }
  
  
        opp = opp_find_freq_ceil(dev, &freq);
        if (IS_ERR(opp)) {
 -              printk(KERN_ERR "%s: unable to find boot up OPP for vdd_%s\n",
 +              pr_err("%s: unable to find boot up OPP for vdd_%s\n",
                        __func__, vdd_name);
                goto exit;
        }
  
        bootup_volt = opp_get_voltage(opp);
        if (!bootup_volt) {
 -              printk(KERN_ERR "%s: unable to find voltage corresponding"
 +              pr_err("%s: unable to find voltage corresponding "
                        "to the bootup OPP for vdd_%s\n", __func__, vdd_name);
                goto exit;
        }
  
 -      omap_voltage_scale_vdd(voltdm, bootup_volt);
 +      voltdm_scale(voltdm, bootup_volt);
        return 0;
  
  exit:
 -      printk(KERN_ERR "%s: Unable to put vdd_%s to its init voltage\n\n",
 -              __func__, vdd_name);
 +      pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
        return -EINVAL;
  }
  
@@@ -199,8 -227,8 +200,8 @@@ static void __init omap3_init_voltages(
        if (!cpu_is_omap34xx())
                return;
  
 -      omap2_set_init_voltage("mpu", "dpll1_ck", mpu_dev);
 -      omap2_set_init_voltage("core", "l3_ick", l3_dev);
 +      omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
 +      omap2_set_init_voltage("core", "l3_ick", "l3_main");
  }
  
  static void __init omap4_init_voltages(void)
        if (!cpu_is_omap44xx())
                return;
  
 -      omap2_set_init_voltage("mpu", "dpll_mpu_ck", mpu_dev);
 -      omap2_set_init_voltage("core", "l3_div_ck", l3_dev);
 -      omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", iva_dev);
 +      omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu");
 +      omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1");
 +      omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
  }
  
  static int __init omap2_common_pm_init(void)
  {
 -      omap2_init_processor_devices();
 +      if (!of_have_populated_dt())
 +              omap2_init_processor_devices();
        omap_pm_if_init();
  
        return 0;
@@@ -23,6 -23,7 +23,7 @@@
  #include <linux/clk.h>
  #include <linux/io.h>
  #include <linux/delay.h>
+ #include <linux/export.h>
  
  #include <mach/system.h>
  #include <plat/common.h>
@@@ -151,10 -152,17 +152,10 @@@ int omap2_cm_wait_idlest(void __iomem *
  
  void __init omap2_set_globals_prcm(struct omap_globals *omap2_globals)
  {
 -      /* Static mapping, never released */
 -      if (omap2_globals->prm) {
 -              prm_base = ioremap(omap2_globals->prm, SZ_8K);
 -              WARN_ON(!prm_base);
 -      }
 -      if (omap2_globals->cm) {
 -              cm_base = ioremap(omap2_globals->cm, SZ_8K);
 -              WARN_ON(!cm_base);
 -      }
 -      if (omap2_globals->cm2) {
 -              cm2_base = ioremap(omap2_globals->cm2, SZ_8K);
 -              WARN_ON(!cm2_base);
 -      }
 +      if (omap2_globals->prm)
 +              prm_base = omap2_globals->prm;
 +      if (omap2_globals->cm)
 +              cm_base = omap2_globals->cm;
 +      if (omap2_globals->cm2)
 +              cm2_base = omap2_globals->cm2;
  }
  
  #include <linux/delay.h>
  #include <linux/io.h>
 -#include <linux/clk.h>
  #include <linux/err.h>
+ #include <linux/export.h>
  #include <linux/debugfs.h>
  #include <linux/slab.h>
 +#include <linux/clk.h>
  
  #include <plat/common.h>
  
  #include "control.h"
  
  #include "voltage.h"
 +#include "powerdomain.h"
  
  #include "vc.h"
  #include "vp.h"
  
 -#define VOLTAGE_DIR_SIZE      16
 -
 -
 -static struct omap_vdd_info **vdd_info;
 -
 -/*
 - * Number of scalable voltage domains.
 - */
 -static int nr_scalable_vdd;
 -
 -/* XXX document */
 -static s16 prm_mod_offs;
 -static s16 prm_irqst_ocp_mod_offs;
 -
 -static struct dentry *voltage_dir;
 -
 -/* Init function pointers */
 -static int vp_forceupdate_scale_voltage(struct omap_vdd_info *vdd,
 -                                      unsigned long target_volt);
 -
 -static u32 omap3_voltage_read_reg(u16 mod, u8 offset)
 -{
 -      return omap2_prm_read_mod_reg(mod, offset);
 -}
 -
 -static void omap3_voltage_write_reg(u32 val, u16 mod, u8 offset)
 -{
 -      omap2_prm_write_mod_reg(val, mod, offset);
 -}
 -
 -static u32 omap4_voltage_read_reg(u16 mod, u8 offset)
 -{
 -      return omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
 -                                      mod, offset);
 -}
 -
 -static void omap4_voltage_write_reg(u32 val, u16 mod, u8 offset)
 -{
 -      omap4_prminst_write_inst_reg(val, OMAP4430_PRM_PARTITION, mod, offset);
 -}
 -
 -static int __init _config_common_vdd_data(struct omap_vdd_info *vdd)
 -{
 -      char *sys_ck_name;
 -      struct clk *sys_ck;
 -      u32 sys_clk_speed, timeout_val, waittime;
 -
 -      /*
 -       * XXX Clockfw should handle this, or this should be in a
 -       * struct record
 -       */
 -      if (cpu_is_omap24xx() || cpu_is_omap34xx())
 -              sys_ck_name = "sys_ck";
 -      else if (cpu_is_omap44xx())
 -              sys_ck_name = "sys_clkin_ck";
 -      else
 -              return -EINVAL;
 -
 -      /*
 -       * Sys clk rate is require to calculate vp timeout value and
 -       * smpswaittimemin and smpswaittimemax.
 -       */
 -      sys_ck = clk_get(NULL, sys_ck_name);
 -      if (IS_ERR(sys_ck)) {
 -              pr_warning("%s: Could not get the sys clk to calculate"
 -                      "various vdd_%s params\n", __func__, vdd->voltdm.name);
 -              return -EINVAL;
 -      }
 -      sys_clk_speed = clk_get_rate(sys_ck);
 -      clk_put(sys_ck);
 -      /* Divide to avoid overflow */
 -      sys_clk_speed /= 1000;
 -
 -      /* Generic voltage parameters */
 -      vdd->volt_scale = vp_forceupdate_scale_voltage;
 -      vdd->vp_enabled = false;
 -
 -      vdd->vp_rt_data.vpconfig_erroroffset =
 -              (vdd->pmic_info->vp_erroroffset <<
 -               vdd->vp_data->vp_common->vpconfig_erroroffset_shift);
 -
 -      timeout_val = (sys_clk_speed * vdd->pmic_info->vp_timeout_us) / 1000;
 -      vdd->vp_rt_data.vlimitto_timeout = timeout_val;
 -      vdd->vp_rt_data.vlimitto_vddmin = vdd->pmic_info->vp_vddmin;
 -      vdd->vp_rt_data.vlimitto_vddmax = vdd->pmic_info->vp_vddmax;
 -
 -      waittime = ((vdd->pmic_info->step_size / vdd->pmic_info->slew_rate) *
 -                              sys_clk_speed) / 1000;
 -      vdd->vp_rt_data.vstepmin_smpswaittimemin = waittime;
 -      vdd->vp_rt_data.vstepmax_smpswaittimemax = waittime;
 -      vdd->vp_rt_data.vstepmin_stepmin = vdd->pmic_info->vp_vstepmin;
 -      vdd->vp_rt_data.vstepmax_stepmax = vdd->pmic_info->vp_vstepmax;
 -
 -      return 0;
 -}
 -
 -/* Voltage debugfs support */
 -static int vp_volt_debug_get(void *data, u64 *val)
 -{
 -      struct omap_vdd_info *vdd = (struct omap_vdd_info *) data;
 -      u8 vsel;
 -
 -      if (!vdd) {
 -              pr_warning("Wrong paramater passed\n");
 -              return -EINVAL;
 -      }
 -
 -      vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage);
 -
 -      if (!vdd->pmic_info->vsel_to_uv) {
 -              pr_warning("PMIC function to convert vsel to voltage"
 -                      "in uV not registerd\n");
 -              return -EINVAL;
 -      }
 -
 -      *val = vdd->pmic_info->vsel_to_uv(vsel);
 -      return 0;
 -}
 -
 -static int nom_volt_debug_get(void *data, u64 *val)
 -{
 -      struct omap_vdd_info *vdd = (struct omap_vdd_info *) data;
 -
 -      if (!vdd) {
 -              pr_warning("Wrong paramater passed\n");
 -              return -EINVAL;
 -      }
 -
 -      *val = omap_voltage_get_nom_volt(&vdd->voltdm);
 -
 -      return 0;
 -}
 -
 -DEFINE_SIMPLE_ATTRIBUTE(vp_volt_debug_fops, vp_volt_debug_get, NULL, "%llu\n");
 -DEFINE_SIMPLE_ATTRIBUTE(nom_volt_debug_fops, nom_volt_debug_get, NULL,
 -                                                              "%llu\n");
 -static void vp_latch_vsel(struct omap_vdd_info *vdd)
 -{
 -      u32 vpconfig;
 -      unsigned long uvdc;
 -      char vsel;
 -
 -      uvdc = omap_voltage_get_nom_volt(&vdd->voltdm);
 -      if (!uvdc) {
 -              pr_warning("%s: unable to find current voltage for vdd_%s\n",
 -                      __func__, vdd->voltdm.name);
 -              return;
 -      }
 -
 -      if (!vdd->pmic_info || !vdd->pmic_info->uv_to_vsel) {
 -              pr_warning("%s: PMIC function to convert voltage in uV to"
 -                      " vsel not registered\n", __func__);
 -              return;
 -      }
 -
 -      vsel = vdd->pmic_info->uv_to_vsel(uvdc);
 -
 -      vpconfig = vdd->read_reg(prm_mod_offs, vdd->vp_data->vpconfig);
 -      vpconfig &= ~(vdd->vp_data->vp_common->vpconfig_initvoltage_mask |
 -                      vdd->vp_data->vp_common->vpconfig_initvdd);
 -      vpconfig |= vsel << vdd->vp_data->vp_common->vpconfig_initvoltage_shift;
 -
 -      vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
 -
 -      /* Trigger initVDD value copy to voltage processor */
 -      vdd->write_reg((vpconfig | vdd->vp_data->vp_common->vpconfig_initvdd),
 -                     prm_mod_offs, vdd->vp_data->vpconfig);
 -
 -      /* Clear initVDD copy trigger bit */
 -      vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
 -}
 -
 -/* Generic voltage init functions */
 -static void __init vp_init(struct omap_vdd_info *vdd)
 -{
 -      u32 vp_val;
 -
 -      if (!vdd->read_reg || !vdd->write_reg) {
 -              pr_err("%s: No read/write API for accessing vdd_%s regs\n",
 -                      __func__, vdd->voltdm.name);
 -              return;
 -      }
 -
 -      vp_val = vdd->vp_rt_data.vpconfig_erroroffset |
 -              (vdd->vp_rt_data.vpconfig_errorgain <<
 -              vdd->vp_data->vp_common->vpconfig_errorgain_shift) |
 -              vdd->vp_data->vp_common->vpconfig_timeouten;
 -      vdd->write_reg(vp_val, prm_mod_offs, vdd->vp_data->vpconfig);
 -
 -      vp_val = ((vdd->vp_rt_data.vstepmin_smpswaittimemin <<
 -              vdd->vp_data->vp_common->vstepmin_smpswaittimemin_shift) |
 -              (vdd->vp_rt_data.vstepmin_stepmin <<
 -              vdd->vp_data->vp_common->vstepmin_stepmin_shift));
 -      vdd->write_reg(vp_val, prm_mod_offs, vdd->vp_data->vstepmin);
 -
 -      vp_val = ((vdd->vp_rt_data.vstepmax_smpswaittimemax <<
 -              vdd->vp_data->vp_common->vstepmax_smpswaittimemax_shift) |
 -              (vdd->vp_rt_data.vstepmax_stepmax <<
 -              vdd->vp_data->vp_common->vstepmax_stepmax_shift));
 -      vdd->write_reg(vp_val, prm_mod_offs, vdd->vp_data->vstepmax);
 -
 -      vp_val = ((vdd->vp_rt_data.vlimitto_vddmax <<
 -              vdd->vp_data->vp_common->vlimitto_vddmax_shift) |
 -              (vdd->vp_rt_data.vlimitto_vddmin <<
 -              vdd->vp_data->vp_common->vlimitto_vddmin_shift) |
 -              (vdd->vp_rt_data.vlimitto_timeout <<
 -              vdd->vp_data->vp_common->vlimitto_timeout_shift));
 -      vdd->write_reg(vp_val, prm_mod_offs, vdd->vp_data->vlimitto);
 -}
 -
 -static void __init vdd_debugfs_init(struct omap_vdd_info *vdd)
 -{
 -      char *name;
 -
 -      name = kzalloc(VOLTAGE_DIR_SIZE, GFP_KERNEL);
 -      if (!name) {
 -              pr_warning("%s: Unable to allocate memory for debugfs"
 -                      " directory name for vdd_%s",
 -                      __func__, vdd->voltdm.name);
 -              return;
 -      }
 -      strcpy(name, "vdd_");
 -      strcat(name, vdd->voltdm.name);
 -
 -      vdd->debug_dir = debugfs_create_dir(name, voltage_dir);
 -      kfree(name);
 -      if (IS_ERR(vdd->debug_dir)) {
 -              pr_warning("%s: Unable to create debugfs directory for"
 -                      " vdd_%s\n", __func__, vdd->voltdm.name);
 -              vdd->debug_dir = NULL;
 -              return;
 -      }
 -
 -      (void) debugfs_create_x16("vp_errorgain", S_IRUGO, vdd->debug_dir,
 -                              &(vdd->vp_rt_data.vpconfig_errorgain));
 -      (void) debugfs_create_x16("vp_smpswaittimemin", S_IRUGO,
 -                              vdd->debug_dir,
 -                              &(vdd->vp_rt_data.vstepmin_smpswaittimemin));
 -      (void) debugfs_create_x8("vp_stepmin", S_IRUGO, vdd->debug_dir,
 -                              &(vdd->vp_rt_data.vstepmin_stepmin));
 -      (void) debugfs_create_x16("vp_smpswaittimemax", S_IRUGO,
 -                              vdd->debug_dir,
 -                              &(vdd->vp_rt_data.vstepmax_smpswaittimemax));
 -      (void) debugfs_create_x8("vp_stepmax", S_IRUGO, vdd->debug_dir,
 -                              &(vdd->vp_rt_data.vstepmax_stepmax));
 -      (void) debugfs_create_x8("vp_vddmax", S_IRUGO, vdd->debug_dir,
 -                              &(vdd->vp_rt_data.vlimitto_vddmax));
 -      (void) debugfs_create_x8("vp_vddmin", S_IRUGO, vdd->debug_dir,
 -                              &(vdd->vp_rt_data.vlimitto_vddmin));
 -      (void) debugfs_create_x16("vp_timeout", S_IRUGO, vdd->debug_dir,
 -                              &(vdd->vp_rt_data.vlimitto_timeout));
 -      (void) debugfs_create_file("curr_vp_volt", S_IRUGO, vdd->debug_dir,
 -                              (void *) vdd, &vp_volt_debug_fops);
 -      (void) debugfs_create_file("curr_nominal_volt", S_IRUGO,
 -                              vdd->debug_dir, (void *) vdd,
 -                              &nom_volt_debug_fops);
 -}
 -
 -/* Voltage scale and accessory APIs */
 -static int _pre_volt_scale(struct omap_vdd_info *vdd,
 -              unsigned long target_volt, u8 *target_vsel, u8 *current_vsel)
 -{
 -      struct omap_volt_data *volt_data;
 -      const struct omap_vc_common_data *vc_common;
 -      const struct omap_vp_common_data *vp_common;
 -      u32 vc_cmdval, vp_errgain_val;
 -
 -      vc_common = vdd->vc_data->vc_common;
 -      vp_common = vdd->vp_data->vp_common;
 -
 -      /* Check if suffiecient pmic info is available for this vdd */
 -      if (!vdd->pmic_info) {
 -              pr_err("%s: Insufficient pmic info to scale the vdd_%s\n",
 -                      __func__, vdd->voltdm.name);
 -              return -EINVAL;
 -      }
 -
 -      if (!vdd->pmic_info->uv_to_vsel) {
 -              pr_err("%s: PMIC function to convert voltage in uV to"
 -                      "vsel not registered. Hence unable to scale voltage"
 -                      "for vdd_%s\n", __func__, vdd->voltdm.name);
 -              return -ENODATA;
 -      }
 -
 -      if (!vdd->read_reg || !vdd->write_reg) {
 -              pr_err("%s: No read/write API for accessing vdd_%s regs\n",
 -                      __func__, vdd->voltdm.name);
 -              return -EINVAL;
 -      }
 -
 -      /* Get volt_data corresponding to target_volt */
 -      volt_data = omap_voltage_get_voltdata(&vdd->voltdm, target_volt);
 -      if (IS_ERR(volt_data))
 -              volt_data = NULL;
 -
 -      *target_vsel = vdd->pmic_info->uv_to_vsel(target_volt);
 -      *current_vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage);
 -
 -      /* Setting the ON voltage to the new target voltage */
 -      vc_cmdval = vdd->read_reg(prm_mod_offs, vdd->vc_data->cmdval_reg);
 -      vc_cmdval &= ~vc_common->cmd_on_mask;
 -      vc_cmdval |= (*target_vsel << vc_common->cmd_on_shift);
 -      vdd->write_reg(vc_cmdval, prm_mod_offs, vdd->vc_data->cmdval_reg);
 -
 -      /* Setting vp errorgain based on the voltage */
 -      if (volt_data) {
 -              vp_errgain_val = vdd->read_reg(prm_mod_offs,
 -                                             vdd->vp_data->vpconfig);
 -              vdd->vp_rt_data.vpconfig_errorgain = volt_data->vp_errgain;
 -              vp_errgain_val &= ~vp_common->vpconfig_errorgain_mask;
 -              vp_errgain_val |= vdd->vp_rt_data.vpconfig_errorgain <<
 -                      vp_common->vpconfig_errorgain_shift;
 -              vdd->write_reg(vp_errgain_val, prm_mod_offs,
 -                             vdd->vp_data->vpconfig);
 -      }
 -
 -      return 0;
 -}
 -
 -static void _post_volt_scale(struct omap_vdd_info *vdd,
 -              unsigned long target_volt, u8 target_vsel, u8 current_vsel)
 -{
 -      u32 smps_steps = 0, smps_delay = 0;
 -
 -      smps_steps = abs(target_vsel - current_vsel);
 -      /* SMPS slew rate / step size. 2us added as buffer. */
 -      smps_delay = ((smps_steps * vdd->pmic_info->step_size) /
 -                      vdd->pmic_info->slew_rate) + 2;
 -      udelay(smps_delay);
 -
 -      vdd->curr_volt = target_volt;
 -}
 -
 -/* vc_bypass_scale_voltage - VC bypass method of voltage scaling */
 -static int vc_bypass_scale_voltage(struct omap_vdd_info *vdd,
 -              unsigned long target_volt)
 -{
 -      u32 loop_cnt = 0, retries_cnt = 0;
 -      u32 vc_valid, vc_bypass_val_reg, vc_bypass_value;
 -      u8 target_vsel, current_vsel;
 -      int ret;
 -
 -      ret = _pre_volt_scale(vdd, target_volt, &target_vsel, &current_vsel);
 -      if (ret)
 -              return ret;
 -
 -      vc_valid = vdd->vc_data->vc_common->valid;
 -      vc_bypass_val_reg = vdd->vc_data->vc_common->bypass_val_reg;
 -      vc_bypass_value = (target_vsel << vdd->vc_data->vc_common->data_shift) |
 -                      (vdd->pmic_info->pmic_reg <<
 -                      vdd->vc_data->vc_common->regaddr_shift) |
 -                      (vdd->pmic_info->i2c_slave_addr <<
 -                      vdd->vc_data->vc_common->slaveaddr_shift);
 -
 -      vdd->write_reg(vc_bypass_value, prm_mod_offs, vc_bypass_val_reg);
 -      vdd->write_reg(vc_bypass_value | vc_valid, prm_mod_offs,
 -                     vc_bypass_val_reg);
 -
 -      vc_bypass_value = vdd->read_reg(prm_mod_offs, vc_bypass_val_reg);
 -      /*
 -       * Loop till the bypass command is acknowledged from the SMPS.
 -       * NOTE: This is legacy code. The loop count and retry count needs
 -       * to be revisited.
 -       */
 -      while (!(vc_bypass_value & vc_valid)) {
 -              loop_cnt++;
 -
 -              if (retries_cnt > 10) {
 -                      pr_warning("%s: Retry count exceeded\n", __func__);
 -                      return -ETIMEDOUT;
 -              }
 -
 -              if (loop_cnt > 50) {
 -                      retries_cnt++;
 -                      loop_cnt = 0;
 -                      udelay(10);
 -              }
 -              vc_bypass_value = vdd->read_reg(prm_mod_offs,
 -                                              vc_bypass_val_reg);
 -      }
 -
 -      _post_volt_scale(vdd, target_volt, target_vsel, current_vsel);
 -      return 0;
 -}
 -
 -/* VP force update method of voltage scaling */
 -static int vp_forceupdate_scale_voltage(struct omap_vdd_info *vdd,
 -              unsigned long target_volt)
 -{
 -      u32 vpconfig;
 -      u8 target_vsel, current_vsel, prm_irqst_reg;
 -      int ret, timeout = 0;
 -
 -      ret = _pre_volt_scale(vdd, target_volt, &target_vsel, &current_vsel);
 -      if (ret)
 -              return ret;
 -
 -      prm_irqst_reg = vdd->vp_data->prm_irqst_data->prm_irqst_reg;
 -
 -      /*
 -       * Clear all pending TransactionDone interrupt/status. Typical latency
 -       * is <3us
 -       */
 -      while (timeout++ < VP_TRANXDONE_TIMEOUT) {
 -              vdd->write_reg(vdd->vp_data->prm_irqst_data->tranxdone_status,
 -                             prm_irqst_ocp_mod_offs, prm_irqst_reg);
 -              if (!(vdd->read_reg(prm_irqst_ocp_mod_offs, prm_irqst_reg) &
 -                    vdd->vp_data->prm_irqst_data->tranxdone_status))
 -                      break;
 -              udelay(1);
 -      }
 -      if (timeout >= VP_TRANXDONE_TIMEOUT) {
 -              pr_warning("%s: vdd_%s TRANXDONE timeout exceeded."
 -                      "Voltage change aborted", __func__, vdd->voltdm.name);
 -              return -ETIMEDOUT;
 -      }
 -
 -      /* Configure for VP-Force Update */
 -      vpconfig = vdd->read_reg(prm_mod_offs, vdd->vp_data->vpconfig);
 -      vpconfig &= ~(vdd->vp_data->vp_common->vpconfig_initvdd |
 -                      vdd->vp_data->vp_common->vpconfig_forceupdate |
 -                      vdd->vp_data->vp_common->vpconfig_initvoltage_mask);
 -      vpconfig |= ((target_vsel <<
 -                      vdd->vp_data->vp_common->vpconfig_initvoltage_shift));
 -      vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
 -
 -      /* Trigger initVDD value copy to voltage processor */
 -      vpconfig |= vdd->vp_data->vp_common->vpconfig_initvdd;
 -      vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
 -
 -      /* Force update of voltage */
 -      vpconfig |= vdd->vp_data->vp_common->vpconfig_forceupdate;
 -      vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
 -
 -      /*
 -       * Wait for TransactionDone. Typical latency is <200us.
 -       * Depends on SMPSWAITTIMEMIN/MAX and voltage change
 -       */
 -      timeout = 0;
 -      omap_test_timeout((vdd->read_reg(prm_irqst_ocp_mod_offs, prm_irqst_reg) &
 -                         vdd->vp_data->prm_irqst_data->tranxdone_status),
 -                        VP_TRANXDONE_TIMEOUT, timeout);
 -      if (timeout >= VP_TRANXDONE_TIMEOUT)
 -              pr_err("%s: vdd_%s TRANXDONE timeout exceeded."
 -                      "TRANXDONE never got set after the voltage update\n",
 -                      __func__, vdd->voltdm.name);
 -
 -      _post_volt_scale(vdd, target_volt, target_vsel, current_vsel);
 -
 -      /*
 -       * Disable TransactionDone interrupt , clear all status, clear
 -       * control registers
 -       */
 -      timeout = 0;
 -      while (timeout++ < VP_TRANXDONE_TIMEOUT) {
 -              vdd->write_reg(vdd->vp_data->prm_irqst_data->tranxdone_status,
 -                             prm_irqst_ocp_mod_offs, prm_irqst_reg);
 -              if (!(vdd->read_reg(prm_irqst_ocp_mod_offs, prm_irqst_reg) &
 -                    vdd->vp_data->prm_irqst_data->tranxdone_status))
 -                      break;
 -              udelay(1);
 -      }
 -
 -      if (timeout >= VP_TRANXDONE_TIMEOUT)
 -              pr_warning("%s: vdd_%s TRANXDONE timeout exceeded while trying"
 -                      "to clear the TRANXDONE status\n",
 -                      __func__, vdd->voltdm.name);
 -
 -      vpconfig = vdd->read_reg(prm_mod_offs, vdd->vp_data->vpconfig);
 -      /* Clear initVDD copy trigger bit */
 -      vpconfig &= ~vdd->vp_data->vp_common->vpconfig_initvdd;
 -      vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
 -      /* Clear force bit */
 -      vpconfig &= ~vdd->vp_data->vp_common->vpconfig_forceupdate;
 -      vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
 -
 -      return 0;
 -}
 -
 -static void __init omap3_vfsm_init(struct omap_vdd_info *vdd)
 -{
 -      /*
 -       * Voltage Manager FSM parameters init
 -       * XXX This data should be passed in from the board file
 -       */
 -      vdd->write_reg(OMAP3_CLKSETUP, prm_mod_offs, OMAP3_PRM_CLKSETUP_OFFSET);
 -      vdd->write_reg(OMAP3_VOLTOFFSET, prm_mod_offs,
 -                     OMAP3_PRM_VOLTOFFSET_OFFSET);
 -      vdd->write_reg(OMAP3_VOLTSETUP2, prm_mod_offs,
 -                     OMAP3_PRM_VOLTSETUP2_OFFSET);
 -}
 -
 -static void __init omap3_vc_init(struct omap_vdd_info *vdd)
 -{
 -      static bool is_initialized;
 -      u8 on_vsel, onlp_vsel, ret_vsel, off_vsel;
 -      u32 vc_val;
 -
 -      if (is_initialized)
 -              return;
 -
 -      /* Set up the on, inactive, retention and off voltage */
 -      on_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->on_volt);
 -      onlp_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->onlp_volt);
 -      ret_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->ret_volt);
 -      off_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->off_volt);
 -      vc_val  = ((on_vsel << vdd->vc_data->vc_common->cmd_on_shift) |
 -              (onlp_vsel << vdd->vc_data->vc_common->cmd_onlp_shift) |
 -              (ret_vsel << vdd->vc_data->vc_common->cmd_ret_shift) |
 -              (off_vsel << vdd->vc_data->vc_common->cmd_off_shift));
 -      vdd->write_reg(vc_val, prm_mod_offs, vdd->vc_data->cmdval_reg);
 -
 -      /*
 -       * Generic VC parameters init
 -       * XXX This data should be abstracted out
 -       */
 -      vdd->write_reg(OMAP3430_CMD1_MASK | OMAP3430_RAV1_MASK, prm_mod_offs,
 -                      OMAP3_PRM_VC_CH_CONF_OFFSET);
 -      vdd->write_reg(OMAP3430_MCODE_SHIFT | OMAP3430_HSEN_MASK, prm_mod_offs,
 -                      OMAP3_PRM_VC_I2C_CFG_OFFSET);
 -
 -      omap3_vfsm_init(vdd);
 -
 -      is_initialized = true;
 -}
 -
 -
 -/* OMAP4 specific voltage init functions */
 -static void __init omap4_vc_init(struct omap_vdd_info *vdd)
 -{
 -      static bool is_initialized;
 -      u32 vc_val;
 -
 -      if (is_initialized)
 -              return;
 -
 -      /* TODO: Configure setup times and CMD_VAL values*/
 -
 -      /*
 -       * Generic VC parameters init
 -       * XXX This data should be abstracted out
 -       */
 -      vc_val = (OMAP4430_RAV_VDD_MPU_L_MASK | OMAP4430_CMD_VDD_MPU_L_MASK |
 -                OMAP4430_RAV_VDD_IVA_L_MASK | OMAP4430_CMD_VDD_IVA_L_MASK |
 -                OMAP4430_RAV_VDD_CORE_L_MASK | OMAP4430_CMD_VDD_CORE_L_MASK);
 -      vdd->write_reg(vc_val, prm_mod_offs, OMAP4_PRM_VC_CFG_CHANNEL_OFFSET);
 -
 -      /* XXX These are magic numbers and do not belong! */
 -      vc_val = (0x60 << OMAP4430_SCLL_SHIFT | 0x26 << OMAP4430_SCLH_SHIFT);
 -      vdd->write_reg(vc_val, prm_mod_offs, OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET);
 -
 -      is_initialized = true;
 -}
 -
 -static void __init omap_vc_init(struct omap_vdd_info *vdd)
 -{
 -      u32 vc_val;
 -
 -      if (!vdd->pmic_info || !vdd->pmic_info->uv_to_vsel) {
 -              pr_err("%s: PMIC info requried to configure vc for"
 -                      "vdd_%s not populated.Hence cannot initialize vc\n",
 -                      __func__, vdd->voltdm.name);
 -              return;
 -      }
 -
 -      if (!vdd->read_reg || !vdd->write_reg) {
 -              pr_err("%s: No read/write API for accessing vdd_%s regs\n",
 -                      __func__, vdd->voltdm.name);
 -              return;
 -      }
 -
 -      /* Set up the SMPS_SA(i2c slave address in VC */
 -      vc_val = vdd->read_reg(prm_mod_offs,
 -                             vdd->vc_data->vc_common->smps_sa_reg);
 -      vc_val &= ~vdd->vc_data->smps_sa_mask;
 -      vc_val |= vdd->pmic_info->i2c_slave_addr << vdd->vc_data->smps_sa_shift;
 -      vdd->write_reg(vc_val, prm_mod_offs,
 -                     vdd->vc_data->vc_common->smps_sa_reg);
 -
 -      /* Setup the VOLRA(pmic reg addr) in VC */
 -      vc_val = vdd->read_reg(prm_mod_offs,
 -                             vdd->vc_data->vc_common->smps_volra_reg);
 -      vc_val &= ~vdd->vc_data->smps_volra_mask;
 -      vc_val |= vdd->pmic_info->pmic_reg << vdd->vc_data->smps_volra_shift;
 -      vdd->write_reg(vc_val, prm_mod_offs,
 -                     vdd->vc_data->vc_common->smps_volra_reg);
 -
 -      /* Configure the setup times */
 -      vc_val = vdd->read_reg(prm_mod_offs, vdd->vfsm->voltsetup_reg);
 -      vc_val &= ~vdd->vfsm->voltsetup_mask;
 -      vc_val |= vdd->pmic_info->volt_setup_time <<
 -                      vdd->vfsm->voltsetup_shift;
 -      vdd->write_reg(vc_val, prm_mod_offs, vdd->vfsm->voltsetup_reg);
 -
 -      if (cpu_is_omap34xx())
 -              omap3_vc_init(vdd);
 -      else if (cpu_is_omap44xx())
 -              omap4_vc_init(vdd);
 -}
 -
 -static int __init omap_vdd_data_configure(struct omap_vdd_info *vdd)
 -{
 -      int ret = -EINVAL;
 -
 -      if (!vdd->pmic_info) {
 -              pr_err("%s: PMIC info requried to configure vdd_%s not"
 -                      "populated.Hence cannot initialize vdd_%s\n",
 -                      __func__, vdd->voltdm.name, vdd->voltdm.name);
 -              goto ovdc_out;
 -      }
 -
 -      if (IS_ERR_VALUE(_config_common_vdd_data(vdd)))
 -              goto ovdc_out;
 -
 -      if (cpu_is_omap34xx()) {
 -              vdd->read_reg = omap3_voltage_read_reg;
 -              vdd->write_reg = omap3_voltage_write_reg;
 -              ret = 0;
 -      } else if (cpu_is_omap44xx()) {
 -              vdd->read_reg = omap4_voltage_read_reg;
 -              vdd->write_reg = omap4_voltage_write_reg;
 -              ret = 0;
 -      }
 -
 -ovdc_out:
 -      return ret;
 -}
 +static LIST_HEAD(voltdm_list);
  
  /* Public functions */
  /**
 - * omap_voltage_get_nom_volt() - Gets the current non-auto-compensated voltage
 - * @voltdm:   pointer to the VDD for which current voltage info is needed
 + * voltdm_get_voltage() - Gets the current non-auto-compensated voltage
 + * @voltdm:   pointer to the voltdm for which current voltage info is needed
   *
 - * API to get the current non-auto-compensated voltage for a VDD.
 - * Returns 0 in case of error else returns the current voltage for the VDD.
 + * API to get the current non-auto-compensated voltage for a voltage domain.
 + * Returns 0 in case of error else returns the current voltage.
   */
 -unsigned long omap_voltage_get_nom_volt(struct voltagedomain *voltdm)
 +unsigned long voltdm_get_voltage(struct voltagedomain *voltdm)
  {
 -      struct omap_vdd_info *vdd;
 -
        if (!voltdm || IS_ERR(voltdm)) {
                pr_warning("%s: VDD specified does not exist!\n", __func__);
                return 0;
        }
  
 -      vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
 -
 -      return vdd->curr_volt;
 +      return voltdm->nominal_volt;
  }
  
  /**
 - * omap_vp_get_curr_volt() - API to get the current vp voltage.
 - * @voltdm:   pointer to the VDD.
 - *
 - * This API returns the current voltage for the specified voltage processor
 - */
 -unsigned long omap_vp_get_curr_volt(struct voltagedomain *voltdm)
 -{
 -      struct omap_vdd_info *vdd;
 -      u8 curr_vsel;
 -
 -      if (!voltdm || IS_ERR(voltdm)) {
 -              pr_warning("%s: VDD specified does not exist!\n", __func__);
 -              return 0;
 -      }
 -
 -      vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
 -      if (!vdd->read_reg) {
 -              pr_err("%s: No read API for reading vdd_%s regs\n",
 -                      __func__, voltdm->name);
 -              return 0;
 -      }
 -
 -      curr_vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage);
 -
 -      if (!vdd->pmic_info || !vdd->pmic_info->vsel_to_uv) {
 -              pr_warning("%s: PMIC function to convert vsel to voltage"
 -                      "in uV not registerd\n", __func__);
 -              return 0;
 -      }
 -
 -      return vdd->pmic_info->vsel_to_uv(curr_vsel);
 -}
 -
 -/**
 - * omap_vp_enable() - API to enable a particular VP
 - * @voltdm:   pointer to the VDD whose VP is to be enabled.
 - *
 - * This API enables a particular voltage processor. Needed by the smartreflex
 - * class drivers.
 - */
 -void omap_vp_enable(struct voltagedomain *voltdm)
 -{
 -      struct omap_vdd_info *vdd;
 -      u32 vpconfig;
 -
 -      if (!voltdm || IS_ERR(voltdm)) {
 -              pr_warning("%s: VDD specified does not exist!\n", __func__);
 -              return;
 -      }
 -
 -      vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
 -      if (!vdd->read_reg || !vdd->write_reg) {
 -              pr_err("%s: No read/write API for accessing vdd_%s regs\n",
 -                      __func__, voltdm->name);
 -              return;
 -      }
 -
 -      /* If VP is already enabled, do nothing. Return */
 -      if (vdd->vp_enabled)
 -              return;
 -
 -      vp_latch_vsel(vdd);
 -
 -      /* Enable VP */
 -      vpconfig = vdd->read_reg(prm_mod_offs, vdd->vp_data->vpconfig);
 -      vpconfig |= vdd->vp_data->vp_common->vpconfig_vpenable;
 -      vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
 -      vdd->vp_enabled = true;
 -}
 -
 -/**
 - * omap_vp_disable() - API to disable a particular VP
 - * @voltdm:   pointer to the VDD whose VP is to be disabled.
 - *
 - * This API disables a particular voltage processor. Needed by the smartreflex
 - * class drivers.
 - */
 -void omap_vp_disable(struct voltagedomain *voltdm)
 -{
 -      struct omap_vdd_info *vdd;
 -      u32 vpconfig;
 -      int timeout;
 -
 -      if (!voltdm || IS_ERR(voltdm)) {
 -              pr_warning("%s: VDD specified does not exist!\n", __func__);
 -              return;
 -      }
 -
 -      vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
 -      if (!vdd->read_reg || !vdd->write_reg) {
 -              pr_err("%s: No read/write API for accessing vdd_%s regs\n",
 -                      __func__, voltdm->name);
 -              return;
 -      }
 -
 -      /* If VP is already disabled, do nothing. Return */
 -      if (!vdd->vp_enabled) {
 -              pr_warning("%s: Trying to disable VP for vdd_%s when"
 -                      "it is already disabled\n", __func__, voltdm->name);
 -              return;
 -      }
 -
 -      /* Disable VP */
 -      vpconfig = vdd->read_reg(prm_mod_offs, vdd->vp_data->vpconfig);
 -      vpconfig &= ~vdd->vp_data->vp_common->vpconfig_vpenable;
 -      vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
 -
 -      /*
 -       * Wait for VP idle Typical latency is <2us. Maximum latency is ~100us
 -       */
 -      omap_test_timeout((vdd->read_reg(prm_mod_offs, vdd->vp_data->vstatus)),
 -                              VP_IDLE_TIMEOUT, timeout);
 -
 -      if (timeout >= VP_IDLE_TIMEOUT)
 -              pr_warning("%s: vdd_%s idle timedout\n",
 -                      __func__, voltdm->name);
 -
 -      vdd->vp_enabled = false;
 -
 -      return;
 -}
 -
 -/**
 - * omap_voltage_scale_vdd() - API to scale voltage of a particular
 - *                            voltage domain.
 - * @voltdm:   pointer to the VDD which is to be scaled.
 - * @target_volt:      The target voltage of the voltage domain
 + * voltdm_scale() - API to scale voltage of a particular voltage domain.
 + * @voltdm: pointer to the voltage domain which is to be scaled.
 + * @target_volt: The target voltage of the voltage domain
   *
   * This API should be called by the kernel to do the voltage scaling
 - * for a particular voltage domain during dvfs or any other situation.
 + * for a particular voltage domain during DVFS.
   */
 -int omap_voltage_scale_vdd(struct voltagedomain *voltdm,
 -              unsigned long target_volt)
 +int voltdm_scale(struct voltagedomain *voltdm,
 +               unsigned long target_volt)
  {
 -      struct omap_vdd_info *vdd;
 +      int ret;
  
        if (!voltdm || IS_ERR(voltdm)) {
                pr_warning("%s: VDD specified does not exist!\n", __func__);
                return -EINVAL;
        }
  
 -      vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
 -
 -      if (!vdd->volt_scale) {
 +      if (!voltdm->scale) {
                pr_err("%s: No voltage scale API registered for vdd_%s\n",
                        __func__, voltdm->name);
                return -ENODATA;
        }
  
 -      return vdd->volt_scale(vdd, target_volt);
 +      ret = voltdm->scale(voltdm, target_volt);
 +      if (!ret)
 +              voltdm->nominal_volt = target_volt;
 +
 +      return ret;
  }
  
  /**
 - * omap_voltage_reset() - Resets the voltage of a particular voltage domain
 - *                    to that of the current OPP.
 - * @voltdm:   pointer to the VDD whose voltage is to be reset.
 + * voltdm_reset() - Resets the voltage of a particular voltage domain
 + *                to that of the current OPP.
 + * @voltdm: pointer to the voltage domain whose voltage is to be reset.
   *
   * This API finds out the correct voltage the voltage domain is supposed
   * to be at and resets the voltage to that level. Should be used especially
   * while disabling any voltage compensation modules.
   */
 -void omap_voltage_reset(struct voltagedomain *voltdm)
 +void voltdm_reset(struct voltagedomain *voltdm)
  {
 -      unsigned long target_uvdc;
 +      unsigned long target_volt;
  
        if (!voltdm || IS_ERR(voltdm)) {
                pr_warning("%s: VDD specified does not exist!\n", __func__);
                return;
        }
  
 -      target_uvdc = omap_voltage_get_nom_volt(voltdm);
 -      if (!target_uvdc) {
 +      target_volt = voltdm_get_voltage(voltdm);
 +      if (!target_volt) {
                pr_err("%s: unable to find current voltage for vdd_%s\n",
                        __func__, voltdm->name);
                return;
        }
  
 -      omap_voltage_scale_vdd(voltdm, target_uvdc);
 +      voltdm_scale(voltdm, target_volt);
  }
  
  /**
   *
   */
  void omap_voltage_get_volttable(struct voltagedomain *voltdm,
 -              struct omap_volt_data **volt_data)
 +                              struct omap_volt_data **volt_data)
  {
 -      struct omap_vdd_info *vdd;
 -
        if (!voltdm || IS_ERR(voltdm)) {
                pr_warning("%s: VDD specified does not exist!\n", __func__);
                return;
        }
  
 -      vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
 -
 -      *volt_data = vdd->volt_data;
 +      *volt_data = voltdm->volt_data;
  }
  
  /**
   * domain or if there is no matching entry.
   */
  struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
 -              unsigned long volt)
 +                                               unsigned long volt)
  {
 -      struct omap_vdd_info *vdd;
        int i;
  
        if (!voltdm || IS_ERR(voltdm)) {
                return ERR_PTR(-EINVAL);
        }
  
 -      vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
 -
 -      if (!vdd->volt_data) {
 +      if (!voltdm->volt_data) {
                pr_warning("%s: voltage table does not exist for vdd_%s\n",
                        __func__, voltdm->name);
                return ERR_PTR(-ENODATA);
        }
  
 -      for (i = 0; vdd->volt_data[i].volt_nominal != 0; i++) {
 -              if (vdd->volt_data[i].volt_nominal == volt)
 -                      return &vdd->volt_data[i];
 +      for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
 +              if (voltdm->volt_data[i].volt_nominal == volt)
 +                      return &voltdm->volt_data[i];
        }
  
        pr_notice("%s: Unable to match the current voltage with the voltage"
   * omap_voltage_register_pmic() - API to register PMIC specific data
   * @voltdm:   pointer to the VDD for which the PMIC specific data is
   *            to be registered
 - * @pmic_info:        the structure containing pmic info
 + * @pmic:     the structure containing pmic info
   *
   * This API is to be called by the SOC/PMIC file to specify the
 - * pmic specific info as present in omap_volt_pmic_info structure.
 + * pmic specific info as present in omap_voltdm_pmic structure.
   */
  int omap_voltage_register_pmic(struct voltagedomain *voltdm,
 -              struct omap_volt_pmic_info *pmic_info)
 +                             struct omap_voltdm_pmic *pmic)
  {
 -      struct omap_vdd_info *vdd;
 -
        if (!voltdm || IS_ERR(voltdm)) {
                pr_warning("%s: VDD specified does not exist!\n", __func__);
                return -EINVAL;
        }
  
 -      vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
 -
 -      vdd->pmic_info = pmic_info;
 +      voltdm->pmic = pmic;
  
        return 0;
  }
  
 -/**
 - * omap_voltage_get_dbgdir() - API to get pointer to the debugfs directory
 - *                            corresponding to a voltage domain.
 - *
 - * @voltdm:   pointer to the VDD whose debug directory is required.
 - *
 - * This API returns pointer to the debugfs directory corresponding
 - * to the voltage domain. Should be used by drivers requiring to
 - * add any debug entry for a particular voltage domain. Returns NULL
 - * in case of error.
 - */
 -struct dentry *omap_voltage_get_dbgdir(struct voltagedomain *voltdm)
 -{
 -      struct omap_vdd_info *vdd;
 -
 -      if (!voltdm || IS_ERR(voltdm)) {
 -              pr_warning("%s: VDD specified does not exist!\n", __func__);
 -              return NULL;
 -      }
 -
 -      vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
 -
 -      return vdd->debug_dir;
 -}
 -
  /**
   * omap_change_voltscale_method() - API to change the voltage scaling method.
   * @voltdm:   pointer to the VDD whose voltage scaling method
   * defined in voltage.h
   */
  void omap_change_voltscale_method(struct voltagedomain *voltdm,
 -              int voltscale_method)
 +                                int voltscale_method)
  {
 -      struct omap_vdd_info *vdd;
 -
        if (!voltdm || IS_ERR(voltdm)) {
                pr_warning("%s: VDD specified does not exist!\n", __func__);
                return;
        }
  
 -      vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
 -
        switch (voltscale_method) {
        case VOLTSCALE_VPFORCEUPDATE:
 -              vdd->volt_scale = vp_forceupdate_scale_voltage;
 +              voltdm->scale = omap_vp_forceupdate_scale;
                return;
        case VOLTSCALE_VCBYPASS:
 -              vdd->volt_scale = vc_bypass_scale_voltage;
 +              voltdm->scale = omap_vc_bypass_scale;
                return;
        default:
                pr_warning("%s: Trying to change the method of voltage scaling"
  }
  
  /**
 - * omap_voltage_domain_lookup() - API to get the voltage domain pointer
 - * @name:     Name of the voltage domain
 + * omap_voltage_late_init() - Init the various voltage parameters
   *
 - * This API looks up in the global vdd_info struct for the
 - * existence of voltage domain <name>. If it exists, the API returns
 - * a pointer to the voltage domain structure corresponding to the
 - * VDD<name>. Else retuns error pointer.
 + * This API is to be called in the later stages of the
 + * system boot to init the voltage controller and
 + * voltage processors.
   */
 -struct voltagedomain *omap_voltage_domain_lookup(char *name)
 +int __init omap_voltage_late_init(void)
  {
 -      int i;
 +      struct voltagedomain *voltdm;
  
 -      if (!vdd_info) {
 -              pr_err("%s: Voltage driver init not yet happened.Faulting!\n",
 +      if (list_empty(&voltdm_list)) {
 +              pr_err("%s: Voltage driver support not added\n",
                        __func__);
 -              return ERR_PTR(-EINVAL);
 +              return -EINVAL;
        }
  
 -      if (!name) {
 -              pr_err("%s: No name to get the votage domain!\n", __func__);
 -              return ERR_PTR(-EINVAL);
 +      list_for_each_entry(voltdm, &voltdm_list, node) {
 +              struct clk *sys_ck;
 +
 +              if (!voltdm->scalable)
 +                      continue;
 +
 +              sys_ck = clk_get(NULL, voltdm->sys_clk.name);
 +              if (IS_ERR(sys_ck)) {
 +                      pr_warning("%s: Could not get sys clk.\n", __func__);
 +                      return -EINVAL;
 +              }
 +              voltdm->sys_clk.rate = clk_get_rate(sys_ck);
 +              WARN_ON(!voltdm->sys_clk.rate);
 +              clk_put(sys_ck);
 +
 +              if (voltdm->vc) {
 +                      voltdm->scale = omap_vc_bypass_scale;
 +                      omap_vc_init_channel(voltdm);
 +              }
 +
 +              if (voltdm->vp) {
 +                      voltdm->scale = omap_vp_forceupdate_scale;
 +                      omap_vp_init(voltdm);
 +              }
        }
  
 -      for (i = 0; i < nr_scalable_vdd; i++) {
 -              if (!(strcmp(name, vdd_info[i]->voltdm.name)))
 -                      return &vdd_info[i]->voltdm;
 +      return 0;
 +}
 +
 +static struct voltagedomain *_voltdm_lookup(const char *name)
 +{
 +      struct voltagedomain *voltdm, *temp_voltdm;
 +
 +      voltdm = NULL;
 +
 +      list_for_each_entry(temp_voltdm, &voltdm_list, node) {
 +              if (!strcmp(name, temp_voltdm->name)) {
 +                      voltdm = temp_voltdm;
 +                      break;
 +              }
        }
  
 -      return ERR_PTR(-EINVAL);
 +      return voltdm;
  }
  
  /**
 - * omap_voltage_late_init() - Init the various voltage parameters
 + * voltdm_add_pwrdm - add a powerdomain to a voltagedomain
 + * @voltdm: struct voltagedomain * to add the powerdomain to
 + * @pwrdm: struct powerdomain * to associate with a voltagedomain
   *
 - * This API is to be called in the later stages of the
 - * system boot to init the voltage controller and
 - * voltage processors.
 + * Associate the powerdomain @pwrdm with a voltagedomain @voltdm.  This
 + * enables the use of voltdm_for_each_pwrdm().  Returns -EINVAL if
 + * presented with invalid pointers; -ENOMEM if memory could not be allocated;
 + * or 0 upon success.
   */
 -int __init omap_voltage_late_init(void)
 +int voltdm_add_pwrdm(struct voltagedomain *voltdm, struct powerdomain *pwrdm)
  {
 -      int i;
 +      if (!voltdm || !pwrdm)
 +              return -EINVAL;
  
 -      if (!vdd_info) {
 -              pr_err("%s: Voltage driver support not added\n",
 -                      __func__);
 +      pr_debug("voltagedomain: associating powerdomain %s with voltagedomain "
 +               "%s\n", pwrdm->name, voltdm->name);
 +
 +      list_add(&pwrdm->voltdm_node, &voltdm->pwrdm_list);
 +
 +      return 0;
 +}
 +
 +/**
 + * voltdm_for_each_pwrdm - call function for each pwrdm in a voltdm
 + * @voltdm: struct voltagedomain * to iterate over
 + * @fn: callback function *
 + *
 + * Call the supplied function @fn for each powerdomain in the
 + * voltagedomain @voltdm.  Returns -EINVAL if presented with invalid
 + * pointers; or passes along the last return value of the callback
 + * function, which should be 0 for success or anything else to
 + * indicate failure.
 + */
 +int voltdm_for_each_pwrdm(struct voltagedomain *voltdm,
 +                        int (*fn)(struct voltagedomain *voltdm,
 +                                  struct powerdomain *pwrdm))
 +{
 +      struct powerdomain *pwrdm;
 +      int ret = 0;
 +
 +      if (!fn)
                return -EINVAL;
 -      }
  
 -      voltage_dir = debugfs_create_dir("voltage", NULL);
 -      if (IS_ERR(voltage_dir))
 -              pr_err("%s: Unable to create voltage debugfs main dir\n",
 -                      __func__);
 -      for (i = 0; i < nr_scalable_vdd; i++) {
 -              if (omap_vdd_data_configure(vdd_info[i]))
 -                      continue;
 -              omap_vc_init(vdd_info[i]);
 -              vp_init(vdd_info[i]);
 -              vdd_debugfs_init(vdd_info[i]);
 +      list_for_each_entry(pwrdm, &voltdm->pwrdm_list, voltdm_node)
 +              ret = (*fn)(voltdm, pwrdm);
 +
 +      return ret;
 +}
 +
 +/**
 + * voltdm_for_each - call function on each registered voltagedomain
 + * @fn: callback function *
 + *
 + * Call the supplied function @fn for each registered voltagedomain.
 + * The callback function @fn can return anything but 0 to bail out
 + * early from the iterator.  Returns the last return value of the
 + * callback function, which should be 0 for success or anything else
 + * to indicate failure; or -EINVAL if the function pointer is null.
 + */
 +int voltdm_for_each(int (*fn)(struct voltagedomain *voltdm, void *user),
 +                  void *user)
 +{
 +      struct voltagedomain *temp_voltdm;
 +      int ret = 0;
 +
 +      if (!fn)
 +              return -EINVAL;
 +
 +      list_for_each_entry(temp_voltdm, &voltdm_list, node) {
 +              ret = (*fn)(temp_voltdm, user);
 +              if (ret)
 +                      break;
        }
  
 -      return 0;
 +      return ret;
  }
  
 -/* XXX document */
 -int __init omap_voltage_early_init(s16 prm_mod, s16 prm_irqst_ocp_mod,
 -                                 struct omap_vdd_info *omap_vdd_array[],
 -                                 u8 omap_vdd_count)
 +static int _voltdm_register(struct voltagedomain *voltdm)
  {
 -      prm_mod_offs = prm_mod;
 -      prm_irqst_ocp_mod_offs = prm_irqst_ocp_mod;
 -      vdd_info = omap_vdd_array;
 -      nr_scalable_vdd = omap_vdd_count;
 +      if (!voltdm || !voltdm->name)
 +              return -EINVAL;
 +
 +      INIT_LIST_HEAD(&voltdm->pwrdm_list);
 +      list_add(&voltdm->node, &voltdm_list);
 +
 +      pr_debug("voltagedomain: registered %s\n", voltdm->name);
 +
        return 0;
  }
 +
 +/**
 + * voltdm_lookup - look up a voltagedomain by name, return a pointer
 + * @name: name of voltagedomain
 + *
 + * Find a registered voltagedomain by its name @name.  Returns a pointer
 + * to the struct voltagedomain if found, or NULL otherwise.
 + */
 +struct voltagedomain *voltdm_lookup(const char *name)
 +{
 +      struct voltagedomain *voltdm ;
 +
 +      if (!name)
 +              return NULL;
 +
 +      voltdm = _voltdm_lookup(name);
 +
 +      return voltdm;
 +}
 +
 +/**
 + * voltdm_init - set up the voltagedomain layer
 + * @voltdm_list: array of struct voltagedomain pointers to register
 + *
 + * Loop through the array of voltagedomains @voltdm_list, registering all
 + * that are available on the current CPU. If voltdm_list is supplied
 + * and not null, all of the referenced voltagedomains will be
 + * registered.  No return value.
 + */
 +void voltdm_init(struct voltagedomain **voltdms)
 +{
 +      struct voltagedomain **v;
 +
 +      if (voltdms) {
 +              for (v = voltdms; *v; v++)
 +                      _voltdm_register(*v);
 +      }
 +}
@@@ -35,6 -35,7 +35,7 @@@
  #include <video/platform_lcd.h>
  
  #include <linux/mmc/host.h>
+ #include <linux/export.h>
  
  #include <asm/mach/arch.h>
  #include <asm/mach/map.h>
@@@ -696,9 -697,9 +697,9 @@@ static void __init h1940_init(void
                              S3C2410_MISCCR_USBSUSPND0 |
                              S3C2410_MISCCR_USBSUSPND1, 0x0);
  
 -      tmp =   (0x78 << S3C24XX_PLLCON_MDIVSHIFT)
 -            | (0x02 << S3C24XX_PLLCON_PDIVSHIFT)
 -            | (0x03 << S3C24XX_PLLCON_SDIVSHIFT);
 +      tmp =   (0x78 << S3C24XX_PLL_MDIV_SHIFT)
 +            | (0x02 << S3C24XX_PLL_PDIV_SHIFT)
 +            | (0x03 << S3C24XX_PLL_SDIV_SHIFT);
        writel(tmp, S3C2410_UPLLCON);
  
        gpio_request(S3C2410_GPC(0), "LCD power");
@@@ -32,6 -32,7 +32,7 @@@
  #include <linux/irq.h>
  #include <linux/clk.h>
  #include <linux/delay.h>
+ #include <linux/export.h>
  
  #include <asm/sizes.h>
  #include <asm/mach/pci.h>
@@@ -41,8 -42,6 +42,8 @@@
  #include <mach/clk.h>
  #include <mach/powergate.h>
  
 +#include "board.h"
 +
  /* register definitions */
  #define AFI_OFFSET    0x3800
  #define PADS_OFFSET   0x3000
  static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
  
  #define pmc_writel(value, reg) \
 -      __raw_writel(value, (u32)reg_pmc_base + (reg))
 +      __raw_writel(value, reg_pmc_base + (reg))
  #define pmc_readl(reg) \
 -      __raw_readl((u32)reg_pmc_base + (reg))
 +      __raw_readl(reg_pmc_base + (reg))
  
  /*
   * Tegra2 defines 1GB in the AXI address map for PCIe.
@@@ -462,7 -461,7 +463,7 @@@ static struct pci_bus __init *tegra_pci
        struct tegra_pcie_port *pp;
  
        if (nr >= tegra_pcie.num_ports)
 -              return 0;
 +              return NULL;
  
        pp = tegra_pcie.port + nr;
        pp->root_bus_nr = sys->busnr;
  
  #include <linux/gpio.h>
  #include <linux/platform_device.h>
+ #include <linux/slab.h>
  #include <linux/io.h>
  #include <linux/pwm_backlight.h>
 +#include <linux/slab.h>
  
  #include <plat/devs.h>
  #include <plat/gpio-cfg.h>
  */
  
  #include <linux/kernel.h>
+ #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/platform_device.h>
  
  #include <plat/devs.h>
 +#include <plat/sdhci.h>
  
  void __init *s3c_set_platdata(void *pd, size_t pdsize,
                              struct platform_device *pdev)
        pdev->dev.platform_data = npd;
        return npd;
  }
 +
 +void s3c_sdhci_set_platdata(struct s3c_sdhci_platdata *pd,
 +                           struct s3c_sdhci_platdata *set)
 +{
 +      set->cd_type = pd->cd_type;
 +      set->ext_cd_init = pd->ext_cd_init;
 +      set->ext_cd_cleanup = pd->ext_cd_cleanup;
 +      set->ext_cd_gpio = pd->ext_cd_gpio;
 +      set->ext_cd_gpio_invert = pd->ext_cd_gpio_invert;
 +
 +      if (pd->max_width)
 +              set->max_width = pd->max_width;
 +      if (pd->cfg_gpio)
 +              set->cfg_gpio = pd->cfg_gpio;
 +      if (pd->host_caps)
 +              set->host_caps |= pd->host_caps;
 +      if (pd->clk_type)
 +              set->clk_type = pd->clk_type;
 +}
@@@ -10,7 -10,9 +10,8 @@@
  #include <linux/dma-mapping.h>
  #include <linux/gfp.h>
  #include <linux/dma-debug.h>
+ #include <linux/export.h>
  #include <asm/bug.h>
 -#include <asm/cacheflush.h>
  
  /*
   * Generic direct DMA implementation
   * can set archdata.dma_data to an unsigned long holding the offset. By
   * default the offset is PCI_DRAM_OFFSET.
   */
 -static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
 -                              size_t size, enum dma_data_direction direction)
 -{
 -      switch (direction) {
 -      case DMA_TO_DEVICE:
 -      case DMA_BIDIRECTIONAL:
 -              flush_dcache_range(paddr + offset, paddr + offset + size);
 -              break;
 -      case DMA_FROM_DEVICE:
 -              invalidate_dcache_range(paddr + offset, paddr + offset + size);
 -              break;
 -      default:
 -              BUG();
 -      }
 -}
  
  static unsigned long get_dma_direct_offset(struct device *dev)
  {
@@@ -75,7 -92,7 +76,7 @@@ static int dma_direct_map_sg(struct dev
        /* FIXME this part of code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
 -              __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
 +              __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
                                                        sg->length, direction);
        }
  
@@@ -100,7 -117,7 +101,7 @@@ static inline dma_addr_t dma_direct_map
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
  {
 -      __dma_sync_page(page_to_phys(page), offset, size, direction);
 +      __dma_sync(page_to_phys(page) + offset, size, direction);
        return page_to_phys(page) + offset + get_dma_direct_offset(dev);
  }
  
@@@ -115,63 -132,7 +116,63 @@@ static inline void dma_direct_unmap_pag
   * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
   * dma_address is physical address
   */
 -      __dma_sync_page(dma_address, 0 , size, direction);
 +      __dma_sync(dma_address, size, direction);
 +}
 +
 +static inline void
 +dma_direct_sync_single_for_cpu(struct device *dev,
 +                             dma_addr_t dma_handle, size_t size,
 +                             enum dma_data_direction direction)
 +{
 +      /*
 +       * It's pointless to flush the cache as the memory segment
 +       * is given to the CPU
 +       */
 +
 +      if (direction == DMA_FROM_DEVICE)
 +              __dma_sync(dma_handle, size, direction);
 +}
 +
 +static inline void
 +dma_direct_sync_single_for_device(struct device *dev,
 +                                dma_addr_t dma_handle, size_t size,
 +                                enum dma_data_direction direction)
 +{
 +      /*
 +       * It's pointless to invalidate the cache if the device isn't
 +       * supposed to write to the relevant region
 +       */
 +
 +      if (direction == DMA_TO_DEVICE)
 +              __dma_sync(dma_handle, size, direction);
 +}
 +
 +static inline void
 +dma_direct_sync_sg_for_cpu(struct device *dev,
 +                         struct scatterlist *sgl, int nents,
 +                         enum dma_data_direction direction)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      /* FIXME this part of code is untested */
 +      if (direction == DMA_FROM_DEVICE)
 +              for_each_sg(sgl, sg, nents, i)
 +                      __dma_sync(sg->dma_address, sg->length, direction);
 +}
 +
 +static inline void
 +dma_direct_sync_sg_for_device(struct device *dev,
 +                            struct scatterlist *sgl, int nents,
 +                            enum dma_data_direction direction)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      /* FIXME this part of code is untested */
 +      if (direction == DMA_TO_DEVICE)
 +              for_each_sg(sgl, sg, nents, i)
 +                      __dma_sync(sg->dma_address, sg->length, direction);
  }
  
  struct dma_map_ops dma_direct_ops = {
        .dma_supported  = dma_direct_dma_supported,
        .map_page       = dma_direct_map_page,
        .unmap_page     = dma_direct_unmap_page,
 +      .sync_single_for_cpu            = dma_direct_sync_single_for_cpu,
 +      .sync_single_for_device         = dma_direct_sync_single_for_device,
 +      .sync_sg_for_cpu                = dma_direct_sync_sg_for_cpu,
 +      .sync_sg_for_device             = dma_direct_sync_sg_for_device,
  };
  EXPORT_SYMBOL(dma_direct_ops);
  
@@@ -16,7 -16,7 +16,7 @@@
  #include <linux/ptrace.h>
  #include <linux/smp.h>
  #include <linux/stddef.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  
  #include <asm/bugs.h>
  #include <asm/cpu.h>
@@@ -24,6 -24,7 +24,7 @@@
  #include <asm/mipsregs.h>
  #include <asm/system.h>
  #include <asm/watch.h>
+ #include <asm/elf.h>
  #include <asm/spram.h>
  #include <asm/uaccess.h>
  
@@@ -978,10 -979,7 +979,10 @@@ static inline void cpu_probe_cavium(str
  platform:
                set_elf_platform(cpu, "octeon");
                break;
 +      case PRID_IMP_CAVIUM_CN61XX:
        case PRID_IMP_CAVIUM_CN63XX:
 +      case PRID_IMP_CAVIUM_CN66XX:
 +      case PRID_IMP_CAVIUM_CN68XX:
                c->cputype = CPU_CAVIUM_OCTEON2;
                __cpu_name[cpu] = "Cavium Octeon II";
                set_elf_platform(cpu, "octeon2");
@@@ -12,6 -12,7 +12,7 @@@
  #include <linux/seq_file.h>
  #include <linux/init.h>
  #include <linux/dma-mapping.h>
+ #include <linux/export.h>
  
  #include <asm/setup.h>
  
@@@ -85,9 -86,8 +86,9 @@@ struct machdep_calls 
        void            (*pci_dma_dev_setup)(struct pci_dev *dev);
        void            (*pci_dma_bus_setup)(struct pci_bus *bus);
  
 -      /* Platform set_dma_mask override */
 +      /* Platform set_dma_mask and dma_get_required_mask overrides */
        int             (*dma_set_mask)(struct device *dev, u64 dma_mask);
 +      u64             (*dma_get_required_mask)(struct device *dev);
  
        int             (*probe)(void);
        void            (*setup_arch)(void); /* Optional, may be NULL */
@@@ -5,6 -5,7 +5,7 @@@
   * busses using the iommu infrastructure
   */
  
+ #include <linux/export.h>
  #include <asm/iommu.h>
  
  /*
@@@ -90,27 -91,13 +91,27 @@@ static int dma_iommu_dma_supported(stru
                return 1;
  }
  
 +static u64 dma_iommu_get_required_mask(struct device *dev)
 +{
 +      struct iommu_table *tbl = get_iommu_table_base(dev);
 +      u64 mask;
 +      if (!tbl)
 +              return 0;
 +
 +      mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
 +      mask += mask - 1;
 +
 +      return mask;
 +}
 +
  struct dma_map_ops dma_iommu_ops = {
 -      .alloc_coherent = dma_iommu_alloc_coherent,
 -      .free_coherent  = dma_iommu_free_coherent,
 -      .map_sg         = dma_iommu_map_sg,
 -      .unmap_sg       = dma_iommu_unmap_sg,
 -      .dma_supported  = dma_iommu_dma_supported,
 -      .map_page       = dma_iommu_map_page,
 -      .unmap_page     = dma_iommu_unmap_page,
 +      .alloc_coherent         = dma_iommu_alloc_coherent,
 +      .free_coherent          = dma_iommu_free_coherent,
 +      .map_sg                 = dma_iommu_map_sg,
 +      .unmap_sg               = dma_iommu_unmap_sg,
 +      .dma_supported          = dma_iommu_dma_supported,
 +      .map_page               = dma_iommu_map_page,
 +      .unmap_page             = dma_iommu_unmap_page,
 +      .get_required_mask      = dma_iommu_get_required_mask,
  };
  EXPORT_SYMBOL(dma_iommu_ops);
@@@ -10,6 -10,7 +10,7 @@@
  #include <linux/dma-debug.h>
  #include <linux/gfp.h>
  #include <linux/memblock.h>
+ #include <linux/export.h>
  #include <asm/bug.h>
  #include <asm/abs_addr.h>
  #include <asm/machdep.h>
@@@ -96,18 -97,6 +97,18 @@@ static int dma_direct_dma_supported(str
  #endif
  }
  
 +static u64 dma_direct_get_required_mask(struct device *dev)
 +{
 +      u64 end, mask;
 +
 +      end = memblock_end_of_DRAM() + get_dma_offset(dev);
 +
 +      mask = 1ULL << (fls64(end) - 1);
 +      mask += mask - 1;
 +
 +      return mask;
 +}
 +
  static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
@@@ -149,14 -138,13 +150,14 @@@ static inline void dma_direct_sync_sing
  #endif
  
  struct dma_map_ops dma_direct_ops = {
 -      .alloc_coherent = dma_direct_alloc_coherent,
 -      .free_coherent  = dma_direct_free_coherent,
 -      .map_sg         = dma_direct_map_sg,
 -      .unmap_sg       = dma_direct_unmap_sg,
 -      .dma_supported  = dma_direct_dma_supported,
 -      .map_page       = dma_direct_map_page,
 -      .unmap_page     = dma_direct_unmap_page,
 +      .alloc_coherent                 = dma_direct_alloc_coherent,
 +      .free_coherent                  = dma_direct_free_coherent,
 +      .map_sg                         = dma_direct_map_sg,
 +      .unmap_sg                       = dma_direct_unmap_sg,
 +      .dma_supported                  = dma_direct_dma_supported,
 +      .map_page                       = dma_direct_map_page,
 +      .unmap_page                     = dma_direct_unmap_page,
 +      .get_required_mask              = dma_direct_get_required_mask,
  #ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu            = dma_direct_sync_single,
        .sync_single_for_device         = dma_direct_sync_single,
@@@ -183,23 -171,6 +184,23 @@@ int dma_set_mask(struct device *dev, u6
  }
  EXPORT_SYMBOL(dma_set_mask);
  
 +u64 dma_get_required_mask(struct device *dev)
 +{
 +      struct dma_map_ops *dma_ops = get_dma_ops(dev);
 +
 +      if (ppc_md.dma_get_required_mask)
 +              return ppc_md.dma_get_required_mask(dev);
 +
 +      if (unlikely(dma_ops == NULL))
 +              return 0;
 +
 +      if (dma_ops->get_required_mask)
 +              return dma_ops->get_required_mask(dev);
 +
 +      return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
 +}
 +EXPORT_SYMBOL_GPL(dma_get_required_mask);
 +
  static int __init dma_init(void)
  {
         dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
   */
  
  #include <linux/init.h>
+ #include <linux/export.h>
  #include <linux/console.h>
  #include <linux/kobject.h>
  #include <linux/dma-mapping.h>
  #include <linux/interrupt.h>
  #include <linux/of.h>
  #include <linux/slab.h>
+ #include <linux/stat.h>
  #include <linux/of_platform.h>
  #include <asm/ibmebus.h>
  #include <asm/abs_addr.h>
@@@ -125,23 -127,17 +127,23 @@@ static void ibmebus_unmap_sg(struct dev
  
  static int ibmebus_dma_supported(struct device *dev, u64 mask)
  {
 -      return 1;
 +      return mask == DMA_BIT_MASK(64);
 +}
 +
 +static u64 ibmebus_dma_get_required_mask(struct device *dev)
 +{
 +      return DMA_BIT_MASK(64);
  }
  
  static struct dma_map_ops ibmebus_dma_ops = {
 -      .alloc_coherent = ibmebus_alloc_coherent,
 -      .free_coherent  = ibmebus_free_coherent,
 -      .map_sg         = ibmebus_map_sg,
 -      .unmap_sg       = ibmebus_unmap_sg,
 -      .dma_supported  = ibmebus_dma_supported,
 -      .map_page       = ibmebus_map_page,
 -      .unmap_page     = ibmebus_unmap_page,
 +      .alloc_coherent     = ibmebus_alloc_coherent,
 +      .free_coherent      = ibmebus_free_coherent,
 +      .map_sg             = ibmebus_map_sg,
 +      .unmap_sg           = ibmebus_unmap_sg,
 +      .dma_supported      = ibmebus_dma_supported,
 +      .get_required_mask  = ibmebus_dma_get_required_mask,
 +      .map_page           = ibmebus_map_page,
 +      .unmap_page         = ibmebus_unmap_page,
  };
  
  static int ibmebus_match_path(struct device *dev, void *data)
@@@ -21,6 -21,7 +21,7 @@@
  #include <linux/string.h>
  #include <linux/init.h>
  #include <linux/bootmem.h>
+ #include <linux/export.h>
  #include <linux/of_address.h>
  #include <linux/of_pci.h>
  #include <linux/mm.h>
@@@ -1730,17 -1731,6 +1731,17 @@@ void __devinit pcibios_scan_phb(struct 
  
        if (mode == PCI_PROBE_NORMAL)
                hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
 +
 +      /* Configure PCI Express settings */
 +      if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
 +              struct pci_bus *child;
 +              list_for_each_entry(child, &bus->children, node) {
 +                      struct pci_dev *self = child->self;
 +                      if (!self)
 +                              continue;
 +                      pcie_bus_configure_settings(child, self->pcie_mpss);
 +              }
 +      }
  }
  
  static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
@@@ -27,7 -27,7 +27,7 @@@
  #include <linux/delay.h>
  #include <linux/initrd.h>
  #include <linux/bitops.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/kexec.h>
  #include <linux/debugfs.h>
  #include <linux/irq.h>
@@@ -54,8 -54,6 +54,8 @@@
  #include <asm/pci-bridge.h>
  #include <asm/phyp_dump.h>
  #include <asm/kexec.h>
 +#include <asm/opal.h>
 +
  #include <mm/mmu_decl.h>
  
  #ifdef DEBUG
@@@ -709,23 -707,11 +709,23 @@@ void __init early_init_devtree(void *pa
        of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
  #endif
  
 +#ifdef CONFIG_PPC_POWERNV
 +      /* Some machines might need OPAL info for debugging, grab it now. */
 +      of_scan_flat_dt(early_init_dt_scan_opal, NULL);
 +#endif
 +
  #ifdef CONFIG_PHYP_DUMP
        /* scan tree to see if dump occurred during last boot */
        of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
  #endif
  
 +      /* Pre-initialize the cmd_line with the content of boot_commmand_line,
 +       * which will be empty except when the content of the variable has
 +       * been overriden by a bootloading mechanism. This happens typically
 +       * with HAL takeover
 +       */
 +      strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
 +
        /* Retrieve various informations from the /chosen node of the
         * device-tree, including the platform type, initrd location and
         * size, TCE reserve, and more ...
  
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 -      setup_initial_memory_limit(memstart_addr, first_memblock_size);
  
        /* Save command line for /proc/cmdline and then parse parameters */
        strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
        parse_early_param();
  
 +      /* make sure we've parsed cmdline for mem= before this */
 +      if (memory_limit)
 +              first_memblock_size = min(first_memblock_size, memory_limit);
 +      setup_initial_memory_limit(memstart_addr, first_memblock_size);
        /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
        memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
        /* If relocatable, reserve first 32k for interrupt vectors etc. */
@@@ -30,9 -30,6 +30,6 @@@
  #include <linux/seccomp.h>
  #include <linux/audit.h>
  #include <trace/syscall.h>
- #ifdef CONFIG_PPC32
- #include <linux/module.h>
- #endif
  #include <linux/hw_breakpoint.h>
  #include <linux/perf_event.h>
  
@@@ -1497,14 -1494,9 +1494,14 @@@ long arch_ptrace(struct task_struct *ch
                if (index < PT_FPR0) {
                        tmp = ptrace_get_reg(child, (int) index);
                } else {
 +                      unsigned int fpidx = index - PT_FPR0;
 +
                        flush_fp_to_thread(child);
 -                      tmp = ((unsigned long *)child->thread.fpr)
 -                              [TS_FPRWIDTH * (index - PT_FPR0)];
 +                      if (fpidx < (PT_FPSCR - PT_FPR0))
 +                              tmp = ((unsigned long *)child->thread.fpr)
 +                                      [fpidx * TS_FPRWIDTH];
 +                      else
 +                              tmp = child->thread.fpscr.val;
                }
                ret = put_user(tmp, datalp);
                break;
                if (index < PT_FPR0) {
                        ret = ptrace_put_reg(child, index, data);
                } else {
 +                      unsigned int fpidx = index - PT_FPR0;
 +
                        flush_fp_to_thread(child);
 -                      ((unsigned long *)child->thread.fpr)
 -                              [TS_FPRWIDTH * (index - PT_FPR0)] = data;
 +                      if (fpidx < (PT_FPSCR - PT_FPR0))
 +                              ((unsigned long *)child->thread.fpr)
 +                                      [fpidx * TS_FPRWIDTH] = data;
 +                      else
 +                              child->thread.fpscr.val = data;
                        ret = 0;
                }
                break;
@@@ -12,7 -12,7 +12,7 @@@
  
  #undef DEBUG
  
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/string.h>
  #include <linux/sched.h>
  #include <linux/init.h>
@@@ -278,14 -278,14 +278,14 @@@ static void __init initialize_cache_inf
  
        DBG(" -> initialize_cache_info()\n");
  
 -      for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
 +      for_each_node_by_type(np, "cpu") {
                num_cpus += 1;
  
 -              /* We're assuming *all* of the CPUs have the same
 +              /*
 +               * We're assuming *all* of the CPUs have the same
                 * d-cache and i-cache sizes... -Peter
                 */
 -
 -              if ( num_cpus == 1 ) {
 +              if (num_cpus == 1) {
                        const u32 *sizep, *lsizep;
                        u32 size, lsize;
  
                        sizep = of_get_property(np, "d-cache-size", NULL);
                        if (sizep != NULL)
                                size = *sizep;
 -                      lsizep = of_get_property(np, "d-cache-block-size", NULL);
 +                      lsizep = of_get_property(np, "d-cache-block-size",
 +                                               NULL);
                        /* fallback if block size missing */
                        if (lsizep == NULL)
 -                              lsizep = of_get_property(np, "d-cache-line-size", NULL);
 +                              lsizep = of_get_property(np,
 +                                                       "d-cache-line-size",
 +                                                       NULL);
                        if (lsizep != NULL)
                                lsize = *lsizep;
                        if (sizep == 0 || lsizep == 0)
                        sizep = of_get_property(np, "i-cache-size", NULL);
                        if (sizep != NULL)
                                size = *sizep;
 -                      lsizep = of_get_property(np, "i-cache-block-size", NULL);
 +                      lsizep = of_get_property(np, "i-cache-block-size",
 +                                               NULL);
                        if (lsizep == NULL)
 -                              lsizep = of_get_property(np, "i-cache-line-size", NULL);
 +                              lsizep = of_get_property(np,
 +                                                       "i-cache-line-size",
 +                                                       NULL);
                        if (lsizep != NULL)
                                lsize = *lsizep;
                        if (sizep == 0 || lsizep == 0)
@@@ -18,7 -18,7 +18,7 @@@
  #undef DEBUG
  
  #include <linux/kernel.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/sched.h>
  #include <linux/smp.h>
  #include <linux/interrupt.h>
  static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
  #define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
  #define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
 +
 +/* State of each CPU during hotplug phases */
 +static DEFINE_PER_CPU(int, cpu_state) = { 0 };
 +
  #else
  static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
  #define get_idle_for_cpu(x)      (idle_thread_array[(x)])
@@@ -108,25 -104,12 +108,25 @@@ int __devinit smp_generic_kick_cpu(int 
         * cpu_start field to become non-zero After we set cpu_start,
         * the processor will continue on to secondary_start
         */
 -      paca[nr].cpu_start = 1;
 -      smp_mb();
 +      if (!paca[nr].cpu_start) {
 +              paca[nr].cpu_start = 1;
 +              smp_mb();
 +              return 0;
 +      }
 +
 +#ifdef CONFIG_HOTPLUG_CPU
 +      /*
 +       * Ok it's not there, so it might be soft-unplugged, let's
 +       * try to bring it back
 +       */
 +      per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
 +      smp_wmb();
 +      smp_send_reschedule(nr);
 +#endif /* CONFIG_HOTPLUG_CPU */
  
        return 0;
  }
 -#endif
 +#endif /* CONFIG_PPC64 */
  
  static irqreturn_t call_function_action(int irq, void *data)
  {
@@@ -374,6 -357,8 +374,6 @@@ void __devinit smp_prepare_boot_cpu(voi
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
 -/* State of each CPU during hotplug phases */
 -static DEFINE_PER_CPU(int, cpu_state) = { 0 };
  
  int generic_cpu_disable(void)
  {
@@@ -421,11 -406,6 +421,11 @@@ void generic_set_cpu_dead(unsigned int 
  {
        per_cpu(cpu_state, cpu) = CPU_DEAD;
  }
 +
 +int generic_check_cpu_restart(unsigned int cpu)
 +{
 +      return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
 +}
  #endif
  
  struct create_idle {
   */
  
  #include <linux/types.h>
+ #include <linux/stat.h>
  #include <linux/device.h>
  #include <linux/init.h>
  #include <linux/slab.h>
  #include <linux/console.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/mm.h>
  #include <linux/dma-mapping.h>
  #include <linux/kobject.h>
@@@ -605,20 -606,15 +606,20 @@@ static int vio_dma_iommu_dma_supported(
          return dma_iommu_ops.dma_supported(dev, mask);
  }
  
 -struct dma_map_ops vio_dma_mapping_ops = {
 -      .alloc_coherent = vio_dma_iommu_alloc_coherent,
 -      .free_coherent  = vio_dma_iommu_free_coherent,
 -      .map_sg         = vio_dma_iommu_map_sg,
 -      .unmap_sg       = vio_dma_iommu_unmap_sg,
 -      .map_page       = vio_dma_iommu_map_page,
 -      .unmap_page     = vio_dma_iommu_unmap_page,
 -      .dma_supported  = vio_dma_iommu_dma_supported,
 +static u64 vio_dma_get_required_mask(struct device *dev)
 +{
 +        return dma_iommu_ops.get_required_mask(dev);
 +}
  
 +struct dma_map_ops vio_dma_mapping_ops = {
 +      .alloc_coherent    = vio_dma_iommu_alloc_coherent,
 +      .free_coherent     = vio_dma_iommu_free_coherent,
 +      .map_sg            = vio_dma_iommu_map_sg,
 +      .unmap_sg          = vio_dma_iommu_unmap_sg,
 +      .map_page          = vio_dma_iommu_map_page,
 +      .unmap_page        = vio_dma_iommu_unmap_page,
 +      .dma_supported     = vio_dma_iommu_dma_supported,
 +      .get_required_mask = vio_dma_get_required_mask,
  };
  
  /**
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/proc_fs.h>
  #include <linux/stat.h>
  #include <linux/sysctl.h>
+ #include <linux/export.h>
  #include <linux/ctype.h>
  #include <linux/cache.h>
  #include <linux/init.h>
@@@ -105,6 -106,9 +106,6 @@@ int mmu_kernel_ssize = MMU_SEGSIZE_256M
  int mmu_highuser_ssize = MMU_SEGSIZE_256M;
  u16 mmu_slb_size = 64;
  EXPORT_SYMBOL_GPL(mmu_slb_size);
 -#ifdef CONFIG_HUGETLB_PAGE
 -unsigned int HPAGE_SHIFT;
 -#endif
  #ifdef CONFIG_PPC_64K_PAGES
  int mmu_ci_restrictions;
  #endif
@@@ -531,11 -535,11 +532,11 @@@ static unsigned long __init htab_get_ta
  }
  
  #ifdef CONFIG_MEMORY_HOTPLUG
 -void create_section_mapping(unsigned long start, unsigned long end)
 +int create_section_mapping(unsigned long start, unsigned long end)
  {
 -      BUG_ON(htab_bolt_mapping(start, end, __pa(start),
 +      return htab_bolt_mapping(start, end, __pa(start),
                                 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
 -                               mmu_kernel_ssize));
 +                               mmu_kernel_ssize);
  }
  
  int remove_section_mapping(unsigned long start, unsigned long end)
diff --combined arch/powerpc/mm/mem.c
@@@ -17,7 -17,7 +17,7 @@@
   *
   */
  
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/sched.h>
  #include <linux/kernel.h>
  #include <linux/errno.h>
@@@ -123,8 -123,7 +123,8 @@@ int arch_add_memory(int nid, u64 start
        pgdata = NODE_DATA(nid);
  
        start = (unsigned long)__va(start);
 -      create_section_mapping(start, start + size);
 +      if (create_section_mapping(start, start + size))
 +              return -EINVAL;
  
        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;
@@@ -549,9 -548,4 +549,9 @@@ void update_mmu_cache(struct vm_area_st
                return;
        hash_preload(vma->vm_mm, address, access, trap);
  #endif /* CONFIG_PPC_STD_MMU */
 +#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
 +      && defined(CONFIG_HUGETLB_PAGE)
 +      if (is_vm_hugetlb_page(vma))
 +              book3e_hugetlb_preload(vma->vm_mm, address, *ptep);
 +#endif
  }
@@@ -18,7 -18,7 +18,7 @@@
  #include <linux/mm.h>
  #include <linux/spinlock.h>
  #include <linux/idr.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/gfp.h>
  #include <linux/slab.h>
  
@@@ -136,8 -136,8 +136,8 @@@ int use_cop(unsigned long acop, struct 
        if (!mm || !acop)
                return -EINVAL;
  
 -      /* We need to make sure mm_users doesn't change */
 -      down_read(&mm->mmap_sem);
 +      /* The page_table_lock ensures mm_users won't change under us */
 +      spin_lock(&mm->page_table_lock);
        spin_lock(mm->context.cop_lockp);
  
        if (mm->context.cop_pid == COP_PID_NONE) {
  
  out:
        spin_unlock(mm->context.cop_lockp);
 -      up_read(&mm->mmap_sem);
 +      spin_unlock(&mm->page_table_lock);
  
        return ret;
  }
@@@ -185,8 -185,8 +185,8 @@@ void drop_cop(unsigned long acop, struc
        if (WARN_ON_ONCE(!mm))
                return;
  
 -      /* We need to make sure mm_users doesn't change */
 -      down_read(&mm->mmap_sem);
 +      /* The page_table_lock ensures mm_users won't change under us */
 +      spin_lock(&mm->page_table_lock);
        spin_lock(mm->context.cop_lockp);
  
        mm->context.acop &= ~acop;
        }
  
        spin_unlock(mm->context.cop_lockp);
 -      up_read(&mm->mmap_sem);
 +      spin_unlock(&mm->page_table_lock);
  }
  EXPORT_SYMBOL_GPL(drop_cop);
  
diff --combined arch/powerpc/mm/numa.c
@@@ -13,7 -13,7 +13,7 @@@
  #include <linux/init.h>
  #include <linux/mm.h>
  #include <linux/mmzone.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/nodemask.h>
  #include <linux/cpu.h>
  #include <linux/notifier.h>
@@@ -709,7 -709,8 +709,7 @@@ static void __init parse_drconf_memory(
  
  static int __init parse_numa_properties(void)
  {
 -      struct device_node *cpu = NULL;
 -      struct device_node *memory = NULL;
 +      struct device_node *memory;
        int default_nid = 0;
        unsigned long i;
  
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
 +              struct device_node *cpu;
                int nid;
  
                cpu = of_get_cpu_node(i, NULL);
        }
  
        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
 -      memory = NULL;
 -      while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
 +
 +      for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
@@@ -800,9 -800,8 +800,9 @@@ new_range
        }
  
        /*
 -       * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
 -       * property in the ibm,dynamic-reconfiguration-memory node.
 +       * Now do the same thing for each MEMBLOCK listed in the
 +       * ibm,dynamic-memory property in the
 +       * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory)
@@@ -1188,10 -1187,10 +1188,10 @@@ static int hot_add_drconf_scn_to_nid(st
   */
  int hot_add_node_scn_to_nid(unsigned long scn_addr)
  {
 -      struct device_node *memory = NULL;
 +      struct device_node *memory;
        int nid = -1;
  
 -      while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
 +      for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const unsigned int *memcell_buf;
                        break;
                }
  
 -              of_node_put(memory);
                if (nid >= 0)
                        break;
        }
  
 +      of_node_put(memory);
 +
        return nid;
  }
  
@@@ -28,6 -28,7 +28,7 @@@
   */
  
  #include <linux/kernel.h>
+ #include <linux/export.h>
  #include <linux/mm.h>
  #include <linux/init.h>
  #include <linux/highmem.h>
  #include <linux/spinlock.h>
  #include <linux/memblock.h>
  #include <linux/of_fdt.h>
 +#include <linux/hugetlb.h>
  
  #include <asm/tlbflush.h>
  #include <asm/tlb.h>
  #include <asm/code-patching.h>
 +#include <asm/hugetlb.h>
  
  #include "mmu_decl.h"
  
 -#ifdef CONFIG_PPC_BOOK3E
 +/*
 + * This struct lists the sw-supported page sizes.  The hardawre MMU may support
 + * other sizes not listed here.   The .ind field is only used on MMUs that have
 + * indirect page table entries.
 + */
 +#ifdef CONFIG_PPC_BOOK3E_MMU
 +#ifdef CONFIG_FSL_BOOKE
 +struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 +      [MMU_PAGE_4K] = {
 +              .shift  = 12,
 +              .enc    = BOOK3E_PAGESZ_4K,
 +      },
 +      [MMU_PAGE_4M] = {
 +              .shift  = 22,
 +              .enc    = BOOK3E_PAGESZ_4M,
 +      },
 +      [MMU_PAGE_16M] = {
 +              .shift  = 24,
 +              .enc    = BOOK3E_PAGESZ_16M,
 +      },
 +      [MMU_PAGE_64M] = {
 +              .shift  = 26,
 +              .enc    = BOOK3E_PAGESZ_64M,
 +      },
 +      [MMU_PAGE_256M] = {
 +              .shift  = 28,
 +              .enc    = BOOK3E_PAGESZ_256M,
 +      },
 +      [MMU_PAGE_1G] = {
 +              .shift  = 30,
 +              .enc    = BOOK3E_PAGESZ_1GB,
 +      },
 +};
 +#else
  struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .enc    = BOOK3E_PAGESZ_1GB,
        },
  };
 +#endif /* CONFIG_FSL_BOOKE */
 +
  static inline int mmu_get_tsize(int psize)
  {
        return mmu_psize_defs[psize].enc;
@@@ -124,7 -88,7 +125,7 @@@ static inline int mmu_get_tsize(int psi
        /* This isn't used on !Book3E for now */
        return 0;
  }
 -#endif
 +#endif /* CONFIG_PPC_BOOK3E_MMU */
  
  /* The variables below are currently only used on 64-bit Book3E
   * though this will probably be made common with other nohash
@@@ -303,11 -267,6 +304,11 @@@ void __flush_tlb_page(struct mm_struct 
  
  void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
  {
 +#ifdef CONFIG_HUGETLB_PAGE
 +      if (is_vm_hugetlb_page(vma))
 +              flush_hugetlb_page(vma, vmaddr);
 +#endif
 +
        __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
                         mmu_get_tsize(mmu_virtual_psize), 0);
  }
@@@ -642,28 -601,13 +643,28 @@@ void __cpuinit early_init_mmu_secondary
  void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
  {
 -      /* On Embedded 64-bit, we adjust the RMA size to match
 +      /* On non-FSL Embedded 64-bit, we adjust the RMA size to match
         * the bolted TLB entry. We know for now that only 1G
         * entries are supported though that may eventually
 -       * change. We crop it to the size of the first MEMBLOCK to
 +       * change.
 +       *
 +       * on FSL Embedded 64-bit, we adjust the RMA size to match the
 +       * first bolted TLB entry size.  We still limit max to 1G even if
 +       * the TLB could cover more.  This is due to what the early init
 +       * code is setup to do.
 +       *
 +       * We crop it to the size of the first MEMBLOCK to
         * avoid going over total available memory just in case...
         */
 -      ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
 +#ifdef CONFIG_PPC_FSL_BOOK3E
 +      if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 +              unsigned long linear_sz;
 +              linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
 +                                      first_memblock_base);
 +              ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
 +      } else
 +#endif
 +              ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
  
        /* Finally limit subsequent allocations */
        memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
@@@ -20,7 -20,7 +20,7 @@@
  
  #include <linux/kernel.h>
  #include <linux/init.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/dma-mapping.h>
  #include <linux/err.h>
  #include <linux/slab.h>
@@@ -695,18 -695,12 +695,18 @@@ static int ps3_dma_supported(struct dev
        return mask >= DMA_BIT_MASK(32);
  }
  
 +static u64 ps3_dma_get_required_mask(struct device *_dev)
 +{
 +      return DMA_BIT_MASK(32);
 +}
 +
  static struct dma_map_ops ps3_sb_dma_ops = {
        .alloc_coherent = ps3_alloc_coherent,
        .free_coherent = ps3_free_coherent,
        .map_sg = ps3_sb_map_sg,
        .unmap_sg = ps3_sb_unmap_sg,
        .dma_supported = ps3_dma_supported,
 +      .get_required_mask = ps3_dma_get_required_mask,
        .map_page = ps3_sb_map_page,
        .unmap_page = ps3_unmap_page,
  };
@@@ -717,7 -711,6 +717,7 @@@ static struct dma_map_ops ps3_ioc0_dma_
        .map_sg = ps3_ioc0_map_sg,
        .unmap_sg = ps3_ioc0_unmap_sg,
        .dma_supported = ps3_dma_supported,
 +      .get_required_mask = ps3_dma_get_required_mask,
        .map_page = ps3_ioc0_map_page,
        .unmap_page = ps3_unmap_page,
  };
@@@ -22,6 -22,7 +22,7 @@@
   */
  
  #include <linux/delay.h>
+ #include <linux/sched.h>      /* for init_mm */
  #include <linux/init.h>
  #include <linux/list.h>
  #include <linux/pci.h>
@@@ -29,6 -30,7 +30,7 @@@
  #include <linux/rbtree.h>
  #include <linux/seq_file.h>
  #include <linux/spinlock.h>
+ #include <linux/export.h>
  #include <linux/of.h>
  
  #include <linux/atomic.h>
@@@ -1338,7 -1340,7 +1340,7 @@@ static const struct file_operations pro
  static int __init eeh_init_proc(void)
  {
        if (machine_is(pseries))
 -              proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations);
 +              proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
        return 0;
  }
  __initcall(eeh_init_proc);
@@@ -29,6 -29,7 +29,7 @@@
  #include <linux/slab.h>
  #include <linux/mm.h>
  #include <linux/spinlock.h>
+ #include <linux/sched.h>      /* for show_stack */
  #include <linux/string.h>
  #include <linux/pci.h>
  #include <linux/dma-mapping.h>
@@@ -939,14 -940,14 +940,14 @@@ static u64 enable_ddw(struct pci_dev *d
        if (ret) {
                dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
                         dn->full_name, ret);
 -              goto out_clear_window;
 +              goto out_free_window;
        }
  
        ret = prom_add_property(pdn, win64);
        if (ret) {
                dev_err(&dev->dev, "unable to add dma window property for %s: %d",
                         pdn->full_name, ret);
 -              goto out_clear_window;
 +              goto out_free_window;
        }
  
        window->device = pdn;
        dma_addr = of_read_number(&create.addr_hi, 2);
        goto out_unlock;
  
 +out_free_window:
 +      kfree(window);
 +
  out_clear_window:
        remove_ddw(pdn);
  
@@@ -1080,38 -1078,12 +1081,38 @@@ check_mask
        return 0;
  }
  
 +static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
 +{
 +      if (!dev->dma_mask)
 +              return 0;
 +
 +      if (!disable_ddw && dev_is_pci(dev)) {
 +              struct pci_dev *pdev = to_pci_dev(dev);
 +              struct device_node *dn;
 +
 +              dn = pci_device_to_OF_node(pdev);
 +
 +              /* search upwards for ibm,dma-window */
 +              for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
 +                              dn = dn->parent)
 +                      if (of_get_property(dn, "ibm,dma-window", NULL))
 +                              break;
 +              /* if there is a ibm,ddw-applicable property require 64 bits */
 +              if (dn && PCI_DN(dn) &&
 +                              of_get_property(dn, "ibm,ddw-applicable", NULL))
 +                      return DMA_BIT_MASK(64);
 +      }
 +
 +      return dma_iommu_ops.get_required_mask(dev);
 +}
 +
  #else  /* CONFIG_PCI */
  #define pci_dma_bus_setup_pSeries     NULL
  #define pci_dma_dev_setup_pSeries     NULL
  #define pci_dma_bus_setup_pSeriesLP   NULL
  #define pci_dma_dev_setup_pSeriesLP   NULL
  #define dma_set_mask_pSeriesLP                NULL
 +#define dma_get_required_mask_pSeriesLP       NULL
  #endif /* !CONFIG_PCI */
  
  static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
@@@ -1215,7 -1187,6 +1216,7 @@@ void iommu_init_early_pSeries(void
                ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
                ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
 +              ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
        } else {
                ppc_md.tce_build = tce_build_pSeries;
                ppc_md.tce_free  = tce_free_pSeries;
@@@ -3,7 -3,7 +3,7 @@@
   *
   * Author: Scott Wood <scottwood@freescale.com>
   *
 - * Copyright 2007 Freescale Semiconductor, Inc.
 + * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
   *
   * Some parts derived from commproc.c/cpm2_common.c, which is:
   * Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
@@@ -20,6 -20,7 +20,7 @@@
  #include <linux/init.h>
  #include <linux/of_device.h>
  #include <linux/spinlock.h>
+ #include <linux/export.h>
  #include <linux/of.h>
  #include <linux/slab.h>
  
@@@ -146,7 -147,6 +147,7 @@@ unsigned long cpm_muram_alloc(unsigned 
        spin_lock_irqsave(&cpm_muram_lock, flags);
        cpm_muram_info.alignment = align;
        start = rh_alloc(&cpm_muram_info, size, "commproc");
 +      memset(cpm_muram_addr(start), 0, size);
        spin_unlock_irqrestore(&cpm_muram_lock, flags);
  
        return start;
@@@ -23,7 -23,7 +23,7 @@@
   */
  
  #include <linux/init.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/types.h>
  #include <linux/dma-mapping.h>
  #include <linux/interrupt.h>
@@@ -1608,7 -1608,6 +1608,7 @@@ int fsl_rio_setup(struct platform_devic
        return 0;
  err:
        iounmap(priv->regs_win);
 +      release_resource(&port->iores);
  err_res:
        kfree(priv);
  err_priv:
diff --combined arch/powerpc/xmon/xmon.c
@@@ -18,7 -18,7 +18,7 @@@
  #include <linux/delay.h>
  #include <linux/kallsyms.h>
  #include <linux/cpumask.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/sysrq.h>
  #include <linux/interrupt.h>
  #include <linux/irq.h>
@@@ -340,8 -340,8 +340,8 @@@ int cpus_are_in_xmon(void
  
  static inline int unrecoverable_excp(struct pt_regs *regs)
  {
 -#ifdef CONFIG_4xx
 -      /* We have no MSR_RI bit on 4xx, so we simply return false */
 +#if defined(CONFIG_4xx) || defined(CONFIG_BOOK3E)
 +      /* We have no MSR_RI bit on 4xx or Book3e, so we simply return false */
        return 0;
  #else
        return ((regs->msr & MSR_RI) == 0);
  #include <linux/cpumask.h>
  #include <linux/init.h>
  #include <linux/percpu.h>
 +#include <linux/topology.h>
  #include <linux/node.h>
  #include <linux/nodemask.h>
+ #include <linux/export.h>
  
  static DEFINE_PER_CPU(struct cpu, cpu_devices);
  
diff --combined arch/sh/mm/init.c
@@@ -18,6 -18,7 +18,7 @@@
  #include <linux/io.h>
  #include <linux/memblock.h>
  #include <linux/dma-mapping.h>
+ #include <linux/export.h>
  #include <asm/mmu_context.h>
  #include <asm/mmzone.h>
  #include <asm/kexec.h>
@@@ -287,8 -288,6 +288,8 @@@ static void __init do_init_bootmem(void
  static void __init early_reserve_mem(void)
  {
        unsigned long start_pfn;
 +      u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
 +      u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
  
        /*
         * Partially used pages are not usable - thus
         * this catches the (definitely buggy) case of us accidentally
         * initializing the bootmem allocator with an invalid RAM area.
         */
 -      memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
 -                  (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
 -                  (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
 +      memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
  
        /*
         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
         */
        if (CONFIG_ZERO_PAGE_OFFSET != 0)
 -              memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
 +              memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
  
        /*
         * Handle additional early reservations
@@@ -3,8 -3,8 +3,9 @@@
   *
   */
  
+ #include <linux/module.h>
  #include <crypto/aes.h>
 +#include <asm/aes.h>
  
  asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
  asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
@@@ -36,7 -36,9 +36,8 @@@
  #include <linux/fs.h>
  #include <linux/mm.h>
  #include <linux/debugfs.h>
 -#include <linux/edac_mce.h>
  #include <linux/irq_work.h>
+ #include <linux/export.h>
  
  #include <asm/processor.h>
  #include <asm/mce.h>
@@@ -143,20 -145,23 +144,20 @@@ static struct mce_log mcelog = 
  void mce_log(struct mce *mce)
  {
        unsigned next, entry;
 +      int ret = 0;
  
        /* Emit the trace record: */
        trace_mce_record(mce);
  
 +      ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
 +      if (ret == NOTIFY_STOP)
 +              return;
 +
        mce->finished = 0;
        wmb();
        for (;;) {
                entry = rcu_dereference_check_mce(mcelog.next);
                for (;;) {
 -                      /*
 -                       * If edac_mce is enabled, it will check the error type
 -                       * and will process it, if it is a known error.
 -                       * Otherwise, the error will be sent through mcelog
 -                       * interface
 -                       */
 -                      if (edac_mce_parse(mce))
 -                              return;
  
                        /*
                         * When the buffer fills up discard new entries.
@@@ -552,8 -557,10 +553,8 @@@ void machine_check_poll(enum mcp_flags 
                 * Don't get the IP here because it's unlikely to
                 * have anything to do with the actual error location.
                 */
 -              if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) {
 +              if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce)
                        mce_log(&m);
 -                      atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m);
 -              }
  
                /*
                 * Clear state for this bank.
diff --combined block/ioctl.c
@@@ -1,5 -1,6 +1,6 @@@
  #include <linux/capability.h>
  #include <linux/blkdev.h>
+ #include <linux/export.h>
  #include <linux/gfp.h>
  #include <linux/blkpg.h>
  #include <linux/hdreg.h>
@@@ -101,7 -102,7 +102,7 @@@ static int blkdev_reread_part(struct bl
        struct gendisk *disk = bdev->bd_disk;
        int res;
  
 -      if (!disk_partitionable(disk) || bdev != bdev->bd_contains)
 +      if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains)
                return -EINVAL;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
@@@ -8,6 -8,7 +8,7 @@@
   */
  
  #include <linux/sched.h>
+ #include <linux/export.h>
  #include <linux/pm_runtime.h>
  #include <trace/events/rpm.h>
  #include "power.h"
@@@ -29,10 -30,13 +30,10 @@@ static int rpm_suspend(struct device *d
  void update_pm_runtime_accounting(struct device *dev)
  {
        unsigned long now = jiffies;
 -      int delta;
 +      unsigned long delta;
  
        delta = now - dev->power.accounting_timestamp;
  
 -      if (delta < 0)
 -              delta = 0;
 -
        dev->power.accounting_timestamp = now;
  
        if (dev->power.disable_depth > 0)
@@@ -293,9 -297,6 +294,9 @@@ static int rpm_callback(int (*cb)(struc
   * the callback was running then carry it out, otherwise send an idle
   * notification for its parent (if the suspend succeeded and both
   * ignore_children of parent->power and irq_safe of dev->power are not set).
 + * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 + * flag is set and the next autosuspend-delay expiration time is in the
 + * future, schedule another autosuspend attempt.
   *
   * This function must be called under dev->power.lock with interrupts disabled.
   */
@@@ -416,21 -417,10 +417,21 @@@ static int rpm_suspend(struct device *d
        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = false;
 -              if (retval == -EAGAIN || retval == -EBUSY)
 +              if (retval == -EAGAIN || retval == -EBUSY) {
                        dev->power.runtime_error = 0;
 -              else
 +
 +                      /*
 +                       * If the callback routine failed an autosuspend, and
 +                       * if the last_busy time has been updated so that there
 +                       * is a new autosuspend expiration time, automatically
 +                       * reschedule another autosuspend.
 +                       */
 +                      if ((rpmflags & RPM_AUTO) &&
 +                          pm_runtime_autosuspend_expiration(dev) != 0)
 +                              goto repeat;
 +              } else {
                        pm_runtime_cancel_pending(dev);
 +              }
                wake_up_all(&dev->power.wait_queue);
                goto out;
        }
@@@ -15,6 -15,7 +15,7 @@@
  #include <linux/genhd.h>
  #include <linux/netdevice.h>
  #include <linux/mutex.h>
+ #include <linux/export.h>
  #include "aoe.h"
  
  static DEFINE_MUTEX(aoeblk_mutex);
@@@ -159,7 -160,7 +160,7 @@@ aoeblk_release(struct gendisk *disk, fm
        return 0;
  }
  
 -static int
 +static void
  aoeblk_make_request(struct request_queue *q, struct bio *bio)
  {
        struct sk_buff_head queue;
        if (bio == NULL) {
                printk(KERN_ERR "aoe: bio is NULL\n");
                BUG();
 -              return 0;
 +              return;
        }
        d = bio->bi_bdev->bd_disk->private_data;
        if (d == NULL) {
                printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
                BUG();
                bio_endio(bio, -ENXIO);
 -              return 0;
 +              return;
        } else if (bio->bi_io_vec == NULL) {
                printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
                BUG();
                bio_endio(bio, -ENXIO);
 -              return 0;
 +              return;
        }
        buf = mempool_alloc(d->bufpool, GFP_NOIO);
        if (buf == NULL) {
                printk(KERN_INFO "aoe: buf allocation failure\n");
                bio_endio(bio, -ENOMEM);
 -              return 0;
 +              return;
        }
        memset(buf, 0, sizeof(*buf));
        INIT_LIST_HEAD(&buf->bufs);
                spin_unlock_irqrestore(&d->lock, flags);
                mempool_free(buf, d->bufpool);
                bio_endio(bio, -ENXIO);
 -              return 0;
 +              return;
        }
  
        list_add_tail(&buf->bufs, &d->bufq);
  
        spin_unlock_irqrestore(&d->lock, flags);
        aoenet_xmit(&queue);
 -
 -      return 0;
  }
  
  static int
diff --combined drivers/block/ps3vram.c
@@@ -10,6 -10,7 +10,7 @@@
  
  #include <linux/blkdev.h>
  #include <linux/delay.h>
+ #include <linux/module.h>
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  #include <linux/slab.h>
@@@ -596,7 -597,7 +597,7 @@@ out
        return next;
  }
  
 -static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
 +static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
  {
        struct ps3_system_bus_device *dev = q->queuedata;
        struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
        spin_unlock_irq(&priv->lock);
  
        if (busy)
 -              return 0;
 +              return;
  
        do {
                bio = ps3vram_do_bio(dev, bio);
        } while (bio);
 -
 -      return 0;
  }
  
  static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
@@@ -3,18 -3,16 +3,19 @@@
  #include <linux/slab.h>
  #include <linux/blkdev.h>
  #include <linux/hdreg.h>
+ #include <linux/module.h>
  #include <linux/virtio.h>
  #include <linux/virtio_blk.h>
  #include <linux/scatterlist.h>
  #include <linux/string_helpers.h>
  #include <scsi/scsi_cmnd.h>
 +#include <linux/idr.h>
  
  #define PART_BITS 4
  
 -static int major, index;
 +static int major;
 +static DEFINE_IDA(vd_index_ida);
 +
  struct workqueue_struct *virtblk_wq;
  
  struct virtio_blk
@@@ -38,9 -36,6 +39,9 @@@
        /* What host tells us, plus 2 for header & tailer. */
        unsigned int sg_elems;
  
 +      /* Ida index - used to track minor number allocations. */
 +      int index;
 +
        /* Scatterlist: can be too big for stack. */
        struct scatterlist sg[/*sg_elems*/];
  };
@@@ -282,11 -277,6 +283,11 @@@ static int index_to_minor(int index
        return index << PART_BITS;
  }
  
 +static int minor_to_index(int minor)
 +{
 +      return minor >> PART_BITS;
 +}
 +
  static ssize_t virtblk_serial_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
  {
@@@ -352,17 -342,14 +353,17 @@@ static int __devinit virtblk_probe(stru
  {
        struct virtio_blk *vblk;
        struct request_queue *q;
 -      int err;
 +      int err, index;
        u64 cap;
        u32 v, blk_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;
  
 -      if (index_to_minor(index) >= 1 << MINORBITS)
 -              return -ENOSPC;
 +      err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
 +                           GFP_KERNEL);
 +      if (err < 0)
 +              goto out;
 +      index = err;
  
        /* We need to know how many segments before we allocate. */
        err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
                                    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
 -              goto out;
 +              goto out_free_index;
        }
  
        INIT_LIST_HEAD(&vblk->reqs);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->driverfs_dev = &vdev->dev;
 -      index++;
 +      vblk->index = index;
  
        /* configure queue flush support */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
@@@ -530,8 -517,6 +531,8 @@@ out_free_vq
        vdev->config->del_vqs(vdev);
  out_free_vblk:
        kfree(vblk);
 +out_free_index:
 +      ida_simple_remove(&vd_index_ida, index);
  out:
        return err;
  }
  static void __devexit virtblk_remove(struct virtio_device *vdev)
  {
        struct virtio_blk *vblk = vdev->priv;
 +      int index = vblk->index;
  
        flush_work(&vblk->config_work);
  
        mempool_destroy(vblk->pool);
        vdev->config->del_vqs(vdev);
        kfree(vblk);
 +      ida_simple_remove(&vd_index_ida, index);
  }
  
  static const struct virtio_device_id id_table[] = {
diff --combined drivers/char/ttyprintk.c
@@@ -17,6 -17,7 +17,7 @@@
  #include <linux/device.h>
  #include <linux/serial.h>
  #include <linux/tty.h>
+ #include <linux/export.h>
  
  struct ttyprintk_port {
        struct tty_port port;
@@@ -170,7 -171,7 +171,7 @@@ static const struct tty_operations ttyp
        .ioctl = tpk_ioctl,
  };
  
 -struct tty_port_operations null_ops = { };
 +static struct tty_port_operations null_ops = { };
  
  static struct tty_driver *ttyprintk_driver;
  
   */
  #include <linux/cdev.h>
  #include <linux/debugfs.h>
 +#include <linux/completion.h>
  #include <linux/device.h>
  #include <linux/err.h>
 +#include <linux/freezer.h>
  #include <linux/fs.h>
  #include <linux/init.h>
  #include <linux/list.h>
@@@ -34,6 -32,7 +34,7 @@@
  #include <linux/virtio_console.h>
  #include <linux/wait.h>
  #include <linux/workqueue.h>
+ #include <linux/module.h>
  #include "../tty/hvc/hvc_console.h"
  
  /*
@@@ -75,7 -74,6 +76,7 @@@ struct ports_driver_data 
  static struct ports_driver_data pdrvdata;
  
  DEFINE_SPINLOCK(pdrvdata_lock);
 +DECLARE_COMPLETION(early_console_added);
  
  /* This struct holds information that's relevant only for console ports */
  struct console {
@@@ -154,10 -152,6 +155,10 @@@ struct ports_device 
        int chr_major;
  };
  
 +struct port_stats {
 +      unsigned long bytes_sent, bytes_received, bytes_discarded;
 +};
 +
  /* This struct holds the per-port data */
  struct port {
        /* Next port in the list, head is in the ports_device */
        /* File in the debugfs directory that exposes this port's information */
        struct dentry *debugfs_file;
  
 +      /*
 +       * Keep count of the bytes sent, received and discarded for
 +       * this port for accounting and debugging purposes.  These
 +       * counts are not reset across port open / close events.
 +       */
 +      struct port_stats stats;
 +
        /*
         * The entries in this struct will be valid if this port is
         * hooked up to an hvc console
@@@ -361,19 -348,17 +362,19 @@@ fail
  }
  
  /* Callers should take appropriate locks */
 -static void *get_inbuf(struct port *port)
 +static struct port_buffer *get_inbuf(struct port *port)
  {
        struct port_buffer *buf;
 -      struct virtqueue *vq;
        unsigned int len;
  
 -      vq = port->in_vq;
 -      buf = virtqueue_get_buf(vq, &len);
 +      if (port->inbuf)
 +              return port->inbuf;
 +
 +      buf = virtqueue_get_buf(port->in_vq, &len);
        if (buf) {
                buf->len = len;
                buf->offset = 0;
 +              port->stats.bytes_received += len;
        }
        return buf;
  }
@@@ -400,27 -385,32 +401,27 @@@ static int add_inbuf(struct virtqueue *
  static void discard_port_data(struct port *port)
  {
        struct port_buffer *buf;
 -      struct virtqueue *vq;
 -      unsigned int len;
 -      int ret;
 +      unsigned int err;
  
        if (!port->portdev) {
                /* Device has been unplugged.  vqs are already gone. */
                return;
        }
 -      vq = port->in_vq;
 -      if (port->inbuf)
 -              buf = port->inbuf;
 -      else
 -              buf = virtqueue_get_buf(vq, &len);
 +      buf = get_inbuf(port);
  
 -      ret = 0;
 +      err = 0;
        while (buf) {
 -              if (add_inbuf(vq, buf) < 0) {
 -                      ret++;
 +              port->stats.bytes_discarded += buf->len - buf->offset;
 +              if (add_inbuf(port->in_vq, buf) < 0) {
 +                      err++;
                        free_buf(buf);
                }
 -              buf = virtqueue_get_buf(vq, &len);
 +              port->inbuf = NULL;
 +              buf = get_inbuf(port);
        }
 -      port->inbuf = NULL;
 -      if (ret)
 +      if (err)
                dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
 -                       ret);
 +                       err);
  }
  
  static bool port_has_data(struct port *port)
        unsigned long flags;
        bool ret;
  
 +      ret = false;
        spin_lock_irqsave(&port->inbuf_lock, flags);
 -      if (port->inbuf) {
 -              ret = true;
 -              goto out;
 -      }
        port->inbuf = get_inbuf(port);
 -      if (port->inbuf) {
 +      if (port->inbuf)
                ret = true;
 -              goto out;
 -      }
 -      ret = false;
 -out:
 +
        spin_unlock_irqrestore(&port->inbuf_lock, flags);
        return ret;
  }
@@@ -534,8 -530,6 +535,8 @@@ static ssize_t send_buf(struct port *po
                cpu_relax();
  done:
        spin_unlock_irqrestore(&port->outvq_lock, flags);
 +
 +      port->stats.bytes_sent += in_count;
        /*
         * We're expected to return the amount of data we wrote -- all
         * of it
@@@ -640,8 -634,8 +641,8 @@@ static ssize_t port_fops_read(struct fi
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
  
 -              ret = wait_event_interruptible(port->waitqueue,
 -                                             !will_read_block(port));
 +              ret = wait_event_freezable(port->waitqueue,
 +                                         !will_read_block(port));
                if (ret < 0)
                        return ret;
        }
@@@ -684,8 -678,8 +685,8 @@@ static ssize_t port_fops_write(struct f
                if (nonblock)
                        return -EAGAIN;
  
 -              ret = wait_event_interruptible(port->waitqueue,
 -                                             !will_write_block(port));
 +              ret = wait_event_freezable(port->waitqueue,
 +                                         !will_write_block(port));
                if (ret < 0)
                        return ret;
        }
@@@ -1065,14 -1059,6 +1066,14 @@@ static ssize_t debugfs_read(struct fil
                               "host_connected: %d\n", port->host_connected);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "outvq_full: %d\n", port->outvq_full);
 +      out_offset += snprintf(buf + out_offset, out_count - out_offset,
 +                             "bytes_sent: %lu\n", port->stats.bytes_sent);
 +      out_offset += snprintf(buf + out_offset, out_count - out_offset,
 +                             "bytes_received: %lu\n",
 +                             port->stats.bytes_received);
 +      out_offset += snprintf(buf + out_offset, out_count - out_offset,
 +                             "bytes_discarded: %lu\n",
 +                             port->stats.bytes_discarded);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "is_console: %s\n",
                               is_console_port(port) ? "yes" : "no");
@@@ -1158,7 -1144,6 +1159,7 @@@ static int add_port(struct ports_devic
        port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
  
        port->host_connected = port->guest_connected = false;
 +      port->stats = (struct port_stats) { 0 };
  
        port->outvq_full = false;
  
@@@ -1368,7 -1353,6 +1369,7 @@@ static void handle_control_message(stru
                        break;
  
                init_port_console(port);
 +              complete(&early_console_added);
                /*
                 * Could remove the port here in case init fails - but
                 * have to notify the host first.
                send_sigio_to_port(port);
                break;
        case VIRTIO_CONSOLE_PORT_NAME:
 +              /*
 +               * If we woke up after hibernation, we can get this
 +               * again.  Skip it in that case.
 +               */
 +              if (port->name)
 +                      break;
 +
                /*
                 * Skip the size of the header and the cpkt to get the size
                 * of the name that was sent
@@@ -1505,7 -1482,8 +1506,7 @@@ static void in_intr(struct virtqueue *v
                return;
  
        spin_lock_irqsave(&port->inbuf_lock, flags);
 -      if (!port->inbuf)
 -              port->inbuf = get_inbuf(port);
 +      port->inbuf = get_inbuf(port);
  
        /*
         * Don't queue up data when port is closed.  This condition
@@@ -1586,7 -1564,7 +1587,7 @@@ static int init_vqs(struct ports_devic
        portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
                                   GFP_KERNEL);
        if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
 -                      !portdev->out_vqs) {
 +          !portdev->out_vqs) {
                err = -ENOMEM;
                goto free;
        }
@@@ -1671,10 -1649,6 +1672,10 @@@ static int __devinit virtcons_probe(str
        struct ports_device *portdev;
        int err;
        bool multiport;
 +      bool early = early_put_chars != NULL;
 +
 +      /* Ensure to read early_put_chars now */
 +      barrier();
  
        portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
        if (!portdev) {
  
        multiport = false;
        portdev->config.max_nr_ports = 1;
 -      if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
 +      if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
 +                            offsetof(struct virtio_console_config,
 +                                     max_nr_ports),
 +                            &portdev->config.max_nr_ports) == 0)
                multiport = true;
 -              vdev->config->get(vdev, offsetof(struct virtio_console_config,
 -                                               max_nr_ports),
 -                                &portdev->config.max_nr_ports,
 -                                sizeof(portdev->config.max_nr_ports));
 -      }
  
        err = init_vqs(portdev);
        if (err < 0) {
  
        __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
                           VIRTIO_CONSOLE_DEVICE_READY, 1);
 +
 +      /*
 +       * If there was an early virtio console, assume that there are no
 +       * other consoles. We need to wait until the hvc_alloc matches the
 +       * hvc_instantiate, otherwise tty_open will complain, resulting in
 +       * a "Warning: unable to open an initial console" boot failure.
 +       * Without multiport this is done in add_port above. With multiport
 +       * this might take some host<->guest communication - thus we have to
 +       * wait.
 +       */
 +      if (multiport && early)
 +              wait_for_completion(&early_console_added);
 +
        return 0;
  
  free_vqs:
diff --combined drivers/dma/imx-dma.c
@@@ -14,7 -14,6 +14,7 @@@
   * http://www.gnu.org/copyleft/gpl.html
   */
  #include <linux/init.h>
 +#include <linux/module.h>
  #include <linux/types.h>
  #include <linux/mm.h>
  #include <linux/interrupt.h>
@@@ -24,6 -23,7 +24,7 @@@
  #include <linux/slab.h>
  #include <linux/platform_device.h>
  #include <linux/dmaengine.h>
+ #include <linux/module.h>
  
  #include <asm/irq.h>
  #include <mach/dma-v1.h>
diff --combined drivers/dma/imx-sdma.c
@@@ -18,7 -18,6 +18,7 @@@
   */
  
  #include <linux/init.h>
 +#include <linux/module.h>
  #include <linux/types.h>
  #include <linux/mm.h>
  #include <linux/interrupt.h>
@@@ -35,6 -34,7 +35,7 @@@
  #include <linux/dmaengine.h>
  #include <linux/of.h>
  #include <linux/of_device.h>
+ #include <linux/module.h>
  
  #include <asm/irq.h>
  #include <mach/sdma.h>
@@@ -319,7 -319,6 +320,7 @@@ struct sdma_engine 
        dma_addr_t                      context_phys;
        struct dma_device               dma_device;
        struct clk                      *clk;
 +      struct mutex                    channel_0_lock;
        struct sdma_script_start_addrs  *script_addrs;
  };
  
@@@ -417,15 -416,11 +418,15 @@@ static int sdma_load_script(struct sdma
        dma_addr_t buf_phys;
        int ret;
  
 +      mutex_lock(&sdma->channel_0_lock);
 +
        buf_virt = dma_alloc_coherent(NULL,
                        size,
                        &buf_phys, GFP_KERNEL);
 -      if (!buf_virt)
 -              return -ENOMEM;
 +      if (!buf_virt) {
 +              ret = -ENOMEM;
 +              goto err_out;
 +      }
  
        bd0->mode.command = C0_SETPM;
        bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
  
        dma_free_coherent(NULL, size, buf_virt, buf_phys);
  
 +err_out:
 +      mutex_unlock(&sdma->channel_0_lock);
 +
        return ret;
  }
  
@@@ -665,8 -657,6 +666,8 @@@ static int sdma_load_context(struct sdm
        dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
        dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
  
 +      mutex_lock(&sdma->channel_0_lock);
 +
        memset(context, 0, sizeof(*context));
        context->channel_state.pc = load_address;
  
  
        ret = sdma_run_channel(&sdma->channel[0]);
  
 +      mutex_unlock(&sdma->channel_0_lock);
 +
        return ret;
  }
  
@@@ -1144,17 -1132,18 +1145,17 @@@ static void sdma_add_scripts(struct sdm
                        saddr_arr[i] = addr_arr[i];
  }
  
 -static int __init sdma_get_firmware(struct sdma_engine *sdma,
 -              const char *fw_name)
 +static void sdma_load_firmware(const struct firmware *fw, void *context)
  {
 -      const struct firmware *fw;
 +      struct sdma_engine *sdma = context;
        const struct sdma_firmware_header *header;
 -      int ret;
        const struct sdma_script_start_addrs *addr;
        unsigned short *ram_code;
  
 -      ret = request_firmware(&fw, fw_name, sdma->dev);
 -      if (ret)
 -              return ret;
 +      if (!fw) {
 +              dev_err(sdma->dev, "firmware not found\n");
 +              return;
 +      }
  
        if (fw->size < sizeof(*header))
                goto err_firmware;
  
  err_firmware:
        release_firmware(fw);
 +}
 +
 +static int __init sdma_get_firmware(struct sdma_engine *sdma,
 +              const char *fw_name)
 +{
 +      int ret;
 +
 +      ret = request_firmware_nowait(THIS_MODULE,
 +                      FW_ACTION_HOTPLUG, fw_name, sdma->dev,
 +                      GFP_KERNEL, sdma, sdma_load_firmware);
  
        return ret;
  }
@@@ -1291,14 -1270,11 +1292,14 @@@ static int __init sdma_probe(struct pla
        struct sdma_platform_data *pdata = pdev->dev.platform_data;
        int i;
        struct sdma_engine *sdma;
 +      s32 *saddr_arr;
  
        sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
        if (!sdma)
                return -ENOMEM;
  
 +      mutex_init(&sdma->channel_0_lock);
 +
        sdma->dev = &pdev->dev;
  
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                goto err_alloc;
        }
  
 +      /* initially no scripts available */
 +      saddr_arr = (s32 *)sdma->script_addrs;
 +      for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
 +              saddr_arr[i] = -EINVAL;
 +
        if (of_id)
                pdev->id_entry = of_id->data;
        sdma->devtype = pdev->id_entry->driver_data;
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/interrupt.h>
  #include <linux/pm_runtime.h>
  #include <linux/intel_mid_dma.h>
+ #include <linux/module.h>
  
  #define MAX_CHAN      4 /*max ch across controllers*/
  #include "intel_mid_dma_regs.h"
@@@ -115,15 -116,16 +116,15 @@@ DMAC1 interrupt Functions*
  
  /**
   * dmac1_mask_periphral_intr -        mask the periphral interrupt
 - * @midc: dma channel for which masking is required
 + * @mid: dma device for which masking is required
   *
   * Masks the DMA periphral interrupt
   * this is valid for DMAC1 family controllers only
   * This controller should have periphral mask registers already mapped
   */
 -static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
 +static void dmac1_mask_periphral_intr(struct middma_device *mid)
  {
        u32 pimr;
 -      struct middma_device *mid = to_middma_device(midc->chan.device);
  
        if (mid->pimr_mask) {
                pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
@@@ -183,6 -185,7 +184,6 @@@ static void enable_dma_interrupt(struc
  static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
  {
        /*Check LPE PISR, make sure fwd is disabled*/
 -      dmac1_mask_periphral_intr(midc);
        iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
        iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
        iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
@@@ -1112,6 -1115,7 +1113,6 @@@ static int mid_setup_dma(struct pci_de
  
                midch->chan.device = &dma->common;
                midch->chan.cookie =  1;
 -              midch->chan.chan_id = i;
                midch->ch_id = dma->chan_base + i;
                pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
  
        dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
        dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
        dma->common.dev = &pdev->dev;
 -      dma->common.chancnt = dma->max_chan;
  
        dma->common.device_alloc_chan_resources =
                                        intel_mid_dma_alloc_chan_resources;
@@@ -1346,7 -1351,6 +1347,7 @@@ int dma_suspend(struct pci_dev *pci, pm
                if (device->ch[i].in_use)
                        return -EAGAIN;
        }
 +      dmac1_mask_periphral_intr(device);
        device->state = SUSPENDED;
        pci_save_state(pci);
        pci_disable_device(pci);
@@@ -21,6 -21,7 +21,7 @@@
  #include <linux/string.h>
  #include <linux/interrupt.h>
  #include <linux/io.h>
+ #include <linux/module.h>
  
  #include <mach/ipu.h>
  
@@@ -1307,7 -1308,6 +1308,7 @@@ static irqreturn_t idmac_interrupt(int 
            ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
                callback = descnew->txd.callback;
                callback_param = descnew->txd.callback_param;
 +              list_del_init(&descnew->list);
                spin_unlock(&ichan->lock);
                if (callback)
                        callback(callback_param);
@@@ -1429,58 -1429,39 +1430,58 @@@ static int __idmac_control(struct dma_c
  {
        struct idmac_channel *ichan = to_idmac_chan(chan);
        struct idmac *idmac = to_idmac(chan->device);
 +      struct ipu *ipu = to_ipu(idmac);
 +      struct list_head *list, *tmp;
        unsigned long flags;
        int i;
  
 -      /* Only supports DMA_TERMINATE_ALL */
 -      if (cmd != DMA_TERMINATE_ALL)
 -              return -ENXIO;
 +      switch (cmd) {
 +      case DMA_PAUSE:
 +              spin_lock_irqsave(&ipu->lock, flags);
 +              ipu_ic_disable_task(ipu, chan->chan_id);
  
 -      ipu_disable_channel(idmac, ichan,
 -                          ichan->status >= IPU_CHANNEL_ENABLED);
 +              /* Return all descriptors into "prepared" state */
 +              list_for_each_safe(list, tmp, &ichan->queue)
 +                      list_del_init(list);
  
 -      tasklet_disable(&to_ipu(idmac)->tasklet);
 +              ichan->sg[0] = NULL;
 +              ichan->sg[1] = NULL;
  
 -      /* ichan->queue is modified in ISR, have to spinlock */
 -      spin_lock_irqsave(&ichan->lock, flags);
 -      list_splice_init(&ichan->queue, &ichan->free_list);
 +              spin_unlock_irqrestore(&ipu->lock, flags);
  
 -      if (ichan->desc)
 -              for (i = 0; i < ichan->n_tx_desc; i++) {
 -                      struct idmac_tx_desc *desc = ichan->desc + i;
 -                      if (list_empty(&desc->list))
 -                              /* Descriptor was prepared, but not submitted */
 -                              list_add(&desc->list, &ichan->free_list);
 +              ichan->status = IPU_CHANNEL_INITIALIZED;
 +              break;
 +      case DMA_TERMINATE_ALL:
 +              ipu_disable_channel(idmac, ichan,
 +                                  ichan->status >= IPU_CHANNEL_ENABLED);
  
 -                      async_tx_clear_ack(&desc->txd);
 -              }
 +              tasklet_disable(&ipu->tasklet);
  
 -      ichan->sg[0] = NULL;
 -      ichan->sg[1] = NULL;
 -      spin_unlock_irqrestore(&ichan->lock, flags);
 +              /* ichan->queue is modified in ISR, have to spinlock */
 +              spin_lock_irqsave(&ichan->lock, flags);
 +              list_splice_init(&ichan->queue, &ichan->free_list);
  
 -      tasklet_enable(&to_ipu(idmac)->tasklet);
 +              if (ichan->desc)
 +                      for (i = 0; i < ichan->n_tx_desc; i++) {
 +                              struct idmac_tx_desc *desc = ichan->desc + i;
 +                              if (list_empty(&desc->list))
 +                                      /* Descriptor was prepared, but not submitted */
 +                                      list_add(&desc->list, &ichan->free_list);
  
 -      ichan->status = IPU_CHANNEL_INITIALIZED;
 +                              async_tx_clear_ack(&desc->txd);
 +                      }
 +
 +              ichan->sg[0] = NULL;
 +              ichan->sg[1] = NULL;
 +              spin_unlock_irqrestore(&ichan->lock, flags);
 +
 +              tasklet_enable(&ipu->tasklet);
 +
 +              ichan->status = IPU_CHANNEL_INITIALIZED;
 +              break;
 +      default:
 +              return -ENOSYS;
 +      }
  
        return 0;
  }
@@@ -1683,6 -1664,7 +1684,6 @@@ static void __exit ipu_idmac_exit(struc
                struct idmac_channel *ichan = ipu->channel + i;
  
                idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
 -              idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
        }
  
        dma_async_device_unregister(&idmac->dma);
diff --combined drivers/gpio/gpio-mxc.c
  #include <linux/basic_mmio_gpio.h>
  #include <linux/of.h>
  #include <linux/of_device.h>
+ #include <linux/module.h>
  #include <asm-generic/bug.h>
  #include <asm/mach/irq.h>
  
 +#define irq_to_gpio(irq)      ((irq) - MXC_GPIO_IRQ_START)
 +
  enum mxc_gpio_hwtype {
        IMX1_GPIO,      /* runs on i.mx1 */
        IMX21_GPIO,     /* runs on i.mx21 and i.mx27 */
@@@ -345,15 -344,6 +346,15 @@@ static void __devinit mxc_gpio_get_hw(s
        mxc_gpio_hwtype = hwtype;
  }
  
 +static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 +{
 +      struct bgpio_chip *bgc = to_bgpio_chip(gc);
 +      struct mxc_gpio_port *port =
 +              container_of(bgc, struct mxc_gpio_port, bgc);
 +
 +      return port->virtual_irq_start + offset;
 +}
 +
  static int __devinit mxc_gpio_probe(struct platform_device *pdev)
  {
        struct device_node *np = pdev->dev.of_node;
        if (err)
                goto out_iounmap;
  
 +      port->bgc.gc.to_irq = mxc_gpio_to_irq;
        port->bgc.gc.base = pdev->id * 32;
        port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir);
        port->bgc.data = port->bgc.read_reg(port->bgc.reg_set);
diff --combined drivers/gpio/gpio-mxs.c
@@@ -28,6 -28,7 +28,7 @@@
  #include <linux/platform_device.h>
  #include <linux/slab.h>
  #include <linux/basic_mmio_gpio.h>
+ #include <linux/module.h>
  #include <mach/mxs.h>
  
  #define MXS_SET               0x4
@@@ -49,8 -50,6 +50,8 @@@
  #define GPIO_INT_LEV_MASK     (1 << 0)
  #define GPIO_INT_POL_MASK     (1 << 1)
  
 +#define irq_to_gpio(irq)      ((irq) - MXS_GPIO_IRQ_START)
 +
  struct mxs_gpio_port {
        void __iomem *base;
        int id;
diff --combined drivers/hid/hid-roccat.c
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/poll.h>
  #include <linux/sched.h>
  #include <linux/hid-roccat.h>
+ #include <linux/module.h>
  
  #define ROCCAT_FIRST_MINOR 0
  #define ROCCAT_MAX_DEVICES 8
@@@ -162,27 -163,27 +163,27 @@@ static int roccat_open(struct inode *in
  
        device = devices[minor];
  
 -      mutex_lock(&device->readers_lock);
 -
        if (!device) {
                pr_emerg("roccat device with minor %d doesn't exist\n", minor);
                error = -ENODEV;
 -              goto exit_err;
 +              goto exit_err_devices;
        }
  
 +      mutex_lock(&device->readers_lock);
 +
        if (!device->open++) {
                /* power on device on adding first reader */
                error = hid_hw_power(device->hid, PM_HINT_FULLON);
                if (error < 0) {
                        --device->open;
 -                      goto exit_err;
 +                      goto exit_err_readers;
                }
  
                error = hid_hw_open(device->hid);
                if (error < 0) {
                        hid_hw_power(device->hid, PM_HINT_NORMAL);
                        --device->open;
 -                      goto exit_err;
 +                      goto exit_err_readers;
                }
        }
  
        list_add_tail(&reader->node, &device->readers);
        file->private_data = reader;
  
 -exit_unlock:
 +exit_err_readers:
        mutex_unlock(&device->readers_lock);
 +exit_err_devices:
        mutex_unlock(&devices_lock);
 +      if (error)
 +              kfree(reader);
        return error;
 -exit_err:
 -      kfree(reader);
 -      goto exit_unlock;
  }
  
  static int roccat_release(struct inode *inode, struct file *file)
@@@ -17,6 -17,7 +17,7 @@@
  #include <linux/platform_device.h>
  #include <linux/slab.h>
  #include <linux/io.h>
+ #include <linux/module.h>
  
  #include <asm/clock.h>
  #include <asm/i2c-sh7760.h>
@@@ -502,7 -503,7 +503,7 @@@ static int __devinit sh7760_i2c_probe(s
        }
        OUT32(id, I2CCCR, ret);
  
 -      if (request_irq(id->irq, sh7760_i2c_irq, IRQF_DISABLED,
 +      if (request_irq(id->irq, sh7760_i2c_irq, 0,
                        SH7760_I2C_DEVNAME, id)) {
                dev_err(&pdev->dev, "cannot get irq %d\n", id->irq);
                ret = -EBUSY;
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/slab.h>
  #include <linux/i2c-tegra.h>
  #include <linux/of_i2c.h>
+ #include <linux/module.h>
  
  #include <asm/unaligned.h>
  
@@@ -566,7 -567,7 +567,7 @@@ static int tegra_i2c_probe(struct platf
        struct clk *clk;
        struct clk *i2c_clk;
        const unsigned int *prop;
 -      void *base;
 +      void __iomem *base;
        int irq;
        int ret = 0;
  
@@@ -36,6 -36,7 +36,7 @@@
  #include <linux/completion.h>
  #include <linux/dma-mapping.h>
  #include <linux/device.h>
+ #include <linux/module.h>
  #include <linux/err.h>
  #include <linux/idr.h>
  #include <linux/interrupt.h>
@@@ -889,8 -890,6 +890,8 @@@ retest
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irq(&cm_id_priv->lock);
 +              if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
 +                      break;
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
@@@ -1010,6 -1009,7 +1011,6 @@@ static void cm_format_req(struct cm_req
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
 -      cm_req_set_resp_res(req_msg, param->responder_resources);
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
 -      cm_req_set_retry_count(req_msg, param->retry_count);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
 -      cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
 -      cm_req_set_srq(req_msg, param->srq);
 +
 +      if (param->qp_type != IB_QPT_XRC_INI) {
 +              cm_req_set_resp_res(req_msg, param->responder_resources);
 +              cm_req_set_retry_count(req_msg, param->retry_count);
 +              cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
 +              cm_req_set_srq(req_msg, param->srq);
 +      }
  
        if (pri_path->hop_limit <= 1) {
                req_msg->primary_local_lid = pri_path->slid;
@@@ -1085,8 -1081,7 +1086,8 @@@ static int cm_validate_req_param(struc
        if (!param->primary_path)
                return -EINVAL;
  
 -      if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
 +      if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
 +          param->qp_type != IB_QPT_XRC_INI)
                return -EINVAL;
  
        if (param->private_data &&
@@@ -1607,24 -1602,18 +1608,24 @@@ static void cm_format_rep(struct cm_rep
        cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
        rep_msg->local_comm_id = cm_id_priv->id.local_id;
        rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
 -      cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
        cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
        rep_msg->resp_resources = param->responder_resources;
 -      rep_msg->initiator_depth = param->initiator_depth;
        cm_rep_set_target_ack_delay(rep_msg,
                                    cm_id_priv->av.port->cm_dev->ack_delay);
        cm_rep_set_failover(rep_msg, param->failover_accepted);
 -      cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
        cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
 -      cm_rep_set_srq(rep_msg, param->srq);
        rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
  
 +      if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
 +              rep_msg->initiator_depth = param->initiator_depth;
 +              cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
 +              cm_rep_set_srq(rep_msg, param->srq);
 +              cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
 +      } else {
 +              cm_rep_set_srq(rep_msg, 1);
 +              cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
 +      }
 +
        if (param->private_data && param->private_data_len)
                memcpy(rep_msg->private_data, param->private_data,
                       param->private_data_len);
@@@ -1672,7 -1661,7 +1673,7 @@@ int ib_send_cm_rep(struct ib_cm_id *cm_
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
 -      cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
 +      cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
  
  out:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
@@@ -1743,7 -1732,7 +1744,7 @@@ error:  spin_unlock_irqrestore(&cm_id_pr
  }
  EXPORT_SYMBOL(ib_send_cm_rtu);
  
 -static void cm_format_rep_event(struct cm_work *work)
 +static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
  {
        struct cm_rep_msg *rep_msg;
        struct ib_cm_rep_event_param *param;
        param = &work->cm_event.param.rep_rcvd;
        param->remote_ca_guid = rep_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
 -      param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
 +      param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
        param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
        param->responder_resources = rep_msg->initiator_depth;
        param->initiator_depth = rep_msg->resp_resources;
@@@ -1820,7 -1809,7 +1821,7 @@@ static int cm_rep_handler(struct cm_wor
                return -EINVAL;
        }
  
 -      cm_format_rep_event(work);
 +      cm_format_rep_event(work, cm_id_priv->qp_type);
  
        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id_priv->id.state) {
  
        cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
 -      cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
 +      cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
  
        spin_lock(&cm.lock);
        /* Check for duplicate REP. */
  
        cm_id_priv->id.state = IB_CM_REP_RCVD;
        cm_id_priv->id.remote_id = rep_msg->local_comm_id;
 -      cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
 +      cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
        cm_id_priv->initiator_depth = rep_msg->resp_resources;
        cm_id_priv->responder_resources = rep_msg->initiator_depth;
        cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
@@@ -3504,8 -3493,7 +3505,8 @@@ static int cm_init_qp_rtr_attr(struct c
                qp_attr->path_mtu = cm_id_priv->path_mtu;
                qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
                qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
 -              if (cm_id_priv->qp_type == IB_QPT_RC) {
 +              if (cm_id_priv->qp_type == IB_QPT_RC ||
 +                  cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
                        *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
                                         IB_QP_MIN_RNR_TIMER;
                        qp_attr->max_dest_rd_atomic =
@@@ -3550,21 -3538,15 +3551,21 @@@ static int cm_init_qp_rts_attr(struct c
                if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
                        *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
                        qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
 -                      if (cm_id_priv->qp_type == IB_QPT_RC) {
 -                              *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
 -                                               IB_QP_RNR_RETRY |
 +                      switch (cm_id_priv->qp_type) {
 +                      case IB_QPT_RC:
 +                      case IB_QPT_XRC_INI:
 +                              *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
                                                 IB_QP_MAX_QP_RD_ATOMIC;
 -                              qp_attr->timeout = cm_id_priv->av.timeout;
                                qp_attr->retry_cnt = cm_id_priv->retry_count;
                                qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
 -                              qp_attr->max_rd_atomic =
 -                                      cm_id_priv->initiator_depth;
 +                              qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
 +                              /* fall through */
 +                      case IB_QPT_XRC_TGT:
 +                              *qp_attr_mask |= IB_QP_TIMEOUT;
 +                              qp_attr->timeout = cm_id_priv->av.timeout;
 +                              break;
 +                      default:
 +                              break;
                        }
                        if (cm_id_priv->alt_av.ah_attr.dlid) {
                                *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
@@@ -41,6 -41,7 +41,7 @@@
  #include <linux/idr.h>
  #include <linux/inetdevice.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  
  #include <net/tcp.h>
  #include <net/ipv6.h>
@@@ -81,7 -82,6 +82,7 @@@ static DEFINE_IDR(sdp_ps)
  static DEFINE_IDR(tcp_ps);
  static DEFINE_IDR(udp_ps);
  static DEFINE_IDR(ipoib_ps);
 +static DEFINE_IDR(ib_ps);
  
  struct cma_device {
        struct list_head        list;
@@@ -1180,15 -1180,6 +1181,15 @@@ static void cma_set_req_event_data(stru
        event->param.conn.qp_num = req_data->remote_qpn;
  }
  
 +static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
 +{
 +      return (((ib_event->event == IB_CM_REQ_RECEIVED) ||
 +               (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
 +              ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
 +               (id->qp_type == IB_QPT_UD)) ||
 +              (!id->qp_type));
 +}
 +
  static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
  {
        struct rdma_id_private *listen_id, *conn_id;
        int offset, ret;
  
        listen_id = cm_id->context;
 +      if (!cma_check_req_qp_type(&listen_id->id, ib_event))
 +              return -EINVAL;
 +
        if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
                return -ECONNABORTED;
  
        memset(&event, 0, sizeof event);
        offset = cma_user_data_offset(listen_id->id.ps);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
 -      if (listen_id->id.qp_type == IB_QPT_UD) {
 +      if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
                conn_id = cma_new_udp_id(&listen_id->id, ib_event);
                event.param.ud.private_data = ib_event->private_data + offset;
                event.param.ud.private_data_len =
@@@ -1341,8 -1329,6 +1342,8 @@@ static int cma_iw_handler(struct iw_cm_
                switch (iw_event->status) {
                case 0:
                        event.event = RDMA_CM_EVENT_ESTABLISHED;
 +                      event.param.conn.initiator_depth = iw_event->ird;
 +                      event.param.conn.responder_resources = iw_event->ord;
                        break;
                case -ECONNRESET:
                case -ECONNREFUSED:
                break;
        case IW_CM_EVENT_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
 +              event.param.conn.initiator_depth = iw_event->ird;
 +              event.param.conn.responder_resources = iw_event->ord;
                break;
        default:
                BUG_ON(1);
@@@ -1450,8 -1434,8 +1451,8 @@@ static int iw_conn_req_handler(struct i
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        event.param.conn.private_data = iw_event->private_data;
        event.param.conn.private_data_len = iw_event->private_data_len;
 -      event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
 -      event.param.conn.responder_resources = attr.max_qp_rd_atom;
 +      event.param.conn.initiator_depth = iw_event->ird;
 +      event.param.conn.responder_resources = iw_event->ord;
  
        /*
         * Protect against the user destroying conn_id from another thread
@@@ -2251,9 -2235,6 +2252,9 @@@ static int cma_get_port(struct rdma_id_
        case RDMA_PS_IPOIB:
                ps = &ipoib_ps;
                break;
 +      case RDMA_PS_IB:
 +              ps = &ib_ps;
 +              break;
        default:
                return -EPROTONOSUPPORT;
        }
@@@ -2589,7 -2570,7 +2590,7 @@@ static int cma_connect_ib(struct rdma_i
        req.service_id = cma_get_service_id(id_priv->id.ps,
                                            (struct sockaddr *) &route->addr.dst_addr);
        req.qp_num = id_priv->qp_num;
 -      req.qp_type = IB_QPT_RC;
 +      req.qp_type = id_priv->id.qp_type;
        req.starting_psn = id_priv->seq_num;
        req.responder_resources = conn_param->responder_resources;
        req.initiator_depth = conn_param->initiator_depth;
@@@ -2636,16 -2617,14 +2637,16 @@@ static int cma_connect_iw(struct rdma_i
        if (ret)
                goto out;
  
 -      iw_param.ord = conn_param->initiator_depth;
 -      iw_param.ird = conn_param->responder_resources;
 -      iw_param.private_data = conn_param->private_data;
 -      iw_param.private_data_len = conn_param->private_data_len;
 -      if (id_priv->id.qp)
 +      if (conn_param) {
 +              iw_param.ord = conn_param->initiator_depth;
 +              iw_param.ird = conn_param->responder_resources;
 +              iw_param.private_data = conn_param->private_data;
 +              iw_param.private_data_len = conn_param->private_data_len;
 +              iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
 +      } else {
 +              memset(&iw_param, 0, sizeof iw_param);
                iw_param.qpn = id_priv->qp_num;
 -      else
 -              iw_param.qpn = conn_param->qp_num;
 +      }
        ret = iw_cm_connect(cm_id, &iw_param);
  out:
        if (ret) {
@@@ -2787,20 -2766,14 +2788,20 @@@ int rdma_accept(struct rdma_cm_id *id, 
  
        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
 -              if (id->qp_type == IB_QPT_UD)
 -                      ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
 -                                              conn_param->private_data,
 -                                              conn_param->private_data_len);
 -              else if (conn_param)
 -                      ret = cma_accept_ib(id_priv, conn_param);
 -              else
 -                      ret = cma_rep_recv(id_priv);
 +              if (id->qp_type == IB_QPT_UD) {
 +                      if (conn_param)
 +                              ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
 +                                                      conn_param->private_data,
 +                                                      conn_param->private_data_len);
 +                      else
 +                              ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
 +                                                      NULL, 0);
 +              } else {
 +                      if (conn_param)
 +                              ret = cma_accept_ib(id_priv, conn_param);
 +                      else
 +                              ret = cma_rep_recv(id_priv);
 +              }
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = cma_accept_iw(id_priv, conn_param);
@@@ -3488,7 -3461,6 +3489,7 @@@ static void __exit cma_cleanup(void
        idr_destroy(&tcp_ps);
        idr_destroy(&udp_ps);
        idr_destroy(&ipoib_ps);
 +      idr_destroy(&ib_ps);
  }
  
  module_init(cma_init);
@@@ -35,6 -35,7 +35,7 @@@
   */
  #include <linux/dma-mapping.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  #include <rdma/ib_cache.h>
  
  #include "mad_priv.h"
@@@ -1596,9 -1597,6 +1597,9 @@@ find_mad_agent(struct ib_mad_port_priva
                                        mad->mad_hdr.class_version].class;
                        if (!class)
                                goto out;
 +                      if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
 +                          IB_MGMT_MAX_METHODS)
 +                              goto out;
                        method = class->method_table[convert_mgmt_class(
                                                        mad->mad_hdr.mgmt_class)];
                        if (method)
@@@ -35,6 -35,7 +35,7 @@@
  #include "core_priv.h"
  
  #include <linux/slab.h>
+ #include <linux/stat.h>
  #include <linux/string.h>
  
  #include <rdma/ib_mad.h>
@@@ -185,35 -186,17 +186,35 @@@ static ssize_t rate_show(struct ib_por
        if (ret)
                return ret;
  
 +      rate = (25 * attr.active_speed) / 10;
 +
        switch (attr.active_speed) {
 -      case 2: speed = " DDR"; break;
 -      case 4: speed = " QDR"; break;
 +      case 2:
 +              speed = " DDR";
 +              break;
 +      case 4:
 +              speed = " QDR";
 +              break;
 +      case 8:
 +              speed = " FDR10";
 +              rate = 10;
 +              break;
 +      case 16:
 +              speed = " FDR";
 +              rate = 14;
 +              break;
 +      case 32:
 +              speed = " EDR";
 +              rate = 25;
 +              break;
        }
  
 -      rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
 +      rate *= ib_width_enum_to_int(attr.active_width);
        if (rate < 0)
                return -EINVAL;
  
        return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
 -                     rate / 10, rate % 10 ? ".5" : "",
 +                     rate, (attr.active_speed == 1) ? ".5" : "",
                       ib_width_enum_to_int(attr.active_width), speed);
  }
  
@@@ -41,6 -41,7 +41,7 @@@
  #include <linux/miscdevice.h>
  #include <linux/slab.h>
  #include <linux/sysctl.h>
+ #include <linux/module.h>
  
  #include <rdma/rdma_user_cm.h>
  #include <rdma/ib_marshall.h>
@@@ -276,7 -277,7 +277,7 @@@ static int ucma_event_handler(struct rd
        ucma_set_event_context(ctx, event, uevent);
        uevent->resp.event = event->event;
        uevent->resp.status = event->status;
 -      if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
 +      if (cm_id->qp_type == IB_QPT_UD)
                ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
        else
                ucma_copy_conn_event(&uevent->resp.param.conn,
@@@ -377,9 -378,6 +378,9 @@@ static int ucma_get_qp_type(struct rdma
        case RDMA_PS_IPOIB:
                *qp_type = IB_QPT_UD;
                return 0;
 +      case RDMA_PS_IB:
 +              *qp_type = cmd->qp_type;
 +              return 0;
        default:
                return -EINVAL;
        }
@@@ -1273,7 -1271,7 +1274,7 @@@ static ssize_t ucma_write(struct file *
        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;
  
 -      if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
 +      if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
                return -EINVAL;
  
        if (hdr.in + sizeof(hdr) > len)
@@@ -35,6 -35,7 +35,7 @@@
  #include <linux/mm.h>
  #include <linux/dma-mapping.h>
  #include <linux/sched.h>
+ #include <linux/export.h>
  #include <linux/hugetlb.h>
  #include <linux/dma-attrs.h>
  #include <linux/slab.h>
@@@ -136,7 -137,7 +137,7 @@@ struct ib_umem *ib_umem_get(struct ib_u
  
        down_write(&current->mm->mmap_sem);
  
 -      locked     = npages + current->mm->locked_vm;
 +      locked     = npages + current->mm->pinned_vm;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  
        if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@@ -206,7 -207,7 +207,7 @@@ out
                __ib_umem_release(context->device, umem, 0);
                kfree(umem);
        } else
 -              current->mm->locked_vm = locked;
 +              current->mm->pinned_vm = locked;
  
        up_write(&current->mm->mmap_sem);
        if (vma_list)
@@@ -222,7 -223,7 +223,7 @@@ static void ib_umem_account(struct work
        struct ib_umem *umem = container_of(work, struct ib_umem, work);
  
        down_write(&umem->mm->mmap_sem);
 -      umem->mm->locked_vm -= umem->diff;
 +      umem->mm->pinned_vm -= umem->diff;
        up_write(&umem->mm->mmap_sem);
        mmput(umem->mm);
        kfree(umem);
@@@ -38,8 -38,8 +38,9 @@@
  
  #include <linux/errno.h>
  #include <linux/err.h>
+ #include <linux/export.h>
  #include <linux/string.h>
 +#include <linux/slab.h>
  
  #include <rdma/ib_verbs.h>
  #include <rdma/ib_cache.h>
@@@ -78,31 -78,6 +79,31 @@@ enum ib_rate mult_to_ib_rate(int mult
  }
  EXPORT_SYMBOL(mult_to_ib_rate);
  
 +int ib_rate_to_mbps(enum ib_rate rate)
 +{
 +      switch (rate) {
 +      case IB_RATE_2_5_GBPS: return 2500;
 +      case IB_RATE_5_GBPS:   return 5000;
 +      case IB_RATE_10_GBPS:  return 10000;
 +      case IB_RATE_20_GBPS:  return 20000;
 +      case IB_RATE_30_GBPS:  return 30000;
 +      case IB_RATE_40_GBPS:  return 40000;
 +      case IB_RATE_60_GBPS:  return 60000;
 +      case IB_RATE_80_GBPS:  return 80000;
 +      case IB_RATE_120_GBPS: return 120000;
 +      case IB_RATE_14_GBPS:  return 14062;
 +      case IB_RATE_56_GBPS:  return 56250;
 +      case IB_RATE_112_GBPS: return 112500;
 +      case IB_RATE_168_GBPS: return 168750;
 +      case IB_RATE_25_GBPS:  return 25781;
 +      case IB_RATE_100_GBPS: return 103125;
 +      case IB_RATE_200_GBPS: return 206250;
 +      case IB_RATE_300_GBPS: return 309375;
 +      default:               return -1;
 +      }
 +}
 +EXPORT_SYMBOL(ib_rate_to_mbps);
 +
  enum rdma_transport_type
  rdma_node_get_transport(enum rdma_node_type node_type)
  {
@@@ -276,13 -251,6 +277,13 @@@ struct ib_srq *ib_create_srq(struct ib_
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
 +              srq->srq_type      = srq_init_attr->srq_type;
 +              if (srq->srq_type == IB_SRQT_XRC) {
 +                      srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
 +                      srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
 +                      atomic_inc(&srq->ext.xrc.xrcd->usecnt);
 +                      atomic_inc(&srq->ext.xrc.cq->usecnt);
 +              }
                atomic_inc(&pd->usecnt);
                atomic_set(&srq->usecnt, 0);
        }
@@@ -312,29 -280,16 +313,29 @@@ EXPORT_SYMBOL(ib_query_srq)
  int ib_destroy_srq(struct ib_srq *srq)
  {
        struct ib_pd *pd;
 +      enum ib_srq_type srq_type;
 +      struct ib_xrcd *uninitialized_var(xrcd);
 +      struct ib_cq *uninitialized_var(cq);
        int ret;
  
        if (atomic_read(&srq->usecnt))
                return -EBUSY;
  
        pd = srq->pd;
 +      srq_type = srq->srq_type;
 +      if (srq_type == IB_SRQT_XRC) {
 +              xrcd = srq->ext.xrc.xrcd;
 +              cq = srq->ext.xrc.cq;
 +      }
  
        ret = srq->device->destroy_srq(srq);
 -      if (!ret)
 +      if (!ret) {
                atomic_dec(&pd->usecnt);
 +              if (srq_type == IB_SRQT_XRC) {
 +                      atomic_dec(&xrcd->usecnt);
 +                      atomic_dec(&cq->usecnt);
 +              }
 +      }
  
        return ret;
  }
@@@ -342,123 -297,28 +343,123 @@@ EXPORT_SYMBOL(ib_destroy_srq)
  
  /* Queue pairs */
  
 +static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 +{
 +      struct ib_qp *qp = context;
 +
 +      list_for_each_entry(event->element.qp, &qp->open_list, open_list)
 +              event->element.qp->event_handler(event, event->element.qp->qp_context);
 +}
 +
 +static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
 +{
 +      mutex_lock(&xrcd->tgt_qp_mutex);
 +      list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
 +      mutex_unlock(&xrcd->tgt_qp_mutex);
 +}
 +
 +static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
 +                                void (*event_handler)(struct ib_event *, void *),
 +                                void *qp_context)
 +{
 +      struct ib_qp *qp;
 +      unsigned long flags;
 +
 +      qp = kzalloc(sizeof *qp, GFP_KERNEL);
 +      if (!qp)
 +              return ERR_PTR(-ENOMEM);
 +
 +      qp->real_qp = real_qp;
 +      atomic_inc(&real_qp->usecnt);
 +      qp->device = real_qp->device;
 +      qp->event_handler = event_handler;
 +      qp->qp_context = qp_context;
 +      qp->qp_num = real_qp->qp_num;
 +      qp->qp_type = real_qp->qp_type;
 +
 +      spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
 +      list_add(&qp->open_list, &real_qp->open_list);
 +      spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
 +
 +      return qp;
 +}
 +
 +struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
 +                       struct ib_qp_open_attr *qp_open_attr)
 +{
 +      struct ib_qp *qp, *real_qp;
 +
 +      if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
 +              return ERR_PTR(-EINVAL);
 +
 +      qp = ERR_PTR(-EINVAL);
 +      mutex_lock(&xrcd->tgt_qp_mutex);
 +      list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
 +              if (real_qp->qp_num == qp_open_attr->qp_num) {
 +                      qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
 +                                        qp_open_attr->qp_context);
 +                      break;
 +              }
 +      }
 +      mutex_unlock(&xrcd->tgt_qp_mutex);
 +      return qp;
 +}
 +EXPORT_SYMBOL(ib_open_qp);
 +
  struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
  {
 -      struct ib_qp *qp;
 +      struct ib_qp *qp, *real_qp;
 +      struct ib_device *device;
  
 -      qp = pd->device->create_qp(pd, qp_init_attr, NULL);
 +      device = pd ? pd->device : qp_init_attr->xrcd->device;
 +      qp = device->create_qp(pd, qp_init_attr, NULL);
  
        if (!IS_ERR(qp)) {
 -              qp->device        = pd->device;
 -              qp->pd            = pd;
 -              qp->send_cq       = qp_init_attr->send_cq;
 -              qp->recv_cq       = qp_init_attr->recv_cq;
 -              qp->srq           = qp_init_attr->srq;
 -              qp->uobject       = NULL;
 -              qp->event_handler = qp_init_attr->event_handler;
 -              qp->qp_context    = qp_init_attr->qp_context;
 -              qp->qp_type       = qp_init_attr->qp_type;
 -              atomic_inc(&pd->usecnt);
 -              atomic_inc(&qp_init_attr->send_cq->usecnt);
 -              atomic_inc(&qp_init_attr->recv_cq->usecnt);
 -              if (qp_init_attr->srq)
 -                      atomic_inc(&qp_init_attr->srq->usecnt);
 +              qp->device     = device;
 +              qp->real_qp    = qp;
 +              qp->uobject    = NULL;
 +              qp->qp_type    = qp_init_attr->qp_type;
 +
 +              if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
 +                      qp->event_handler = __ib_shared_qp_event_handler;
 +                      qp->qp_context = qp;
 +                      qp->pd = NULL;
 +                      qp->send_cq = qp->recv_cq = NULL;
 +                      qp->srq = NULL;
 +                      qp->xrcd = qp_init_attr->xrcd;
 +                      atomic_inc(&qp_init_attr->xrcd->usecnt);
 +                      INIT_LIST_HEAD(&qp->open_list);
 +                      atomic_set(&qp->usecnt, 0);
 +
 +                      real_qp = qp;
 +                      qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
 +                                        qp_init_attr->qp_context);
 +                      if (!IS_ERR(qp))
 +                              __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
 +                      else
 +                              real_qp->device->destroy_qp(real_qp);
 +              } else {
 +                      qp->event_handler = qp_init_attr->event_handler;
 +                      qp->qp_context = qp_init_attr->qp_context;
 +                      if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
 +                              qp->recv_cq = NULL;
 +                              qp->srq = NULL;
 +                      } else {
 +                              qp->recv_cq = qp_init_attr->recv_cq;
 +                              atomic_inc(&qp_init_attr->recv_cq->usecnt);
 +                              qp->srq = qp_init_attr->srq;
 +                              if (qp->srq)
 +                                      atomic_inc(&qp_init_attr->srq->usecnt);
 +                      }
 +
 +                      qp->pd      = pd;
 +                      qp->send_cq = qp_init_attr->send_cq;
 +                      qp->xrcd    = NULL;
 +
 +                      atomic_inc(&pd->usecnt);
 +                      atomic_inc(&qp_init_attr->send_cq->usecnt);
 +              }
        }
  
        return qp;
@@@ -467,8 -327,8 +468,8 @@@ EXPORT_SYMBOL(ib_create_qp)
  
  static const struct {
        int                     valid;
 -      enum ib_qp_attr_mask    req_param[IB_QPT_RAW_ETHERTYPE + 1];
 -      enum ib_qp_attr_mask    opt_param[IB_QPT_RAW_ETHERTYPE + 1];
 +      enum ib_qp_attr_mask    req_param[IB_QPT_MAX];
 +      enum ib_qp_attr_mask    opt_param[IB_QPT_MAX];
  } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
 +                              [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX            |
 +                                              IB_QP_PORT                      |
 +                                              IB_QP_ACCESS_FLAGS),
 +                              [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX            |
 +                                              IB_QP_PORT                      |
 +                                              IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
 +                              [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX            |
 +                                              IB_QP_PORT                      |
 +                                              IB_QP_ACCESS_FLAGS),
 +                              [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX            |
 +                                              IB_QP_PORT                      |
 +                                              IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_RQ_PSN                    |
                                                IB_QP_MAX_DEST_RD_ATOMIC        |
                                                IB_QP_MIN_RNR_TIMER),
 +                              [IB_QPT_XRC_INI] = (IB_QP_AV                    |
 +                                              IB_QP_PATH_MTU                  |
 +                                              IB_QP_DEST_QPN                  |
 +                                              IB_QP_RQ_PSN),
 +                              [IB_QPT_XRC_TGT] = (IB_QP_AV                    |
 +                                              IB_QP_PATH_MTU                  |
 +                                              IB_QP_DEST_QPN                  |
 +                                              IB_QP_RQ_PSN                    |
 +                                              IB_QP_MAX_DEST_RD_ATOMIC        |
 +                                              IB_QP_MIN_RNR_TIMER),
                        },
                        .opt_param = {
                                 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX               |
                                 [IB_QPT_RC]  = (IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PKEY_INDEX),
 +                               [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH             |
 +                                               IB_QP_ACCESS_FLAGS             |
 +                                               IB_QP_PKEY_INDEX),
 +                               [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH             |
 +                                               IB_QP_ACCESS_FLAGS             |
 +                                               IB_QP_PKEY_INDEX),
                                 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX               |
                                                 IB_QP_QKEY),
                                 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX               |
                                                IB_QP_RNR_RETRY                 |
                                                IB_QP_SQ_PSN                    |
                                                IB_QP_MAX_QP_RD_ATOMIC),
 +                              [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT               |
 +                                              IB_QP_RETRY_CNT                 |
 +                                              IB_QP_RNR_RETRY                 |
 +                                              IB_QP_SQ_PSN                    |
 +                                              IB_QP_MAX_QP_RD_ATOMIC),
 +                              [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT               |
 +                                              IB_QP_SQ_PSN),
                                [IB_QPT_SMI] = IB_QP_SQ_PSN,
                                [IB_QPT_GSI] = IB_QP_SQ_PSN,
                        },
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_MIN_RNR_TIMER            |
                                                 IB_QP_PATH_MIG_STATE),
 +                               [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE            |
 +                                               IB_QP_ALT_PATH                 |
 +                                               IB_QP_ACCESS_FLAGS             |
 +                                               IB_QP_PATH_MIG_STATE),
 +                               [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE            |
 +                                               IB_QP_ALT_PATH                 |
 +                                               IB_QP_ACCESS_FLAGS             |
 +                                               IB_QP_MIN_RNR_TIMER            |
 +                                               IB_QP_PATH_MIG_STATE),
                                 [IB_QPT_SMI] = (IB_QP_CUR_STATE                |
                                                 IB_QP_QKEY),
                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE                |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_PATH_MIG_STATE            |
                                                IB_QP_MIN_RNR_TIMER),
 +                              [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE             |
 +                                              IB_QP_ACCESS_FLAGS              |
 +                                              IB_QP_ALT_PATH                  |
 +                                              IB_QP_PATH_MIG_STATE),
 +                              [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE             |
 +                                              IB_QP_ACCESS_FLAGS              |
 +                                              IB_QP_ALT_PATH                  |
 +                                              IB_QP_PATH_MIG_STATE            |
 +                                              IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
                                [IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
 +                              [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
 +                              [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
                                [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
                        }
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_MIN_RNR_TIMER             |
                                                IB_QP_PATH_MIG_STATE),
 +                              [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE             |
 +                                              IB_QP_ALT_PATH                  |
 +                                              IB_QP_ACCESS_FLAGS              |
 +                                              IB_QP_PATH_MIG_STATE),
 +                              [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE             |
 +                                              IB_QP_ALT_PATH                  |
 +                                              IB_QP_ACCESS_FLAGS              |
 +                                              IB_QP_MIN_RNR_TIMER             |
 +                                              IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_PKEY_INDEX                |
                                                IB_QP_MIN_RNR_TIMER             |
                                                IB_QP_PATH_MIG_STATE),
 +                              [IB_QPT_XRC_INI] = (IB_QP_PORT                  |
 +                                              IB_QP_AV                        |
 +                                              IB_QP_TIMEOUT                   |
 +                                              IB_QP_RETRY_CNT                 |
 +                                              IB_QP_RNR_RETRY                 |
 +                                              IB_QP_MAX_QP_RD_ATOMIC          |
 +                                              IB_QP_ALT_PATH                  |
 +                                              IB_QP_ACCESS_FLAGS              |
 +                                              IB_QP_PKEY_INDEX                |
 +                                              IB_QP_PATH_MIG_STATE),
 +                              [IB_QPT_XRC_TGT] = (IB_QP_PORT                  |
 +                                              IB_QP_AV                        |
 +                                              IB_QP_TIMEOUT                   |
 +                                              IB_QP_MAX_DEST_RD_ATOMIC        |
 +                                              IB_QP_ALT_PATH                  |
 +                                              IB_QP_ACCESS_FLAGS              |
 +                                              IB_QP_PKEY_INDEX                |
 +                                              IB_QP_MIN_RNR_TIMER             |
 +                                              IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
@@@ -803,7 -580,7 +804,7 @@@ int ib_modify_qp(struct ib_qp *qp
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
  {
 -      return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
 +      return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
  }
  EXPORT_SYMBOL(ib_modify_qp);
  
@@@ -813,59 -590,11 +814,59 @@@ int ib_query_qp(struct ib_qp *qp
                struct ib_qp_init_attr *qp_init_attr)
  {
        return qp->device->query_qp ?
 -              qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
 +              qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
  }
  EXPORT_SYMBOL(ib_query_qp);
  
 +int ib_close_qp(struct ib_qp *qp)
 +{
 +      struct ib_qp *real_qp;
 +      unsigned long flags;
 +
 +      real_qp = qp->real_qp;
 +      if (real_qp == qp)
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
 +      list_del(&qp->open_list);
 +      spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
 +
 +      atomic_dec(&real_qp->usecnt);
 +      kfree(qp);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL(ib_close_qp);
 +
 +static int __ib_destroy_shared_qp(struct ib_qp *qp)
 +{
 +      struct ib_xrcd *xrcd;
 +      struct ib_qp *real_qp;
 +      int ret;
 +
 +      real_qp = qp->real_qp;
 +      xrcd = real_qp->xrcd;
 +
 +      mutex_lock(&xrcd->tgt_qp_mutex);
 +      ib_close_qp(qp);
 +      if (atomic_read(&real_qp->usecnt) == 0)
 +              list_del(&real_qp->xrcd_list);
 +      else
 +              real_qp = NULL;
 +      mutex_unlock(&xrcd->tgt_qp_mutex);
 +
 +      if (real_qp) {
 +              ret = ib_destroy_qp(real_qp);
 +              if (!ret)
 +                      atomic_dec(&xrcd->usecnt);
 +              else
 +                      __ib_insert_xrcd_qp(xrcd, real_qp);
 +      }
 +
 +      return 0;
 +}
 +
  int ib_destroy_qp(struct ib_qp *qp)
  {
        struct ib_pd *pd;
        struct ib_srq *srq;
        int ret;
  
 -      pd  = qp->pd;
 -      scq = qp->send_cq;
 -      rcq = qp->recv_cq;
 -      srq = qp->srq;
 +      if (atomic_read(&qp->usecnt))
 +              return -EBUSY;
 +
 +      if (qp->real_qp != qp)
 +              return __ib_destroy_shared_qp(qp);
 +
 +      pd   = qp->pd;
 +      scq  = qp->send_cq;
 +      rcq  = qp->recv_cq;
 +      srq  = qp->srq;
  
        ret = qp->device->destroy_qp(qp);
        if (!ret) {
 -              atomic_dec(&pd->usecnt);
 -              atomic_dec(&scq->usecnt);
 -              atomic_dec(&rcq->usecnt);
 +              if (pd)
 +                      atomic_dec(&pd->usecnt);
 +              if (scq)
 +                      atomic_dec(&scq->usecnt);
 +              if (rcq)
 +                      atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
        }
@@@ -1201,42 -921,3 +1202,42 @@@ int ib_detach_mcast(struct ib_qp *qp, u
        return qp->device->detach_mcast(qp, gid, lid);
  }
  EXPORT_SYMBOL(ib_detach_mcast);
 +
 +struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
 +{
 +      struct ib_xrcd *xrcd;
 +
 +      if (!device->alloc_xrcd)
 +              return ERR_PTR(-ENOSYS);
 +
 +      xrcd = device->alloc_xrcd(device, NULL, NULL);
 +      if (!IS_ERR(xrcd)) {
 +              xrcd->device = device;
 +              xrcd->inode = NULL;
 +              atomic_set(&xrcd->usecnt, 0);
 +              mutex_init(&xrcd->tgt_qp_mutex);
 +              INIT_LIST_HEAD(&xrcd->tgt_qp_list);
 +      }
 +
 +      return xrcd;
 +}
 +EXPORT_SYMBOL(ib_alloc_xrcd);
 +
 +int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
 +{
 +      struct ib_qp *qp;
 +      int ret;
 +
 +      if (atomic_read(&xrcd->usecnt))
 +              return -EBUSY;
 +
 +      while (!list_empty(&xrcd->tgt_qp_list)) {
 +              qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
 +              ret = ib_destroy_qp(qp);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      return xrcd->device->dealloc_xrcd(xrcd);
 +}
 +EXPORT_SYMBOL(ib_dealloc_xrcd);
@@@ -29,6 -29,9 +29,9 @@@
   * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   * SOFTWARE.
   */
+ #include <linux/module.h>
  #include "iw_cxgb4.h"
  
  static int ocqp_support = 1;
@@@ -917,11 -920,7 +920,11 @@@ static void post_terminate(struct c4iw_
        wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
        wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
        term = (struct terminate_message *)wqe->u.terminate.termmsg;
 -      build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
 +      if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
 +              term->layer_etype = qhp->attr.layer_etype;
 +              term->ecode = qhp->attr.ecode;
 +      } else
 +              build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
        c4iw_ofld_send(&qhp->rhp->rdev, skb);
  }
  
@@@ -945,11 -944,8 +948,11 @@@ static void __flush_qp(struct c4iw_qp *
        flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&rchp->lock, flag);
 -      if (flushed)
 +      if (flushed) {
 +              spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
 +              spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
 +      }
  
        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&schp->lock, flag);
        flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&schp->lock, flag);
 -      if (flushed)
 +      if (flushed) {
 +              spin_lock_irqsave(&schp->comp_handler_lock, flag);
                (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
 +              spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
 +      }
  }
  
  static void flush_qp(struct c4iw_qp *qhp)
  {
        struct c4iw_cq *rchp, *schp;
 +      unsigned long flag;
  
        rchp = get_chp(qhp->rhp, qhp->attr.rcq);
        schp = get_chp(qhp->rhp, qhp->attr.scq);
        if (qhp->ibqp.uobject) {
                t4_set_wq_in_error(&qhp->wq);
                t4_set_cq_in_error(&rchp->cq);
 -              if (schp != rchp)
 +              spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 +              (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
 +              spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
 +              if (schp != rchp) {
                        t4_set_cq_in_error(&schp->cq);
 +                      spin_lock_irqsave(&schp->comp_handler_lock, flag);
 +                      (*schp->ibcq.comp_handler)(&schp->ibcq,
 +                                      schp->ibcq.cq_context);
 +                      spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
 +              }
                return;
        }
        __flush_qp(qhp, rchp, schp);
@@@ -1031,7 -1015,6 +1034,7 @@@ out
  
  static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
  {
 +      PDBG("%s p2p_type = %d\n", __func__, p2p_type);
        memset(&init->u, 0, sizeof init->u);
        switch (p2p_type) {
        case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
@@@ -1226,16 -1209,12 +1229,16 @@@ int c4iw_modify_qp(struct c4iw_dev *rhp
                                disconnect = 1;
                                c4iw_get_ep(&qhp->ep->com);
                        }
 +                      if (qhp->ibqp.uobject)
 +                              t4_set_wq_in_error(&qhp->wq);
                        ret = rdma_fini(rhp, qhp, ep);
                        if (ret)
                                goto err;
                        break;
                case C4IW_QP_STATE_TERMINATE:
                        set_state(qhp, C4IW_QP_STATE_TERMINATE);
 +                      qhp->attr.layer_etype = attrs->layer_etype;
 +                      qhp->attr.ecode = attrs->ecode;
                        if (qhp->ibqp.uobject)
                                t4_set_wq_in_error(&qhp->wq);
                        ep = qhp->ep;
                        break;
                case C4IW_QP_STATE_ERROR:
                        set_state(qhp, C4IW_QP_STATE_ERROR);
 +                      if (qhp->ibqp.uobject)
 +                              t4_set_wq_in_error(&qhp->wq);
                        if (!internal) {
                                abort = 1;
                                disconnect = 1;
@@@ -1360,10 -1337,7 +1363,10 @@@ int c4iw_destroy_qp(struct ib_qp *ib_qp
        rhp = qhp->rhp;
  
        attrs.next_state = C4IW_QP_STATE_ERROR;
 -      c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
 +      if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
 +              c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 +      else
 +              c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
        wait_event(qhp->wait, !qhp->ep);
  
        remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
@@@ -40,7 -40,9 +40,9 @@@
  
  #include <linux/sched.h>
  #include <linux/slab.h>
+ #include <linux/stat.h>
  #include <linux/mm.h>
+ #include <linux/export.h>
  
  #include "mthca_dev.h"
  #include "mthca_cmd.h"
@@@ -438,9 -440,6 +440,9 @@@ static struct ib_srq *mthca_create_srq(
        struct mthca_srq *srq;
        int err;
  
 +      if (init_attr->srq_type != IB_SRQT_BASIC)
 +              return ERR_PTR(-ENOSYS);
 +
        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);
@@@ -37,6 -37,7 +37,7 @@@
  #include <linux/delay.h>
  #include <linux/netdevice.h>
  #include <linux/vmalloc.h>
+ #include <linux/module.h>
  
  #include "qib.h"
  
@@@ -279,10 -280,10 +280,10 @@@ bail
   */
  static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
  {
 -      const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
 -      const u32 idx =  etail % rcd->rcvegrbufs_perchunk;
 +      const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
 +      const u32 idx =  etail & ((u32)rcd->rcvegrbufs_perchunk - 1);
  
 -      return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
 +      return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
  }
  
  /*
@@@ -310,6 -311,7 +311,6 @@@ static u32 qib_rcv_hdrerr(struct qib_ct
                u32 opcode;
                u32 psn;
                int diff;
 -              unsigned long flags;
  
                /* Sanity check packet */
                if (tlen < 24)
  
                        switch (qp->ibqp.qp_type) {
                        case IB_QPT_RC:
 -                              spin_lock_irqsave(&qp->s_lock, flags);
                                ruc_res =
                                        qib_ruc_check_hdr(
                                                ibp, hdr,
                                                qp,
                                                be32_to_cpu(ohdr->bth[0]));
                                if (ruc_res) {
 -                                      spin_unlock_irqrestore(&qp->s_lock,
 -                                                             flags);
                                        goto unlock;
                                }
 -                              spin_unlock_irqrestore(&qp->s_lock, flags);
  
                                /* Only deal with RDMA Writes for now */
                                if (opcode <
@@@ -542,15 -548,6 +543,15 @@@ move_along
                        updegr = 0;
                }
        }
 +      /*
 +       * Notify qib_destroy_qp() if it is waiting
 +       * for lookaside_qp to finish.
 +       */
 +      if (rcd->lookaside_qp) {
 +              if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
 +                      wake_up(&rcd->lookaside_qp->wait);
 +              rcd->lookaside_qp = NULL;
 +      }
  
        rcd->head = l;
        rcd->pkt_count += i;
@@@ -43,6 -43,7 +43,7 @@@
  #include <linux/jiffies.h>
  #include <asm/pgtable.h>
  #include <linux/delay.h>
+ #include <linux/export.h>
  
  #include "qib.h"
  #include "qib_common.h"
@@@ -1284,7 -1285,6 +1285,7 @@@ static int setup_ctxt(struct qib_pportd
        strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
        ctxt_fp(fp) = rcd;
        qib_stats.sps_ctxts++;
 +      dd->freectxts++;
        ret = 0;
        goto bail;
  
@@@ -1793,7 -1793,6 +1794,7 @@@ static int qib_close(struct inode *in, 
                if (dd->pageshadow)
                        unlock_expected_tids(rcd);
                qib_stats.sps_ctxts--;
 +              dd->freectxts--;
        }
  
        mutex_unlock(&qib_mutex);
@@@ -39,6 -39,7 +39,7 @@@
  #include <linux/interrupt.h>
  #include <linux/pci.h>
  #include <linux/delay.h>
+ #include <linux/module.h>
  #include <linux/io.h>
  #include <rdma/ib_verbs.h>
  
@@@ -4085,8 -4086,6 +4086,8 @@@ static int qib_init_7220_variables(stru
        /* we always allocate at least 2048 bytes for eager buffers */
        ret = ib_mtu_enum_to_int(qib_ibmtu);
        dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
 +      BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
 +      dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
  
        qib_7220_tidtemplate(dd);
  
@@@ -40,6 -40,7 +40,7 @@@
  #include <linux/delay.h>
  #include <linux/io.h>
  #include <linux/jiffies.h>
+ #include <linux/module.h>
  #include <rdma/ib_verbs.h>
  #include <rdma/ib_smi.h>
  
@@@ -2310,15 -2311,12 +2311,15 @@@ static int qib_7322_bringup_serdes(stru
        val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
                QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
  
 +      ppd->cpspec->ibcctrl_a = val;
        /*
         * Reset the PCS interface to the serdes (and also ibc, which is still
         * in reset from above).  Writes new value of ibcctrl_a as last step.
         */
        qib_7322_mini_pcs_reset(ppd);
        qib_write_kreg(dd, kr_scratch, 0ULL);
 +      /* clear the linkinit cmds */
 +      ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
  
        if (!ppd->cpspec->ibcctrl_b) {
                unsigned lse = ppd->link_speed_enabled;
        qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
  
 -      /* Hold the link state machine for mezz boards */
 -      if (IS_QMH(dd) || IS_QME(dd))
 -              qib_set_ib_7322_lstate(ppd, 0,
 -                                     QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
 -
        /* Also enable IBSTATUSCHG interrupt.  */
        val = qib_read_kreg_port(ppd, krp_errmask);
        qib_write_kreg_port(ppd, krp_errmask,
@@@ -2851,8 -2854,9 +2852,8 @@@ static irqreturn_t qib_7322intr(int irq
                for (i = 0; i < dd->first_user_ctxt; i++) {
                        if (ctxtrbits & rmask) {
                                ctxtrbits &= ~rmask;
 -                              if (dd->rcd[i]) {
 +                              if (dd->rcd[i])
                                        qib_kreceive(dd->rcd[i], NULL, &npkts);
 -                              }
                        }
                        rmask <<= 1;
                }
@@@ -5227,8 -5231,6 +5228,8 @@@ static int qib_7322_ib_updown(struct qi
                                     QIBL_IB_AUTONEG_INPROG)))
                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
 +                      struct qib_qsfp_data *qd =
 +                              &ppd->cpspec->qsfp_data;
                        /* unlock the Tx settings, speed may change */
                        qib_write_kreg_port(ppd, krp_tx_deemph_override,
                                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                        qib_cancel_sends(ppd);
                        /* on link down, ensure sane pcs state */
                        qib_7322_mini_pcs_reset(ppd);
 +                      /* schedule the qsfp refresh which should turn the link
 +                         off */
 +                      if (ppd->dd->flags & QIB_HAS_QSFP) {
 +                              qd->t_insert = get_jiffies_64();
 +                              schedule_work(&qd->work);
 +                      }
                        spin_lock_irqsave(&ppd->sdma_lock, flags);
                        if (__qib_sdma_running(ppd))
                                __qib_sdma_process_event(ppd,
@@@ -5592,79 -5588,43 +5593,79 @@@ static void qsfp_7322_event(struct work
        struct qib_qsfp_data *qd;
        struct qib_pportdata *ppd;
        u64 pwrup;
 +      unsigned long flags;
        int ret;
        u32 le2;
  
        qd = container_of(work, struct qib_qsfp_data, work);
        ppd = qd->ppd;
 -      pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
 +      pwrup = qd->t_insert +
 +              msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
  
 -      /*
 -       * Some QSFP's not only do not respond until the full power-up
 -       * time, but may behave badly if we try. So hold off responding
 -       * to insertion.
 -       */
 -      while (1) {
 -              u64 now = get_jiffies_64();
 -              if (time_after64(now, pwrup))
 -                      break;
 -              msleep(20);
 -      }
 -      ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
 -      /*
 -       * Need to change LE2 back to defaults if we couldn't
 -       * read the cable type (to handle cable swaps), so do this
 -       * even on failure to read cable information.  We don't
 -       * get here for QME, so IS_QME check not needed here.
 -       */
 -      if (!ret && !ppd->dd->cspec->r1) {
 -              if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
 -                      le2 = LE2_QME;
 -              else if (qd->cache.atten[1] >= qib_long_atten &&
 -                       QSFP_IS_CU(qd->cache.tech))
 -                      le2 = LE2_5m;
 -              else
 +      /* Delay for 20 msecs to allow ModPrs resistor to setup */
 +      mdelay(QSFP_MODPRS_LAG_MSEC);
 +
 +      if (!qib_qsfp_mod_present(ppd)) {
 +              ppd->cpspec->qsfp_data.modpresent = 0;
 +              /* Set the physical link to disabled */
 +              qib_set_ib_7322_lstate(ppd, 0,
 +                                     QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
 +              spin_lock_irqsave(&ppd->lflags_lock, flags);
 +              ppd->lflags &= ~QIBL_LINKV;
 +              spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 +      } else {
 +              /*
 +               * Some QSFP's not only do not respond until the full power-up
 +               * time, but may behave badly if we try. So hold off responding
 +               * to insertion.
 +               */
 +              while (1) {
 +                      u64 now = get_jiffies_64();
 +                      if (time_after64(now, pwrup))
 +                              break;
 +                      msleep(20);
 +              }
 +
 +              ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
 +
 +              /*
 +               * Need to change LE2 back to defaults if we couldn't
 +               * read the cable type (to handle cable swaps), so do this
 +               * even on failure to read cable information.  We don't
 +               * get here for QME, so IS_QME check not needed here.
 +               */
 +              if (!ret && !ppd->dd->cspec->r1) {
 +                      if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
 +                              le2 = LE2_QME;
 +                      else if (qd->cache.atten[1] >= qib_long_atten &&
 +                               QSFP_IS_CU(qd->cache.tech))
 +                              le2 = LE2_5m;
 +                      else
 +                              le2 = LE2_DEFAULT;
 +              } else
                        le2 = LE2_DEFAULT;
 -      } else
 -              le2 = LE2_DEFAULT;
 -      ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
 -      init_txdds_table(ppd, 0);
 +              ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
 +              /*
 +               * We always change parameteters, since we can choose
 +               * values for cables without eeproms, and the cable may have
 +               * changed from a cable with full or partial eeprom content
 +               * to one with partial or no content.
 +               */
 +              init_txdds_table(ppd, 0);
 +              /* The physical link is being re-enabled only when the
 +               * previous state was DISABLED and the VALID bit is not
 +               * set. This should only happen when  the cable has been
 +               * physically pulled. */
 +              if (!ppd->cpspec->qsfp_data.modpresent &&
 +                  (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
 +                      ppd->cpspec->qsfp_data.modpresent = 1;
 +                      qib_set_ib_7322_lstate(ppd, 0,
 +                              QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
 +                      spin_lock_irqsave(&ppd->lflags_lock, flags);
 +                      ppd->lflags |= QIBL_LINKV;
 +                      spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 +              }
 +      }
  }
  
  /*
@@@ -5768,8 -5728,7 +5769,8 @@@ static void set_no_qsfp_atten(struct qi
                        /* now change the IBC and serdes, overriding generic */
                        init_txdds_table(ppd, 1);
                        /* Re-enable the physical state machine on mezz boards
 -                       * now that the correct settings have been set. */
 +                       * now that the correct settings have been set.
 +                       * QSFP boards are handled by the QSFP event handler */
                        if (IS_QMH(dd) || IS_QME(dd))
                                qib_set_ib_7322_lstate(ppd, 0,
                                            QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
@@@ -6247,8 -6206,6 +6248,8 @@@ static int qib_init_7322_variables(stru
  
        /* we always allocate at least 2048 bytes for eager buffers */
        dd->rcvegrbufsize = max(mtu, 2048);
 +      BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
 +      dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
  
        qib_7322_tidtemplate(dd);
  
@@@ -7191,8 -7148,7 +7192,8 @@@ static void find_best_ent(struct qib_pp
                }
        }
  
 -      /* Lookup serdes setting by cable type and attenuation */
 +      /* Active cables don't have attenuation so we only set SERDES
 +       * settings to account for the attenuation of the board traces. */
        if (!override && QSFP_IS_ACTIVE(qd->tech)) {
                *sdr_dds = txdds_sdr + ppd->dd->board_atten;
                *ddr_dds = txdds_ddr + ppd->dd->board_atten;
@@@ -7509,6 -7465,12 +7510,6 @@@ static int serdes_7322_init_new(struct 
        u32 le_val, rxcaldone;
        int chan, chan_done = (1 << SERDES_CHANS) - 1;
  
 -      /*
 -       * Initialize the Tx DDS tables.  Also done every QSFP event,
 -       * for adapters with QSFP
 -       */
 -      init_txdds_table(ppd, 0);
 -
        /* Clear cmode-override, may be set from older driver */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
  
        /* VGA output common mode */
        ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
  
 +      /*
 +       * Initialize the Tx DDS tables.  Also done every QSFP event,
 +       * for adapters with QSFP
 +       */
 +      init_txdds_table(ppd, 0);
 +
        return 0;
  }
  
@@@ -37,6 -37,7 +37,7 @@@
  #include <linux/vmalloc.h>
  #include <linux/delay.h>
  #include <linux/idr.h>
+ #include <linux/module.h>
  
  #include "qib.h"
  #include "qib_common.h"
@@@ -183,9 -184,6 +184,9 @@@ struct qib_ctxtdata *qib_create_ctxtdat
                rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
                        rcd->rcvegrbufs_perchunk - 1) /
                        rcd->rcvegrbufs_perchunk;
 +              BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
 +              rcd->rcvegrbufs_perchunk_shift =
 +                      ilog2(rcd->rcvegrbufs_perchunk);
        }
        return rcd;
  }
@@@ -401,7 -399,6 +402,7 @@@ static void enable_chip(struct qib_devd
                if (rcd)
                        dd->f_rcvctrl(rcd->ppd, rcvmask, i);
        }
 +      dd->freectxts = dd->cfgctxts - dd->first_user_ctxt;
  }
  
  static void verify_interrupt(unsigned long opaque)
@@@ -585,6 -582,10 +586,6 @@@ int qib_init(struct qib_devdata *dd, in
                        continue;
                }
  
 -              /* let link come up, and enable IBC */
 -              spin_lock_irqsave(&ppd->lflags_lock, flags);
 -              ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
 -              spin_unlock_irqrestore(&ppd->lflags_lock, flags);
                portok++;
        }
  
  #include <rdma/ib_mad.h>
  #include <rdma/ib_user_verbs.h>
  #include <linux/io.h>
+ #include <linux/module.h>
  #include <linux/utsname.h>
  #include <linux/rculist.h>
  #include <linux/mm.h>
 +#include <linux/random.h>
  
  #include "qib.h"
  #include "qib_common.h"
  
 -static unsigned int ib_qib_qp_table_size = 251;
 +static unsigned int ib_qib_qp_table_size = 256;
  module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
  MODULE_PARM_DESC(qp_table_size, "QP table size");
  
@@@ -660,25 -660,17 +661,25 @@@ void qib_ib_rcv(struct qib_ctxtdata *rc
                if (atomic_dec_return(&mcast->refcount) <= 1)
                        wake_up(&mcast->wait);
        } else {
 -              qp = qib_lookup_qpn(ibp, qp_num);
 -              if (!qp)
 -                      goto drop;
 +              if (rcd->lookaside_qp) {
 +                      if (rcd->lookaside_qpn != qp_num) {
 +                              if (atomic_dec_and_test(
 +                                      &rcd->lookaside_qp->refcount))
 +                                      wake_up(
 +                                       &rcd->lookaside_qp->wait);
 +                                      rcd->lookaside_qp = NULL;
 +                              }
 +              }
 +              if (!rcd->lookaside_qp) {
 +                      qp = qib_lookup_qpn(ibp, qp_num);
 +                      if (!qp)
 +                              goto drop;
 +                      rcd->lookaside_qp = qp;
 +                      rcd->lookaside_qpn = qp_num;
 +              } else
 +                      qp = rcd->lookaside_qp;
                ibp->n_unicast_rcv++;
                qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
 -              /*
 -               * Notify qib_destroy_qp() if it is waiting
 -               * for us to finish.
 -               */
 -              if (atomic_dec_and_test(&qp->refcount))
 -                      wake_up(&qp->wait);
        }
        return;
  
@@@ -1983,8 -1975,6 +1984,8 @@@ static void init_ibport(struct qib_ppor
        ibp->z_excessive_buffer_overrun_errors =
                cntrs.excessive_buffer_overrun_errors;
        ibp->z_vl15_dropped = cntrs.vl15_dropped;
 +      RCU_INIT_POINTER(ibp->qp0, NULL);
 +      RCU_INIT_POINTER(ibp->qp1, NULL);
  }
  
  /**
@@@ -2001,15 -1991,12 +2002,15 @@@ int qib_register_ib_device(struct qib_d
        int ret;
  
        dev->qp_table_size = ib_qib_qp_table_size;
 -      dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
 +      get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
 +      dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
                                GFP_KERNEL);
        if (!dev->qp_table) {
                ret = -ENOMEM;
                goto err_qpt;
        }
 +      for (i = 0; i < dev->qp_table_size; i++)
 +              RCU_INIT_POINTER(dev->qp_table[i], NULL);
  
        for (i = 0; i < dd->num_pports; i++)
                init_ibport(ppd + i);
@@@ -37,6 -37,7 +37,7 @@@
  #include <linux/delay.h>
  #include <linux/slab.h>
  #include <linux/vmalloc.h>
+ #include <linux/moduleparam.h>
  
  #include "ipoib.h"
  
@@@ -84,7 -85,7 +85,7 @@@ static void ipoib_cm_dma_unmap_rx(struc
        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
  
        for (i = 0; i < frags; ++i)
 -              ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
 +              ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
  }
  
  static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
@@@ -183,7 -184,7 +184,7 @@@ partial_error
        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
  
        for (; i > 0; --i)
 -              ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
 +              ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
  
        dev_kfree_skb_any(skb);
        return NULL;
@@@ -1497,7 -1498,6 +1498,7 @@@ static void ipoib_cm_create_srq(struct 
  {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_srq_init_attr srq_init_attr = {
 +              .srq_type = IB_SRQT_BASIC,
                .attr = {
                        .max_wr  = ipoib_recvq_size,
                        .max_sge = max_sge
@@@ -37,6 -37,7 +37,7 @@@
  struct file_operations;
  
  #include <linux/debugfs.h>
+ #include <linux/export.h>
  
  #include "ipoib.h"
  
@@@ -212,15 -213,16 +213,15 @@@ static int ipoib_path_seq_show(struct s
                   gid_buf, path.pathrec.dlid ? "yes" : "no");
  
        if (path.pathrec.dlid) {
 -              rate = ib_rate_to_mult(path.pathrec.rate) * 25;
 +              rate = ib_rate_to_mbps(path.pathrec.rate);
  
                seq_printf(file,
                           "  DLID:     0x%04x\n"
                           "  SL: %12d\n"
 -                         "  rate: %*d%s Gb/sec\n",
 +                         "  rate: %8d.%d Gb/sec\n",
                           be16_to_cpu(path.pathrec.dlid),
                           path.pathrec.sl,
 -                         10 - ((rate % 10) ? 2 : 0),
 -                         rate / 10, rate % 10 ? ".5" : "");
 +                         rate / 1000, rate % 1000);
        }
  
        seq_putc(file, '\n');
@@@ -57,6 -57,7 +57,7 @@@
  #include <linux/scatterlist.h>
  #include <linux/delay.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  
  #include <net/sock.h>
  
@@@ -151,6 -152,7 +152,6 @@@ int iser_initialize_task_headers(struc
        tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
        tx_desc->tx_sg[0].lkey   = device->mr->lkey;
  
 -      iser_task->headers_initialized  = 1;
        iser_task->iser_conn            = iser_conn;
        return 0;
  }
@@@ -165,7 -167,8 +166,7 @@@ iscsi_iser_task_init(struct iscsi_task 
  {
        struct iscsi_iser_task *iser_task = task->dd_data;
  
 -      if (!iser_task->headers_initialized)
 -              if (iser_initialize_task_headers(task, &iser_task->desc))
 +      if (iser_initialize_task_headers(task, &iser_task->desc))
                        return -ENOMEM;
  
        /* mgmt task */
@@@ -276,13 -279,6 +277,13 @@@ iscsi_iser_task_xmit(struct iscsi_task 
  static void iscsi_iser_cleanup_task(struct iscsi_task *task)
  {
        struct iscsi_iser_task *iser_task = task->dd_data;
 +      struct iser_tx_desc     *tx_desc = &iser_task->desc;
 +
 +      struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
 +      struct iser_device     *device    = iser_conn->ib_conn->device;
 +
 +      ib_dma_unmap_single(device->ib_device,
 +              tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
  
        /* mgmt tasks do not need special cleanup */
        if (!task->sc)
@@@ -14,6 -14,7 +14,7 @@@
  
  #include <linux/mfd/asic3.h>
  #include <linux/mfd/core.h>
+ #include <linux/module.h>
  
  /*
   *    The HTC ASIC3 LED GPIOs are inputs, not outputs.
@@@ -107,10 -108,9 +108,10 @@@ static int __devinit asic3_led_probe(st
        }
  
        led->cdev->name = led->name;
 -      led->cdev->default_trigger = led->default_trigger;
 +      led->cdev->flags = LED_CORE_SUSPENDRESUME;
        led->cdev->brightness_set = brightness_set;
        led->cdev->blink_set = blink_set;
 +      led->cdev->default_trigger = led->default_trigger;
  
        ret = led_classdev_register(&pdev->dev, led->cdev);
        if (ret < 0)
@@@ -137,44 -137,12 +138,44 @@@ static int __devexit asic3_led_remove(s
        return mfd_cell_disable(pdev);
  }
  
 +static int asic3_led_suspend(struct device *dev)
 +{
 +      struct platform_device *pdev = to_platform_device(dev);
 +      const struct mfd_cell *cell = mfd_get_cell(pdev);
 +      int ret;
 +
 +      ret = 0;
 +      if (cell->suspend)
 +              ret = (*cell->suspend)(pdev);
 +
 +      return ret;
 +}
 +
 +static int asic3_led_resume(struct device *dev)
 +{
 +      struct platform_device *pdev = to_platform_device(dev);
 +      const struct mfd_cell *cell = mfd_get_cell(pdev);
 +      int ret;
 +
 +      ret = 0;
 +      if (cell->resume)
 +              ret = (*cell->resume)(pdev);
 +
 +      return ret;
 +}
 +
 +static const struct dev_pm_ops asic3_led_pm_ops = {
 +      .suspend        = asic3_led_suspend,
 +      .resume         = asic3_led_resume,
 +};
 +
  static struct platform_driver asic3_led_driver = {
        .probe          = asic3_led_probe,
        .remove         = __devexit_p(asic3_led_remove),
        .driver         = {
                .name   = "leds-asic3",
                .owner  = THIS_MODULE,
 +              .pm     = &asic3_led_pm_ops,
        },
  };
  
diff --combined drivers/leds/leds-gpio.c
@@@ -18,6 -18,7 +18,7 @@@
  #include <linux/of_gpio.h>
  #include <linux/slab.h>
  #include <linux/workqueue.h>
+ #include <linux/module.h>
  
  #include <asm/gpio.h>
  
@@@ -121,7 -122,7 +122,7 @@@ static int __devinit create_gpio_led(co
        }
        led_dat->cdev.brightness_set = gpio_led_set;
        if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
 -              state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low;
 +              state = !!gpio_get_value_cansleep(led_dat->gpio) ^ led_dat->active_low;
        else
                state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
        led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
@@@ -18,6 -18,7 +18,7 @@@
  #include <linux/led-lm3530.h>
  #include <linux/types.h>
  #include <linux/regulator/consumer.h>
+ #include <linux/module.h>
  
  #define LM3530_LED_DEV "lcd-backlight"
  #define LM3530_NAME "lm3530-led"
@@@ -421,6 -422,7 +422,6 @@@ err_class_register
  err_reg_init:
        regulator_put(drvdata->regulator);
  err_regulator_get:
 -      i2c_set_clientdata(client, NULL);
        kfree(drvdata);
  err_out:
        return err;
@@@ -448,7 -450,7 +449,7 @@@ MODULE_DEVICE_TABLE(i2c, lm3530_id)
  
  static struct i2c_driver lm3530_i2c_driver = {
        .probe = lm3530_probe,
 -      .remove = lm3530_remove,
 +      .remove = __devexit_p(lm3530_remove),
        .id_table = lm3530_id,
        .driver = {
                .name = LM3530_NAME,
@@@ -9,6 -9,7 +9,7 @@@
  #include <linux/dm-dirty-log.h>
  #include <linux/device-mapper.h>
  #include <linux/dm-log-userspace.h>
+ #include <linux/module.h>
  
  #include "dm-log-userspace-transfer.h"
  
@@@ -30,7 -31,6 +31,7 @@@ struct flush_entry 
  
  struct log_c {
        struct dm_target *ti;
 +      struct dm_dev *log_dev;
        uint32_t region_size;
        region_t region_count;
        uint64_t luid;
@@@ -147,7 -147,7 +148,7 @@@ static int build_constructor_string(str
   *    <UUID> <other args>
   * Where 'other args' is the userspace implementation specific log
   * arguments.  An example might be:
 - *    <UUID> clustered_disk <arg count> <log dev> <region_size> [[no]sync]
 + *    <UUID> clustered-disk <arg count> <log dev> <region_size> [[no]sync]
   *
   * So, this module will strip off the <UUID> for identification purposes
   * when communicating with userspace about a log; but will pass on everything
@@@ -162,15 -162,13 +163,15 @@@ static int userspace_ctr(struct dm_dirt
        struct log_c *lc = NULL;
        uint64_t rdata;
        size_t rdata_size = sizeof(rdata);
 +      char *devices_rdata = NULL;
 +      size_t devices_rdata_size = DM_NAME_LEN;
  
        if (argc < 3) {
                DMWARN("Too few arguments to userspace dirty log");
                return -EINVAL;
        }
  
 -      lc = kmalloc(sizeof(*lc), GFP_KERNEL);
 +      lc = kzalloc(sizeof(*lc), GFP_KERNEL);
        if (!lc) {
                DMWARN("Unable to allocate userspace log context.");
                return -ENOMEM;
                return str_size;
        }
  
 -      /* Send table string */
 +      devices_rdata = kzalloc(devices_rdata_size, GFP_KERNEL);
 +      if (!devices_rdata) {
 +              DMERR("Failed to allocate memory for device information");
 +              r = -ENOMEM;
 +              goto out;
 +      }
 +
 +      /*
 +       * Send table string and get back any opened device.
 +       */
        r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
 -                               ctr_str, str_size, NULL, NULL);
 +                               ctr_str, str_size,
 +                               devices_rdata, &devices_rdata_size);
  
        if (r < 0) {
                if (r == -ESRCH)
        lc->region_size = (uint32_t)rdata;
        lc->region_count = dm_sector_div_up(ti->len, lc->region_size);
  
 +      if (devices_rdata_size) {
 +              if (devices_rdata[devices_rdata_size - 1] != '\0') {
 +                      DMERR("DM_ULOG_CTR device return string not properly terminated");
 +                      r = -EINVAL;
 +                      goto out;
 +              }
 +              r = dm_get_device(ti, devices_rdata,
 +                                dm_table_get_mode(ti->table), &lc->log_dev);
 +              if (r)
 +                      DMERR("Failed to register %s with device-mapper",
 +                            devices_rdata);
 +      }
  out:
 +      kfree(devices_rdata);
        if (r) {
                kfree(lc);
                kfree(ctr_str);
@@@ -267,9 -242,6 +268,9 @@@ static void userspace_dtr(struct dm_dir
                                 NULL, 0,
                                 NULL, NULL);
  
 +      if (lc->log_dev)
 +              dm_put_device(lc->ti, lc->log_dev);
 +
        kfree(lc->usr_argv_str);
        kfree(lc);
  
diff --combined drivers/md/dm-raid.c
@@@ -6,6 -6,7 +6,7 @@@
   */
  
  #include <linux/slab.h>
+ #include <linux/module.h>
  
  #include "md.h"
  #include "raid1.h"
@@@ -1017,56 -1018,30 +1018,56 @@@ static int raid_status(struct dm_targe
        struct raid_set *rs = ti->private;
        unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
        unsigned sz = 0;
 -      int i;
 +      int i, array_in_sync = 0;
        sector_t sync;
  
        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
  
 -              for (i = 0; i < rs->md.raid_disks; i++) {
 -                      if (test_bit(Faulty, &rs->dev[i].rdev.flags))
 -                              DMEMIT("D");
 -                      else if (test_bit(In_sync, &rs->dev[i].rdev.flags))
 -                              DMEMIT("A");
 -                      else
 -                              DMEMIT("a");
 -              }
 -
                if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
                        sync = rs->md.curr_resync_completed;
                else
                        sync = rs->md.recovery_cp;
  
 -              if (sync > rs->md.resync_max_sectors)
 +              if (sync >= rs->md.resync_max_sectors) {
 +                      array_in_sync = 1;
                        sync = rs->md.resync_max_sectors;
 +              } else {
 +                      /*
 +                       * The array may be doing an initial sync, or it may
 +                       * be rebuilding individual components.  If all the
 +                       * devices are In_sync, then it is the array that is
 +                       * being initialized.
 +                       */
 +                      for (i = 0; i < rs->md.raid_disks; i++)
 +                              if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
 +                                      array_in_sync = 1;
 +              }
 +              /*
 +               * Status characters:
 +               *  'D' = Dead/Failed device
 +               *  'a' = Alive but not in-sync
 +               *  'A' = Alive and in-sync
 +               */
 +              for (i = 0; i < rs->md.raid_disks; i++) {
 +                      if (test_bit(Faulty, &rs->dev[i].rdev.flags))
 +                              DMEMIT("D");
 +                      else if (!array_in_sync ||
 +                               !test_bit(In_sync, &rs->dev[i].rdev.flags))
 +                              DMEMIT("a");
 +                      else
 +                              DMEMIT("A");
 +              }
  
 +              /*
 +               * In-sync ratio:
 +               *  The in-sync ratio shows the progress of:
 +               *   - Initializing the array
 +               *   - Rebuilding a subset of devices of the array
 +               *  The user can distinguish between the two by referring
 +               *  to the status characters.
 +               */
                DMEMIT(" %llu/%llu",
                       (unsigned long long) sync,
                       (unsigned long long) rs->md.resync_max_sectors);
diff --combined drivers/md/faulty.c
@@@ -63,6 -63,7 +63,7 @@@
  
  #define MaxFault      50
  #include <linux/blkdev.h>
+ #include <linux/module.h>
  #include <linux/raid/md_u.h>
  #include <linux/slab.h>
  #include "md.h"
@@@ -169,7 -170,7 +170,7 @@@ static void add_sector(struct faulty_co
                conf->nfaults = n+1;
  }
  
 -static int make_request(struct mddev *mddev, struct bio *bio)
 +static void make_request(struct mddev *mddev, struct bio *bio)
  {
        struct faulty_conf *conf = mddev->private;
        int failit = 0;
                         * just fail immediately
                         */
                        bio_endio(bio, -EIO);
 -                      return 0;
 +                      return;
                }
  
                if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
        }
        if (failit) {
                struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
 +
                b->bi_bdev = conf->rdev->bdev;
                b->bi_private = bio;
                b->bi_end_io = faulty_fail;
 -              generic_make_request(b);
 -              return 0;
 -      } else {
 +              bio = b;
 +      } else
                bio->bi_bdev = conf->rdev->bdev;
 -              return 1;
 -      }
 +
 +      generic_make_request(bio);
  }
  
  static void status(struct seq_file *seq, struct mddev *mddev)
diff --combined drivers/md/linear.c
@@@ -19,6 -19,7 +19,7 @@@
  #include <linux/blkdev.h>
  #include <linux/raid/md_u.h>
  #include <linux/seq_file.h>
+ #include <linux/module.h>
  #include <linux/slab.h>
  #include "md.h"
  #include "linear.h"
@@@ -264,14 -265,14 +265,14 @@@ static int linear_stop (struct mddev *m
        return 0;
  }
  
 -static int linear_make_request (struct mddev *mddev, struct bio *bio)
 +static void linear_make_request(struct mddev *mddev, struct bio *bio)
  {
        struct dev_info *tmp_dev;
        sector_t start_sector;
  
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
 -              return 0;
 +              return;
        }
  
        rcu_read_lock();
                       (unsigned long long)start_sector);
                rcu_read_unlock();
                bio_io_error(bio);
 -              return 0;
 +              return;
        }
        if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
                     tmp_dev->end_sector)) {
  
                bp = bio_split(bio, end_sector - bio->bi_sector);
  
 -              if (linear_make_request(mddev, &bp->bio1))
 -                      generic_make_request(&bp->bio1);
 -              if (linear_make_request(mddev, &bp->bio2))
 -                      generic_make_request(&bp->bio2);
 +              linear_make_request(mddev, &bp->bio1);
 +              linear_make_request(mddev, &bp->bio2);
                bio_pair_release(bp);
 -              return 0;
 +              return;
        }
                    
        bio->bi_bdev = tmp_dev->rdev->bdev;
        bio->bi_sector = bio->bi_sector - start_sector
                + tmp_dev->rdev->data_offset;
        rcu_read_unlock();
 -
 -      return 1;
 +      generic_make_request(bio);
  }
  
  static void linear_status (struct seq_file *seq, struct mddev *mddev)
diff --combined drivers/md/md.c
@@@ -44,6 -44,7 +44,7 @@@
  #include <linux/hdreg.h>
  #include <linux/proc_fs.h>
  #include <linux/random.h>
+ #include <linux/module.h>
  #include <linux/reboot.h>
  #include <linux/file.h>
  #include <linux/compat.h>
@@@ -332,17 -333,18 +333,17 @@@ static DEFINE_SPINLOCK(all_mddevs_lock)
   * call has finished, the bio has been linked into some internal structure
   * and so is visible to ->quiesce(), so we don't need the refcount any more.
   */
 -static int md_make_request(struct request_queue *q, struct bio *bio)
 +static void md_make_request(struct request_queue *q, struct bio *bio)
  {
        const int rw = bio_data_dir(bio);
        struct mddev *mddev = q->queuedata;
 -      int rv;
        int cpu;
        unsigned int sectors;
  
        if (mddev == NULL || mddev->pers == NULL
            || !mddev->ready) {
                bio_io_error(bio);
 -              return 0;
 +              return;
        }
        smp_rmb(); /* Ensure implications of  'active' are visible */
        rcu_read_lock();
         * go away inside make_request
         */
        sectors = bio_sectors(bio);
 -      rv = mddev->pers->make_request(mddev, bio);
 +      mddev->pers->make_request(mddev, bio);
  
        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
  
        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                wake_up(&mddev->sb_wait);
 -
 -      return rv;
  }
  
  /* mddev_suspend makes sure no new requests are submitted
@@@ -474,7 -478,8 +475,7 @@@ static void md_submit_flush_data(struc
                bio_endio(bio, 0);
        else {
                bio->bi_rw &= ~REQ_FLUSH;
 -              if (mddev->pers->make_request(mddev, bio))
 -                      generic_make_request(bio);
 +              mddev->pers->make_request(mddev, bio);
        }
  
        mddev->flush_bio = NULL;
diff --combined drivers/md/multipath.c
@@@ -20,6 -20,7 +20,7 @@@
   */
  
  #include <linux/blkdev.h>
+ #include <linux/module.h>
  #include <linux/raid/md_u.h>
  #include <linux/seq_file.h>
  #include <linux/slab.h>
@@@ -106,7 -107,7 +107,7 @@@ static void multipath_end_request(struc
        rdev_dec_pending(rdev, conf->mddev);
  }
  
 -static int multipath_make_request(struct mddev *mddev, struct bio * bio)
 +static void multipath_make_request(struct mddev *mddev, struct bio * bio)
  {
        struct mpconf *conf = mddev->private;
        struct multipath_bh * mp_bh;
  
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
 -              return 0;
 +              return;
        }
  
        mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
        if (mp_bh->path < 0) {
                bio_endio(bio, -EIO);
                mempool_free(mp_bh, conf->pool);
 -              return 0;
 +              return;
        }
        multipath = conf->multipaths + mp_bh->path;
  
        mp_bh->bio.bi_end_io = multipath_end_request;
        mp_bh->bio.bi_private = mp_bh;
        generic_make_request(&mp_bh->bio);
 -      return 0;
 +      return;
  }
  
  static void multipath_status (struct seq_file *seq, struct mddev *mddev)
diff --combined drivers/md/raid0.c
@@@ -20,6 -20,7 +20,7 @@@
  
  #include <linux/blkdev.h>
  #include <linux/seq_file.h>
+ #include <linux/module.h>
  #include <linux/slab.h>
  #include "md.h"
  #include "raid0.h"
@@@ -468,7 -469,7 +469,7 @@@ static inline int is_io_in_chunk_bounda
        }
  }
  
 -static int raid0_make_request(struct mddev *mddev, struct bio *bio)
 +static void raid0_make_request(struct mddev *mddev, struct bio *bio)
  {
        unsigned int chunk_sects;
        sector_t sector_offset;
  
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
 -              return 0;
 +              return;
        }
  
        chunk_sects = mddev->chunk_sectors;
                else
                        bp = bio_split(bio, chunk_sects -
                                       sector_div(sector, chunk_sects));
 -              if (raid0_make_request(mddev, &bp->bio1))
 -                      generic_make_request(&bp->bio1);
 -              if (raid0_make_request(mddev, &bp->bio2))
 -                      generic_make_request(&bp->bio2);
 -
 +              raid0_make_request(mddev, &bp->bio1);
 +              raid0_make_request(mddev, &bp->bio2);
                bio_pair_release(bp);
 -              return 0;
 +              return;
        }
  
        sector_offset = bio->bi_sector;
        bio->bi_bdev = tmp_dev->bdev;
        bio->bi_sector = sector_offset + zone->dev_start +
                tmp_dev->data_offset;
 -      /*
 -       * Let the main block layer submit the IO and resolve recursion:
 -       */
 -      return 1;
 +
 +      generic_make_request(bio);
 +      return;
  
  bad_map:
        printk("md/raid0:%s: make_request bug: can't convert block across chunks"
               (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
  
        bio_io_error(bio);
 -      return 0;
 +      return;
  }
  
  static void raid0_status(struct seq_file *seq, struct mddev *mddev)
diff --combined drivers/md/raid1.c
@@@ -34,6 -34,7 +34,7 @@@
  #include <linux/slab.h>
  #include <linux/delay.h>
  #include <linux/blkdev.h>
+ #include <linux/module.h>
  #include <linux/seq_file.h>
  #include <linux/ratelimit.h>
  #include "md.h"
@@@ -807,7 -808,7 +808,7 @@@ do_sync_io
        pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
  }
  
 -static int make_request(struct mddev *mddev, struct bio * bio)
 +static void make_request(struct mddev *mddev, struct bio * bio)
  {
        struct r1conf *conf = mddev->private;
        struct mirror_info *mirror;
@@@ -892,7 -893,7 +893,7 @@@ read_again
                if (rdisk < 0) {
                        /* couldn't find anywhere to read from */
                        raid_end_bio_io(r1_bio);
 -                      return 0;
 +                      return;
                }
                mirror = conf->mirrors + rdisk;
  
                        goto read_again;
                } else
                        generic_make_request(read_bio);
 -              return 0;
 +              return;
        }
  
        /*
  
        if (do_sync || !bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
 -
 -      return 0;
  }
  
  static void status(struct seq_file *seq, struct mddev *mddev)
@@@ -2191,6 -2194,7 +2192,6 @@@ static sector_t sync_request(struct mdd
                bio->bi_next = NULL;
                bio->bi_flags &= ~(BIO_POOL_MASK-1);
                bio->bi_flags |= 1 << BIO_UPTODATE;
 -              bio->bi_comp_cpu = -1;
                bio->bi_rw = READ;
                bio->bi_vcnt = 0;
                bio->bi_idx = 0;
diff --combined drivers/md/raid10.c
@@@ -21,6 -21,7 +21,7 @@@
  #include <linux/slab.h>
  #include <linux/delay.h>
  #include <linux/blkdev.h>
+ #include <linux/module.h>
  #include <linux/seq_file.h>
  #include <linux/ratelimit.h>
  #include "md.h"
@@@ -842,7 -843,7 +843,7 @@@ static void unfreeze_array(struct r10co
        spin_unlock_irq(&conf->resync_lock);
  }
  
 -static int make_request(struct mddev *mddev, struct bio * bio)
 +static void make_request(struct mddev *mddev, struct bio * bio)
  {
        struct r10conf *conf = mddev->private;
        struct mirror_info *mirror;
  
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
 -              return 0;
 +              return;
        }
  
        /* If this request crosses a chunk boundary, we need to
                conf->nr_waiting++;
                spin_unlock_irq(&conf->resync_lock);
  
 -              if (make_request(mddev, &bp->bio1))
 -                      generic_make_request(&bp->bio1);
 -              if (make_request(mddev, &bp->bio2))
 -                      generic_make_request(&bp->bio2);
 +              make_request(mddev, &bp->bio1);
 +              make_request(mddev, &bp->bio2);
  
                spin_lock_irq(&conf->resync_lock);
                conf->nr_waiting--;
                spin_unlock_irq(&conf->resync_lock);
  
                bio_pair_release(bp);
 -              return 0;
 +              return;
        bad_map:
                printk("md/raid10:%s: make_request bug: can't convert block across chunks"
                       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
                       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
  
                bio_io_error(bio);
 -              return 0;
 +              return;
        }
  
        md_write_start(mddev, bio);
@@@ -952,7 -955,7 +953,7 @@@ read_again
                slot = r10_bio->read_slot;
                if (disk < 0) {
                        raid_end_bio_io(r10_bio);
 -                      return 0;
 +                      return;
                }
                mirror = conf->mirrors + disk;
  
                        goto read_again;
                } else
                        generic_make_request(read_bio);
 -              return 0;
 +              return;
        }
  
        /*
@@@ -1174,6 -1177,7 +1175,6 @@@ retry_write
  
        if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
 -      return 0;
  }
  
  static void status(struct seq_file *seq, struct mddev *mddev)
@@@ -1352,7 -1356,7 +1353,7 @@@ static int raid10_add_disk(struct mdde
                struct mirror_info *p = &conf->mirrors[mirror];
                if (p->recovery_disabled == mddev->recovery_disabled)
                        continue;
 -              if (!p->rdev)
 +              if (p->rdev)
                        continue;
  
                disk_stack_limits(mddev->gendisk, rdev->bdev,
diff --combined drivers/md/raid5.c
@@@ -47,6 -47,7 +47,7 @@@
  #include <linux/kthread.h>
  #include <linux/raid/pq.h>
  #include <linux/async_tx.h>
+ #include <linux/module.h>
  #include <linux/async.h>
  #include <linux/seq_file.h>
  #include <linux/cpu.h>
@@@ -3688,7 -3689,7 +3689,7 @@@ static struct stripe_head *__get_priori
        return sh;
  }
  
 -static int make_request(struct mddev *mddev, struct bio * bi)
 +static void make_request(struct mddev *mddev, struct bio * bi)
  {
        struct r5conf *conf = mddev->private;
        int dd_idx;
  
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
 -              return 0;
 +              return;
        }
  
        md_write_start(mddev, bi);
        if (rw == READ &&
             mddev->reshape_position == MaxSector &&
             chunk_aligned_read(mddev,bi))
 -              return 0;
 +              return;
  
        logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        last_sector = bi->bi_sector + (bi->bi_size>>9);
  
                bio_endio(bi, 0);
        }
 -
 -      return 0;
  }
  
  static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <media/saa7146.h>
+ #include <linux/module.h>
  
  LIST_HEAD(saa7146_devices);
  DEFINE_MUTEX(saa7146_devices_lock);
@@@ -37,9 -36,10 +38,9 @@@ static void dump_registers(struct saa71
  {
        int i = 0;
  
 -      INFO((" @ %li jiffies:\n",jiffies));
 -      for(i = 0; i <= 0x148; i+=4) {
 -              printk("0x%03x: 0x%08x\n",i,saa7146_read(dev,i));
 -      }
 +      pr_info(" @ %li jiffies:\n", jiffies);
 +      for (i = 0; i <= 0x148; i += 4)
 +              pr_info("0x%03x: 0x%08x\n", i, saa7146_read(dev, i));
  }
  #endif
  
@@@ -73,8 -73,9 +74,8 @@@ static inline int saa7146_wait_for_debi
                if (saa7146_read(dev, MC2) & 2)
                        break;
                if (err) {
 -                      printk(KERN_ERR "%s: %s timed out while waiting for "
 -                                      "registers getting programmed\n",
 -                                      dev->name, __func__);
 +                      pr_err("%s: %s timed out while waiting for registers getting programmed\n",
 +                             dev->name, __func__);
                        return -ETIMEDOUT;
                }
                msleep(1);
@@@ -88,8 -89,8 +89,8 @@@
                        break;
                saa7146_read(dev, MC2);
                if (err) {
 -                      DEB_S(("%s: %s timed out while waiting for transfer "
 -                              "completion\n", dev->name, __func__));
 +                      DEB_S("%s: %s timed out while waiting for transfer completion\n",
 +                            dev->name, __func__);
                        return -ETIMEDOUT;
                }
                msleep(1);
@@@ -109,8 -110,9 +110,8 @@@ static inline int saa7146_wait_for_debi
                if (saa7146_read(dev, MC2) & 2)
                        break;
                if (!loops--) {
 -                      printk(KERN_ERR "%s: %s timed out while waiting for "
 -                                      "registers getting programmed\n",
 -                                      dev->name, __func__);
 +                      pr_err("%s: %s timed out while waiting for registers getting programmed\n",
 +                             dev->name, __func__);
                        return -ETIMEDOUT;
                }
                udelay(1);
                        break;
                saa7146_read(dev, MC2);
                if (!loops--) {
 -                      DEB_S(("%s: %s timed out while waiting for transfer "
 -                              "completion\n", dev->name, __func__));
 +                      DEB_S("%s: %s timed out while waiting for transfer completion\n",
 +                            dev->name, __func__);
                        return -ETIMEDOUT;
                }
                udelay(5);
@@@ -263,9 -265,7 +264,9 @@@ int saa7146_pgtable_build_single(struc
        ptr = pt->cpu;
        for (i = 0; i < sglen; i++, list++) {
  /*
 -              printk("i:%d, adr:0x%08x, len:%d, offset:%d\n", i,sg_dma_address(list), sg_dma_len(list), list->offset);
 +              pr_debug("i:%d, adr:0x%08x, len:%d, offset:%d\n",
 +                       i, sg_dma_address(list), sg_dma_len(list),
 +                       list->offset);
  */
                for (p = 0; p * 4096 < list->length; p++, ptr++) {
                        *ptr = cpu_to_le32(sg_dma_address(list) + p * 4096);
  
  /*
        ptr = pt->cpu;
 -      printk("offset: %d\n",pt->offset);
 +      pr_debug("offset: %d\n", pt->offset);
        for(i=0;i<5;i++) {
 -              printk("ptr1 %d: 0x%08x\n",i,ptr[i]);
 +              pr_debug("ptr1 %d: 0x%08x\n", i, ptr[i]);
        }
  */
        return 0;
@@@ -315,7 -315,7 +316,7 @@@ static irqreturn_t interrupt_hw(int irq
                }
        }
        if (0 != (isr & (MASK_27))) {
 -              DEB_INT(("irq: RPS0 (0x%08x).\n",isr));
 +              DEB_INT("irq: RPS0 (0x%08x)\n", isr);
                if (dev->vv_data && dev->vv_callback)
                        dev->vv_callback(dev,isr);
                isr &= ~MASK_27;
                } else {
                        u32 psr = saa7146_read(dev, PSR);
                        u32 ssr = saa7146_read(dev, SSR);
 -                      printk(KERN_WARNING "%s: unexpected i2c irq: isr %08x psr %08x ssr %08x\n",
 -                             dev->name, isr, psr, ssr);
 +                      pr_warn("%s: unexpected i2c irq: isr %08x psr %08x ssr %08x\n",
 +                              dev->name, isr, psr, ssr);
                }
                isr &= ~(MASK_16|MASK_17);
        }
        if( 0 != isr ) {
 -              ERR(("warning: interrupt enabled, but not handled properly.(0x%08x)\n",isr));
 -              ERR(("disabling interrupt source(s)!\n"));
 +              ERR("warning: interrupt enabled, but not handled properly.(0x%08x)\n",
 +                  isr);
 +              ERR("disabling interrupt source(s)!\n");
                SAA7146_IER_DISABLE(dev,isr);
        }
        saa7146_write(dev, ISR, ack_isr);
@@@ -362,15 -361,15 +363,15 @@@ static int saa7146_init_one(struct pci_
        /* clear out mem for sure */
        dev = kzalloc(sizeof(struct saa7146_dev), GFP_KERNEL);
        if (!dev) {
 -              ERR(("out of memory.\n"));
 +              ERR("out of memory\n");
                goto out;
        }
  
 -      DEB_EE(("pci:%p\n",pci));
 +      DEB_EE("pci:%p\n", pci);
  
        err = pci_enable_device(pci);
        if (err < 0) {
 -              ERR(("pci_enable_device() failed.\n"));
 +              ERR("pci_enable_device() failed\n");
                goto err_free;
        }
  
        dev->mem = ioremap(pci_resource_start(pci, 0),
                           pci_resource_len(pci, 0));
        if (!dev->mem) {
 -              ERR(("ioremap() failed.\n"));
 +              ERR("ioremap() failed\n");
                err = -ENODEV;
                goto err_release;
        }
        err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED | IRQF_DISABLED,
                          dev->name, dev);
        if (err < 0) {
 -              ERR(("request_irq() failed.\n"));
 +              ERR("request_irq() failed\n");
                goto err_unmap;
        }
  
        /* create a nice device name */
        sprintf(dev->name, "saa7146 (%d)", saa7146_num);
  
 -      INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device));
 +      pr_info("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x)\n",
 +              dev->mem, dev->revision, pci->irq,
 +              pci->subsystem_vendor, pci->subsystem_device);
        dev->ext = ext;
  
        mutex_init(&dev->v4l2_lock);
        err = -ENODEV;
  
        if (ext->probe && ext->probe(dev)) {
 -              DEB_D(("ext->probe() failed for %p. skipping device.\n",dev));
 +              DEB_D("ext->probe() failed for %p. skipping device.\n", dev);
                goto err_free_i2c;
        }
  
        if (ext->attach(dev, pci_ext)) {
 -              DEB_D(("ext->attach() failed for %p. skipping device.\n",dev));
 +              DEB_D("ext->attach() failed for %p. skipping device.\n", dev);
                goto err_free_i2c;
        }
        /* V4L extensions will set the pci drvdata to the v4l2_device in the
@@@ -525,7 -522,7 +526,7 @@@ static void saa7146_remove_one(struct p
                { NULL, 0 }
        }, *p;
  
 -      DEB_EE(("dev:%p\n",dev));
 +      DEB_EE("dev:%p\n", dev);
  
        dev->ext->detach(dev);
        /* Zero the PCI drvdata after use. */
  
  int saa7146_register_extension(struct saa7146_extension* ext)
  {
 -      DEB_EE(("ext:%p\n",ext));
 +      DEB_EE("ext:%p\n", ext);
  
        ext->driver.name = ext->name;
        ext->driver.id_table = ext->pci_tbl;
        ext->driver.probe = saa7146_init_one;
        ext->driver.remove = saa7146_remove_one;
  
 -      printk("saa7146: register extension '%s'.\n",ext->name);
 +      pr_info("register extension '%s'\n", ext->name);
        return pci_register_driver(&ext->driver);
  }
  
  int saa7146_unregister_extension(struct saa7146_extension* ext)
  {
 -      DEB_EE(("ext:%p\n",ext));
 -      printk("saa7146: unregister extension '%s'.\n",ext->name);
 +      DEB_EE("ext:%p\n", ext);
 +      pr_info("unregister extension '%s'\n", ext->name);
        pci_unregister_driver(&ext->driver);
        return 0;
  }
@@@ -1,6 -1,5 +1,7 @@@
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <media/saa7146_vv.h>
+ #include <linux/module.h>
  
  /****************************************************************************/
  /* resource management functions, shamelessly stolen from saa7134 driver */
@@@ -11,23 -10,21 +12,23 @@@ int saa7146_res_get(struct saa7146_fh *
        struct saa7146_vv *vv = dev->vv_data;
  
        if (fh->resources & bit) {
 -              DEB_D(("already allocated! want: 0x%02x, cur:0x%02x\n",bit,vv->resources));
 +              DEB_D("already allocated! want: 0x%02x, cur:0x%02x\n",
 +                    bit, vv->resources);
                /* have it already allocated */
                return 1;
        }
  
        /* is it free? */
        if (vv->resources & bit) {
 -              DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit));
 +              DEB_D("locked! vv->resources:0x%02x, we want:0x%02x\n",
 +                    vv->resources, bit);
                /* no, someone else uses it */
                return 0;
        }
        /* it's free, grab it */
 -      fh->resources  |= bit;
 +      fh->resources |= bit;
        vv->resources |= bit;
 -      DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources));
 +      DEB_D("res: get 0x%02x, cur:0x%02x\n", bit, vv->resources);
        return 1;
  }
  
@@@ -38,9 -35,9 +39,9 @@@ void saa7146_res_free(struct saa7146_f
  
        BUG_ON((fh->resources & bits) != bits);
  
 -      fh->resources  &= ~bits;
 +      fh->resources &= ~bits;
        vv->resources &= ~bits;
 -      DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources));
 +      DEB_D("res: put 0x%02x, cur:0x%02x\n", bits, vv->resources);
  }
  
  
@@@ -51,7 -48,7 +52,7 @@@ void saa7146_dma_free(struct saa7146_de
                                                struct saa7146_buf *buf)
  {
        struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
 -      DEB_EE(("dev:%p, buf:%p\n",dev,buf));
 +      DEB_EE("dev:%p, buf:%p\n", dev, buf);
  
        BUG_ON(in_interrupt());
  
@@@ -70,19 -67,18 +71,19 @@@ int saa7146_buffer_queue(struct saa7146
                         struct saa7146_buf *buf)
  {
        assert_spin_locked(&dev->slock);
 -      DEB_EE(("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf));
 +      DEB_EE("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf);
  
        BUG_ON(!q);
  
        if (NULL == q->curr) {
                q->curr = buf;
 -              DEB_D(("immediately activating buffer %p\n", buf));
 +              DEB_D("immediately activating buffer %p\n", buf);
                buf->activate(dev,buf,NULL);
        } else {
                list_add_tail(&buf->vb.queue,&q->queue);
                buf->vb.state = VIDEOBUF_QUEUED;
 -              DEB_D(("adding buffer %p to queue. (active buffer present)\n", buf));
 +              DEB_D("adding buffer %p to queue. (active buffer present)\n",
 +                    buf);
        }
        return 0;
  }
@@@ -92,14 -88,14 +93,14 @@@ void saa7146_buffer_finish(struct saa71
                           int state)
  {
        assert_spin_locked(&dev->slock);
 -      DEB_EE(("dev:%p, dmaq:%p, state:%d\n", dev, q, state));
 -      DEB_EE(("q->curr:%p\n",q->curr));
 +      DEB_EE("dev:%p, dmaq:%p, state:%d\n", dev, q, state);
 +      DEB_EE("q->curr:%p\n", q->curr);
  
        BUG_ON(!q->curr);
  
        /* finish current buffer */
        if (NULL == q->curr) {
 -              DEB_D(("aiii. no current buffer\n"));
 +              DEB_D("aiii. no current buffer\n");
                return;
        }
  
@@@ -117,7 -113,7 +118,7 @@@ void saa7146_buffer_next(struct saa7146
  
        BUG_ON(!q);
  
 -      DEB_INT(("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi));
 +      DEB_INT("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi);
  
        assert_spin_locked(&dev->slock);
        if (!list_empty(&q->queue)) {
                if (!list_empty(&q->queue))
                        next = list_entry(q->queue.next,struct saa7146_buf, vb.queue);
                q->curr = buf;
 -              DEB_INT(("next buffer: buf:%p, prev:%p, next:%p\n", buf, q->queue.prev,q->queue.next));
 +              DEB_INT("next buffer: buf:%p, prev:%p, next:%p\n",
 +                      buf, q->queue.prev, q->queue.next);
                buf->activate(dev,buf,next);
        } else {
 -              DEB_INT(("no next buffer. stopping.\n"));
 +              DEB_INT("no next buffer. stopping.\n");
                if( 0 != vbi ) {
                        /* turn off video-dma3 */
                        saa7146_write(dev,MC1, MASK_20);
@@@ -168,11 -163,11 +169,11 @@@ void saa7146_buffer_timeout(unsigned lo
        struct saa7146_dev *dev = q->dev;
        unsigned long flags;
  
 -      DEB_EE(("dev:%p, dmaq:%p\n", dev, q));
 +      DEB_EE("dev:%p, dmaq:%p\n", dev, q);
  
        spin_lock_irqsave(&dev->slock,flags);
        if (q->curr) {
 -              DEB_D(("timeout on %p\n", q->curr));
 +              DEB_D("timeout on %p\n", q->curr);
                saa7146_buffer_finish(dev,q,VIDEOBUF_ERROR);
        }
  
@@@ -200,12 -195,12 +201,12 @@@ static int fops_open(struct file *file
  
        enum v4l2_buf_type type;
  
 -      DEB_EE(("file:%p, dev:%s\n", file, video_device_node_name(vdev)));
 +      DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev));
  
        if (mutex_lock_interruptible(&saa7146_devices_lock))
                return -ERESTARTSYS;
  
 -      DEB_D(("using: %p\n",dev));
 +      DEB_D("using: %p\n", dev);
  
        type = vdev->vfl_type == VFL_TYPE_GRABBER
             ? V4L2_BUF_TYPE_VIDEO_CAPTURE
  
        /* check if an extension is registered */
        if( NULL == dev->ext ) {
 -              DEB_S(("no extension registered for this device.\n"));
 +              DEB_S("no extension registered for this device\n");
                result = -ENODEV;
                goto out;
        }
        /* allocate per open data */
        fh = kzalloc(sizeof(*fh),GFP_KERNEL);
        if (NULL == fh) {
 -              DEB_S(("cannot allocate memory for per open data.\n"));
 +              DEB_S("cannot allocate memory for per open data\n");
                result = -ENOMEM;
                goto out;
        }
        fh->type = type;
  
        if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
 -              DEB_S(("initializing vbi...\n"));
 +              DEB_S("initializing vbi...\n");
                if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
                        result = saa7146_vbi_uops.open(dev,file);
                if (dev->ext_vv_data->vbi_fops.open)
                        dev->ext_vv_data->vbi_fops.open(file);
        } else {
 -              DEB_S(("initializing video...\n"));
 +              DEB_S("initializing video...\n");
                result = saa7146_video_uops.open(dev,file);
        }
  
@@@ -265,7 -260,7 +266,7 @@@ static int fops_release(struct file *fi
        struct saa7146_fh  *fh  = file->private_data;
        struct saa7146_dev *dev = fh->dev;
  
 -      DEB_EE(("file:%p\n", file));
 +      DEB_EE("file:%p\n", file);
  
        if (mutex_lock_interruptible(&saa7146_devices_lock))
                return -ERESTARTSYS;
@@@ -295,14 -290,12 +296,14 @@@ static int fops_mmap(struct file *file
  
        switch (fh->type) {
        case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
 -              DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, vma:%p\n",file, vma));
 +              DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, vma:%p\n",
 +                     file, vma);
                q = &fh->video_q;
                break;
                }
        case V4L2_BUF_TYPE_VBI_CAPTURE: {
 -              DEB_EE(("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, vma:%p\n",file, vma));
 +              DEB_EE("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, vma:%p\n",
 +                     file, vma);
                q = &fh->vbi_q;
                break;
                }
@@@ -320,14 -313,14 +321,14 @@@ static unsigned int fops_poll(struct fi
        struct videobuf_buffer *buf = NULL;
        struct videobuf_queue *q;
  
 -      DEB_EE(("file:%p, poll:%p\n",file, wait));
 +      DEB_EE("file:%p, poll:%p\n", file, wait);
  
        if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
                if( 0 == fh->vbi_q.streaming )
                        return videobuf_poll_stream(file, &fh->vbi_q, wait);
                q = &fh->vbi_q;
        } else {
 -              DEB_D(("using video queue.\n"));
 +              DEB_D("using video queue\n");
                q = &fh->video_q;
        }
  
                buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
  
        if (!buf) {
 -              DEB_D(("buf == NULL!\n"));
 +              DEB_D("buf == NULL!\n");
                return POLLERR;
        }
  
        poll_wait(file, &buf->done, wait);
        if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR) {
 -              DEB_D(("poll succeeded!\n"));
 +              DEB_D("poll succeeded!\n");
                return POLLIN|POLLRDNORM;
        }
  
 -      DEB_D(("nothing to poll for, buf->state:%d\n",buf->state));
 +      DEB_D("nothing to poll for, buf->state:%d\n", buf->state);
        return 0;
  }
  
@@@ -354,20 -347,18 +355,20 @@@ static ssize_t fops_read(struct file *f
        struct saa7146_fh *fh = file->private_data;
  
        switch (fh->type) {
 -      case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
 -//            DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, data:%p, count:%lun", file, data, (unsigned long)count));
 +      case V4L2_BUF_TYPE_VIDEO_CAPTURE:
 +/*
 +              DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, data:%p, count:%lun",
 +                     file, data, (unsigned long)count);
 +*/
                return saa7146_video_uops.read(file,data,count,ppos);
 -              }
 -      case V4L2_BUF_TYPE_VBI_CAPTURE: {
 -//            DEB_EE(("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, data:%p, count:%lu\n", file, data, (unsigned long)count));
 +      case V4L2_BUF_TYPE_VBI_CAPTURE:
 +/*
 +              DEB_EE("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, data:%p, count:%lu\n",
 +                     file, data, (unsigned long)count);
 +*/
                if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
                        return saa7146_vbi_uops.read(file,data,count,ppos);
 -              else
 -                      return -EINVAL;
 -              }
 -              break;
 +              return -EINVAL;
        default:
                BUG();
                return 0;
@@@ -408,22 -399,22 +409,22 @@@ static void vv_callback(struct saa7146_
  {
        u32 isr = status;
  
 -      DEB_INT(("dev:%p, isr:0x%08x\n",dev,(u32)status));
 +      DEB_INT("dev:%p, isr:0x%08x\n", dev, (u32)status);
  
        if (0 != (isr & (MASK_27))) {
 -              DEB_INT(("irq: RPS0 (0x%08x).\n",isr));
 +              DEB_INT("irq: RPS0 (0x%08x)\n", isr);
                saa7146_video_uops.irq_done(dev,isr);
        }
  
        if (0 != (isr & (MASK_28))) {
                u32 mc2 = saa7146_read(dev, MC2);
                if( 0 != (mc2 & MASK_15)) {
 -                      DEB_INT(("irq: RPS1 vbi workaround (0x%08x).\n",isr));
 +                      DEB_INT("irq: RPS1 vbi workaround (0x%08x)\n", isr);
                        wake_up(&dev->vv_data->vbi_wq);
                        saa7146_write(dev,MC2, MASK_31);
                        return;
                }
 -              DEB_INT(("irq: RPS1 (0x%08x).\n",isr));
 +              DEB_INT("irq: RPS1 (0x%08x)\n", isr);
                saa7146_vbi_uops.irq_done(dev,isr);
        }
  }
@@@ -439,13 -430,13 +440,13 @@@ int saa7146_vv_init(struct saa7146_dev
  
        vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL);
        if (vv == NULL) {
 -              ERR(("out of memory. aborting.\n"));
 +              ERR("out of memory. aborting.\n");
                return -ENOMEM;
        }
        ext_vv->ops = saa7146_video_ioctl_ops;
        ext_vv->core_ops = &saa7146_video_ioctl_ops;
  
 -      DEB_EE(("dev:%p\n",dev));
 +      DEB_EE("dev:%p\n", dev);
  
        /* set default values for video parts of the saa7146 */
        saa7146_write(dev, BCS_CTRL, 0x80400040);
  
        vv->d_clipping.cpu_addr = pci_alloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, &vv->d_clipping.dma_handle);
        if( NULL == vv->d_clipping.cpu_addr ) {
 -              ERR(("out of memory. aborting.\n"));
 +              ERR("out of memory. aborting.\n");
                kfree(vv);
                return -1;
        }
@@@ -481,7 -472,7 +482,7 @@@ int saa7146_vv_release(struct saa7146_d
  {
        struct saa7146_vv *vv = dev->vv_data;
  
 -      DEB_EE(("dev:%p\n",dev));
 +      DEB_EE("dev:%p\n", dev);
  
        v4l2_device_unregister(&dev->v4l2_dev);
        pci_free_consistent(dev->pci, SAA7146_CLIPPING_MEM, vv->d_clipping.cpu_addr, vv->d_clipping.dma_handle);
@@@ -500,7 -491,7 +501,7 @@@ int saa7146_register_device(struct vide
        int err;
        int i;
  
 -      DEB_EE(("dev:%p, name:'%s', type:%d\n",dev,name,type));
 +      DEB_EE("dev:%p, name:'%s', type:%d\n", dev, name, type);
  
        // released by vfd->release
        vfd = video_device_alloc();
  
        err = video_register_device(vfd, type, -1);
        if (err < 0) {
 -              ERR(("cannot register v4l2 device. skipping.\n"));
 +              ERR("cannot register v4l2 device. skipping.\n");
                video_device_release(vfd);
                return err;
        }
  
 -      INFO(("%s: registered device %s [v4l2]\n",
 -              dev->name, video_device_node_name(vfd)));
 +      pr_info("%s: registered device %s [v4l2]\n",
 +              dev->name, video_device_node_name(vfd));
  
        *vid = vfd;
        return 0;
@@@ -534,7 -525,7 +535,7 @@@ EXPORT_SYMBOL_GPL(saa7146_register_devi
  
  int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev)
  {
 -      DEB_EE(("dev:%p\n",dev));
 +      DEB_EE("dev:%p\n", dev);
  
        video_unregister_device(*vid);
        *vid = NULL;
@@@ -1,6 -1,5 +1,7 @@@
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <linux/kernel.h>
+ #include <linux/export.h>
  #include <media/saa7146_vv.h>
  
  static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
@@@ -713,8 -712,8 +714,8 @@@ static int calculate_video_dma_grab_pac
  
        int depth = sfmt->depth;
  
 -      DEB_CAP(("[size=%dx%d,fields=%s]\n",
 -              width,height,v4l2_field_names[field]));
 +      DEB_CAP("[size=%dx%d,fields=%s]\n",
 +              width, height, v4l2_field_names[field]);
  
        if( bytesperline != 0) {
                vdma1.pitch = bytesperline*2;
@@@ -839,8 -838,8 +840,8 @@@ static int calculate_video_dma_grab_pla
        BUG_ON(0 == buf->pt[1].dma);
        BUG_ON(0 == buf->pt[2].dma);
  
 -      DEB_CAP(("[size=%dx%d,fields=%s]\n",
 -              width,height,v4l2_field_names[field]));
 +      DEB_CAP("[size=%dx%d,fields=%s]\n",
 +              width, height, v4l2_field_names[field]);
  
        /* fixme: look at bytesperline! */
  
@@@ -1000,12 -999,12 +1001,12 @@@ void saa7146_set_capture(struct saa7146
        struct saa7146_vv *vv = dev->vv_data;
        u32 vdma1_prot_addr;
  
 -      DEB_CAP(("buf:%p, next:%p\n",buf,next));
 +      DEB_CAP("buf:%p, next:%p\n", buf, next);
  
        vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1);
        if( 0 == vdma1_prot_addr ) {
                /* clear out beginning of streaming bit (rps register 0)*/
 -              DEB_CAP(("forcing sync to new frame\n"));
 +              DEB_CAP("forcing sync to new frame\n");
                saa7146_write(dev, MC2, MASK_27 );
        }
  
@@@ -1,7 -1,6 +1,8 @@@
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <media/saa7146_vv.h>
  #include <media/v4l2-chip-ident.h>
+ #include <linux/module.h>
  
  static int max_memory = 32;
  
@@@ -96,7 -95,7 +97,7 @@@ struct saa7146_format* saa7146_format_b
                }
        }
  
 -      DEB_D(("unknown pixelformat:'%4.4s'\n",(char *)&fourcc));
 +      DEB_D("unknown pixelformat:'%4.4s'\n", (char *)&fourcc);
        return NULL;
  }
  
@@@ -109,32 -108,32 +110,32 @@@ int saa7146_start_preview(struct saa714
        struct v4l2_format fmt;
        int ret = 0, err = 0;
  
 -      DEB_EE(("dev:%p, fh:%p\n",dev,fh));
 +      DEB_EE("dev:%p, fh:%p\n", dev, fh);
  
        /* check if we have overlay informations */
        if( NULL == fh->ov.fh ) {
 -              DEB_D(("no overlay data available. try S_FMT first.\n"));
 +              DEB_D("no overlay data available. try S_FMT first.\n");
                return -EAGAIN;
        }
  
        /* check if streaming capture is running */
        if (IS_CAPTURE_ACTIVE(fh) != 0) {
 -              DEB_D(("streaming capture is active.\n"));
 +              DEB_D("streaming capture is active\n");
                return -EBUSY;
        }
  
        /* check if overlay is running */
        if (IS_OVERLAY_ACTIVE(fh) != 0) {
                if (vv->video_fh == fh) {
 -                      DEB_D(("overlay is already active.\n"));
 +                      DEB_D("overlay is already active\n");
                        return 0;
                }
 -              DEB_D(("overlay is already active in another open.\n"));
 +              DEB_D("overlay is already active in another open\n");
                return -EBUSY;
        }
  
        if (0 == saa7146_res_get(fh, RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP)) {
 -              DEB_D(("cannot get necessary overlay resources\n"));
 +              DEB_D("cannot get necessary overlay resources\n");
                return -EBUSY;
        }
  
        fh->ov.win = fmt.fmt.win;
        vv->ov_data = &fh->ov;
  
 -      DEB_D(("%dx%d+%d+%d %s field=%s\n",
 -              fh->ov.win.w.width,fh->ov.win.w.height,
 -              fh->ov.win.w.left,fh->ov.win.w.top,
 -              vv->ov_fmt->name,v4l2_field_names[fh->ov.win.field]));
 +      DEB_D("%dx%d+%d+%d %s field=%s\n",
 +            fh->ov.win.w.width, fh->ov.win.w.height,
 +            fh->ov.win.w.left, fh->ov.win.w.top,
 +            vv->ov_fmt->name, v4l2_field_names[fh->ov.win.field]);
  
        if (0 != (ret = saa7146_enable_overlay(fh))) {
 -              DEB_D(("enabling overlay failed: %d\n",ret));
 +              DEB_D("enabling overlay failed: %d\n", ret);
                saa7146_res_free(vv->video_fh, RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP);
                return ret;
        }
@@@ -170,22 -169,22 +171,22 @@@ int saa7146_stop_preview(struct saa7146
        struct saa7146_dev *dev = fh->dev;
        struct saa7146_vv *vv = dev->vv_data;
  
 -      DEB_EE(("dev:%p, fh:%p\n",dev,fh));
 +      DEB_EE("dev:%p, fh:%p\n", dev, fh);
  
        /* check if streaming capture is running */
        if (IS_CAPTURE_ACTIVE(fh) != 0) {
 -              DEB_D(("streaming capture is active.\n"));
 +              DEB_D("streaming capture is active\n");
                return -EBUSY;
        }
  
        /* check if overlay is running at all */
        if ((vv->video_status & STATUS_OVERLAY) == 0) {
 -              DEB_D(("no active overlay.\n"));
 +              DEB_D("no active overlay\n");
                return 0;
        }
  
        if (vv->video_fh != fh) {
 -              DEB_D(("overlay is active, but in another open.\n"));
 +              DEB_D("overlay is active, but in another open\n");
                return -EBUSY;
        }
  
@@@ -270,7 -269,7 +271,7 @@@ static int saa7146_pgtable_build(struc
        int length = dma->sglen;
        struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
  
 -      DEB_EE(("dev:%p, buf:%p, sg_len:%d\n",dev,buf,length));
 +      DEB_EE("dev:%p, buf:%p, sg_len:%d\n", dev, buf, length);
  
        if( 0 != IS_PLANAR(sfmt->trans)) {
                struct saa7146_pgtable *pt1 = &buf->pt[0];
                                m3 = ((size+(size/2)+PAGE_SIZE)/PAGE_SIZE)-1;
                                o1 = size%PAGE_SIZE;
                                o2 = (size+(size/4))%PAGE_SIZE;
 -                              DEB_CAP(("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n",size,m1,m2,m3,o1,o2));
 +                              DEB_CAP("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n",
 +                                      size, m1, m2, m3, o1, o2);
                                break;
                        }
                        case 16: {
                                m3 = ((2*size+PAGE_SIZE)/PAGE_SIZE)-1;
                                o1 = size%PAGE_SIZE;
                                o2 = (size+(size/2))%PAGE_SIZE;
 -                              DEB_CAP(("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n",size,m1,m2,m3,o1,o2));
 +                              DEB_CAP("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n",
 +                                      size, m1, m2, m3, o1, o2);
                                break;
                        }
                        default: {
@@@ -391,23 -388,23 +392,23 @@@ static int video_begin(struct saa7146_f
        unsigned int resource;
        int ret = 0, err = 0;
  
 -      DEB_EE(("dev:%p, fh:%p\n",dev,fh));
 +      DEB_EE("dev:%p, fh:%p\n", dev, fh);
  
        if ((vv->video_status & STATUS_CAPTURE) != 0) {
                if (vv->video_fh == fh) {
 -                      DEB_S(("already capturing.\n"));
 +                      DEB_S("already capturing\n");
                        return 0;
                }
 -              DEB_S(("already capturing in another open.\n"));
 +              DEB_S("already capturing in another open\n");
                return -EBUSY;
        }
  
        if ((vv->video_status & STATUS_OVERLAY) != 0) {
 -              DEB_S(("warning: suspending overlay video for streaming capture.\n"));
 +              DEB_S("warning: suspending overlay video for streaming capture\n");
                vv->ov_suspend = vv->video_fh;
                err = saa7146_stop_preview(vv->video_fh); /* side effect: video_status is now 0, video_fh is NULL */
                if (0 != err) {
 -                      DEB_D(("suspending video failed. aborting\n"));
 +                      DEB_D("suspending video failed. aborting\n");
                        return err;
                }
        }
  
        ret = saa7146_res_get(fh, resource);
        if (0 == ret) {
 -              DEB_S(("cannot get capture resource %d\n",resource));
 +              DEB_S("cannot get capture resource %d\n", resource);
                if (vv->ov_suspend != NULL) {
                        saa7146_start_preview(vv->ov_suspend);
                        vv->ov_suspend = NULL;
@@@ -452,15 -449,15 +453,15 @@@ static int video_end(struct saa7146_fh 
        unsigned long flags;
        unsigned int resource;
        u32 dmas = 0;
 -      DEB_EE(("dev:%p, fh:%p\n",dev,fh));
 +      DEB_EE("dev:%p, fh:%p\n", dev, fh);
  
        if ((vv->video_status & STATUS_CAPTURE) != STATUS_CAPTURE) {
 -              DEB_S(("not capturing.\n"));
 +              DEB_S("not capturing\n");
                return 0;
        }
  
        if (vv->video_fh != fh) {
 -              DEB_S(("capturing, but in another open.\n"));
 +              DEB_S("capturing, but in another open\n");
                return -EBUSY;
        }
  
@@@ -534,7 -531,7 +535,7 @@@ static int vidioc_s_fbuf(struct file *f
        struct saa7146_vv *vv = dev->vv_data;
        struct saa7146_format *fmt;
  
 -      DEB_EE(("VIDIOC_S_FBUF\n"));
 +      DEB_EE("VIDIOC_S_FBUF\n");
  
        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
                return -EPERM;
  
        /* planar formats are not allowed for overlay video, clipping and video dma would clash */
        if (fmt->flags & FORMAT_IS_PLANAR)
 -              DEB_S(("planar pixelformat '%4.4s' not allowed for overlay\n",
 -                                      (char *)&fmt->pixelformat));
 +              DEB_S("planar pixelformat '%4.4s' not allowed for overlay\n",
 +                    (char *)&fmt->pixelformat);
  
        /* check if overlay is running */
        if (IS_OVERLAY_ACTIVE(fh) != 0) {
                if (vv->video_fh != fh) {
 -                      DEB_D(("refusing to change framebuffer informations while overlay is active in another open.\n"));
 +                      DEB_D("refusing to change framebuffer informations while overlay is active in another open\n");
                        return -EBUSY;
                }
        }
  
        if (vv->ov_fb.fmt.bytesperline < vv->ov_fb.fmt.width) {
                vv->ov_fb.fmt.bytesperline = vv->ov_fb.fmt.width * fmt->depth / 8;
 -              DEB_D(("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline));
 +              DEB_D("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline);
        }
        return 0;
  }
@@@ -592,7 -589,7 +593,7 @@@ static int vidioc_queryctrl(struct fil
        if (ctrl == NULL)
                return -EINVAL;
  
 -      DEB_EE(("VIDIOC_QUERYCTRL: id:%d\n", c->id));
 +      DEB_EE("VIDIOC_QUERYCTRL: id:%d\n", c->id);
        *c = *ctrl;
        return 0;
  }
@@@ -611,25 -608,25 +612,25 @@@ static int vidioc_g_ctrl(struct file *f
        case V4L2_CID_BRIGHTNESS:
                value = saa7146_read(dev, BCS_CTRL);
                c->value = 0xff & (value >> 24);
 -              DEB_D(("V4L2_CID_BRIGHTNESS: %d\n", c->value));
 +              DEB_D("V4L2_CID_BRIGHTNESS: %d\n", c->value);
                break;
        case V4L2_CID_CONTRAST:
                value = saa7146_read(dev, BCS_CTRL);
                c->value = 0x7f & (value >> 16);
 -              DEB_D(("V4L2_CID_CONTRAST: %d\n", c->value));
 +              DEB_D("V4L2_CID_CONTRAST: %d\n", c->value);
                break;
        case V4L2_CID_SATURATION:
                value = saa7146_read(dev, BCS_CTRL);
                c->value = 0x7f & (value >> 0);
 -              DEB_D(("V4L2_CID_SATURATION: %d\n", c->value));
 +              DEB_D("V4L2_CID_SATURATION: %d\n", c->value);
                break;
        case V4L2_CID_VFLIP:
                c->value = vv->vflip;
 -              DEB_D(("V4L2_CID_VFLIP: %d\n", c->value));
 +              DEB_D("V4L2_CID_VFLIP: %d\n", c->value);
                break;
        case V4L2_CID_HFLIP:
                c->value = vv->hflip;
 -              DEB_D(("V4L2_CID_HFLIP: %d\n", c->value));
 +              DEB_D("V4L2_CID_HFLIP: %d\n", c->value);
                break;
        default:
                return -EINVAL;
@@@ -645,7 -642,7 +646,7 @@@ static int vidioc_s_ctrl(struct file *f
  
        ctrl = ctrl_by_id(c->id);
        if (NULL == ctrl) {
 -              DEB_D(("unknown control %d\n", c->id));
 +              DEB_D("unknown control %d\n", c->id);
                return -EINVAL;
        }
  
        case V4L2_CID_HFLIP:
                /* fixme: we can support changing VFLIP and HFLIP here... */
                if (IS_CAPTURE_ACTIVE(fh) != 0) {
 -                      DEB_D(("V4L2_CID_HFLIP while active capture.\n"));
 +                      DEB_D("V4L2_CID_HFLIP while active capture\n");
                        return -EBUSY;
                }
                vv->hflip = c->value;
                break;
        case V4L2_CID_VFLIP:
                if (IS_CAPTURE_ACTIVE(fh) != 0) {
 -                      DEB_D(("V4L2_CID_VFLIP while active capture.\n"));
 +                      DEB_D("V4L2_CID_VFLIP while active capture\n");
                        return -EBUSY;
                }
                vv->vflip = c->value;
@@@ -752,7 -749,7 +753,7 @@@ static int vidioc_try_fmt_vid_cap(struc
        int maxw, maxh;
        int calc_bpl;
  
 -      DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh));
 +      DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh);
  
        fmt = saa7146_format_by_fourcc(dev, f->fmt.pix.pixelformat);
        if (NULL == fmt)
                vv->last_field = V4L2_FIELD_INTERLACED;
                break;
        default:
 -              DEB_D(("no known field mode '%d'.\n", field));
 +              DEB_D("no known field mode '%d'\n", field);
                return -EINVAL;
        }
  
                f->fmt.pix.bytesperline = calc_bpl;
  
        f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
 -      DEB_D(("w:%d, h:%d, bytesperline:%d, sizeimage:%d\n", f->fmt.pix.width,
 -                      f->fmt.pix.height, f->fmt.pix.bytesperline, f->fmt.pix.sizeimage));
 +      DEB_D("w:%d, h:%d, bytesperline:%d, sizeimage:%d\n",
 +            f->fmt.pix.width, f->fmt.pix.height,
 +            f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
  
        return 0;
  }
@@@ -816,23 -812,22 +817,23 @@@ static int vidioc_try_fmt_vid_overlay(s
        enum v4l2_field field;
        int maxw, maxh;
  
 -      DEB_EE(("dev:%p\n", dev));
 +      DEB_EE("dev:%p\n", dev);
  
        if (NULL == vv->ov_fb.base) {
 -              DEB_D(("no fb base set.\n"));
 +              DEB_D("no fb base set\n");
                return -EINVAL;
        }
        if (NULL == vv->ov_fmt) {
 -              DEB_D(("no fb fmt set.\n"));
 +              DEB_D("no fb fmt set\n");
                return -EINVAL;
        }
        if (win->w.width < 48 || win->w.height < 32) {
 -              DEB_D(("min width/height. (%d,%d)\n", win->w.width, win->w.height));
 +              DEB_D("min width/height. (%d,%d)\n",
 +                    win->w.width, win->w.height);
                return -EINVAL;
        }
        if (win->clipcount > 16) {
 -              DEB_D(("clipcount too big.\n"));
 +              DEB_D("clipcount too big\n");
                return -EINVAL;
        }
  
        case V4L2_FIELD_INTERLACED:
                break;
        default:
 -              DEB_D(("no known field mode '%d'.\n", field));
 +              DEB_D("no known field mode '%d'\n", field);
                return -EINVAL;
        }
  
@@@ -874,17 -869,16 +875,17 @@@ static int vidioc_s_fmt_vid_cap(struct 
        struct saa7146_vv *vv = dev->vv_data;
        int err;
  
 -      DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh));
 +      DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh);
        if (IS_CAPTURE_ACTIVE(fh) != 0) {
 -              DEB_EE(("streaming capture is active\n"));
 +              DEB_EE("streaming capture is active\n");
                return -EBUSY;
        }
        err = vidioc_try_fmt_vid_cap(file, fh, f);
        if (0 != err)
                return err;
        fh->video_fmt = f->fmt.pix;
 -      DEB_EE(("set to pixelformat '%4.4s'\n", (char *)&fh->video_fmt.pixelformat));
 +      DEB_EE("set to pixelformat '%4.4s'\n",
 +             (char *)&fh->video_fmt.pixelformat);
        return 0;
  }
  
@@@ -895,7 -889,7 +896,7 @@@ static int vidioc_s_fmt_vid_overlay(str
        struct saa7146_vv *vv = dev->vv_data;
        int err;
  
 -      DEB_EE(("V4L2_BUF_TYPE_VIDEO_OVERLAY: dev:%p, fh:%p\n", dev, fh));
 +      DEB_EE("V4L2_BUF_TYPE_VIDEO_OVERLAY: dev:%p, fh:%p\n", dev, fh);
        err = vidioc_try_fmt_vid_overlay(file, fh, f);
        if (0 != err)
                return err;
@@@ -938,7 -932,7 +939,7 @@@ static int vidioc_g_std(struct file *fi
                if (e->index < 0 )
                        return -EINVAL;
                if( e->index < dev->ext_vv_data->num_stds ) {
 -                      DEB_EE(("VIDIOC_ENUMSTD: index:%d\n",e->index));
 +                      DEB_EE("VIDIOC_ENUMSTD: index:%d\n", e->index);
                        v4l2_video_std_construct(e, dev->ext_vv_data->stds[e->index].id, dev->ext_vv_data->stds[e->index].name);
                        return 0;
                }
@@@ -953,10 -947,10 +954,10 @@@ static int vidioc_s_std(struct file *fi
        int found = 0;
        int err, i;
  
 -      DEB_EE(("VIDIOC_S_STD\n"));
 +      DEB_EE("VIDIOC_S_STD\n");
  
        if ((vv->video_status & STATUS_CAPTURE) == STATUS_CAPTURE) {
 -              DEB_D(("cannot change video standard while streaming capture is active\n"));
 +              DEB_D("cannot change video standard while streaming capture is active\n");
                return -EBUSY;
        }
  
                vv->ov_suspend = vv->video_fh;
                err = saa7146_stop_preview(vv->video_fh); /* side effect: video_status is now 0, video_fh is NULL */
                if (0 != err) {
 -                      DEB_D(("suspending video failed. aborting\n"));
 +                      DEB_D("suspending video failed. aborting\n");
                        return err;
                }
        }
        }
  
        if (!found) {
 -              DEB_EE(("VIDIOC_S_STD: standard not found.\n"));
 +              DEB_EE("VIDIOC_S_STD: standard not found\n");
                return -EINVAL;
        }
  
 -      DEB_EE(("VIDIOC_S_STD: set to standard to '%s'\n", vv->standard->name));
 +      DEB_EE("VIDIOC_S_STD: set to standard to '%s'\n", vv->standard->name);
        return 0;
  }
  
@@@ -997,7 -991,7 +998,7 @@@ static int vidioc_overlay(struct file *
  {
        int err;
  
 -      DEB_D(("VIDIOC_OVERLAY on:%d\n", on));
 +      DEB_D("VIDIOC_OVERLAY on:%d\n", on);
        if (on)
                err = saa7146_start_preview(fh);
        else
@@@ -1054,7 -1048,7 +1055,7 @@@ static int vidioc_streamon(struct file 
        struct saa7146_fh *fh = __fh;
        int err;
  
 -      DEB_D(("VIDIOC_STREAMON, type:%d\n", type));
 +      DEB_D("VIDIOC_STREAMON, type:%d\n", type);
  
        err = video_begin(fh);
        if (err)
@@@ -1073,18 -1067,18 +1074,18 @@@ static int vidioc_streamoff(struct fil
        struct saa7146_vv *vv = dev->vv_data;
        int err;
  
 -      DEB_D(("VIDIOC_STREAMOFF, type:%d\n", type));
 +      DEB_D("VIDIOC_STREAMOFF, type:%d\n", type);
  
        /* ugly: we need to copy some checks from video_end(),
           because videobuf_streamoff() relies on the capture running.
           check and fix this */
        if ((vv->video_status & STATUS_CAPTURE) != STATUS_CAPTURE) {
 -              DEB_S(("not capturing.\n"));
 +              DEB_S("not capturing\n");
                return 0;
        }
  
        if (vv->video_fh != fh) {
 -              DEB_S(("capturing, but in another open.\n"));
 +              DEB_S("capturing, but in another open\n");
                return -EBUSY;
        }
  
        else if (type == V4L2_BUF_TYPE_VBI_CAPTURE)
                err = videobuf_streamoff(&fh->vbi_q);
        if (0 != err) {
 -              DEB_D(("warning: videobuf_streamoff() failed.\n"));
 +              DEB_D("warning: videobuf_streamoff() failed\n");
                video_end(fh, file);
        } else {
                err = video_end(fh, file);
@@@ -1181,27 -1175,25 +1182,27 @@@ static int buffer_prepare(struct videob
        struct saa7146_buf *buf = (struct saa7146_buf *)vb;
        int size,err = 0;
  
 -      DEB_CAP(("vbuf:%p\n",vb));
 +      DEB_CAP("vbuf:%p\n", vb);
  
        /* sanity checks */
        if (fh->video_fmt.width  < 48 ||
            fh->video_fmt.height < 32 ||
            fh->video_fmt.width  > vv->standard->h_max_out ||
            fh->video_fmt.height > vv->standard->v_max_out) {
 -              DEB_D(("w (%d) / h (%d) out of bounds.\n",fh->video_fmt.width,fh->video_fmt.height));
 +              DEB_D("w (%d) / h (%d) out of bounds\n",
 +                    fh->video_fmt.width, fh->video_fmt.height);
                return -EINVAL;
        }
  
        size = fh->video_fmt.sizeimage;
        if (0 != buf->vb.baddr && buf->vb.bsize < size) {
 -              DEB_D(("size mismatch.\n"));
 +              DEB_D("size mismatch\n");
                return -EINVAL;
        }
  
 -      DEB_CAP(("buffer_prepare [size=%dx%d,bytes=%d,fields=%s]\n",
 -              fh->video_fmt.width,fh->video_fmt.height,size,v4l2_field_names[fh->video_fmt.field]));
 +      DEB_CAP("buffer_prepare [size=%dx%d,bytes=%d,fields=%s]\n",
 +              fh->video_fmt.width, fh->video_fmt.height,
 +              size, v4l2_field_names[fh->video_fmt.field]);
        if (buf->vb.width  != fh->video_fmt.width  ||
            buf->vb.bytesperline != fh->video_fmt.bytesperline ||
            buf->vb.height != fh->video_fmt.height ||
        return 0;
  
   oops:
 -      DEB_D(("error out.\n"));
 +      DEB_D("error out\n");
        saa7146_dma_free(dev,q,buf);
  
        return err;
@@@ -1268,7 -1260,7 +1269,7 @@@ static int buffer_setup(struct videobuf
                *count = (max_memory*1048576) / *size;
        }
  
 -      DEB_CAP(("%d buffers, %d bytes each.\n",*count,*size));
 +      DEB_CAP("%d buffers, %d bytes each\n", *count, *size);
  
        return 0;
  }
@@@ -1281,7 -1273,7 +1282,7 @@@ static void buffer_queue(struct videobu
        struct saa7146_vv *vv = dev->vv_data;
        struct saa7146_buf *buf = (struct saa7146_buf *)vb;
  
 -      DEB_CAP(("vbuf:%p\n",vb));
 +      DEB_CAP("vbuf:%p\n", vb);
        saa7146_buffer_queue(fh->dev,&vv->video_q,buf);
  }
  
@@@ -1292,7 -1284,7 +1293,7 @@@ static void buffer_release(struct video
        struct saa7146_dev *dev = fh->dev;
        struct saa7146_buf *buf = (struct saa7146_buf *)vb;
  
 -      DEB_CAP(("vbuf:%p\n",vb));
 +      DEB_CAP("vbuf:%p\n", vb);
  
        saa7146_dma_free(dev,q,buf);
  
@@@ -1356,14 -1348,18 +1357,14 @@@ static void video_close(struct saa7146_
        struct saa7146_fh *fh = file->private_data;
        struct saa7146_vv *vv = dev->vv_data;
        struct videobuf_queue *q = &fh->video_q;
 -      int err;
  
 -      if (IS_CAPTURE_ACTIVE(fh) != 0) {
 -              err = video_end(fh, file);
 -      } else if (IS_OVERLAY_ACTIVE(fh) != 0) {
 -              err = saa7146_stop_preview(fh);
 -      }
 +      if (IS_CAPTURE_ACTIVE(fh) != 0)
 +              video_end(fh, file);
 +      else if (IS_OVERLAY_ACTIVE(fh) != 0)
 +              saa7146_stop_preview(fh);
  
        videobuf_stop(q);
 -
        /* hmm, why is this function declared void? */
 -      /* return err */
  }
  
  
@@@ -1373,7 -1369,7 +1374,7 @@@ static void video_irq_done(struct saa71
        struct saa7146_dmaqueue *q = &vv->video_q;
  
        spin_lock(&dev->slock);
 -      DEB_CAP(("called.\n"));
 +      DEB_CAP("called\n");
  
        /* only finish the buffer if we have one... */
        if( NULL != q->curr ) {
@@@ -1391,15 -1387,15 +1392,15 @@@ static ssize_t video_read(struct file *
        struct saa7146_vv *vv = dev->vv_data;
        ssize_t ret = 0;
  
 -      DEB_EE(("called.\n"));
 +      DEB_EE("called\n");
  
        if ((vv->video_status & STATUS_CAPTURE) != 0) {
                /* fixme: should we allow read() captures while streaming capture? */
                if (vv->video_fh == fh) {
 -                      DEB_S(("already capturing.\n"));
 +                      DEB_S("already capturing\n");
                        return -EBUSY;
                }
 -              DEB_S(("already capturing in another open.\n"));
 +              DEB_S("already capturing in another open\n");
                return -EBUSY;
        }
  
@@@ -1,5 -1,5 +1,6 @@@
  #include <linux/i2c.h>
 +#include <linux/mutex.h>
+ #include <linux/module.h>
  
  #include "dibx000_common.h"
  
@@@ -11,13 -11,6 +12,13 @@@ MODULE_PARM_DESC(debug, "turn on debugg
  
  static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
  {
 +      int ret;
 +
 +      if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
 +              dprintk("could not acquire lock");
 +              return -EINVAL;
 +      }
 +
        mst->i2c_write_buffer[0] = (reg >> 8) & 0xff;
        mst->i2c_write_buffer[1] = reg & 0xff;
        mst->i2c_write_buffer[2] = (val >> 8) & 0xff;
        mst->msg[0].buf = mst->i2c_write_buffer;
        mst->msg[0].len = 4;
  
 -      return i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
 +      ret = i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
 +      mutex_unlock(&mst->i2c_buffer_lock);
 +
 +      return ret;
  }
  
  static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
  {
 +      u16 ret;
 +
 +      if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
 +              dprintk("could not acquire lock");
 +              return 0;
 +      }
 +
        mst->i2c_write_buffer[0] = reg >> 8;
        mst->i2c_write_buffer[1] = reg & 0xff;
  
        if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2)
                dprintk("i2c read error on %d", reg);
  
 -      return (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
 +      ret = (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
 +      mutex_unlock(&mst->i2c_buffer_lock);
 +
 +      return ret;
  }
  
  static int dibx000_is_i2c_done(struct dibx000_i2c_master *mst)
@@@ -278,7 -258,6 +279,7 @@@ static int dibx000_i2c_gated_gpio67_xfe
                                        struct i2c_msg msg[], int num)
  {
        struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
 +      int ret;
  
        if (num > 32) {
                dprintk("%s: too much I2C message to be transmitted (%i).\
                return -ENOMEM;
        }
  
 -      memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
 -
        dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);
  
 +      if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
 +              dprintk("could not acquire lock");
 +              return -EINVAL;
 +      }
 +
 +      memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
 +
        /* open the gate */
        dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
        mst->msg[0].addr = mst->i2c_addr;
        mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
        mst->msg[num + 1].len = 4;
  
 -      return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
 +      ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ?
 +                      num : -EIO);
 +
 +      mutex_unlock(&mst->i2c_buffer_lock);
 +      return ret;
  }
  
  static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
@@@ -325,7 -295,6 +326,7 @@@ static int dibx000_i2c_gated_tuner_xfer
                                        struct i2c_msg msg[], int num)
  {
        struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
 +      int ret;
  
        if (num > 32) {
                dprintk("%s: too much I2C message to be transmitted (%i).\
                return -ENOMEM;
        }
  
 -      memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
 -
        dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
  
 +      if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
 +              dprintk("could not acquire lock");
 +              return -EINVAL;
 +      }
 +      memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
 +
        /* open the gate */
        dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
        mst->msg[0].addr = mst->i2c_addr;
        mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
        mst->msg[num + 1].len = 4;
  
 -      return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
 +      ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ?
 +                      num : -EIO);
 +      mutex_unlock(&mst->i2c_buffer_lock);
 +      return ret;
  }
  
  static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
@@@ -429,18 -391,8 +430,18 @@@ static int i2c_adapter_init(struct i2c_
  int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
                                struct i2c_adapter *i2c_adap, u8 i2c_addr)
  {
 -      u8 tx[4];
 -      struct i2c_msg m = {.addr = i2c_addr >> 1,.buf = tx,.len = 4 };
 +      int ret;
 +
 +      mutex_init(&mst->i2c_buffer_lock);
 +      if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
 +              dprintk("could not acquire lock");
 +              return -EINVAL;
 +      }
 +      memset(mst->msg, 0, sizeof(struct i2c_msg));
 +      mst->msg[0].addr = i2c_addr >> 1;
 +      mst->msg[0].flags = 0;
 +      mst->msg[0].buf = mst->i2c_write_buffer;
 +      mst->msg[0].len = 4;
  
        mst->device_rev = device_rev;
        mst->i2c_adap = i2c_adap;
                                "DiBX000: could not initialize the master i2c_adapter\n");
  
        /* initialize the i2c-master by closing the gate */
 -      dibx000_i2c_gate_ctrl(mst, tx, 0, 0);
 +      dibx000_i2c_gate_ctrl(mst, mst->i2c_write_buffer, 0, 0);
 +
 +      ret = (i2c_transfer(i2c_adap, mst->msg, 1) == 1);
 +      mutex_unlock(&mst->i2c_buffer_lock);
  
 -      return i2c_transfer(i2c_adap, &m, 1) == 1;
 +      return ret;
  }
  
  EXPORT_SYMBOL(dibx000_init_i2c_master);
@@@ -23,6 -23,7 +23,7 @@@
  #include <linux/interrupt.h>
  #include <linux/mfd/wl1273-core.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  #include <media/v4l2-common.h>
  #include <media/v4l2-ctrls.h>
  #include <media/v4l2-device.h>
@@@ -2109,7 -2110,7 +2110,7 @@@ static int __devinit wl1273_fm_radio_pr
                                 V4L2_CID_TUNE_ANTENNA_CAPACITOR,
                                 0, 255, 1, 255);
        if (ctrl)
 -              ctrl->is_volatile = 1;
 +              ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
  
        if (radio->ctrl_handler.error) {
                r = radio->ctrl_handler.error;
@@@ -28,6 -28,8 +28,8 @@@
   *
   */
  
+ #include <linux/export.h>
  #include "fmdrv.h"
  #include "fmdrv_v4l2.h"
  #include "fmdrv_common.h"
@@@ -84,14 -86,12 +86,14 @@@ static ssize_t fm_v4l2_fops_write(struc
        ret = copy_from_user(&rds, buf, sizeof(rds));
        fmdbg("(%d)type: %d, text %s, af %d\n",
                   ret, rds.text_type, rds.text, rds.af_freq);
 +      if (ret)
 +              return -EFAULT;
  
        fmdev = video_drvdata(file);
        fm_tx_set_radio_text(fmdev, rds.text, rds.text_type);
        fm_tx_set_af(fmdev, rds.af_freq);
  
 -      return 0;
 +      return sizeof(rds);
  }
  
  static u32 fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
@@@ -559,7 -559,7 +561,7 @@@ int fm_v4l2_init_video_device(struct fm
                        255, 1, 255);
  
        if (ctrl)
 -              ctrl->is_volatile = 1;
 +              ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
  
        return 0;
  }
@@@ -14,6 -14,7 +14,7 @@@
  
  #include <linux/sched.h>
  #include <linux/wait.h>
+ #include <linux/module.h>
  #include <media/lirc.h>
  #include <media/lirc_dev.h>
  #include <media/rc-core.h>
@@@ -98,7 -99,7 +99,7 @@@ static int ir_lirc_decode(struct rc_de
        return 0;
  }
  
 -static ssize_t ir_lirc_transmit_ir(struct file *file, const char *buf,
 +static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
                                   size_t n, loff_t *ppos)
  {
        struct lirc_codec *lirc;
@@@ -140,11 -141,10 +141,11 @@@ out
  }
  
  static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
 -                      unsigned long __user arg)
 +                      unsigned long arg)
  {
        struct lirc_codec *lirc;
        struct rc_dev *dev;
 +      u32 __user *argp = (u32 __user *)(arg);
        int ret = 0;
        __u32 val = 0, tmp;
  
                return -EFAULT;
  
        if (_IOC_DIR(cmd) & _IOC_WRITE) {
 -              ret = get_user(val, (__u32 *)arg);
 +              ret = get_user(val, argp);
                if (ret)
                        return ret;
        }
        }
  
        if (_IOC_DIR(cmd) & _IOC_READ)
 -              ret = put_user(val, (__u32 *)arg);
 +              ret = put_user(val, argp);
  
        return ret;
  }
@@@ -11,6 -11,7 +11,7 @@@
   */
  
  #include <media/rc-map.h>
+ #include <linux/module.h>
  
  /* Pinnacle PCTV HD 800i mini remote */
  
@@@ -20,7 -21,6 +21,7 @@@ static struct rc_map_table pinnacle_pct
        { 0x0701, KEY_MENU }, /* Pinnacle logo */
        { 0x0739, KEY_POWER },
        { 0x0703, KEY_VOLUMEUP },
 +      { 0x0705, KEY_OK },
        { 0x0709, KEY_VOLUMEDOWN },
        { 0x0706, KEY_CHANNELUP },
        { 0x070c, KEY_CHANNELDOWN },
@@@ -18,6 -18,7 +18,7 @@@
  #include <linux/input.h>
  #include <linux/slab.h>
  #include <linux/device.h>
+ #include <linux/module.h>
  #include "rc-core-priv.h"
  
  /* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
@@@ -928,6 -929,10 +929,6 @@@ out
  
  static void rc_dev_release(struct device *device)
  {
 -      struct rc_dev *dev = to_rc_dev(device);
 -
 -      kfree(dev);
 -      module_put(THIS_MODULE);
  }
  
  #define ADD_HOTPLUG_VAR(fmt, val...)                                  \
@@@ -941,9 -946,6 +942,9 @@@ static int rc_dev_uevent(struct device 
  {
        struct rc_dev *dev = to_rc_dev(device);
  
 +      if (!dev || !dev->input_dev)
 +              return -ENODEV;
 +
        if (dev->rc_map.name)
                ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
        if (dev->driver_name)
@@@ -1012,16 -1014,10 +1013,16 @@@ EXPORT_SYMBOL_GPL(rc_allocate_device)
  
  void rc_free_device(struct rc_dev *dev)
  {
 -      if (dev) {
 +      if (!dev)
 +              return;
 +
 +      if (dev->input_dev)
                input_free_device(dev->input_dev);
 -              put_device(&dev->dev);
 -      }
 +
 +      put_device(&dev->dev);
 +
 +      kfree(dev);
 +      module_put(THIS_MODULE);
  }
  EXPORT_SYMBOL_GPL(rc_free_device);
  
@@@ -1148,18 -1144,14 +1149,18 @@@ void rc_unregister_device(struct rc_de
        if (dev->driver_type == RC_DRIVER_IR_RAW)
                ir_raw_event_unregister(dev);
  
 +      /* Freeing the table should also call the stop callback */
 +      ir_free_table(&dev->rc_map);
 +      IR_dprintk(1, "Freed keycode table\n");
 +
        input_unregister_device(dev->input_dev);
        dev->input_dev = NULL;
  
 -      ir_free_table(&dev->rc_map);
 -      IR_dprintk(1, "Freed keycode table\n");
 +      device_del(&dev->dev);
  
 -      device_unregister(&dev->dev);
 +      rc_free_device(dev);
  }
 +
  EXPORT_SYMBOL_GPL(rc_unregister_device);
  
  /*
@@@ -31,8 -31,8 +31,9 @@@
   */
  
  #include <linux/delay.h>
+ #include <linux/module.h>
  #include <linux/i2c.h>
 +#include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/version.h>
  #include <media/adp1653.h>
@@@ -259,7 -259,7 +260,7 @@@ static int adp1653_init_controls(struc
        if (flash->ctrls.error)
                return flash->ctrls.error;
  
 -      fault->is_volatile = 1;
 +      fault->flags |= V4L2_CTRL_FLAG_VOLATILE;
  
        flash->subdev.ctrl_handler = &flash->ctrls;
        return 0;
@@@ -414,10 -414,6 +415,10 @@@ static int adp1653_probe(struct i2c_cli
        struct adp1653_flash *flash;
        int ret;
  
 +      /* we couldn't work without platform data */
 +      if (client->dev.platform_data == NULL)
 +              return -ENODEV;
 +
        flash = kzalloc(sizeof(*flash), GFP_KERNEL);
        if (flash == NULL)
                return -ENOMEM;
        flash->subdev.internal_ops = &adp1653_internal_ops;
        flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
  
 -      adp1653_init_controls(flash);
 +      ret = adp1653_init_controls(flash);
 +      if (ret)
 +              goto free_and_quit;
  
        ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
        if (ret < 0)
 -              kfree(flash);
 +              goto free_and_quit;
 +
 +      flash->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
  
 +      return 0;
 +
 +free_and_quit:
 +      v4l2_ctrl_handler_free(&flash->ctrls);
 +      kfree(flash);
        return ret;
  }
  
@@@ -23,6 -23,7 +23,7 @@@
  
  #include <linux/slab.h>
  #include <linux/kfifo.h>
+ #include <linux/module.h>
  #include <media/cx25840.h>
  #include <media/rc-core.h>
  
@@@ -668,7 -669,7 +669,7 @@@ static int cx25840_ir_rx_read(struct v4
        u16 divider;
        unsigned int i, n;
        union cx25840_ir_fifo_rec *p;
 -      unsigned u, v;
 +      unsigned u, v, w;
  
        if (ir_state == NULL)
                return -ENODEV;
                if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) {
                        /* Assume RTO was because of no IR light input */
                        u = 0;
 -                      v4l2_dbg(2, ir_debug, sd, "rx read: end of rx\n");
 +                      w = 1;
                } else {
                        u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0;
                        if (invert)
                                u = u ? 0 : 1;
 +                      w = 0;
                }
  
                v = (unsigned) pulse_width_count_to_ns(
                init_ir_raw_event(&p->ir_core_data);
                p->ir_core_data.pulse = u;
                p->ir_core_data.duration = v;
 +              p->ir_core_data.timeout = w;
  
 -              v4l2_dbg(2, ir_debug, sd, "rx read: %10u ns  %s\n",
 -                       v, u ? "mark" : "space");
 +              v4l2_dbg(2, ir_debug, sd, "rx read: %10u ns  %s  %s\n",
 +                       v, u ? "mark" : "space", w ? "(timed out)" : "");
 +              if (w)
 +                      v4l2_dbg(2, ir_debug, sd, "rx read: end of rx\n");
        }
        return 0;
  }
      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #define DEBUG_VARIABLE debug
  
  #include <media/saa7146_vv.h>
+ #include <linux/module.h>
  
  static int debug;
  module_param(debug, int, 0);
@@@ -177,14 -176,13 +178,14 @@@ static int hexium_init_done(struct saa7
        union i2c_smbus_data data;
        int i = 0;
  
 -      DEB_D(("hexium_init_done called.\n"));
 +      DEB_D("hexium_init_done called\n");
  
        /* initialize the helper ics to useful values */
        for (i = 0; i < sizeof(hexium_ks0127b); i++) {
                data.byte = hexium_ks0127b[i];
                if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, i, I2C_SMBUS_BYTE_DATA, &data)) {
 -                      printk("hexium_gemini: hexium_init_done() failed for address 0x%02x\n", i);
 +                      pr_err("hexium_init_done() failed for address 0x%02x\n",
 +                             i);
                }
        }
  
@@@ -195,7 -193,7 +196,7 @@@ static int hexium_set_input(struct hexi
  {
        union i2c_smbus_data data;
  
 -      DEB_D((".\n"));
 +      DEB_D("\n");
  
        data.byte = hexium_input_select[input].byte;
        if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, hexium_input_select[input].adr, I2C_SMBUS_BYTE_DATA, &data)) {
@@@ -210,13 -208,12 +211,13 @@@ static int hexium_set_standard(struct h
        union i2c_smbus_data data;
        int i = 0;
  
 -      DEB_D((".\n"));
 +      DEB_D("\n");
  
        while (vdec[i].adr != -1) {
                data.byte = vdec[i].byte;
                if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, vdec[i].adr, I2C_SMBUS_BYTE_DATA, &data)) {
 -                      printk("hexium_init_done: hexium_set_standard() failed for address 0x%02x\n", i);
 +                      pr_err("hexium_init_done: hexium_set_standard() failed for address 0x%02x\n",
 +                             i);
                        return -1;
                }
                i++;
  
  static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
  {
 -      DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
 +      DEB_EE("VIDIOC_ENUMINPUT %d\n", i->index);
  
        if (i->index >= HEXIUM_INPUTS)
                return -EINVAL;
  
        memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input));
  
 -      DEB_D(("v4l2_ioctl: VIDIOC_ENUMINPUT %d.\n", i->index));
 +      DEB_D("v4l2_ioctl: VIDIOC_ENUMINPUT %d\n", i->index);
        return 0;
  }
  
@@@ -244,7 -241,7 +245,7 @@@ static int vidioc_g_input(struct file *
  
        *input = hexium->cur_input;
  
 -      DEB_D(("VIDIOC_G_INPUT: %d\n", *input));
 +      DEB_D("VIDIOC_G_INPUT: %d\n", *input);
        return 0;
  }
  
@@@ -253,7 -250,7 +254,7 @@@ static int vidioc_s_input(struct file *
        struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
        struct hexium *hexium = (struct hexium *) dev->ext_priv;
  
 -      DEB_EE(("VIDIOC_S_INPUT %d.\n", input));
 +      DEB_EE("VIDIOC_S_INPUT %d\n", input);
  
        if (input >= HEXIUM_INPUTS)
                return -EINVAL;
@@@ -274,7 -271,7 +275,7 @@@ static int vidioc_queryctrl(struct fil
        for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
                if (hexium_controls[i].id == qc->id) {
                        *qc = hexium_controls[i];
 -                      DEB_D(("VIDIOC_QUERYCTRL %d.\n", qc->id));
 +                      DEB_D("VIDIOC_QUERYCTRL %d\n", qc->id);
                        return 0;
                }
        }
@@@ -297,7 -294,7 +298,7 @@@ static int vidioc_g_ctrl(struct file *f
  
        if (vc->id == V4L2_CID_PRIVATE_BASE) {
                vc->value = hexium->cur_bw;
 -              DEB_D(("VIDIOC_G_CTRL BW:%d.\n", vc->value));
 +              DEB_D("VIDIOC_G_CTRL BW:%d\n", vc->value);
                return 0;
        }
        return -EINVAL;
@@@ -320,7 -317,7 +321,7 @@@ static int vidioc_s_ctrl(struct file *f
        if (vc->id == V4L2_CID_PRIVATE_BASE)
                hexium->cur_bw = vc->value;
  
 -      DEB_D(("VIDIOC_S_CTRL BW:%d.\n", hexium->cur_bw));
 +      DEB_D("VIDIOC_S_CTRL BW:%d\n", hexium->cur_bw);
  
        if (0 == hexium->cur_bw && V4L2_STD_PAL == hexium->cur_std) {
                hexium_set_standard(hexium, hexium_pal);
@@@ -355,14 -352,14 +356,14 @@@ static struct saa7146_ext_vv vv_data
  /* this function only gets called when the probing was successful */
  static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
  {
 -      struct hexium *hexium = (struct hexium *) dev->ext_priv;
 +      struct hexium *hexium;
        int ret;
  
 -      DEB_EE((".\n"));
 +      DEB_EE("\n");
  
        hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
        if (NULL == hexium) {
 -              printk("hexium_gemini: not enough kernel memory in hexium_attach().\n");
 +              pr_err("not enough kernel memory in hexium_attach()\n");
                return -ENOMEM;
        }
        dev->ext_priv = hexium;
        };
        saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
        if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
 -              DEB_S(("cannot register i2c-device. skipping.\n"));
 +              DEB_S("cannot register i2c-device. skipping.\n");
                kfree(hexium);
                return -EFAULT;
        }
        vv_data.ops.vidioc_s_input = vidioc_s_input;
        ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
        if (ret < 0) {
 -              printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n");
 +              pr_err("cannot register capture v4l2 device. skipping.\n");
                return ret;
        }
  
 -      printk("hexium_gemini: found 'hexium gemini' frame grabber-%d.\n", hexium_num);
 +      pr_info("found 'hexium gemini' frame grabber-%d\n", hexium_num);
        hexium_num++;
  
        return 0;
@@@ -420,7 -417,7 +421,7 @@@ static int hexium_detach(struct saa7146
  {
        struct hexium *hexium = (struct hexium *) dev->ext_priv;
  
 -      DEB_EE(("dev:%p\n", dev));
 +      DEB_EE("dev:%p\n", dev);
  
        saa7146_unregister_device(&hexium->video_dev, dev);
        saa7146_vv_release(dev);
@@@ -512,7 -509,7 +513,7 @@@ static struct saa7146_extension hexium_
  static int __init hexium_init_module(void)
  {
        if (0 != saa7146_register_extension(&hexium_extension)) {
 -              DEB_S(("failed to register extension.\n"));
 +              DEB_S("failed to register extension\n");
                return -ENODEV;
        }
  
      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #define DEBUG_VARIABLE debug
  
  #include <media/saa7146_vv.h>
+ #include <linux/module.h>
  
  static int debug;
  module_param(debug, int, 0);
@@@ -211,7 -210,7 +212,7 @@@ static int hexium_probe(struct saa7146_
        union i2c_smbus_data data;
        int err = 0;
  
 -      DEB_EE((".\n"));
 +      DEB_EE("\n");
  
        /* there are no hexium orion cards with revision 0 saa7146s */
        if (0 == dev->revision) {
  
        hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
        if (NULL == hexium) {
 -              printk("hexium_orion: hexium_probe: not enough kernel memory.\n");
 +              pr_err("hexium_probe: not enough kernel memory\n");
                return -ENOMEM;
        }
  
        };
        saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
        if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
 -              DEB_S(("cannot register i2c-device. skipping.\n"));
 +              DEB_S("cannot register i2c-device. skipping.\n");
                kfree(hexium);
                return -EFAULT;
        }
  
        /* detect newer Hexium Orion cards by subsystem ids */
        if (0x17c8 == dev->pci->subsystem_vendor && 0x0101 == dev->pci->subsystem_device) {
 -              printk("hexium_orion: device is a Hexium Orion w/ 1 SVHS + 3 BNC inputs.\n");
 +              pr_info("device is a Hexium Orion w/ 1 SVHS + 3 BNC inputs\n");
                /* we store the pointer in our private data field */
                dev->ext_priv = hexium;
                hexium->type = HEXIUM_ORION_1SVHS_3BNC;
        }
  
        if (0x17c8 == dev->pci->subsystem_vendor && 0x2101 == dev->pci->subsystem_device) {
 -              printk("hexium_orion: device is a Hexium Orion w/ 4 BNC inputs.\n");
 +              pr_info("device is a Hexium Orion w/ 4 BNC inputs\n");
                /* we store the pointer in our private data field */
                dev->ext_priv = hexium;
                hexium->type = HEXIUM_ORION_4BNC;
        /* check if this is an old hexium Orion card by looking at
           a saa7110 at address 0x4e */
        if (0 == (err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ, 0x00, I2C_SMBUS_BYTE_DATA, &data))) {
 -              printk("hexium_orion: device is a Hexium HV-PCI6/Orion (old).\n");
 +              pr_info("device is a Hexium HV-PCI6/Orion (old)\n");
                /* we store the pointer in our private data field */
                dev->ext_priv = hexium;
                hexium->type = HEXIUM_HV_PCI6_ORION;
@@@ -290,13 -289,13 +291,13 @@@ static int hexium_init_done(struct saa7
        union i2c_smbus_data data;
        int i = 0;
  
 -      DEB_D(("hexium_init_done called.\n"));
 +      DEB_D("hexium_init_done called\n");
  
        /* initialize the helper ics to useful values */
        for (i = 0; i < sizeof(hexium_saa7110); i++) {
                data.byte = hexium_saa7110[i];
                if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_WRITE, i, I2C_SMBUS_BYTE_DATA, &data)) {
 -                      printk("hexium_orion: failed for address 0x%02x\n", i);
 +                      pr_err("failed for address 0x%02x\n", i);
                }
        }
  
@@@ -308,7 -307,7 +309,7 @@@ static int hexium_set_input(struct hexi
        union i2c_smbus_data data;
        int i = 0;
  
 -      DEB_D((".\n"));
 +      DEB_D("\n");
  
        for (i = 0; i < 8; i++) {
                int adr = hexium_input_select[input].data[i].adr;
                if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_WRITE, adr, I2C_SMBUS_BYTE_DATA, &data)) {
                        return -1;
                }
 -              printk("%d: 0x%02x => 0x%02x\n",input, adr,data.byte);
 +              pr_debug("%d: 0x%02x => 0x%02x\n", input, adr, data.byte);
        }
  
        return 0;
  
  static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
  {
 -      DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
 +      DEB_EE("VIDIOC_ENUMINPUT %d\n", i->index);
  
        if (i->index >= HEXIUM_INPUTS)
                return -EINVAL;
  
        memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input));
  
 -      DEB_D(("v4l2_ioctl: VIDIOC_ENUMINPUT %d.\n", i->index));
 +      DEB_D("v4l2_ioctl: VIDIOC_ENUMINPUT %d\n", i->index);
        return 0;
  }
  
@@@ -342,7 -341,7 +343,7 @@@ static int vidioc_g_input(struct file *
  
        *input = hexium->cur_input;
  
 -      DEB_D(("VIDIOC_G_INPUT: %d\n", *input));
 +      DEB_D("VIDIOC_G_INPUT: %d\n", *input);
        return 0;
  }
  
@@@ -367,18 -366,18 +368,18 @@@ static int hexium_attach(struct saa7146
  {
        struct hexium *hexium = (struct hexium *) dev->ext_priv;
  
 -      DEB_EE((".\n"));
 +      DEB_EE("\n");
  
        saa7146_vv_init(dev, &vv_data);
        vv_data.ops.vidioc_enum_input = vidioc_enum_input;
        vv_data.ops.vidioc_g_input = vidioc_g_input;
        vv_data.ops.vidioc_s_input = vidioc_s_input;
        if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
 -              printk("hexium_orion: cannot register capture v4l2 device. skipping.\n");
 +              pr_err("cannot register capture v4l2 device. skipping.\n");
                return -1;
        }
  
 -      printk("hexium_orion: found 'hexium orion' frame grabber-%d.\n", hexium_num);
 +              pr_info("found 'hexium orion' frame grabber-%d\n", hexium_num);
        hexium_num++;
  
        /* the rest */
@@@ -392,7 -391,7 +393,7 @@@ static int hexium_detach(struct saa7146
  {
        struct hexium *hexium = (struct hexium *) dev->ext_priv;
  
 -      DEB_EE(("dev:%p\n", dev));
 +      DEB_EE("dev:%p\n", dev);
  
        saa7146_unregister_device(&hexium->video_dev, dev);
        saa7146_vv_release(dev);
@@@ -481,7 -480,7 +482,7 @@@ static struct saa7146_extension extensi
  static int __init hexium_init_module(void)
  {
        if (0 != saa7146_register_extension(&extension)) {
 -              DEB_S(("failed to register extension.\n"));
 +              DEB_S("failed to register extension\n");
                return -ENODEV;
        }
  
  
  #include <linux/delay.h>
  #include <linux/i2c.h>
 +#include <linux/v4l2-mediabus.h>
  #include <linux/slab.h>
  #include <linux/videodev2.h>
+ #include <linux/module.h>
  
  #include <media/soc_camera.h>
 -#include <media/soc_mediabus.h>
  #include <media/v4l2-subdev.h>
  #include <media/v4l2-chip-ident.h>
  
@@@ -267,17 -268,6 +268,17 @@@ static int imx074_g_chip_ident(struct v
        return 0;
  }
  
 +static int imx074_g_mbus_config(struct v4l2_subdev *sd,
 +                              struct v4l2_mbus_config *cfg)
 +{
 +      cfg->type = V4L2_MBUS_CSI2;
 +      cfg->flags = V4L2_MBUS_CSI2_2_LANE |
 +              V4L2_MBUS_CSI2_CHANNEL_0 |
 +              V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
 +
 +      return 0;
 +}
 +
  static struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
        .s_stream       = imx074_s_stream,
        .s_mbus_fmt     = imx074_s_fmt,
        .enum_mbus_fmt  = imx074_enum_fmt,
        .g_crop         = imx074_g_crop,
        .cropcap        = imx074_cropcap,
 +      .g_mbus_config  = imx074_g_mbus_config,
  };
  
  static struct v4l2_subdev_core_ops imx074_subdev_core_ops = {
@@@ -298,7 -287,28 +299,7 @@@ static struct v4l2_subdev_ops imx074_su
        .video  = &imx074_subdev_video_ops,
  };
  
 -/*
 - * We have to provide soc-camera operations, but we don't have anything to say
 - * there. The MIPI CSI2 driver will provide .query_bus_param and .set_bus_param
 - */
 -static unsigned long imx074_query_bus_param(struct soc_camera_device *icd)
 -{
 -      return 0;
 -}
 -
 -static int imx074_set_bus_param(struct soc_camera_device *icd,
 -                               unsigned long flags)
 -{
 -      return -EINVAL;
 -}
 -
 -static struct soc_camera_ops imx074_ops = {
 -      .query_bus_param        = imx074_query_bus_param,
 -      .set_bus_param          = imx074_set_bus_param,
 -};
 -
 -static int imx074_video_probe(struct soc_camera_device *icd,
 -                            struct i2c_client *client)
 +static int imx074_video_probe(struct i2c_client *client)
  {
        int ret;
        u16 id;
@@@ -408,10 -418,17 +409,10 @@@ static int imx074_probe(struct i2c_clie
                        const struct i2c_device_id *did)
  {
        struct imx074 *priv;
 -      struct soc_camera_device *icd = client->dev.platform_data;
        struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
 -      struct soc_camera_link *icl;
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
        int ret;
  
 -      if (!icd) {
 -              dev_err(&client->dev, "IMX074: missing soc-camera data!\n");
 -              return -EINVAL;
 -      }
 -
 -      icl = to_soc_camera_link(icd);
        if (!icl) {
                dev_err(&client->dev, "IMX074: missing platform data!\n");
                return -EINVAL;
  
        v4l2_i2c_subdev_init(&priv->subdev, client, &imx074_subdev_ops);
  
 -      icd->ops        = &imx074_ops;
        priv->fmt       = &imx074_colour_fmts[0];
  
 -      ret = imx074_video_probe(icd, client);
 +      ret = imx074_video_probe(client);
        if (ret < 0) {
 -              icd->ops = NULL;
                kfree(priv);
                return ret;
        }
  static int imx074_remove(struct i2c_client *client)
  {
        struct imx074 *priv = to_imx074(client);
 -      struct soc_camera_device *icd = client->dev.platform_data;
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
  
 -      icd->ops = NULL;
        if (icl->free_bus)
                icl->free_bus(icl);
        kfree(priv);
@@@ -21,6 -21,7 +21,7 @@@
  #include <linux/gpio.h>
  #include <linux/regulator/consumer.h>
  #include <linux/videodev2.h>
+ #include <linux/module.h>
  #include <media/v4l2-ctrls.h>
  #include <media/v4l2-device.h>
  #include <media/v4l2-subdev.h>
@@@ -936,7 -937,7 +937,7 @@@ static int __devinit m5mols_probe(struc
                return -EINVAL;
        }
  
 -      if (!pdata->irq) {
 +      if (!client->irq) {
                dev_err(&client->dev, "Interrupt not assigned\n");
                return -EINVAL;
        }
  
        init_waitqueue_head(&info->irq_waitq);
        INIT_WORK(&info->work_irq, m5mols_irq_work);
 -      ret = request_irq(pdata->irq, m5mols_irq_handler,
 +      ret = request_irq(client->irq, m5mols_irq_handler,
                          IRQF_TRIGGER_RISING, MODULE_NAME, sd);
        if (ret) {
                dev_err(&client->dev, "Interrupt request failed: %d\n", ret);
@@@ -998,7 -999,7 +999,7 @@@ static int __devexit m5mols_remove(stru
        struct m5mols_info *info = to_m5mols(sd);
  
        v4l2_device_unregister_subdev(sd);
 -      free_irq(info->pdata->irq, sd);
 +      free_irq(client->irq, sd);
  
        regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
        gpio_free(info->pdata->gpio_reset);
  #include <linux/slab.h>
  #include <linux/i2c.h>
  #include <linux/log2.h>
+ #include <linux/module.h>
  
 +#include <media/soc_camera.h>
 +#include <media/soc_mediabus.h>
  #include <media/v4l2-subdev.h>
  #include <media/v4l2-chip-ident.h>
 -#include <media/soc_camera.h>
 +#include <media/v4l2-ctrls.h>
  
  /*
   * mt9m001 i2c address 0x5d
@@@ -86,19 -85,15 +87,19 @@@ static const struct mt9m001_datafmt mt9
  
  struct mt9m001 {
        struct v4l2_subdev subdev;
 +      struct v4l2_ctrl_handler hdl;
 +      struct {
 +              /* exposure/auto-exposure cluster */
 +              struct v4l2_ctrl *autoexposure;
 +              struct v4l2_ctrl *exposure;
 +      };
        struct v4l2_rect rect;  /* Sensor window */
        const struct mt9m001_datafmt *fmt;
        const struct mt9m001_datafmt *fmts;
        int num_fmts;
        int model;      /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */
 -      unsigned int gain;
 -      unsigned int exposure;
 +      unsigned int total_h;
        unsigned short y_skip_top;      /* Lines to skip at the top */
 -      unsigned char autoexposure;
  };
  
  static struct mt9m001 *to_mt9m001(const struct i2c_client *client)
@@@ -171,13 -166,54 +172,13 @@@ static int mt9m001_s_stream(struct v4l2
        return 0;
  }
  
 -static int mt9m001_set_bus_param(struct soc_camera_device *icd,
 -                               unsigned long flags)
 -{
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -      unsigned long width_flag = flags & SOCAM_DATAWIDTH_MASK;
 -
 -      /* Only one width bit may be set */
 -      if (!is_power_of_2(width_flag))
 -              return -EINVAL;
 -
 -      if (icl->set_bus_param)
 -              return icl->set_bus_param(icl, width_flag);
 -
 -      /*
 -       * Without board specific bus width settings we only support the
 -       * sensors native bus width
 -       */
 -      if (width_flag == SOCAM_DATAWIDTH_10)
 -              return 0;
 -
 -      return -EINVAL;
 -}
 -
 -static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd)
 -{
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -      /* MT9M001 has all capture_format parameters fixed */
 -      unsigned long flags = SOCAM_PCLK_SAMPLE_FALLING |
 -              SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
 -              SOCAM_DATA_ACTIVE_HIGH | SOCAM_MASTER;
 -
 -      if (icl->query_bus_param)
 -              flags |= icl->query_bus_param(icl) & SOCAM_DATAWIDTH_MASK;
 -      else
 -              flags |= SOCAM_DATAWIDTH_10;
 -
 -      return soc_camera_apply_sensor_flags(icl, flags);
 -}
 -
  static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
  {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        struct mt9m001 *mt9m001 = to_mt9m001(client);
        struct v4l2_rect rect = a->c;
 -      struct soc_camera_device *icd = client->dev.platform_data;
        int ret;
        const u16 hblank = 9, vblank = 25;
 -      unsigned int total_h;
  
        if (mt9m001->fmts == mt9m001_colour_fmts)
                /*
        soc_camera_limit_side(&rect.top, &rect.height,
                     MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT);
  
 -      total_h = rect.height + mt9m001->y_skip_top + vblank;
 +      mt9m001->total_h = rect.height + mt9m001->y_skip_top + vblank;
  
        /* Blanking and start values - default... */
        ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank);
  
        /*
         * The caller provides a supported format, as verified per
 -       * call to icd->try_fmt()
 +       * call to .try_mbus_fmt()
         */
        if (!ret)
                ret = reg_write(client, MT9M001_COLUMN_START, rect.left);
        if (!ret)
                ret = reg_write(client, MT9M001_WINDOW_HEIGHT,
                                rect.height + mt9m001->y_skip_top - 1);
 -      if (!ret && mt9m001->autoexposure) {
 -              ret = reg_write(client, MT9M001_SHUTTER_WIDTH, total_h);
 -              if (!ret) {
 -                      const struct v4l2_queryctrl *qctrl =
 -                              soc_camera_find_qctrl(icd->ops,
 -                                                    V4L2_CID_EXPOSURE);
 -                      mt9m001->exposure = (524 + (total_h - 1) *
 -                               (qctrl->maximum - qctrl->minimum)) /
 -                              1048 + qctrl->minimum;
 -              }
 -      }
 +      if (!ret && v4l2_ctrl_g_ctrl(mt9m001->autoexposure) == V4L2_EXPOSURE_AUTO)
 +              ret = reg_write(client, MT9M001_SHUTTER_WIDTH, mt9m001->total_h);
  
        if (!ret)
                mt9m001->rect = rect;
@@@ -377,48 -422,107 +378,48 @@@ static int mt9m001_s_register(struct v4
  }
  #endif
  
 -static const struct v4l2_queryctrl mt9m001_controls[] = {
 -      {
 -              .id             = V4L2_CID_VFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Vertically",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      }, {
 -              .id             = V4L2_CID_GAIN,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Gain",
 -              .minimum        = 0,
 -              .maximum        = 127,
 -              .step           = 1,
 -              .default_value  = 64,
 -              .flags          = V4L2_CTRL_FLAG_SLIDER,
 -      }, {
 -              .id             = V4L2_CID_EXPOSURE,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Exposure",
 -              .minimum        = 1,
 -              .maximum        = 255,
 -              .step           = 1,
 -              .default_value  = 255,
 -              .flags          = V4L2_CTRL_FLAG_SLIDER,
 -      }, {
 -              .id             = V4L2_CID_EXPOSURE_AUTO,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Automatic Exposure",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 1,
 -      }
 -};
 -
 -static struct soc_camera_ops mt9m001_ops = {
 -      .set_bus_param          = mt9m001_set_bus_param,
 -      .query_bus_param        = mt9m001_query_bus_param,
 -      .controls               = mt9m001_controls,
 -      .num_controls           = ARRAY_SIZE(mt9m001_controls),
 -};
 -
 -static int mt9m001_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 +static int mt9m001_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
  {
 -      struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      struct mt9m001 *mt9m001 = to_mt9m001(client);
 -      int data;
 +      struct mt9m001 *mt9m001 = container_of(ctrl->handler,
 +                                             struct mt9m001, hdl);
 +      s32 min, max;
  
        switch (ctrl->id) {
 -      case V4L2_CID_VFLIP:
 -              data = reg_read(client, MT9M001_READ_OPTIONS2);
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !!(data & 0x8000);
 -              break;
        case V4L2_CID_EXPOSURE_AUTO:
 -              ctrl->value = mt9m001->autoexposure;
 -              break;
 -      case V4L2_CID_GAIN:
 -              ctrl->value = mt9m001->gain;
 -              break;
 -      case V4L2_CID_EXPOSURE:
 -              ctrl->value = mt9m001->exposure;
 +              min = mt9m001->exposure->minimum;
 +              max = mt9m001->exposure->maximum;
 +              mt9m001->exposure->val =
 +                      (524 + (mt9m001->total_h - 1) * (max - min)) / 1048 + min;
                break;
        }
        return 0;
  }
  
 -static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 +static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl)
  {
 +      struct mt9m001 *mt9m001 = container_of(ctrl->handler,
 +                                             struct mt9m001, hdl);
 +      struct v4l2_subdev *sd = &mt9m001->subdev;
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      struct mt9m001 *mt9m001 = to_mt9m001(client);
 -      struct soc_camera_device *icd = client->dev.platform_data;
 -      const struct v4l2_queryctrl *qctrl;
 +      struct v4l2_ctrl *exp = mt9m001->exposure;
        int data;
  
 -      qctrl = soc_camera_find_qctrl(&mt9m001_ops, ctrl->id);
 -
 -      if (!qctrl)
 -              return -EINVAL;
 -
        switch (ctrl->id) {
        case V4L2_CID_VFLIP:
 -              if (ctrl->value)
 +              if (ctrl->val)
                        data = reg_set(client, MT9M001_READ_OPTIONS2, 0x8000);
                else
                        data = reg_clear(client, MT9M001_READ_OPTIONS2, 0x8000);
                if (data < 0)
                        return -EIO;
 -              break;
 +              return 0;
 +
        case V4L2_CID_GAIN:
 -              if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
 -                      return -EINVAL;
                /* See Datasheet Table 7, Gain settings. */
 -              if (ctrl->value <= qctrl->default_value) {
 +              if (ctrl->val <= ctrl->default_value) {
                        /* Pack it into 0..1 step 0.125, register values 0..8 */
 -                      unsigned long range = qctrl->default_value - qctrl->minimum;
 -                      data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range;
 +                      unsigned long range = ctrl->default_value - ctrl->minimum;
 +                      data = ((ctrl->val - ctrl->minimum) * 8 + range / 2) / range;
  
                        dev_dbg(&client->dev, "Setting gain %d\n", data);
                        data = reg_write(client, MT9M001_GLOBAL_GAIN, data);
                } else {
                        /* Pack it into 1.125..15 variable step, register values 9..67 */
                        /* We assume qctrl->maximum - qctrl->default_value - 1 > 0 */
 -                      unsigned long range = qctrl->maximum - qctrl->default_value - 1;
 -                      unsigned long gain = ((ctrl->value - qctrl->default_value - 1) *
 +                      unsigned long range = ctrl->maximum - ctrl->default_value - 1;
 +                      unsigned long gain = ((ctrl->val - ctrl->default_value - 1) *
                                               111 + range / 2) / range + 9;
  
                        if (gain <= 32)
                        if (data < 0)
                                return -EIO;
                }
 +              return 0;
  
 -              /* Success */
 -              mt9m001->gain = ctrl->value;
 -              break;
 -      case V4L2_CID_EXPOSURE:
 -              /* mt9m001 has maximum == default */
 -              if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
 -                      return -EINVAL;
 -              else {
 -                      unsigned long range = qctrl->maximum - qctrl->minimum;
 -                      unsigned long shutter = ((ctrl->value - qctrl->minimum) * 1048 +
 +      case V4L2_CID_EXPOSURE_AUTO:
 +              if (ctrl->val == V4L2_EXPOSURE_MANUAL) {
 +                      unsigned long range = exp->maximum - exp->minimum;
 +                      unsigned long shutter = ((exp->val - exp->minimum) * 1048 +
                                                 range / 2) / range + 1;
  
                        dev_dbg(&client->dev,
                                "Setting shutter width from %d to %lu\n",
 -                              reg_read(client, MT9M001_SHUTTER_WIDTH),
 -                              shutter);
 +                              reg_read(client, MT9M001_SHUTTER_WIDTH), shutter);
                        if (reg_write(client, MT9M001_SHUTTER_WIDTH, shutter) < 0)
                                return -EIO;
 -                      mt9m001->exposure = ctrl->value;
 -                      mt9m001->autoexposure = 0;
 -              }
 -              break;
 -      case V4L2_CID_EXPOSURE_AUTO:
 -              if (ctrl->value) {
 +              } else {
                        const u16 vblank = 25;
 -                      unsigned int total_h = mt9m001->rect.height +
 +
 +                      mt9m001->total_h = mt9m001->rect.height +
                                mt9m001->y_skip_top + vblank;
 -                      if (reg_write(client, MT9M001_SHUTTER_WIDTH,
 -                                    total_h) < 0)
 +                      if (reg_write(client, MT9M001_SHUTTER_WIDTH, mt9m001->total_h) < 0)
                                return -EIO;
 -                      qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE);
 -                      mt9m001->exposure = (524 + (total_h - 1) *
 -                               (qctrl->maximum - qctrl->minimum)) /
 -                              1048 + qctrl->minimum;
 -                      mt9m001->autoexposure = 1;
 -              } else
 -                      mt9m001->autoexposure = 0;
 -              break;
 +              }
 +              return 0;
        }
 -      return 0;
 +      return -EINVAL;
  }
  
  /*
   * Interface active, can use i2c. If it fails, it can indeed mean, that
   * this wasn't our capture interface, so, we wait for the right one
   */
 -static int mt9m001_video_probe(struct soc_camera_device *icd,
 +static int mt9m001_video_probe(struct soc_camera_link *icl,
                               struct i2c_client *client)
  {
        struct mt9m001 *mt9m001 = to_mt9m001(client);
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
        s32 data;
        unsigned long flags;
        int ret;
  
 -      /* We must have a parent by now. And it cannot be a wrong one. */
 -      BUG_ON(!icd->parent ||
 -             to_soc_camera_host(icd->parent)->nr != icd->iface);
 -
        /* Enable the chip */
        data = reg_write(client, MT9M001_CHIP_ENABLE, 1);
        dev_dbg(&client->dev, "write: %d\n", data);
                dev_err(&client->dev, "Failed to initialise the camera\n");
  
        /* mt9m001_init() has reset the chip, returning registers to defaults */
 -      mt9m001->gain = 64;
 -      mt9m001->exposure = 255;
 -
 -      return ret;
 +      return v4l2_ctrl_handler_setup(&mt9m001->hdl);
  }
  
 -static void mt9m001_video_remove(struct soc_camera_device *icd)
 +static void mt9m001_video_remove(struct soc_camera_link *icl)
  {
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -
 -      dev_dbg(icd->pdev, "Video removed: %p, %p\n",
 -              icd->parent, icd->vdev);
        if (icl->free_bus)
                icl->free_bus(icl);
  }
@@@ -555,12 -688,9 +556,12 @@@ static int mt9m001_g_skip_top_lines(str
        return 0;
  }
  
 +static const struct v4l2_ctrl_ops mt9m001_ctrl_ops = {
 +      .g_volatile_ctrl = mt9m001_g_volatile_ctrl,
 +      .s_ctrl = mt9m001_s_ctrl,
 +};
 +
  static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
 -      .g_ctrl         = mt9m001_g_ctrl,
 -      .s_ctrl         = mt9m001_s_ctrl,
        .g_chip_ident   = mt9m001_g_chip_ident,
  #ifdef CONFIG_VIDEO_ADV_DEBUG
        .g_register     = mt9m001_g_register,
@@@ -581,40 -711,6 +582,40 @@@ static int mt9m001_enum_fmt(struct v4l2
        return 0;
  }
  
 +static int mt9m001_g_mbus_config(struct v4l2_subdev *sd,
 +                              struct v4l2_mbus_config *cfg)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +
 +      /* MT9M001 has all capture_format parameters fixed */
 +      cfg->flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
 +              V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
 +              V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_MASTER;
 +      cfg->type = V4L2_MBUS_PARALLEL;
 +      cfg->flags = soc_camera_apply_board_flags(icl, cfg);
 +
 +      return 0;
 +}
 +
 +static int mt9m001_s_mbus_config(struct v4l2_subdev *sd,
 +                              const struct v4l2_mbus_config *cfg)
 +{
 +      const struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +      struct mt9m001 *mt9m001 = to_mt9m001(client);
 +      unsigned int bps = soc_mbus_get_fmtdesc(mt9m001->fmt->code)->bits_per_sample;
 +
 +      if (icl->set_bus_param)
 +              return icl->set_bus_param(icl, 1 << (bps - 1));
 +
 +      /*
 +       * Without board specific bus width settings we only support the
 +       * sensors native bus width
 +       */
 +      return bps == 10 ? 0 : -EINVAL;
 +}
 +
  static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
        .s_stream       = mt9m001_s_stream,
        .s_mbus_fmt     = mt9m001_s_fmt,
        .g_crop         = mt9m001_g_crop,
        .cropcap        = mt9m001_cropcap,
        .enum_mbus_fmt  = mt9m001_enum_fmt,
 +      .g_mbus_config  = mt9m001_g_mbus_config,
 +      .s_mbus_config  = mt9m001_s_mbus_config,
  };
  
  static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
@@@ -642,10 -736,17 +643,10 @@@ static int mt9m001_probe(struct i2c_cli
                         const struct i2c_device_id *did)
  {
        struct mt9m001 *mt9m001;
 -      struct soc_camera_device *icd = client->dev.platform_data;
        struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
 -      struct soc_camera_link *icl;
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
        int ret;
  
 -      if (!icd) {
 -              dev_err(&client->dev, "MT9M001: missing soc-camera data!\n");
 -              return -EINVAL;
 -      }
 -
 -      icl = to_soc_camera_link(icd);
        if (!icl) {
                dev_err(&client->dev, "MT9M001 driver needs platform data\n");
                return -EINVAL;
                return -ENOMEM;
  
        v4l2_i2c_subdev_init(&mt9m001->subdev, client, &mt9m001_subdev_ops);
 +      v4l2_ctrl_handler_init(&mt9m001->hdl, 4);
 +      v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops,
 +                      V4L2_CID_VFLIP, 0, 1, 1, 0);
 +      v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops,
 +                      V4L2_CID_GAIN, 0, 127, 1, 64);
 +      mt9m001->exposure = v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops,
 +                      V4L2_CID_EXPOSURE, 1, 255, 1, 255);
 +      /*
 +       * Simulated autoexposure. If enabled, we calculate shutter width
 +       * ourselves in the driver based on vertical blanking and frame width
 +       */
 +      mt9m001->autoexposure = v4l2_ctrl_new_std_menu(&mt9m001->hdl,
 +                      &mt9m001_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
 +                      V4L2_EXPOSURE_AUTO);
 +      mt9m001->subdev.ctrl_handler = &mt9m001->hdl;
 +      if (mt9m001->hdl.error) {
 +              int err = mt9m001->hdl.error;
  
 -      /* Second stage probe - when a capture adapter is there */
 -      icd->ops                = &mt9m001_ops;
 +              kfree(mt9m001);
 +              return err;
 +      }
 +      v4l2_ctrl_auto_cluster(2, &mt9m001->autoexposure,
 +                                      V4L2_EXPOSURE_MANUAL, true);
  
 +      /* Second stage probe - when a capture adapter is there */
        mt9m001->y_skip_top     = 0;
        mt9m001->rect.left      = MT9M001_COLUMN_SKIP;
        mt9m001->rect.top       = MT9M001_ROW_SKIP;
        mt9m001->rect.width     = MT9M001_MAX_WIDTH;
        mt9m001->rect.height    = MT9M001_MAX_HEIGHT;
  
 -      /*
 -       * Simulated autoexposure. If enabled, we calculate shutter width
 -       * ourselves in the driver based on vertical blanking and frame width
 -       */
 -      mt9m001->autoexposure = 1;
 -
 -      ret = mt9m001_video_probe(icd, client);
 +      ret = mt9m001_video_probe(icl, client);
        if (ret) {
 -              icd->ops = NULL;
 +              v4l2_ctrl_handler_free(&mt9m001->hdl);
                kfree(mt9m001);
        }
  
  static int mt9m001_remove(struct i2c_client *client)
  {
        struct mt9m001 *mt9m001 = to_mt9m001(client);
 -      struct soc_camera_device *icd = client->dev.platform_data;
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
  
 -      icd->ops = NULL;
 -      mt9m001_video_remove(icd);
 +      v4l2_device_unregister_subdev(&mt9m001->subdev);
 +      v4l2_ctrl_handler_free(&mt9m001->hdl);
 +      mt9m001_video_remove(icl);
        kfree(mt9m001);
  
        return 0;
  #include <linux/log2.h>
  #include <linux/gpio.h>
  #include <linux/delay.h>
 +#include <linux/v4l2-mediabus.h>
+ #include <linux/module.h>
  
 +#include <media/soc_camera.h>
  #include <media/v4l2-common.h>
 +#include <media/v4l2-ctrls.h>
  #include <media/v4l2-chip-ident.h>
 -#include <media/soc_camera.h>
  
  /*
   * MT9M111, MT9M112 and MT9M131:
@@@ -179,8 -178,6 +180,8 @@@ enum mt9m111_context 
  
  struct mt9m111 {
        struct v4l2_subdev subdev;
 +      struct v4l2_ctrl_handler hdl;
 +      struct v4l2_ctrl *gain;
        int model;      /* V4L2_IDENT_MT9M111 or V4L2_IDENT_MT9M112 code
                         * from v4l2-chip-ident.h */
        enum mt9m111_context context;
        struct mutex power_lock; /* lock to protect power_count */
        int power_count;
        const struct mt9m111_datafmt *fmt;
 -      unsigned int gain;
 -      unsigned char autoexposure;
 +      int lastpage;   /* PageMap cache value */
        unsigned char datawidth;
        unsigned int powered:1;
 -      unsigned int hflip:1;
 -      unsigned int vflip:1;
 -      unsigned int autowhitebalance:1;
  };
  
  static struct mt9m111 *to_mt9m111(const struct i2c_client *client)
@@@ -202,17 -203,17 +203,17 @@@ static int reg_page_map_set(struct i2c_
  {
        int ret;
        u16 page;
 -      static int lastpage = -1;       /* PageMap cache value */
 +      struct mt9m111 *mt9m111 = to_mt9m111(client);
  
        page = (reg >> 8);
 -      if (page == lastpage)
 +      if (page == mt9m111->lastpage)
                return 0;
        if (page > 2)
                return -EINVAL;
  
        ret = i2c_smbus_write_word_data(client, MT9M111_PAGE_MAP, swab16(page));
        if (!ret)
 -              lastpage = page;
 +              mt9m111->lastpage = page;
        return ret;
  }
  
@@@ -362,6 -363,21 +363,6 @@@ static int mt9m111_reset(struct mt9m11
        return ret;
  }
  
 -static unsigned long mt9m111_query_bus_param(struct soc_camera_device *icd)
 -{
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -      unsigned long flags = SOCAM_MASTER | SOCAM_PCLK_SAMPLE_RISING |
 -              SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
 -              SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8;
 -
 -      return soc_camera_apply_sensor_flags(icl, flags);
 -}
 -
 -static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f)
 -{
 -      return 0;
 -}
 -
  static int mt9m111_make_rect(struct mt9m111 *mt9m111,
                             struct v4l2_rect *rect)
  {
@@@ -644,6 -660,50 +645,6 @@@ static int mt9m111_s_register(struct v4
  }
  #endif
  
 -static const struct v4l2_queryctrl mt9m111_controls[] = {
 -      {
 -              .id             = V4L2_CID_VFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Verticaly",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      }, {
 -              .id             = V4L2_CID_HFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Horizontaly",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      }, {    /* gain = 1/32*val (=>gain=1 if val==32) */
 -              .id             = V4L2_CID_GAIN,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Gain",
 -              .minimum        = 0,
 -              .maximum        = 63 * 2 * 2,
 -              .step           = 1,
 -              .default_value  = 32,
 -              .flags          = V4L2_CTRL_FLAG_SLIDER,
 -      }, {
 -              .id             = V4L2_CID_EXPOSURE_AUTO,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Auto Exposure",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 1,
 -      }
 -};
 -
 -static struct soc_camera_ops mt9m111_ops = {
 -      .query_bus_param        = mt9m111_query_bus_param,
 -      .set_bus_param          = mt9m111_set_bus_param,
 -      .controls               = mt9m111_controls,
 -      .num_controls           = ARRAY_SIZE(mt9m111_controls),
 -};
 -
  static int mt9m111_set_flip(struct mt9m111 *mt9m111, int flip, int mask)
  {
        struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
@@@ -684,6 -744,7 +685,6 @@@ static int mt9m111_set_global_gain(stru
        if (gain > 63 * 2 * 2)
                return -EINVAL;
  
 -      mt9m111->gain = gain;
        if ((gain >= 64 * 2) && (gain < 63 * 2 * 2))
                val = (1 << 10) | (1 << 9) | (gain / 4);
        else if ((gain >= 64) && (gain < 64 * 2))
  static int mt9m111_set_autoexposure(struct mt9m111 *mt9m111, int on)
  {
        struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
 -      int ret;
  
        if (on)
 -              ret = reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOEXPO_EN);
 -      else
 -              ret = reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOEXPO_EN);
 -
 -      if (!ret)
 -              mt9m111->autoexposure = on;
 -
 -      return ret;
 +              return reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOEXPO_EN);
 +      return reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOEXPO_EN);
  }
  
  static int mt9m111_set_autowhitebalance(struct mt9m111 *mt9m111, int on)
  {
        struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
 -      int ret;
  
        if (on)
 -              ret = reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN);
 -      else
 -              ret = reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN);
 -
 -      if (!ret)
 -              mt9m111->autowhitebalance = on;
 -
 -      return ret;
 -}
 -
 -static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 -{
 -      struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
 -      int data;
 -
 -      switch (ctrl->id) {
 -      case V4L2_CID_VFLIP:
 -              if (mt9m111->context == HIGHPOWER)
 -                      data = reg_read(READ_MODE_B);
 -              else
 -                      data = reg_read(READ_MODE_A);
 -
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !!(data & MT9M111_RMB_MIRROR_ROWS);
 -              break;
 -      case V4L2_CID_HFLIP:
 -              if (mt9m111->context == HIGHPOWER)
 -                      data = reg_read(READ_MODE_B);
 -              else
 -                      data = reg_read(READ_MODE_A);
 -
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !!(data & MT9M111_RMB_MIRROR_COLS);
 -              break;
 -      case V4L2_CID_GAIN:
 -              data = mt9m111_get_global_gain(mt9m111);
 -              if (data < 0)
 -                      return data;
 -              ctrl->value = data;
 -              break;
 -      case V4L2_CID_EXPOSURE_AUTO:
 -              ctrl->value = mt9m111->autoexposure;
 -              break;
 -      case V4L2_CID_AUTO_WHITE_BALANCE:
 -              ctrl->value = mt9m111->autowhitebalance;
 -              break;
 -      }
 -      return 0;
 +              return reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN);
 +      return reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN);
  }
  
 -static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 +static int mt9m111_s_ctrl(struct v4l2_ctrl *ctrl)
  {
 -      struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
 -      const struct v4l2_queryctrl *qctrl;
 -      int ret;
 -
 -      qctrl = soc_camera_find_qctrl(&mt9m111_ops, ctrl->id);
 -      if (!qctrl)
 -              return -EINVAL;
 +      struct mt9m111 *mt9m111 = container_of(ctrl->handler,
 +                                             struct mt9m111, hdl);
  
        switch (ctrl->id) {
        case V4L2_CID_VFLIP:
 -              mt9m111->vflip = ctrl->value;
 -              ret = mt9m111_set_flip(mt9m111, ctrl->value,
 +              return mt9m111_set_flip(mt9m111, ctrl->val,
                                        MT9M111_RMB_MIRROR_ROWS);
 -              break;
        case V4L2_CID_HFLIP:
 -              mt9m111->hflip = ctrl->value;
 -              ret = mt9m111_set_flip(mt9m111, ctrl->value,
 +              return mt9m111_set_flip(mt9m111, ctrl->val,
                                        MT9M111_RMB_MIRROR_COLS);
 -              break;
        case V4L2_CID_GAIN:
 -              ret = mt9m111_set_global_gain(mt9m111, ctrl->value);
 -              break;
 +              return mt9m111_set_global_gain(mt9m111, ctrl->val);
        case V4L2_CID_EXPOSURE_AUTO:
 -              ret =  mt9m111_set_autoexposure(mt9m111, ctrl->value);
 -              break;
 +              return mt9m111_set_autoexposure(mt9m111, ctrl->val);
        case V4L2_CID_AUTO_WHITE_BALANCE:
 -              ret =  mt9m111_set_autowhitebalance(mt9m111, ctrl->value);
 -              break;
 -      default:
 -              ret = -EINVAL;
 +              return mt9m111_set_autowhitebalance(mt9m111, ctrl->val);
        }
  
 -      return ret;
 +      return -EINVAL;
  }
  
  static int mt9m111_suspend(struct mt9m111 *mt9m111)
  {
 -      mt9m111->gain = mt9m111_get_global_gain(mt9m111);
 +      v4l2_ctrl_s_ctrl(mt9m111->gain, mt9m111_get_global_gain(mt9m111));
  
        return 0;
  }
@@@ -747,7 -879,11 +748,7 @@@ static void mt9m111_restore_state(struc
        mt9m111_set_context(mt9m111, mt9m111->context);
        mt9m111_set_pixfmt(mt9m111, mt9m111->fmt->code);
        mt9m111_setup_rect(mt9m111, &mt9m111->rect);
 -      mt9m111_set_flip(mt9m111, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS);
 -      mt9m111_set_flip(mt9m111, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS);
 -      mt9m111_set_global_gain(mt9m111, mt9m111->gain);
 -      mt9m111_set_autoexposure(mt9m111, mt9m111->autoexposure);
 -      mt9m111_set_autowhitebalance(mt9m111, mt9m111->autowhitebalance);
 +      v4l2_ctrl_handler_setup(&mt9m111->hdl);
  }
  
  static int mt9m111_resume(struct mt9m111 *mt9m111)
@@@ -775,6 -911,8 +776,6 @@@ static int mt9m111_init(struct mt9m111 
                ret = mt9m111_reset(mt9m111);
        if (!ret)
                ret = mt9m111_set_context(mt9m111, mt9m111->context);
 -      if (!ret)
 -              ret = mt9m111_set_autoexposure(mt9m111, mt9m111->autoexposure);
        if (ret)
                dev_err(&client->dev, "mt9m111 init failed: %d\n", ret);
        return ret;
   * Interface active, can use i2c. If it fails, it can indeed mean, that
   * this wasn't our capture interface, so, we wait for the right one
   */
 -static int mt9m111_video_probe(struct soc_camera_device *icd,
 -                             struct i2c_client *client)
 +static int mt9m111_video_probe(struct i2c_client *client)
  {
        struct mt9m111 *mt9m111 = to_mt9m111(client);
        s32 data;
        int ret;
  
 -      /* We must have a parent by now. And it cannot be a wrong one. */
 -      BUG_ON(!icd->parent ||
 -             to_soc_camera_host(icd->parent)->nr != icd->iface);
 -
 -      mt9m111->autoexposure = 1;
 -      mt9m111->autowhitebalance = 1;
 -
        data = reg_read(CHIP_VERSION);
  
        switch (data) {
                dev_info(&client->dev, "Detected a MT9M112 chip ID %x\n", data);
                break;
        default:
 -              ret = -ENODEV;
                dev_err(&client->dev,
                        "No MT9M111/MT9M112/MT9M131 chip detected register read %x\n",
                        data);
 -              goto ei2c;
 +              return -ENODEV;
        }
  
        ret = mt9m111_init(mt9m111);
 -
 -ei2c:
 -      return ret;
 +      if (ret)
 +              return ret;
 +      return v4l2_ctrl_handler_setup(&mt9m111->hdl);
  }
  
  static int mt9m111_s_power(struct v4l2_subdev *sd, int on)
@@@ -849,11 -996,9 +850,11 @@@ out
        return ret;
  }
  
 +static const struct v4l2_ctrl_ops mt9m111_ctrl_ops = {
 +      .s_ctrl = mt9m111_s_ctrl,
 +};
 +
  static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
 -      .g_ctrl         = mt9m111_g_ctrl,
 -      .s_ctrl         = mt9m111_s_ctrl,
        .g_chip_ident   = mt9m111_g_chip_ident,
        .s_power        = mt9m111_s_power,
  #ifdef CONFIG_VIDEO_ADV_DEBUG
@@@ -872,21 -1017,6 +873,21 @@@ static int mt9m111_enum_fmt(struct v4l2
        return 0;
  }
  
 +static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
 +                              struct v4l2_mbus_config *cfg)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +
 +      cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
 +              V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
 +              V4L2_MBUS_DATA_ACTIVE_HIGH;
 +      cfg->type = V4L2_MBUS_PARALLEL;
 +      cfg->flags = soc_camera_apply_board_flags(icl, cfg);
 +
 +      return 0;
 +}
 +
  static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
        .s_mbus_fmt     = mt9m111_s_fmt,
        .g_mbus_fmt     = mt9m111_g_fmt,
        .g_crop         = mt9m111_g_crop,
        .cropcap        = mt9m111_cropcap,
        .enum_mbus_fmt  = mt9m111_enum_fmt,
 +      .g_mbus_config  = mt9m111_g_mbus_config,
  };
  
  static struct v4l2_subdev_ops mt9m111_subdev_ops = {
@@@ -907,10 -1036,17 +908,10 @@@ static int mt9m111_probe(struct i2c_cli
                         const struct i2c_device_id *did)
  {
        struct mt9m111 *mt9m111;
 -      struct soc_camera_device *icd = client->dev.platform_data;
        struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
 -      struct soc_camera_link *icl;
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
        int ret;
  
 -      if (!icd) {
 -              dev_err(&client->dev, "mt9m111: soc-camera data missing!\n");
 -              return -EINVAL;
 -      }
 -
 -      icl = to_soc_camera_link(icd);
        if (!icl) {
                dev_err(&client->dev, "mt9m111: driver needs platform data\n");
                return -EINVAL;
                return -ENOMEM;
  
        v4l2_i2c_subdev_init(&mt9m111->subdev, client, &mt9m111_subdev_ops);
 +      v4l2_ctrl_handler_init(&mt9m111->hdl, 5);
 +      v4l2_ctrl_new_std(&mt9m111->hdl, &mt9m111_ctrl_ops,
 +                      V4L2_CID_VFLIP, 0, 1, 1, 0);
 +      v4l2_ctrl_new_std(&mt9m111->hdl, &mt9m111_ctrl_ops,
 +                      V4L2_CID_HFLIP, 0, 1, 1, 0);
 +      v4l2_ctrl_new_std(&mt9m111->hdl, &mt9m111_ctrl_ops,
 +                      V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
 +      mt9m111->gain = v4l2_ctrl_new_std(&mt9m111->hdl, &mt9m111_ctrl_ops,
 +                      V4L2_CID_GAIN, 0, 63 * 2 * 2, 1, 32);
 +      v4l2_ctrl_new_std_menu(&mt9m111->hdl,
 +                      &mt9m111_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
 +                      V4L2_EXPOSURE_AUTO);
 +      mt9m111->subdev.ctrl_handler = &mt9m111->hdl;
 +      if (mt9m111->hdl.error) {
 +              int err = mt9m111->hdl.error;
  
 -      /* Second stage probe - when a capture adapter is there */
 -      icd->ops                = &mt9m111_ops;
 +              kfree(mt9m111);
 +              return err;
 +      }
  
 +      /* Second stage probe - when a capture adapter is there */
        mt9m111->rect.left      = MT9M111_MIN_DARK_COLS;
        mt9m111->rect.top       = MT9M111_MIN_DARK_ROWS;
        mt9m111->rect.width     = MT9M111_MAX_WIDTH;
        mt9m111->rect.height    = MT9M111_MAX_HEIGHT;
        mt9m111->fmt            = &mt9m111_colour_fmts[0];
 +      mt9m111->lastpage       = -1;
  
 -      ret = mt9m111_video_probe(icd, client);
 +      ret = mt9m111_video_probe(client);
        if (ret) {
 -              icd->ops = NULL;
 +              v4l2_ctrl_handler_free(&mt9m111->hdl);
                kfree(mt9m111);
        }
  
  static int mt9m111_remove(struct i2c_client *client)
  {
        struct mt9m111 *mt9m111 = to_mt9m111(client);
 -      struct soc_camera_device *icd = client->dev.platform_data;
  
 -      icd->ops = NULL;
 +      v4l2_device_unregister_subdev(&mt9m111->subdev);
 +      v4l2_ctrl_handler_free(&mt9m111->hdl);
        kfree(mt9m111);
  
        return 0;
  #include <linux/log2.h>
  #include <linux/pm.h>
  #include <linux/slab.h>
 +#include <linux/v4l2-mediabus.h>
  #include <linux/videodev2.h>
+ #include <linux/module.h>
  
  #include <media/soc_camera.h>
  #include <media/v4l2-chip-ident.h>
  #include <media/v4l2-subdev.h>
 +#include <media/v4l2-ctrls.h>
 +
 +/*
 + * ATTENTION: this driver still cannot be used outside of the soc-camera
 + * framework because of its PM implementation, using the video_device node.
 + * If hardware becomes available for testing, alternative PM approaches shall
 + * be considered and tested.
 + */
  
  /*
   * mt9t031 i2c address 0x5d
  #define MT9T031_COLUMN_SKIP           32
  #define MT9T031_ROW_SKIP              20
  
 -#define MT9T031_BUS_PARAM     (SOCAM_PCLK_SAMPLE_RISING |     \
 -      SOCAM_PCLK_SAMPLE_FALLING | SOCAM_HSYNC_ACTIVE_HIGH |   \
 -      SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH |      \
 -      SOCAM_MASTER | SOCAM_DATAWIDTH_10)
 -
  struct mt9t031 {
        struct v4l2_subdev subdev;
 +      struct v4l2_ctrl_handler hdl;
 +      struct {
 +              /* exposure/auto-exposure cluster */
 +              struct v4l2_ctrl *autoexposure;
 +              struct v4l2_ctrl *exposure;
 +      };
        struct v4l2_rect rect;  /* Sensor window */
        int model;      /* V4L2_IDENT_MT9T031* codes from v4l2-chip-ident.h */
        u16 xskip;
        u16 yskip;
 -      unsigned int gain;
 +      unsigned int total_h;
        unsigned short y_skip_top;      /* Lines to skip at the top */
 -      unsigned int exposure;
 -      unsigned char autoexposure;
  };
  
  static struct mt9t031 *to_mt9t031(const struct i2c_client *client)
@@@ -187,6 -180,95 +188,6 @@@ static int mt9t031_s_stream(struct v4l2
        return 0;
  }
  
 -static int mt9t031_set_bus_param(struct soc_camera_device *icd,
 -                               unsigned long flags)
 -{
 -      struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
 -
 -      /* The caller should have queried our parameters, check anyway */
 -      if (flags & ~MT9T031_BUS_PARAM)
 -              return -EINVAL;
 -
 -      if (flags & SOCAM_PCLK_SAMPLE_FALLING)
 -              reg_clear(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
 -      else
 -              reg_set(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
 -
 -      return 0;
 -}
 -
 -static unsigned long mt9t031_query_bus_param(struct soc_camera_device *icd)
 -{
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -
 -      return soc_camera_apply_sensor_flags(icl, MT9T031_BUS_PARAM);
 -}
 -
 -enum {
 -      MT9T031_CTRL_VFLIP,
 -      MT9T031_CTRL_HFLIP,
 -      MT9T031_CTRL_GAIN,
 -      MT9T031_CTRL_EXPOSURE,
 -      MT9T031_CTRL_EXPOSURE_AUTO,
 -};
 -
 -static const struct v4l2_queryctrl mt9t031_controls[] = {
 -      [MT9T031_CTRL_VFLIP] = {
 -              .id             = V4L2_CID_VFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Vertically",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      },
 -      [MT9T031_CTRL_HFLIP] = {
 -              .id             = V4L2_CID_HFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Horizontally",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      },
 -      [MT9T031_CTRL_GAIN] = {
 -              .id             = V4L2_CID_GAIN,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Gain",
 -              .minimum        = 0,
 -              .maximum        = 127,
 -              .step           = 1,
 -              .default_value  = 64,
 -              .flags          = V4L2_CTRL_FLAG_SLIDER,
 -      },
 -      [MT9T031_CTRL_EXPOSURE] = {
 -              .id             = V4L2_CID_EXPOSURE,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Exposure",
 -              .minimum        = 1,
 -              .maximum        = 255,
 -              .step           = 1,
 -              .default_value  = 255,
 -              .flags          = V4L2_CTRL_FLAG_SLIDER,
 -      },
 -      [MT9T031_CTRL_EXPOSURE_AUTO] = {
 -              .id             = V4L2_CID_EXPOSURE_AUTO,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Automatic Exposure",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 1,
 -      }
 -};
 -
 -static struct soc_camera_ops mt9t031_ops = {
 -      .set_bus_param          = mt9t031_set_bus_param,
 -      .query_bus_param        = mt9t031_query_bus_param,
 -      .controls               = mt9t031_controls,
 -      .num_controls           = ARRAY_SIZE(mt9t031_controls),
 -};
 -
  /* target must be _even_ */
  static u16 mt9t031_skip(s32 *source, s32 target, s32 max)
  {
@@@ -272,7 -354,7 +273,7 @@@ static int mt9t031_set_params(struct i2
  
        /*
         * The caller provides a supported format, as guaranteed by
 -       * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap()
 +       * .try_mbus_fmt(), soc_camera_s_crop() and soc_camera_cropcap()
         */
        if (ret >= 0)
                ret = reg_write(client, MT9T031_COLUMN_START, rect->left);
        if (ret >= 0)
                ret = reg_write(client, MT9T031_WINDOW_HEIGHT,
                                rect->height + mt9t031->y_skip_top - 1);
 -      if (ret >= 0 && mt9t031->autoexposure) {
 -              unsigned int total_h = rect->height + mt9t031->y_skip_top + vblank;
 -              ret = set_shutter(client, total_h);
 -              if (ret >= 0) {
 -                      const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
 -                      const struct v4l2_queryctrl *qctrl =
 -                              &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
 -                      mt9t031->exposure = (shutter_max / 2 + (total_h - 1) *
 -                               (qctrl->maximum - qctrl->minimum)) /
 -                              shutter_max + qctrl->minimum;
 -              }
 +      if (ret >= 0 && v4l2_ctrl_g_ctrl(mt9t031->autoexposure) == V4L2_EXPOSURE_AUTO) {
 +              mt9t031->total_h = rect->height + mt9t031->y_skip_top + vblank;
 +
 +              ret = set_shutter(client, mt9t031->total_h);
        }
  
        /* Re-enable register update, commit all changes */
@@@ -455,57 -544,71 +456,57 @@@ static int mt9t031_s_register(struct v4
  }
  #endif
  
 -static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 +static int mt9t031_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
  {
 -      struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      struct mt9t031 *mt9t031 = to_mt9t031(client);
 -      int data;
 +      struct mt9t031 *mt9t031 = container_of(ctrl->handler,
 +                                             struct mt9t031, hdl);
 +      const u32 shutter_max = MT9T031_MAX_HEIGHT + MT9T031_VERTICAL_BLANK;
 +      s32 min, max;
  
        switch (ctrl->id) {
 -      case V4L2_CID_VFLIP:
 -              data = reg_read(client, MT9T031_READ_MODE_2);
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !!(data & 0x8000);
 -              break;
 -      case V4L2_CID_HFLIP:
 -              data = reg_read(client, MT9T031_READ_MODE_2);
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !!(data & 0x4000);
 -              break;
        case V4L2_CID_EXPOSURE_AUTO:
 -              ctrl->value = mt9t031->autoexposure;
 -              break;
 -      case V4L2_CID_GAIN:
 -              ctrl->value = mt9t031->gain;
 -              break;
 -      case V4L2_CID_EXPOSURE:
 -              ctrl->value = mt9t031->exposure;
 +              min = mt9t031->exposure->minimum;
 +              max = mt9t031->exposure->maximum;
 +              mt9t031->exposure->val =
 +                      (shutter_max / 2 + (mt9t031->total_h - 1) * (max - min))
 +                              / shutter_max + min;
                break;
        }
        return 0;
  }
  
 -static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 +static int mt9t031_s_ctrl(struct v4l2_ctrl *ctrl)
  {
 +      struct mt9t031 *mt9t031 = container_of(ctrl->handler,
 +                                             struct mt9t031, hdl);
 +      struct v4l2_subdev *sd = &mt9t031->subdev;
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      struct mt9t031 *mt9t031 = to_mt9t031(client);
 -      const struct v4l2_queryctrl *qctrl;
 +      struct v4l2_ctrl *exp = mt9t031->exposure;
        int data;
  
        switch (ctrl->id) {
        case V4L2_CID_VFLIP:
 -              if (ctrl->value)
 +              if (ctrl->val)
                        data = reg_set(client, MT9T031_READ_MODE_2, 0x8000);
                else
                        data = reg_clear(client, MT9T031_READ_MODE_2, 0x8000);
                if (data < 0)
                        return -EIO;
 -              break;
 +              return 0;
        case V4L2_CID_HFLIP:
 -              if (ctrl->value)
 +              if (ctrl->val)
                        data = reg_set(client, MT9T031_READ_MODE_2, 0x4000);
                else
                        data = reg_clear(client, MT9T031_READ_MODE_2, 0x4000);
                if (data < 0)
                        return -EIO;
 -              break;
 +              return 0;
        case V4L2_CID_GAIN:
 -              qctrl = &mt9t031_controls[MT9T031_CTRL_GAIN];
 -              if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
 -                      return -EINVAL;
                /* See Datasheet Table 7, Gain settings. */
 -              if (ctrl->value <= qctrl->default_value) {
 +              if (ctrl->val <= ctrl->default_value) {
                        /* Pack it into 0..1 step 0.125, register values 0..8 */
 -                      unsigned long range = qctrl->default_value - qctrl->minimum;
 -                      data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range;
 +                      unsigned long range = ctrl->default_value - ctrl->minimum;
 +                      data = ((ctrl->val - ctrl->minimum) * 8 + range / 2) / range;
  
                        dev_dbg(&client->dev, "Setting gain %d\n", data);
                        data = reg_write(client, MT9T031_GLOBAL_GAIN, data);
                } else {
                        /* Pack it into 1.125..128 variable step, register values 9..0x7860 */
                        /* We assume qctrl->maximum - qctrl->default_value - 1 > 0 */
 -                      unsigned long range = qctrl->maximum - qctrl->default_value - 1;
 +                      unsigned long range = ctrl->maximum - ctrl->default_value - 1;
                        /* calculated gain: map 65..127 to 9..1024 step 0.125 */
 -                      unsigned long gain = ((ctrl->value - qctrl->default_value - 1) *
 +                      unsigned long gain = ((ctrl->val - ctrl->default_value - 1) *
                                               1015 + range / 2) / range + 9;
  
                        if (gain <= 32)         /* calculated gain 9..32 -> 9..32 */
                        if (data < 0)
                                return -EIO;
                }
 +              return 0;
  
 -              /* Success */
 -              mt9t031->gain = ctrl->value;
 -              break;
 -      case V4L2_CID_EXPOSURE:
 -              qctrl = &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
 -              /* mt9t031 has maximum == default */
 -              if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
 -                      return -EINVAL;
 -              else {
 -                      const unsigned long range = qctrl->maximum - qctrl->minimum;
 -                      const u32 shutter = ((ctrl->value - qctrl->minimum) * 1048 +
 -                                           range / 2) / range + 1;
 +      case V4L2_CID_EXPOSURE_AUTO:
 +              if (ctrl->val == V4L2_EXPOSURE_MANUAL) {
 +                      unsigned int range = exp->maximum - exp->minimum;
 +                      unsigned int shutter = ((exp->val - exp->minimum) * 1048 +
 +                                               range / 2) / range + 1;
                        u32 old;
  
                        get_shutter(client, &old);
                                old, shutter);
                        if (set_shutter(client, shutter) < 0)
                                return -EIO;
 -                      mt9t031->exposure = ctrl->value;
 -                      mt9t031->autoexposure = 0;
 -              }
 -              break;
 -      case V4L2_CID_EXPOSURE_AUTO:
 -              if (ctrl->value) {
 +              } else {
                        const u16 vblank = MT9T031_VERTICAL_BLANK;
 -                      const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
 -                      unsigned int total_h = mt9t031->rect.height +
 +                      mt9t031->total_h = mt9t031->rect.height +
                                mt9t031->y_skip_top + vblank;
  
 -                      if (set_shutter(client, total_h) < 0)
 +                      if (set_shutter(client, mt9t031->total_h) < 0)
                                return -EIO;
 -                      qctrl = &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
 -                      mt9t031->exposure = (shutter_max / 2 + (total_h - 1) *
 -                               (qctrl->maximum - qctrl->minimum)) /
 -                              shutter_max + qctrl->minimum;
 -                      mt9t031->autoexposure = 1;
 -              } else
 -                      mt9t031->autoexposure = 0;
 -              break;
 +              }
 +              return 0;
        default:
                return -EINVAL;
        }
@@@ -580,7 -701,8 +581,7 @@@ static int mt9t031_runtime_suspend(stru
  static int mt9t031_runtime_resume(struct device *dev)
  {
        struct video_device *vdev = to_video_device(dev);
 -      struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
 -      struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
 +      struct v4l2_subdev *sd = soc_camera_vdev_to_subdev(vdev);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        struct mt9t031 *mt9t031 = to_mt9t031(client);
  
@@@ -613,19 -735,6 +614,19 @@@ static struct device_type mt9t031_dev_t
        .pm     = &mt9t031_dev_pm_ops,
  };
  
 +static int mt9t031_s_power(struct v4l2_subdev *sd, int on)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct video_device *vdev = soc_camera_i2c_to_vdev(client);
 +
 +      if (on)
 +              vdev->dev.type = &mt9t031_dev_type;
 +      else
 +              vdev->dev.type = NULL;
 +
 +      return 0;
 +}
 +
  /*
   * Interface active, can use i2c. If it fails, it can indeed mean, that
   * this wasn't our capture interface, so, we wait for the right one
  static int mt9t031_video_probe(struct i2c_client *client)
  {
        struct mt9t031 *mt9t031 = to_mt9t031(client);
 -      struct video_device *vdev = soc_camera_i2c_to_vdev(client);
        s32 data;
        int ret;
  
        if (ret < 0)
                dev_err(&client->dev, "Failed to initialise the camera\n");
        else
 -              vdev->dev.type = &mt9t031_dev_type;
 -
 -      /* mt9t031_idle() has reset the chip to default. */
 -      mt9t031->exposure = 255;
 -      mt9t031->gain = 64;
 +              v4l2_ctrl_handler_setup(&mt9t031->hdl);
  
        return ret;
  }
@@@ -674,14 -788,10 +675,14 @@@ static int mt9t031_g_skip_top_lines(str
        return 0;
  }
  
 +static const struct v4l2_ctrl_ops mt9t031_ctrl_ops = {
 +      .g_volatile_ctrl = mt9t031_g_volatile_ctrl,
 +      .s_ctrl = mt9t031_s_ctrl,
 +};
 +
  static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
 -      .g_ctrl         = mt9t031_g_ctrl,
 -      .s_ctrl         = mt9t031_s_ctrl,
        .g_chip_ident   = mt9t031_g_chip_ident,
 +      .s_power        = mt9t031_s_power,
  #ifdef CONFIG_VIDEO_ADV_DEBUG
        .g_register     = mt9t031_g_register,
        .s_register     = mt9t031_s_register,
@@@ -698,34 -808,6 +699,34 @@@ static int mt9t031_enum_fmt(struct v4l2
        return 0;
  }
  
 +static int mt9t031_g_mbus_config(struct v4l2_subdev *sd,
 +                              struct v4l2_mbus_config *cfg)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +
 +      cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
 +              V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
 +              V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH;
 +      cfg->type = V4L2_MBUS_PARALLEL;
 +      cfg->flags = soc_camera_apply_board_flags(icl, cfg);
 +
 +      return 0;
 +}
 +
 +static int mt9t031_s_mbus_config(struct v4l2_subdev *sd,
 +                              const struct v4l2_mbus_config *cfg)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +
 +      if (soc_camera_apply_board_flags(icl, cfg) &
 +          V4L2_MBUS_PCLK_SAMPLE_FALLING)
 +              return reg_clear(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
 +      else
 +              return reg_set(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
 +}
 +
  static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
        .s_stream       = mt9t031_s_stream,
        .s_mbus_fmt     = mt9t031_s_fmt,
        .g_crop         = mt9t031_g_crop,
        .cropcap        = mt9t031_cropcap,
        .enum_mbus_fmt  = mt9t031_enum_fmt,
 +      .g_mbus_config  = mt9t031_g_mbus_config,
 +      .s_mbus_config  = mt9t031_s_mbus_config,
  };
  
  static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
@@@ -753,13 -833,18 +754,13 @@@ static int mt9t031_probe(struct i2c_cli
                         const struct i2c_device_id *did)
  {
        struct mt9t031 *mt9t031;
 -      struct soc_camera_device *icd = client->dev.platform_data;
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
        struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
        int ret;
  
 -      if (icd) {
 -              struct soc_camera_link *icl = to_soc_camera_link(icd);
 -              if (!icl) {
 -                      dev_err(&client->dev, "MT9T031 driver needs platform data\n");
 -                      return -EINVAL;
 -              }
 -
 -              icd->ops = &mt9t031_ops;
 +      if (!icl) {
 +              dev_err(&client->dev, "MT9T031 driver needs platform data\n");
 +              return -EINVAL;
        }
  
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
                return -ENOMEM;
  
        v4l2_i2c_subdev_init(&mt9t031->subdev, client, &mt9t031_subdev_ops);
 +      v4l2_ctrl_handler_init(&mt9t031->hdl, 5);
 +      v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops,
 +                      V4L2_CID_VFLIP, 0, 1, 1, 0);
 +      v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops,
 +                      V4L2_CID_HFLIP, 0, 1, 1, 0);
 +      v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops,
 +                      V4L2_CID_GAIN, 0, 127, 1, 64);
 +
 +      /*
 +       * Simulated autoexposure. If enabled, we calculate shutter width
 +       * ourselves in the driver based on vertical blanking and frame width
 +       */
 +      mt9t031->autoexposure = v4l2_ctrl_new_std_menu(&mt9t031->hdl,
 +                      &mt9t031_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
 +                      V4L2_EXPOSURE_AUTO);
 +      mt9t031->exposure = v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops,
 +                      V4L2_CID_EXPOSURE, 1, 255, 1, 255);
 +
 +      mt9t031->subdev.ctrl_handler = &mt9t031->hdl;
 +      if (mt9t031->hdl.error) {
 +              int err = mt9t031->hdl.error;
 +
 +              kfree(mt9t031);
 +              return err;
 +      }
 +      v4l2_ctrl_auto_cluster(2, &mt9t031->autoexposure,
 +                              V4L2_EXPOSURE_MANUAL, true);
  
        mt9t031->y_skip_top     = 0;
        mt9t031->rect.left      = MT9T031_COLUMN_SKIP;
        mt9t031->rect.width     = MT9T031_MAX_WIDTH;
        mt9t031->rect.height    = MT9T031_MAX_HEIGHT;
  
 -      /*
 -       * Simulated autoexposure. If enabled, we calculate shutter width
 -       * ourselves in the driver based on vertical blanking and frame width
 -       */
 -      mt9t031->autoexposure = 1;
 -
        mt9t031->xskip = 1;
        mt9t031->yskip = 1;
  
        mt9t031_disable(client);
  
        if (ret) {
 -              if (icd)
 -                      icd->ops = NULL;
 +              v4l2_ctrl_handler_free(&mt9t031->hdl);
                kfree(mt9t031);
        }
  
  static int mt9t031_remove(struct i2c_client *client)
  {
        struct mt9t031 *mt9t031 = to_mt9t031(client);
 -      struct soc_camera_device *icd = client->dev.platform_data;
  
 -      if (icd)
 -              icd->ops = NULL;
 +      v4l2_device_unregister_subdev(&mt9t031->subdev);
 +      v4l2_ctrl_handler_free(&mt9t031->hdl);
        kfree(mt9t031);
  
        return 0;
  #include <linux/i2c.h>
  #include <linux/delay.h>
  #include <linux/log2.h>
+ #include <linux/module.h>
  
 +#include <media/soc_camera.h>
 +#include <media/soc_mediabus.h>
  #include <media/v4l2-subdev.h>
  #include <media/v4l2-chip-ident.h>
 -#include <media/soc_camera.h>
 +#include <media/v4l2-ctrls.h>
  
  /*
   * mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c
@@@ -102,17 -101,6 +103,17 @@@ static const struct mt9v022_datafmt mt9
  
  struct mt9v022 {
        struct v4l2_subdev subdev;
 +      struct v4l2_ctrl_handler hdl;
 +      struct {
 +              /* exposure/auto-exposure cluster */
 +              struct v4l2_ctrl *autoexposure;
 +              struct v4l2_ctrl *exposure;
 +      };
 +      struct {
 +              /* gain/auto-gain cluster */
 +              struct v4l2_ctrl *autogain;
 +              struct v4l2_ctrl *gain;
 +      };
        struct v4l2_rect rect;  /* Sensor window */
        const struct mt9v022_datafmt *fmt;
        const struct mt9v022_datafmt *fmts;
@@@ -191,8 -179,6 +192,8 @@@ static int mt9v022_init(struct i2c_clie
                ret = reg_clear(client, MT9V022_BLACK_LEVEL_CALIB_CTRL, 1);
        if (!ret)
                ret = reg_write(client, MT9V022_DIGITAL_TEST_PATTERN, 0);
 +      if (!ret)
 +              return v4l2_ctrl_handler_setup(&mt9v022->hdl);
  
        return ret;
  }
@@@ -214,6 -200,78 +215,6 @@@ static int mt9v022_s_stream(struct v4l2
        return 0;
  }
  
 -static int mt9v022_set_bus_param(struct soc_camera_device *icd,
 -                               unsigned long flags)
 -{
 -      struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
 -      struct mt9v022 *mt9v022 = to_mt9v022(client);
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -      unsigned int width_flag = flags & SOCAM_DATAWIDTH_MASK;
 -      int ret;
 -      u16 pixclk = 0;
 -
 -      /* Only one width bit may be set */
 -      if (!is_power_of_2(width_flag))
 -              return -EINVAL;
 -
 -      if (icl->set_bus_param) {
 -              ret = icl->set_bus_param(icl, width_flag);
 -              if (ret)
 -                      return ret;
 -      } else {
 -              /*
 -               * Without board specific bus width settings we only support the
 -               * sensors native bus width
 -               */
 -              if (width_flag != SOCAM_DATAWIDTH_10)
 -                      return -EINVAL;
 -      }
 -
 -      flags = soc_camera_apply_sensor_flags(icl, flags);
 -
 -      if (flags & SOCAM_PCLK_SAMPLE_FALLING)
 -              pixclk |= 0x10;
 -
 -      if (!(flags & SOCAM_HSYNC_ACTIVE_HIGH))
 -              pixclk |= 0x1;
 -
 -      if (!(flags & SOCAM_VSYNC_ACTIVE_HIGH))
 -              pixclk |= 0x2;
 -
 -      ret = reg_write(client, MT9V022_PIXCLK_FV_LV, pixclk);
 -      if (ret < 0)
 -              return ret;
 -
 -      if (!(flags & SOCAM_MASTER))
 -              mt9v022->chip_control &= ~0x8;
 -
 -      ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
 -      if (ret < 0)
 -              return ret;
 -
 -      dev_dbg(&client->dev, "Calculated pixclk 0x%x, chip control 0x%x\n",
 -              pixclk, mt9v022->chip_control);
 -
 -      return 0;
 -}
 -
 -static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd)
 -{
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -      unsigned int flags = SOCAM_MASTER | SOCAM_SLAVE |
 -              SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING |
 -              SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW |
 -              SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW |
 -              SOCAM_DATA_ACTIVE_HIGH;
 -
 -      if (icl->query_bus_param)
 -              flags |= icl->query_bus_param(icl) & SOCAM_DATAWIDTH_MASK;
 -      else
 -              flags |= SOCAM_DATAWIDTH_10;
 -
 -      return soc_camera_apply_sensor_flags(icl, flags);
 -}
 -
  static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
  {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
@@@ -332,7 -390,7 +333,7 @@@ static int mt9v022_s_fmt(struct v4l2_su
  
        /*
         * The caller provides a supported format, as verified per call to
 -       * icd->try_fmt(), datawidth is from our supported format list
 +       * .try_mbus_fmt(), datawidth is from our supported format list
         */
        switch (mf->code) {
        case V4L2_MBUS_FMT_Y8_1X8:
@@@ -445,131 -503,236 +446,131 @@@ static int mt9v022_s_register(struct v4
  }
  #endif
  
 -static const struct v4l2_queryctrl mt9v022_controls[] = {
 -      {
 -              .id             = V4L2_CID_VFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Vertically",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      }, {
 -              .id             = V4L2_CID_HFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Horizontally",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      }, {
 -              .id             = V4L2_CID_GAIN,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Analog Gain",
 -              .minimum        = 64,
 -              .maximum        = 127,
 -              .step           = 1,
 -              .default_value  = 64,
 -              .flags          = V4L2_CTRL_FLAG_SLIDER,
 -      }, {
 -              .id             = V4L2_CID_EXPOSURE,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Exposure",
 -              .minimum        = 1,
 -              .maximum        = 255,
 -              .step           = 1,
 -              .default_value  = 255,
 -              .flags          = V4L2_CTRL_FLAG_SLIDER,
 -      }, {
 -              .id             = V4L2_CID_AUTOGAIN,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Automatic Gain",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 1,
 -      }, {
 -              .id             = V4L2_CID_EXPOSURE_AUTO,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Automatic Exposure",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 1,
 -      }
 -};
 -
 -static struct soc_camera_ops mt9v022_ops = {
 -      .set_bus_param          = mt9v022_set_bus_param,
 -      .query_bus_param        = mt9v022_query_bus_param,
 -      .controls               = mt9v022_controls,
 -      .num_controls           = ARRAY_SIZE(mt9v022_controls),
 -};
 -
 -static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 +static int mt9v022_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
  {
 +      struct mt9v022 *mt9v022 = container_of(ctrl->handler,
 +                                             struct mt9v022, hdl);
 +      struct v4l2_subdev *sd = &mt9v022->subdev;
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      const struct v4l2_queryctrl *qctrl;
 +      struct v4l2_ctrl *gain = mt9v022->gain;
 +      struct v4l2_ctrl *exp = mt9v022->exposure;
        unsigned long range;
        int data;
  
 -      qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id);
 -
        switch (ctrl->id) {
 -      case V4L2_CID_VFLIP:
 -              data = reg_read(client, MT9V022_READ_MODE);
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !!(data & 0x10);
 -              break;
 -      case V4L2_CID_HFLIP:
 -              data = reg_read(client, MT9V022_READ_MODE);
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !!(data & 0x20);
 -              break;
 -      case V4L2_CID_EXPOSURE_AUTO:
 -              data = reg_read(client, MT9V022_AEC_AGC_ENABLE);
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !!(data & 0x1);
 -              break;
        case V4L2_CID_AUTOGAIN:
 -              data = reg_read(client, MT9V022_AEC_AGC_ENABLE);
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !!(data & 0x2);
 -              break;
 -      case V4L2_CID_GAIN:
                data = reg_read(client, MT9V022_ANALOG_GAIN);
                if (data < 0)
                        return -EIO;
  
 -              range = qctrl->maximum - qctrl->minimum;
 -              ctrl->value = ((data - 16) * range + 24) / 48 + qctrl->minimum;
 -
 -              break;
 -      case V4L2_CID_EXPOSURE:
 +              range = gain->maximum - gain->minimum;
 +              gain->val = ((data - 16) * range + 24) / 48 + gain->minimum;
 +              return 0;
 +      case V4L2_CID_EXPOSURE_AUTO:
                data = reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH);
                if (data < 0)
                        return -EIO;
  
 -              range = qctrl->maximum - qctrl->minimum;
 -              ctrl->value = ((data - 1) * range + 239) / 479 + qctrl->minimum;
 -
 -              break;
 +              range = exp->maximum - exp->minimum;
 +              exp->val = ((data - 1) * range + 239) / 479 + exp->minimum;
 +              return 0;
        }
 -      return 0;
 +      return -EINVAL;
  }
  
 -static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 +static int mt9v022_s_ctrl(struct v4l2_ctrl *ctrl)
  {
 -      int data;
 +      struct mt9v022 *mt9v022 = container_of(ctrl->handler,
 +                                             struct mt9v022, hdl);
 +      struct v4l2_subdev *sd = &mt9v022->subdev;
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      const struct v4l2_queryctrl *qctrl;
 -
 -      qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id);
 -      if (!qctrl)
 -              return -EINVAL;
 +      int data;
  
        switch (ctrl->id) {
        case V4L2_CID_VFLIP:
 -              if (ctrl->value)
 +              if (ctrl->val)
                        data = reg_set(client, MT9V022_READ_MODE, 0x10);
                else
                        data = reg_clear(client, MT9V022_READ_MODE, 0x10);
                if (data < 0)
                        return -EIO;
 -              break;
 +              return 0;
        case V4L2_CID_HFLIP:
 -              if (ctrl->value)
 +              if (ctrl->val)
                        data = reg_set(client, MT9V022_READ_MODE, 0x20);
                else
                        data = reg_clear(client, MT9V022_READ_MODE, 0x20);
                if (data < 0)
                        return -EIO;
 -              break;
 -      case V4L2_CID_GAIN:
 -              /* mt9v022 has minimum == default */
 -              if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
 -                      return -EINVAL;
 -              else {
 -                      unsigned long range = qctrl->maximum - qctrl->minimum;
 +              return 0;
 +      case V4L2_CID_AUTOGAIN:
 +              if (ctrl->val) {
 +                      if (reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0)
 +                              return -EIO;
 +              } else {
 +                      struct v4l2_ctrl *gain = mt9v022->gain;
 +                      /* mt9v022 has minimum == default */
 +                      unsigned long range = gain->maximum - gain->minimum;
                        /* Valid values 16 to 64, 32 to 64 must be even. */
 -                      unsigned long gain = ((ctrl->value - qctrl->minimum) *
 +                      unsigned long gain_val = ((gain->val - gain->minimum) *
                                              48 + range / 2) / range + 16;
 -                      if (gain >= 32)
 -                              gain &= ~1;
 +
 +                      if (gain_val >= 32)
 +                              gain_val &= ~1;
 +
                        /*
                         * The user wants to set gain manually, hope, she
                         * knows, what she's doing... Switch AGC off.
                         */
 -
                        if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0)
                                return -EIO;
  
                        dev_dbg(&client->dev, "Setting gain from %d to %lu\n",
 -                              reg_read(client, MT9V022_ANALOG_GAIN), gain);
 -                      if (reg_write(client, MT9V022_ANALOG_GAIN, gain) < 0)
 +                              reg_read(client, MT9V022_ANALOG_GAIN), gain_val);
 +                      if (reg_write(client, MT9V022_ANALOG_GAIN, gain_val) < 0)
                                return -EIO;
                }
 -              break;
 -      case V4L2_CID_EXPOSURE:
 -              /* mt9v022 has maximum == default */
 -              if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
 -                      return -EINVAL;
 -              else {
 -                      unsigned long range = qctrl->maximum - qctrl->minimum;
 -                      unsigned long shutter = ((ctrl->value - qctrl->minimum) *
 -                                               479 + range / 2) / range + 1;
 +              return 0;
 +      case V4L2_CID_EXPOSURE_AUTO:
 +              if (ctrl->val == V4L2_EXPOSURE_AUTO) {
 +                      data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x1);
 +              } else {
 +                      struct v4l2_ctrl *exp = mt9v022->exposure;
 +                      unsigned long range = exp->maximum - exp->minimum;
 +                      unsigned long shutter = ((exp->val - exp->minimum) *
 +                                      479 + range / 2) / range + 1;
 +
                        /*
                         * The user wants to set shutter width manually, hope,
                         * she knows, what she's doing... Switch AEC off.
                         */
 -
 -                      if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1) < 0)
 +                      data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1);
 +                      if (data < 0)
                                return -EIO;
 -
                        dev_dbg(&client->dev, "Shutter width from %d to %lu\n",
 -                              reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH),
 -                              shutter);
 +                                      reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH),
 +                                      shutter);
                        if (reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH,
 -                                    shutter) < 0)
 +                                              shutter) < 0)
                                return -EIO;
                }
 -              break;
 -      case V4L2_CID_AUTOGAIN:
 -              if (ctrl->value)
 -                      data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x2);
 -              else
 -                      data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2);
 -              if (data < 0)
 -                      return -EIO;
 -              break;
 -      case V4L2_CID_EXPOSURE_AUTO:
 -              if (ctrl->value)
 -                      data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x1);
 -              else
 -                      data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1);
 -              if (data < 0)
 -                      return -EIO;
 -              break;
 +              return 0;
        }
 -      return 0;
 +      return -EINVAL;
  }
  
  /*
   * Interface active, can use i2c. If it fails, it can indeed mean, that
   * this wasn't our capture interface, so, we wait for the right one
   */
 -static int mt9v022_video_probe(struct soc_camera_device *icd,
 -                             struct i2c_client *client)
 +static int mt9v022_video_probe(struct i2c_client *client)
  {
        struct mt9v022 *mt9v022 = to_mt9v022(client);
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
        s32 data;
        int ret;
        unsigned long flags;
  
 -      /* We must have a parent by now. And it cannot be a wrong one. */
 -      BUG_ON(!icd->parent ||
 -             to_soc_camera_host(icd->parent)->nr != icd->iface);
 -
        /* Read out the chip version register */
        data = reg_read(client, MT9V022_CHIP_VERSION);
  
@@@ -643,6 -806,16 +644,6 @@@ ei2c
        return ret;
  }
  
 -static void mt9v022_video_remove(struct soc_camera_device *icd)
 -{
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -
 -      dev_dbg(icd->pdev, "Video removed: %p, %p\n",
 -              icd->parent, icd->vdev);
 -      if (icl->free_bus)
 -              icl->free_bus(icl);
 -}
 -
  static int mt9v022_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
  {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        return 0;
  }
  
 +static const struct v4l2_ctrl_ops mt9v022_ctrl_ops = {
 +      .g_volatile_ctrl = mt9v022_g_volatile_ctrl,
 +      .s_ctrl = mt9v022_s_ctrl,
 +};
 +
  static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
 -      .g_ctrl         = mt9v022_g_ctrl,
 -      .s_ctrl         = mt9v022_s_ctrl,
        .g_chip_ident   = mt9v022_g_chip_ident,
  #ifdef CONFIG_VIDEO_ADV_DEBUG
        .g_register     = mt9v022_g_register,
@@@ -679,72 -849,6 +680,72 @@@ static int mt9v022_enum_fmt(struct v4l2
        return 0;
  }
  
 +static int mt9v022_g_mbus_config(struct v4l2_subdev *sd,
 +                              struct v4l2_mbus_config *cfg)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +
 +      cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE |
 +              V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
 +              V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW |
 +              V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
 +              V4L2_MBUS_DATA_ACTIVE_HIGH;
 +      cfg->type = V4L2_MBUS_PARALLEL;
 +      cfg->flags = soc_camera_apply_board_flags(icl, cfg);
 +
 +      return 0;
 +}
 +
 +static int mt9v022_s_mbus_config(struct v4l2_subdev *sd,
 +                               const struct v4l2_mbus_config *cfg)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +      struct mt9v022 *mt9v022 = to_mt9v022(client);
 +      unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
 +      unsigned int bps = soc_mbus_get_fmtdesc(mt9v022->fmt->code)->bits_per_sample;
 +      int ret;
 +      u16 pixclk = 0;
 +
 +      if (icl->set_bus_param) {
 +              ret = icl->set_bus_param(icl, 1 << (bps - 1));
 +              if (ret)
 +                      return ret;
 +      } else if (bps != 10) {
 +              /*
 +               * Without board specific bus width settings we only support the
 +               * sensors native bus width
 +               */
 +              return -EINVAL;
 +      }
 +
 +      if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
 +              pixclk |= 0x10;
 +
 +      if (!(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH))
 +              pixclk |= 0x1;
 +
 +      if (!(flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH))
 +              pixclk |= 0x2;
 +
 +      ret = reg_write(client, MT9V022_PIXCLK_FV_LV, pixclk);
 +      if (ret < 0)
 +              return ret;
 +
 +      if (!(flags & V4L2_MBUS_MASTER))
 +              mt9v022->chip_control &= ~0x8;
 +
 +      ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
 +      if (ret < 0)
 +              return ret;
 +
 +      dev_dbg(&client->dev, "Calculated pixclk 0x%x, chip control 0x%x\n",
 +              pixclk, mt9v022->chip_control);
 +
 +      return 0;
 +}
 +
  static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
        .s_stream       = mt9v022_s_stream,
        .s_mbus_fmt     = mt9v022_s_fmt,
        .g_crop         = mt9v022_g_crop,
        .cropcap        = mt9v022_cropcap,
        .enum_mbus_fmt  = mt9v022_enum_fmt,
 +      .g_mbus_config  = mt9v022_g_mbus_config,
 +      .s_mbus_config  = mt9v022_s_mbus_config,
  };
  
  static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
@@@ -772,10 -874,17 +773,10 @@@ static int mt9v022_probe(struct i2c_cli
                         const struct i2c_device_id *did)
  {
        struct mt9v022 *mt9v022;
 -      struct soc_camera_device *icd = client->dev.platform_data;
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
        struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
 -      struct soc_camera_link *icl;
        int ret;
  
 -      if (!icd) {
 -              dev_err(&client->dev, "MT9V022: missing soc-camera data!\n");
 -              return -EINVAL;
 -      }
 -
 -      icl = to_soc_camera_link(icd);
        if (!icl) {
                dev_err(&client->dev, "MT9V022 driver needs platform data\n");
                return -EINVAL;
                return -ENOMEM;
  
        v4l2_i2c_subdev_init(&mt9v022->subdev, client, &mt9v022_subdev_ops);
 +      v4l2_ctrl_handler_init(&mt9v022->hdl, 6);
 +      v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
 +                      V4L2_CID_VFLIP, 0, 1, 1, 0);
 +      v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
 +                      V4L2_CID_HFLIP, 0, 1, 1, 0);
 +      mt9v022->autogain = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
 +                      V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
 +      mt9v022->gain = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
 +                      V4L2_CID_GAIN, 0, 127, 1, 64);
 +
 +      /*
 +       * Simulated autoexposure. If enabled, we calculate shutter width
 +       * ourselves in the driver based on vertical blanking and frame width
 +       */
 +      mt9v022->autoexposure = v4l2_ctrl_new_std_menu(&mt9v022->hdl,
 +                      &mt9v022_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
 +                      V4L2_EXPOSURE_AUTO);
 +      mt9v022->exposure = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
 +                      V4L2_CID_EXPOSURE, 1, 255, 1, 255);
 +
 +      mt9v022->subdev.ctrl_handler = &mt9v022->hdl;
 +      if (mt9v022->hdl.error) {
 +              int err = mt9v022->hdl.error;
 +
 +              kfree(mt9v022);
 +              return err;
 +      }
 +      v4l2_ctrl_auto_cluster(2, &mt9v022->autoexposure,
 +                              V4L2_EXPOSURE_MANUAL, true);
 +      v4l2_ctrl_auto_cluster(2, &mt9v022->autogain, 0, true);
  
        mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT;
  
 -      icd->ops                = &mt9v022_ops;
        /*
         * MT9V022 _really_ corrupts the first read out line.
         * TODO: verify on i.MX31
        mt9v022->rect.width     = MT9V022_MAX_WIDTH;
        mt9v022->rect.height    = MT9V022_MAX_HEIGHT;
  
 -      ret = mt9v022_video_probe(icd, client);
 +      ret = mt9v022_video_probe(client);
        if (ret) {
 -              icd->ops = NULL;
 +              v4l2_ctrl_handler_free(&mt9v022->hdl);
                kfree(mt9v022);
        }
  
  static int mt9v022_remove(struct i2c_client *client)
  {
        struct mt9v022 *mt9v022 = to_mt9v022(client);
 -      struct soc_camera_device *icd = client->dev.platform_data;
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
  
 -      icd->ops = NULL;
 -      mt9v022_video_remove(icd);
 +      v4l2_device_unregister_subdev(&mt9v022->subdev);
 +      if (icl->free_bus)
 +              icl->free_bus(icl);
 +      v4l2_ctrl_handler_free(&mt9v022->hdl);
        kfree(mt9v022);
  
        return 0;
      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #define DEBUG_VARIABLE debug
  
  #include <media/saa7146_vv.h>
  #include <media/tuner.h>
  #include <media/v4l2-common.h>
  #include <media/saa7115.h>
+ #include <linux/module.h>
  
  #include "mxb.h"
  #include "tea6415c.h"
@@@ -173,7 -172,7 +174,7 @@@ static int mxb_probe(struct saa7146_de
  
        mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL);
        if (mxb == NULL) {
 -              DEB_D(("not enough kernel memory.\n"));
 +              DEB_D("not enough kernel memory\n");
                return -ENOMEM;
        }
  
  
        saa7146_i2c_adapter_prepare(dev, &mxb->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
        if (i2c_add_adapter(&mxb->i2c_adapter) < 0) {
 -              DEB_S(("cannot register i2c-device. skipping.\n"));
 +              DEB_S("cannot register i2c-device. skipping.\n");
                kfree(mxb);
                return -EFAULT;
        }
        /* check if all devices are present */
        if (!mxb->tea6420_1 || !mxb->tea6420_2 || !mxb->tea6415c ||
            !mxb->tda9840 || !mxb->saa7111a || !mxb->tuner) {
 -              printk("mxb: did not find all i2c devices. aborting\n");
 +              pr_err("did not find all i2c devices. aborting\n");
                i2c_del_adapter(&mxb->i2c_adapter);
                kfree(mxb);
                return -ENODEV;
@@@ -348,11 -347,11 +349,11 @@@ static int mxb_init_done(struct saa7146
                        msg.buf = &mxb_saa7740_init[i].data[0];
                        err = i2c_transfer(&mxb->i2c_adapter, &msg, 1);
                        if (err != 1) {
 -                              DEB_D(("failed to initialize 'sound arena module'.\n"));
 +                              DEB_D("failed to initialize 'sound arena module'\n");
                                goto err;
                        }
                }
 -              INFO(("'sound arena module' detected.\n"));
 +              pr_info("'sound arena module' detected\n");
        }
  err:
        /* the rest for saa7146: you should definitely set some basic values
@@@ -392,7 -391,7 +393,7 @@@ static int vidioc_queryctrl(struct fil
        for (i = MAXCONTROLS - 1; i >= 0; i--) {
                if (mxb_controls[i].id == qc->id) {
                        *qc = mxb_controls[i];
 -                      DEB_D(("VIDIOC_QUERYCTRL %d.\n", qc->id));
 +                      DEB_D("VIDIOC_QUERYCTRL %d\n", qc->id);
                        return 0;
                }
        }
@@@ -415,11 -414,11 +416,11 @@@ static int vidioc_g_ctrl(struct file *f
  
        if (vc->id == V4L2_CID_AUDIO_MUTE) {
                vc->value = mxb->cur_mute;
 -              DEB_D(("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d.\n", vc->value));
 +              DEB_D("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d\n", vc->value);
                return 0;
        }
  
 -      DEB_EE(("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d.\n", vc->value));
 +      DEB_EE("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d\n", vc->value);
        return 0;
  }
  
@@@ -442,14 -441,14 +443,14 @@@ static int vidioc_s_ctrl(struct file *f
                /* switch the audio-source */
                tea6420_route_line(mxb, vc->value ? 6 :
                                video_audio_connect[mxb->cur_input]);
 -              DEB_EE(("VIDIOC_S_CTRL, V4L2_CID_AUDIO_MUTE: %d.\n", vc->value));
 +              DEB_EE("VIDIOC_S_CTRL, V4L2_CID_AUDIO_MUTE: %d\n", vc->value);
        }
        return 0;
  }
  
  static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
  {
 -      DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
 +      DEB_EE("VIDIOC_ENUMINPUT %d\n", i->index);
        if (i->index >= MXB_INPUTS)
                return -EINVAL;
        memcpy(i, &mxb_inputs[i->index], sizeof(struct v4l2_input));
@@@ -462,7 -461,7 +463,7 @@@ static int vidioc_g_input(struct file *
        struct mxb *mxb = (struct mxb *)dev->ext_priv;
        *i = mxb->cur_input;
  
 -      DEB_EE(("VIDIOC_G_INPUT %d.\n", *i));
 +      DEB_EE("VIDIOC_G_INPUT %d\n", *i);
        return 0;
  }
  
@@@ -473,7 -472,7 +474,7 @@@ static int vidioc_s_input(struct file *
        int err = 0;
        int i = 0;
  
 -      DEB_EE(("VIDIOC_S_INPUT %d.\n", input));
 +      DEB_EE("VIDIOC_S_INPUT %d\n", input);
  
        if (input >= MXB_INPUTS)
                return -EINVAL;
  
        /* switch video in saa7111a */
        if (saa7111a_call(mxb, video, s_routing, i, SAA7111_FMT_CCIR, 0))
 -              printk(KERN_ERR "VIDIOC_S_INPUT: could not address saa7111a.\n");
 +              pr_err("VIDIOC_S_INPUT: could not address saa7111a\n");
  
        /* switch the audio-source only if necessary */
        if (0 == mxb->cur_mute)
@@@ -531,12 -530,11 +532,12 @@@ static int vidioc_g_tuner(struct file *
        struct mxb *mxb = (struct mxb *)dev->ext_priv;
  
        if (t->index) {
 -              DEB_D(("VIDIOC_G_TUNER: channel %d does not have a tuner attached.\n", t->index));
 +              DEB_D("VIDIOC_G_TUNER: channel %d does not have a tuner attached\n",
 +                    t->index);
                return -EINVAL;
        }
  
 -      DEB_EE(("VIDIOC_G_TUNER: %d\n", t->index));
 +      DEB_EE("VIDIOC_G_TUNER: %d\n", t->index);
  
        memset(t, 0, sizeof(*t));
        strlcpy(t->name, "TV Tuner", sizeof(t->name));
@@@ -553,8 -551,7 +554,8 @@@ static int vidioc_s_tuner(struct file *
        struct mxb *mxb = (struct mxb *)dev->ext_priv;
  
        if (t->index) {
 -              DEB_D(("VIDIOC_S_TUNER: channel %d does not have a tuner attached.\n", t->index));
 +              DEB_D("VIDIOC_S_TUNER: channel %d does not have a tuner attached\n",
 +                    t->index);
                return -EINVAL;
        }
  
@@@ -568,14 -565,14 +569,14 @@@ static int vidioc_g_frequency(struct fi
        struct mxb *mxb = (struct mxb *)dev->ext_priv;
  
        if (mxb->cur_input) {
 -              DEB_D(("VIDIOC_G_FREQ: channel %d does not have a tuner!\n",
 -                                      mxb->cur_input));
 +              DEB_D("VIDIOC_G_FREQ: channel %d does not have a tuner!\n",
 +                    mxb->cur_input);
                return -EINVAL;
        }
  
        *f = mxb->cur_freq;
  
 -      DEB_EE(("VIDIOC_G_FREQ: freq:0x%08x.\n", mxb->cur_freq.frequency));
 +      DEB_EE("VIDIOC_G_FREQ: freq:0x%08x\n", mxb->cur_freq.frequency);
        return 0;
  }
  
@@@ -592,13 -589,12 +593,13 @@@ static int vidioc_s_frequency(struct fi
                return -EINVAL;
  
        if (mxb->cur_input) {
 -              DEB_D(("VIDIOC_S_FREQ: channel %d does not have a tuner!\n", mxb->cur_input));
 +              DEB_D("VIDIOC_S_FREQ: channel %d does not have a tuner!\n",
 +                    mxb->cur_input);
                return -EINVAL;
        }
  
        mxb->cur_freq = *f;
 -      DEB_EE(("VIDIOC_S_FREQUENCY: freq:0x%08x.\n", mxb->cur_freq.frequency));
 +      DEB_EE("VIDIOC_S_FREQUENCY: freq:0x%08x\n", mxb->cur_freq.frequency);
  
        /* tune in desired frequency */
        tuner_call(mxb, tuner, s_frequency, &mxb->cur_freq);
@@@ -617,18 -613,18 +618,18 @@@ static int vidioc_g_audio(struct file *
        struct mxb *mxb = (struct mxb *)dev->ext_priv;
  
        if (a->index > MXB_INPUTS) {
 -              DEB_D(("VIDIOC_G_AUDIO %d out of range.\n", a->index));
 +              DEB_D("VIDIOC_G_AUDIO %d out of range\n", a->index);
                return -EINVAL;
        }
  
 -      DEB_EE(("VIDIOC_G_AUDIO %d.\n", a->index));
 +      DEB_EE("VIDIOC_G_AUDIO %d\n", a->index);
        memcpy(a, &mxb_audios[video_audio_connect[mxb->cur_input]], sizeof(struct v4l2_audio));
        return 0;
  }
  
  static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
  {
 -      DEB_D(("VIDIOC_S_AUDIO %d.\n", a->index));
 +      DEB_D("VIDIOC_S_AUDIO %d\n", a->index);
        return 0;
  }
  
@@@ -660,11 -656,11 +661,11 @@@ static long vidioc_default(struct file 
                int i = *(int *)arg;
  
                if (i < 0 || i >= MXB_AUDIOS) {
 -                      DEB_D(("illegal argument to MXB_S_AUDIO_CD: i:%d.\n", i));
 +                      DEB_D("invalid argument to MXB_S_AUDIO_CD: i:%d\n", i);
                        return -EINVAL;
                }
  
 -              DEB_EE(("MXB_S_AUDIO_CD: i:%d.\n", i));
 +              DEB_EE("MXB_S_AUDIO_CD: i:%d\n", i);
  
                tea6420_route_cd(mxb, i);
                return 0;
                int i = *(int *)arg;
  
                if (i < 0 || i >= MXB_AUDIOS) {
 -                      DEB_D(("illegal argument to MXB_S_AUDIO_LINE: i:%d.\n", i));
 +                      DEB_D("invalid argument to MXB_S_AUDIO_LINE: i:%d\n",
 +                            i);
                        return -EINVAL;
                }
  
 -              DEB_EE(("MXB_S_AUDIO_LINE: i:%d.\n", i));
 +              DEB_EE("MXB_S_AUDIO_LINE: i:%d\n", i);
                tea6420_route_line(mxb, i);
                return 0;
        }
        default:
  /*
 -              DEB2(printk("does not handle this ioctl.\n"));
 +              DEB2(pr_err("does not handle this ioctl\n"));
  */
                return -ENOIOCTLCMD;
        }
@@@ -699,7 -694,7 +700,7 @@@ static int mxb_attach(struct saa7146_de
  {
        struct mxb *mxb;
  
 -      DEB_EE(("dev:%p\n", dev));
 +      DEB_EE("dev:%p\n", dev);
  
        saa7146_vv_init(dev, &vv_data);
        if (mxb_probe(dev)) {
  #endif
        vv_data.ops.vidioc_default = vidioc_default;
        if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
 -              ERR(("cannot register capture v4l2 device. skipping.\n"));
 +              ERR("cannot register capture v4l2 device. skipping.\n");
                saa7146_vv_release(dev);
                return -1;
        }
        /* initialization stuff (vbi) (only for revision > 0 and for extensions which want it)*/
        if (MXB_BOARD_CAN_DO_VBI(dev)) {
                if (saa7146_register_device(&mxb->vbi_dev, dev, "mxb", VFL_TYPE_VBI)) {
 -                      ERR(("cannot register vbi v4l2 device. skipping.\n"));
 +                      ERR("cannot register vbi v4l2 device. skipping.\n");
                }
        }
  
 -      printk("mxb: found Multimedia eXtension Board #%d.\n", mxb_num);
 +      pr_info("found Multimedia eXtension Board #%d\n", mxb_num);
  
        mxb_num++;
        mxb_init_done(dev);
@@@ -749,7 -744,7 +750,7 @@@ static int mxb_detach(struct saa7146_de
  {
        struct mxb *mxb = (struct mxb *)dev->ext_priv;
  
 -      DEB_EE(("dev:%p\n", dev));
 +      DEB_EE("dev:%p\n", dev);
  
        saa7146_unregister_device(&mxb->video_dev,dev);
        if (MXB_BOARD_CAN_DO_VBI(dev))
@@@ -771,7 -766,7 +772,7 @@@ static int std_callback(struct saa7146_
        if (V4L2_STD_PAL_I == standard->id) {
                v4l2_std_id std = V4L2_STD_PAL_I;
  
 -              DEB_D(("VIDIOC_S_STD: setting mxb for PAL_I.\n"));
 +              DEB_D("VIDIOC_S_STD: setting mxb for PAL_I\n");
                /* set the 7146 gpio register -- I don't know what this does exactly */
                saa7146_write(dev, GPIO_CTRL, 0x00404050);
                /* unset the 7111 gpio register -- I don't know what this does exactly */
        } else {
                v4l2_std_id std = V4L2_STD_PAL_BG;
  
 -              DEB_D(("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM.\n"));
 +              DEB_D("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM\n");
                /* set the 7146 gpio register -- I don't know what this does exactly */
                saa7146_write(dev, GPIO_CTRL, 0x00404050);
                /* set the 7111 gpio register -- I don't know what this does exactly */
@@@ -858,7 -853,7 +859,7 @@@ static struct saa7146_extension extensi
  static int __init mxb_init_module(void)
  {
        if (saa7146_register_extension(&extension)) {
 -              DEB_S(("failed to register extension.\n"));
 +              DEB_S("failed to register extension\n");
                return -ENODEV;
        }
  
@@@ -1,7 -1,7 +1,7 @@@
  /*
   * Driver for SiliconFile NOON010PC30 CIF (1/11") Image Sensor with ISP
   *
 - * Copyright (C) 2010 Samsung Electronics
 + * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
   * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
   *
   * Initial register configuration based on a driver authored by
@@@ -10,7 -10,7 +10,7 @@@
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later vergsion.
 + * (at your option) any later version.
   */
  
  #include <linux/delay.h>
@@@ -21,6 -21,7 +21,7 @@@
  #include <media/noon010pc30.h>
  #include <media/v4l2-chip-ident.h>
  #include <linux/videodev2.h>
+ #include <linux/module.h>
  #include <media/v4l2-ctrls.h>
  #include <media/v4l2-device.h>
  #include <media/v4l2-mediabus.h>
@@@ -131,23 -132,17 +132,23 @@@ static const char * const noon010_suppl
  
  struct noon010_info {
        struct v4l2_subdev sd;
 +      struct media_pad pad;
        struct v4l2_ctrl_handler hdl;
 -      const struct noon010pc30_platform_data *pdata;
 +      struct regulator_bulk_data supply[NOON010_NUM_SUPPLIES];
 +      u32 gpio_nreset;
 +      u32 gpio_nstby;
 +
 +      /* Protects the struct members below */
 +      struct mutex lock;
 +
        const struct noon010_format *curr_fmt;
        const struct noon010_frmsize *curr_win;
 +      unsigned int apply_new_cfg:1;
 +      unsigned int streaming:1;
        unsigned int hflip:1;
        unsigned int vflip:1;
        unsigned int power:1;
        u8 i2c_reg_page;
 -      struct regulator_bulk_data supply[NOON010_NUM_SUPPLIES];
 -      u32 gpio_nreset;
 -      u32 gpio_nstby;
  };
  
  struct i2c_regval {
@@@ -298,10 -293,8 +299,10 @@@ static int noon010_power_ctrl(struct v4
        u8 reg = sleep ? 0xF1 : 0xF0;
        int ret = 0;
  
 -      if (reset)
 +      if (reset) {
                ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02);
 +              udelay(20);
 +      }
        if (!ret) {
                ret = cam_i2c_write(sd, POWER_CTRL_REG, reg);
                if (reset && !ret)
@@@ -321,7 -314,6 +322,7 @@@ static int noon010_enable_autowhitebala
        return ret;
  }
  
 +/* Called with struct noon010_info.lock mutex held */
  static int noon010_set_flip(struct v4l2_subdev *sd, int hflip, int vflip)
  {
        struct noon010_info *info = to_noon010(sd);
  static int noon010_set_params(struct v4l2_subdev *sd)
  {
        struct noon010_info *info = to_noon010(sd);
 -      int ret;
  
 -      if (!info->curr_win)
 -              return -EINVAL;
 -
 -      ret = cam_i2c_write(sd, VDO_CTL_REG(0), info->curr_win->vid_ctl1);
 -
 -      if (!ret && info->curr_fmt)
 -              ret = cam_i2c_write(sd, ISP_CTL_REG(0),
 -                              info->curr_fmt->ispctl1_reg);
 -      return ret;
 +      int ret = cam_i2c_write(sd, VDO_CTL_REG(0),
 +                              info->curr_win->vid_ctl1);
 +      if (ret)
 +              return ret;
 +      return cam_i2c_write(sd, ISP_CTL_REG(0),
 +                           info->curr_fmt->ispctl1_reg);
  }
  
  /* Find nearest matching image pixel size. */
 -static int noon010_try_frame_size(struct v4l2_mbus_framefmt *mf)
 +static int noon010_try_frame_size(struct v4l2_mbus_framefmt *mf,
 +                                const struct noon010_frmsize **size)
  {
        unsigned int min_err = ~0;
        int i = ARRAY_SIZE(noon010_sizes);
        if (match) {
                mf->width  = match->width;
                mf->height = match->height;
 +              if (size)
 +                      *size = match;
                return 0;
        }
        return -EINVAL;
  }
  
 +/* Called with info.lock mutex held */
  static int power_enable(struct noon010_info *info)
  {
        int ret;
        return 0;
  }
  
 +/* Called with info.lock mutex held */
  static int power_disable(struct noon010_info *info)
  {
        int ret;
  static int noon010_s_ctrl(struct v4l2_ctrl *ctrl)
  {
        struct v4l2_subdev *sd = to_sd(ctrl);
 +      struct noon010_info *info = to_noon010(sd);
 +      int ret = 0;
  
        v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n",
                 __func__, ctrl->id, ctrl->val);
  
 +      mutex_lock(&info->lock);
 +      /*
 +       * If the device is not powered up by the host driver do
 +       * not apply any controls to H/W at this time. Instead
 +       * the controls will be restored right after power-up.
 +       */
 +      if (!info->power)
 +              goto unlock;
 +
        switch (ctrl->id) {
        case V4L2_CID_AUTO_WHITE_BALANCE:
 -              return noon010_enable_autowhitebalance(sd, ctrl->val);
 +              ret = noon010_enable_autowhitebalance(sd, ctrl->val);
 +              break;
        case V4L2_CID_BLUE_BALANCE:
 -              return cam_i2c_write(sd, MWB_BGAIN_REG, ctrl->val);
 +              ret = cam_i2c_write(sd, MWB_BGAIN_REG, ctrl->val);
 +              break;
        case V4L2_CID_RED_BALANCE:
 -              return cam_i2c_write(sd, MWB_RGAIN_REG, ctrl->val);
 +              ret =  cam_i2c_write(sd, MWB_RGAIN_REG, ctrl->val);
 +              break;
        default:
 -              return -EINVAL;
 +              ret = -EINVAL;
        }
 +unlock:
 +      mutex_unlock(&info->lock);
 +      return ret;
  }
  
 -static int noon010_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
 -                          enum v4l2_mbus_pixelcode *code)
 +static int noon010_enum_mbus_code(struct v4l2_subdev *sd,
 +                                struct v4l2_subdev_fh *fh,
 +                                struct v4l2_subdev_mbus_code_enum *code)
  {
 -      if (!code || index >= ARRAY_SIZE(noon010_formats))
 +      if (code->index >= ARRAY_SIZE(noon010_formats))
                return -EINVAL;
  
 -      *code = noon010_formats[index].code;
 +      code->code = noon010_formats[code->index].code;
        return 0;
  }
  
 -static int noon010_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
 +static int noon010_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
 +                         struct v4l2_subdev_format *fmt)
  {
        struct noon010_info *info = to_noon010(sd);
 -      int ret;
 -
 -      if (!mf)
 -              return -EINVAL;
 +      struct v4l2_mbus_framefmt *mf;
  
 -      if (!info->curr_win || !info->curr_fmt) {
 -              ret = noon010_set_params(sd);
 -              if (ret)
 -                      return ret;
 +      if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
 +              if (fh) {
 +                      mf = v4l2_subdev_get_try_format(fh, 0);
 +                      fmt->format = *mf;
 +              }
 +              return 0;
        }
 +      mf = &fmt->format;
  
 -      mf->width       = info->curr_win->width;
 -      mf->height      = info->curr_win->height;
 -      mf->code        = info->curr_fmt->code;
 -      mf->colorspace  = info->curr_fmt->colorspace;
 -      mf->field       = V4L2_FIELD_NONE;
 +      mutex_lock(&info->lock);
 +      mf->width = info->curr_win->width;
 +      mf->height = info->curr_win->height;
 +      mf->code = info->curr_fmt->code;
 +      mf->colorspace = info->curr_fmt->colorspace;
 +      mf->field = V4L2_FIELD_NONE;
  
 +      mutex_unlock(&info->lock);
        return 0;
  }
  
  /* Return nearest media bus frame format. */
 -static const struct noon010_format *try_fmt(struct v4l2_subdev *sd,
 +static const struct noon010_format *noon010_try_fmt(struct v4l2_subdev *sd,
                                            struct v4l2_mbus_framefmt *mf)
  {
        int i = ARRAY_SIZE(noon010_formats);
  
 -      noon010_try_frame_size(mf);
 -
 -      while (i--)
 +      while (--i)
                if (mf->code == noon010_formats[i].code)
                        break;
 -
        mf->code = noon010_formats[i].code;
  
        return &noon010_formats[i];
  }
  
 -static int noon010_try_fmt(struct v4l2_subdev *sd,
 -                         struct v4l2_mbus_framefmt *mf)
 -{
 -      if (!sd || !mf)
 -              return -EINVAL;
 -
 -      try_fmt(sd, mf);
 -      return 0;
 -}
 -
 -static int noon010_s_fmt(struct v4l2_subdev *sd,
 -                       struct v4l2_mbus_framefmt *mf)
 +static int noon010_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
 +                         struct v4l2_subdev_format *fmt)
  {
        struct noon010_info *info = to_noon010(sd);
 +      const struct noon010_frmsize *size = NULL;
 +      const struct noon010_format *nf;
 +      struct v4l2_mbus_framefmt *mf;
 +      int ret = 0;
  
 -      if (!sd || !mf)
 -              return -EINVAL;
 -
 -      info->curr_fmt = try_fmt(sd, mf);
 +      nf = noon010_try_fmt(sd, &fmt->format);
 +      noon010_try_frame_size(&fmt->format, &size);
 +      fmt->format.colorspace = V4L2_COLORSPACE_JPEG;
  
 -      return noon010_set_params(sd);
 +      if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
 +              if (fh) {
 +                      mf = v4l2_subdev_get_try_format(fh, 0);
 +                      *mf = fmt->format;
 +              }
 +              return 0;
 +      }
 +      mutex_lock(&info->lock);
 +      if (!info->streaming) {
 +              info->apply_new_cfg = 1;
 +              info->curr_fmt = nf;
 +              info->curr_win = size;
 +      } else {
 +              ret = -EBUSY;
 +      }
 +      mutex_unlock(&info->lock);
 +      return ret;
  }
  
 +/* Called with struct noon010_info.lock mutex held */
  static int noon010_base_config(struct v4l2_subdev *sd)
  {
 -      struct noon010_info *info = to_noon010(sd);
 -      int ret;
 -
 -      ret = noon010_bulk_write_reg(sd, noon010_base_regs);
 -      if (!ret) {
 -              info->curr_fmt = &noon010_formats[0];
 -              info->curr_win = &noon010_sizes[0];
 +      int ret = noon010_bulk_write_reg(sd, noon010_base_regs);
 +      if (!ret)
                ret = noon010_set_params(sd);
 -      }
        if (!ret)
                ret = noon010_set_flip(sd, 1, 0);
 -      if (!ret)
 -              ret = noon010_power_ctrl(sd, false, false);
  
 -      /* sync the handler and the registers state */
 -      v4l2_ctrl_handler_setup(&to_noon010(sd)->hdl);
        return ret;
  }
  
  static int noon010_s_power(struct v4l2_subdev *sd, int on)
  {
        struct noon010_info *info = to_noon010(sd);
 -      const struct noon010pc30_platform_data *pdata = info->pdata;
 -      int ret = 0;
 -
 -      if (WARN(pdata == NULL, "No platform data!\n"))
 -              return -ENOMEM;
 +      int ret;
  
 +      mutex_lock(&info->lock);
        if (on) {
                ret = power_enable(info);
 -              if (ret)
 -                      return ret;
 -              ret = noon010_base_config(sd);
 +              if (!ret)
 +                      ret = noon010_base_config(sd);
        } else {
                noon010_power_ctrl(sd, false, true);
                ret = power_disable(info);
 -              info->curr_win = NULL;
 -              info->curr_fmt = NULL;
        }
 +      mutex_unlock(&info->lock);
 +
 +      /* Restore the controls state */
 +      if (!ret && on)
 +              ret = v4l2_ctrl_handler_setup(&info->hdl);
  
        return ret;
  }
  
 -static int noon010_g_chip_ident(struct v4l2_subdev *sd,
 -                              struct v4l2_dbg_chip_ident *chip)
 +static int noon010_s_stream(struct v4l2_subdev *sd, int on)
  {
 -      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct noon010_info *info = to_noon010(sd);
 +      int ret = 0;
  
 -      return v4l2_chip_ident_i2c_client(client, chip,
 -                                        V4L2_IDENT_NOON010PC30, 0);
 +      mutex_lock(&info->lock);
 +      if (!info->streaming != !on) {
 +              ret = noon010_power_ctrl(sd, false, !on);
 +              if (!ret)
 +                      info->streaming = on;
 +      }
 +      if (!ret && on && info->apply_new_cfg) {
 +              ret = noon010_set_params(sd);
 +              if (!ret)
 +                      info->apply_new_cfg = 0;
 +      }
 +      mutex_unlock(&info->lock);
 +      return ret;
  }
  
  static int noon010_log_status(struct v4l2_subdev *sd)
        return 0;
  }
  
 +static int noon010_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 +{
 +      struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(fh, 0);
 +
 +      mf->width = noon010_sizes[0].width;
 +      mf->height = noon010_sizes[0].height;
 +      mf->code = noon010_formats[0].code;
 +      mf->colorspace = V4L2_COLORSPACE_JPEG;
 +      mf->field = V4L2_FIELD_NONE;
 +      return 0;
 +}
 +
 +static const struct v4l2_subdev_internal_ops noon010_subdev_internal_ops = {
 +      .open = noon010_open,
 +};
 +
  static const struct v4l2_ctrl_ops noon010_ctrl_ops = {
        .s_ctrl = noon010_s_ctrl,
  };
  
  static const struct v4l2_subdev_core_ops noon010_core_ops = {
 -      .g_chip_ident   = noon010_g_chip_ident,
        .s_power        = noon010_s_power,
        .g_ctrl         = v4l2_subdev_g_ctrl,
        .s_ctrl         = v4l2_subdev_s_ctrl,
        .log_status     = noon010_log_status,
  };
  
 -static const struct v4l2_subdev_video_ops noon010_video_ops = {
 -      .g_mbus_fmt     = noon010_g_fmt,
 -      .s_mbus_fmt     = noon010_s_fmt,
 -      .try_mbus_fmt   = noon010_try_fmt,
 -      .enum_mbus_fmt  = noon010_enum_fmt,
 +static struct v4l2_subdev_pad_ops noon010_pad_ops = {
 +      .enum_mbus_code = noon010_enum_mbus_code,
 +      .get_fmt        = noon010_get_fmt,
 +      .set_fmt        = noon010_set_fmt,
 +};
 +
 +static struct v4l2_subdev_video_ops noon010_video_ops = {
 +      .s_stream       = noon010_s_stream,
  };
  
  static const struct v4l2_subdev_ops noon010_ops = {
        .core   = &noon010_core_ops,
 +      .pad    = &noon010_pad_ops,
        .video  = &noon010_video_ops,
  };
  
@@@ -722,14 -666,10 +723,14 @@@ static int noon010_probe(struct i2c_cli
        if (!info)
                return -ENOMEM;
  
 +      mutex_init(&info->lock);
        sd = &info->sd;
        strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
        v4l2_i2c_subdev_init(sd, client, &noon010_ops);
  
 +      sd->internal_ops = &noon010_subdev_internal_ops;
 +      sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 +
        v4l2_ctrl_handler_init(&info->hdl, 3);
  
        v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
        if (ret)
                goto np_err;
  
 -      info->pdata             = client->dev.platform_data;
        info->i2c_reg_page      = -1;
        info->gpio_nreset       = -EINVAL;
        info->gpio_nstby        = -EINVAL;
 +      info->curr_fmt          = &noon010_formats[0];
 +      info->curr_win          = &noon010_sizes[0];
  
        if (gpio_is_valid(pdata->gpio_nreset)) {
                ret = gpio_request(pdata->gpio_nreset, "NOON010PC30 NRST");
        if (ret)
                goto np_reg_err;
  
 +      info->pad.flags = MEDIA_PAD_FL_SOURCE;
 +      sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
 +      ret = media_entity_init(&sd->entity, 1, &info->pad, 0);
 +      if (ret < 0)
 +              goto np_me_err;
 +
        ret = noon010_detect(client, info);
        if (!ret)
                return 0;
  
 -      /* the sensor detection failed */
 +np_me_err:
        regulator_bulk_free(NOON010_NUM_SUPPLIES, info->supply);
  np_reg_err:
        if (gpio_is_valid(info->gpio_nstby))
@@@ -822,7 -755,6 +823,7 @@@ static int noon010_remove(struct i2c_cl
        if (gpio_is_valid(info->gpio_nstby))
                gpio_free(info->gpio_nstby);
  
 +      media_entity_cleanup(&sd->entity);
        kfree(info);
        return 0;
  }
  #include <linux/delay.h>
  #include <linux/i2c.h>
  #include <linux/slab.h>
 +#include <linux/v4l2-mediabus.h>
+ #include <linux/module.h>
  
  #include <media/soc_camera.h>
  #include <media/v4l2-chip-ident.h>
 -
 +#include <media/v4l2-ctrls.h>
  
  /* Register definitions */
  #define REG_GAIN              0x00    /* range 00 - 3F */
@@@ -178,23 -178,20 +179,23 @@@ struct ov6650_reg 
  
  struct ov6650 {
        struct v4l2_subdev      subdev;
 -
 -      int                     gain;
 -      int                     blue;
 -      int                     red;
 -      int                     saturation;
 -      int                     hue;
 -      int                     brightness;
 -      int                     exposure;
 -      int                     gamma;
 -      int                     aec;
 -      bool                    vflip;
 -      bool                    hflip;
 -      bool                    awb;
 -      bool                    agc;
 +      struct v4l2_ctrl_handler hdl;
 +      struct {
 +              /* exposure/autoexposure cluster */
 +              struct v4l2_ctrl *autoexposure;
 +              struct v4l2_ctrl *exposure;
 +      };
 +      struct {
 +              /* gain/autogain cluster */
 +              struct v4l2_ctrl *autogain;
 +              struct v4l2_ctrl *gain;
 +      };
 +      struct {
 +              /* blue/red/autowhitebalance cluster */
 +              struct v4l2_ctrl *autowb;
 +              struct v4l2_ctrl *blue;
 +              struct v4l2_ctrl *red;
 +      };
        bool                    half_scale;     /* scale down output by 2 */
        struct v4l2_rect        rect;           /* sensor cropping window */
        unsigned long           pclk_limit;     /* from host */
@@@ -214,6 -211,126 +215,6 @@@ static enum v4l2_mbus_pixelcode ov6650_
        V4L2_MBUS_FMT_Y8_1X8,
  };
  
 -static const struct v4l2_queryctrl ov6650_controls[] = {
 -      {
 -              .id             = V4L2_CID_AUTOGAIN,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "AGC",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 1,
 -      },
 -      {
 -              .id             = V4L2_CID_GAIN,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Gain",
 -              .minimum        = 0,
 -              .maximum        = 0x3f,
 -              .step           = 1,
 -              .default_value  = DEF_GAIN,
 -      },
 -      {
 -              .id             = V4L2_CID_AUTO_WHITE_BALANCE,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "AWB",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 1,
 -      },
 -      {
 -              .id             = V4L2_CID_BLUE_BALANCE,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Blue",
 -              .minimum        = 0,
 -              .maximum        = 0xff,
 -              .step           = 1,
 -              .default_value  = DEF_BLUE,
 -      },
 -      {
 -              .id             = V4L2_CID_RED_BALANCE,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Red",
 -              .minimum        = 0,
 -              .maximum        = 0xff,
 -              .step           = 1,
 -              .default_value  = DEF_RED,
 -      },
 -      {
 -              .id             = V4L2_CID_SATURATION,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Saturation",
 -              .minimum        = 0,
 -              .maximum        = 0xf,
 -              .step           = 1,
 -              .default_value  = 0x8,
 -      },
 -      {
 -              .id             = V4L2_CID_HUE,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Hue",
 -              .minimum        = 0,
 -              .maximum        = HUE_MASK,
 -              .step           = 1,
 -              .default_value  = DEF_HUE,
 -      },
 -      {
 -              .id             = V4L2_CID_BRIGHTNESS,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Brightness",
 -              .minimum        = 0,
 -              .maximum        = 0xff,
 -              .step           = 1,
 -              .default_value  = 0x80,
 -      },
 -      {
 -              .id             = V4L2_CID_EXPOSURE_AUTO,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "AEC",
 -              .minimum        = 0,
 -              .maximum        = 3,
 -              .step           = 1,
 -              .default_value  = 0,
 -      },
 -      {
 -              .id             = V4L2_CID_EXPOSURE,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Exposure",
 -              .minimum        = 0,
 -              .maximum        = 0xff,
 -              .step           = 1,
 -              .default_value  = DEF_AECH,
 -      },
 -      {
 -              .id             = V4L2_CID_GAMMA,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Gamma",
 -              .minimum        = 0,
 -              .maximum        = 0xff,
 -              .step           = 1,
 -              .default_value  = 0x12,
 -      },
 -      {
 -              .id             = V4L2_CID_VFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Vertically",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      },
 -      {
 -              .id             = V4L2_CID_HFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Horizontally",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      },
 -};
 -
  /* read a register */
  static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
  {
@@@ -303,90 -420,213 +304,90 @@@ static int ov6650_s_stream(struct v4l2_
        return 0;
  }
  
 -/* Alter bus settings on camera side */
 -static int ov6650_set_bus_param(struct soc_camera_device *icd,
 -                              unsigned long flags)
 -{
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -      struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
 -      int ret;
 -
 -      flags = soc_camera_apply_sensor_flags(icl, flags);
 -
 -      if (flags & SOCAM_PCLK_SAMPLE_RISING)
 -              ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0);
 -      else
 -              ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING);
 -      if (ret)
 -              return ret;
 -
 -      if (flags & SOCAM_HSYNC_ACTIVE_LOW)
 -              ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0);
 -      else
 -              ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW);
 -      if (ret)
 -              return ret;
 -
 -      if (flags & SOCAM_VSYNC_ACTIVE_HIGH)
 -              ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0);
 -      else
 -              ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH);
 -
 -      return ret;
 -}
 -
 -/* Request bus settings on camera side */
 -static unsigned long ov6650_query_bus_param(struct soc_camera_device *icd)
 -{
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -
 -      unsigned long flags = SOCAM_MASTER |
 -              SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING |
 -              SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW |
 -              SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW |
 -              SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8;
 -
 -      return soc_camera_apply_sensor_flags(icl, flags);
 -}
 -
  /* Get status of additional camera capabilities */
 -static int ov6650_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 +static int ov6550_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
  {
 +      struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl);
 +      struct v4l2_subdev *sd = &priv->subdev;
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      struct ov6650 *priv = to_ov6650(client);
 -      uint8_t reg;
 -      int ret = 0;
 +      uint8_t reg, reg2;
 +      int ret;
  
        switch (ctrl->id) {
        case V4L2_CID_AUTOGAIN:
 -              ctrl->value = priv->agc;
 -              break;
 -      case V4L2_CID_GAIN:
 -              if (priv->agc) {
 -                      ret = ov6650_reg_read(client, REG_GAIN, &reg);
 -                      ctrl->value = reg;
 -              } else {
 -                      ctrl->value = priv->gain;
 -              }
 -              break;
 +              ret = ov6650_reg_read(client, REG_GAIN, &reg);
 +              if (!ret)
 +                      priv->gain->val = reg;
 +              return ret;
        case V4L2_CID_AUTO_WHITE_BALANCE:
 -              ctrl->value = priv->awb;
 -              break;
 -      case V4L2_CID_BLUE_BALANCE:
 -              if (priv->awb) {
 -                      ret = ov6650_reg_read(client, REG_BLUE, &reg);
 -                      ctrl->value = reg;
 -              } else {
 -                      ctrl->value = priv->blue;
 -              }
 -              break;
 -      case V4L2_CID_RED_BALANCE:
 -              if (priv->awb) {
 -                      ret = ov6650_reg_read(client, REG_RED, &reg);
 -                      ctrl->value = reg;
 -              } else {
 -                      ctrl->value = priv->red;
 +              ret = ov6650_reg_read(client, REG_BLUE, &reg);
 +              if (!ret)
 +                      ret = ov6650_reg_read(client, REG_RED, &reg2);
 +              if (!ret) {
 +                      priv->blue->val = reg;
 +                      priv->red->val = reg2;
                }
 -              break;
 -      case V4L2_CID_SATURATION:
 -              ctrl->value = priv->saturation;
 -              break;
 -      case V4L2_CID_HUE:
 -              ctrl->value = priv->hue;
 -              break;
 -      case V4L2_CID_BRIGHTNESS:
 -              ctrl->value = priv->brightness;
 -              break;
 +              return ret;
        case V4L2_CID_EXPOSURE_AUTO:
 -              ctrl->value = priv->aec;
 -              break;
 -      case V4L2_CID_EXPOSURE:
 -              if (priv->aec) {
 -                      ret = ov6650_reg_read(client, REG_AECH, &reg);
 -                      ctrl->value = reg;
 -              } else {
 -                      ctrl->value = priv->exposure;
 -              }
 -              break;
 -      case V4L2_CID_GAMMA:
 -              ctrl->value = priv->gamma;
 -              break;
 -      case V4L2_CID_VFLIP:
 -              ctrl->value = priv->vflip;
 -              break;
 -      case V4L2_CID_HFLIP:
 -              ctrl->value = priv->hflip;
 -              break;
 +              ret = ov6650_reg_read(client, REG_AECH, &reg);
 +              if (!ret)
 +                      priv->exposure->val = reg;
 +              return ret;
        }
 -      return ret;
 +      return -EINVAL;
  }
  
  /* Set status of additional camera capabilities */
 -static int ov6650_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 +static int ov6550_s_ctrl(struct v4l2_ctrl *ctrl)
  {
 +      struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl);
 +      struct v4l2_subdev *sd = &priv->subdev;
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      struct ov6650 *priv = to_ov6650(client);
 -      int ret = 0;
 +      int ret;
  
        switch (ctrl->id) {
        case V4L2_CID_AUTOGAIN:
                ret = ov6650_reg_rmw(client, REG_COMB,
 -                              ctrl->value ? COMB_AGC : 0, COMB_AGC);
 -              if (!ret)
 -                      priv->agc = ctrl->value;
 -              break;
 -      case V4L2_CID_GAIN:
 -              ret = ov6650_reg_write(client, REG_GAIN, ctrl->value);
 -              if (!ret)
 -                      priv->gain = ctrl->value;
 -              break;
 +                              ctrl->val ? COMB_AGC : 0, COMB_AGC);
 +              if (!ret && !ctrl->val)
 +                      ret = ov6650_reg_write(client, REG_GAIN, priv->gain->val);
 +              return ret;
        case V4L2_CID_AUTO_WHITE_BALANCE:
                ret = ov6650_reg_rmw(client, REG_COMB,
 -                              ctrl->value ? COMB_AWB : 0, COMB_AWB);
 -              if (!ret)
 -                      priv->awb = ctrl->value;
 -              break;
 -      case V4L2_CID_BLUE_BALANCE:
 -              ret = ov6650_reg_write(client, REG_BLUE, ctrl->value);
 -              if (!ret)
 -                      priv->blue = ctrl->value;
 -              break;
 -      case V4L2_CID_RED_BALANCE:
 -              ret = ov6650_reg_write(client, REG_RED, ctrl->value);
 -              if (!ret)
 -                      priv->red = ctrl->value;
 -              break;
 +                              ctrl->val ? COMB_AWB : 0, COMB_AWB);
 +              if (!ret && !ctrl->val) {
 +                      ret = ov6650_reg_write(client, REG_BLUE, priv->blue->val);
 +                      if (!ret)
 +                              ret = ov6650_reg_write(client, REG_RED,
 +                                                      priv->red->val);
 +              }
 +              return ret;
        case V4L2_CID_SATURATION:
 -              ret = ov6650_reg_rmw(client, REG_SAT, SET_SAT(ctrl->value),
 +              return ov6650_reg_rmw(client, REG_SAT, SET_SAT(ctrl->val),
                                SAT_MASK);
 -              if (!ret)
 -                      priv->saturation = ctrl->value;
 -              break;
        case V4L2_CID_HUE:
 -              ret = ov6650_reg_rmw(client, REG_HUE, SET_HUE(ctrl->value),
 +              return ov6650_reg_rmw(client, REG_HUE, SET_HUE(ctrl->val),
                                HUE_MASK);
 -              if (!ret)
 -                      priv->hue = ctrl->value;
 -              break;
        case V4L2_CID_BRIGHTNESS:
 -              ret = ov6650_reg_write(client, REG_BRT, ctrl->value);
 -              if (!ret)
 -                      priv->brightness = ctrl->value;
 -              break;
 +              return ov6650_reg_write(client, REG_BRT, ctrl->val);
        case V4L2_CID_EXPOSURE_AUTO:
 -              switch (ctrl->value) {
 -              case V4L2_EXPOSURE_AUTO:
 -                      ret = ov6650_reg_rmw(client, REG_COMB, COMB_AEC, 0);
 -                      break;
 -              default:
 -                      ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_AEC);
 -                      break;
 -              }
 -              if (!ret)
 -                      priv->aec = ctrl->value;
 -              break;
 -      case V4L2_CID_EXPOSURE:
 -              ret = ov6650_reg_write(client, REG_AECH, ctrl->value);
 -              if (!ret)
 -                      priv->exposure = ctrl->value;
 -              break;
 +              ret = ov6650_reg_rmw(client, REG_COMB, ctrl->val ==
 +                              V4L2_EXPOSURE_AUTO ? COMB_AEC : 0, COMB_AEC);
 +              if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL)
 +                      ret = ov6650_reg_write(client, REG_AECH,
 +                                              priv->exposure->val);
 +              return ret;
        case V4L2_CID_GAMMA:
 -              ret = ov6650_reg_write(client, REG_GAM1, ctrl->value);
 -              if (!ret)
 -                      priv->gamma = ctrl->value;
 -              break;
 +              return ov6650_reg_write(client, REG_GAM1, ctrl->val);
        case V4L2_CID_VFLIP:
 -              ret = ov6650_reg_rmw(client, REG_COMB,
 -                              ctrl->value ? COMB_FLIP_V : 0, COMB_FLIP_V);
 -              if (!ret)
 -                      priv->vflip = ctrl->value;
 -              break;
 +              return ov6650_reg_rmw(client, REG_COMB,
 +                              ctrl->val ? COMB_FLIP_V : 0, COMB_FLIP_V);
        case V4L2_CID_HFLIP:
 -              ret = ov6650_reg_rmw(client, REG_COMB,
 -                              ctrl->value ? COMB_FLIP_H : 0, COMB_FLIP_H);
 -              if (!ret)
 -                      priv->hflip = ctrl->value;
 -              break;
 +              return ov6650_reg_rmw(client, REG_COMB,
 +                              ctrl->val ? COMB_FLIP_H : 0, COMB_FLIP_H);
        }
  
 -      return ret;
 +      return -EINVAL;
  }
  
  /* Get chip identification */
@@@ -539,7 -779,7 +540,7 @@@ static u8 to_clkrc(struct v4l2_fract *t
  static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
  {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      struct soc_camera_device *icd = client->dev.platform_data;
 +      struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
        struct soc_camera_sense *sense = icd->sense;
        struct ov6650 *priv = to_ov6650(client);
        bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
@@@ -818,7 -1058,8 +819,7 @@@ static int ov6650_prog_dflt(struct i2c_
        return ret;
  }
  
 -static int ov6650_video_probe(struct soc_camera_device *icd,
 -                              struct i2c_client *client)
 +static int ov6650_video_probe(struct i2c_client *client)
  {
        u8              pidh, pidl, midh, midl;
        int             ret = 0;
        return ret;
  }
  
 -static struct soc_camera_ops ov6650_ops = {
 -      .set_bus_param          = ov6650_set_bus_param,
 -      .query_bus_param        = ov6650_query_bus_param,
 -      .controls               = ov6650_controls,
 -      .num_controls           = ARRAY_SIZE(ov6650_controls),
 +static const struct v4l2_ctrl_ops ov6550_ctrl_ops = {
 +      .g_volatile_ctrl = ov6550_g_volatile_ctrl,
 +      .s_ctrl = ov6550_s_ctrl,
  };
  
  static struct v4l2_subdev_core_ops ov6650_core_ops = {
 -      .g_ctrl                 = ov6650_g_ctrl,
 -      .s_ctrl                 = ov6650_s_ctrl,
        .g_chip_ident           = ov6650_g_chip_ident,
  #ifdef CONFIG_VIDEO_ADV_DEBUG
        .g_register             = ov6650_get_register,
  #endif
  };
  
 +/* Request bus settings on camera side */
 +static int ov6650_g_mbus_config(struct v4l2_subdev *sd,
 +                              struct v4l2_mbus_config *cfg)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +
 +      cfg->flags = V4L2_MBUS_MASTER |
 +              V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
 +              V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW |
 +              V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
 +              V4L2_MBUS_DATA_ACTIVE_HIGH;
 +      cfg->type = V4L2_MBUS_PARALLEL;
 +      cfg->flags = soc_camera_apply_board_flags(icl, cfg);
 +
 +      return 0;
 +}
 +
 +/* Alter bus settings on camera side */
 +static int ov6650_s_mbus_config(struct v4l2_subdev *sd,
 +                              const struct v4l2_mbus_config *cfg)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +      unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
 +      int ret;
 +
 +      if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
 +              ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0);
 +      else
 +              ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING);
 +      if (ret)
 +              return ret;
 +
 +      if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
 +              ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0);
 +      else
 +              ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW);
 +      if (ret)
 +              return ret;
 +
 +      if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
 +              ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0);
 +      else
 +              ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH);
 +
 +      return ret;
 +}
 +
  static struct v4l2_subdev_video_ops ov6650_video_ops = {
        .s_stream       = ov6650_s_stream,
        .g_mbus_fmt     = ov6650_g_fmt,
        .s_crop         = ov6650_s_crop,
        .g_parm         = ov6650_g_parm,
        .s_parm         = ov6650_s_parm,
 +      .g_mbus_config  = ov6650_g_mbus_config,
 +      .s_mbus_config  = ov6650_s_mbus_config,
  };
  
  static struct v4l2_subdev_ops ov6650_subdev_ops = {
@@@ -943,9 -1137,16 +944,9 @@@ static int ov6650_probe(struct i2c_clie
                        const struct i2c_device_id *did)
  {
        struct ov6650 *priv;
 -      struct soc_camera_device *icd = client->dev.platform_data;
 -      struct soc_camera_link *icl;
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
        int ret;
  
 -      if (!icd) {
 -              dev_err(&client->dev, "Missing soc-camera data!\n");
 -              return -EINVAL;
 -      }
 -
 -      icl = to_soc_camera_link(icd);
        if (!icl) {
                dev_err(&client->dev, "Missing platform_data for driver\n");
                return -EINVAL;
        }
  
        v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops);
 +      v4l2_ctrl_handler_init(&priv->hdl, 13);
 +      v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_VFLIP, 0, 1, 1, 0);
 +      v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_HFLIP, 0, 1, 1, 0);
 +      priv->autogain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
 +      priv->gain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_GAIN, 0, 0x3f, 1, DEF_GAIN);
 +      priv->autowb = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
 +      priv->blue = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_BLUE_BALANCE, 0, 0xff, 1, DEF_BLUE);
 +      priv->red = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_RED_BALANCE, 0, 0xff, 1, DEF_RED);
 +      v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_SATURATION, 0, 0xf, 1, 0x8);
 +      v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_HUE, 0, HUE_MASK, 1, DEF_HUE);
 +      v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_BRIGHTNESS, 0, 0xff, 1, 0x80);
 +      priv->autoexposure = v4l2_ctrl_new_std_menu(&priv->hdl,
 +                      &ov6550_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
 +                      V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
 +      priv->exposure = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_EXPOSURE, 0, 0xff, 1, DEF_AECH);
 +      v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
 +                      V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
 +
 +      priv->subdev.ctrl_handler = &priv->hdl;
 +      if (priv->hdl.error) {
 +              int err = priv->hdl.error;
  
 -      icd->ops = &ov6650_ops;
 +              kfree(priv);
 +              return err;
 +      }
 +      v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
 +      v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
 +      v4l2_ctrl_auto_cluster(2, &priv->autoexposure,
 +                              V4L2_EXPOSURE_MANUAL, true);
  
        priv->rect.left   = DEF_HSTRT << 1;
        priv->rect.top    = DEF_VSTRT << 1;
        priv->code        = V4L2_MBUS_FMT_YUYV8_2X8;
        priv->colorspace  = V4L2_COLORSPACE_JPEG;
  
 -      ret = ov6650_video_probe(icd, client);
 +      ret = ov6650_video_probe(client);
 +      if (!ret)
 +              ret = v4l2_ctrl_handler_setup(&priv->hdl);
  
        if (ret) {
 -              icd->ops = NULL;
 +              v4l2_ctrl_handler_free(&priv->hdl);
                kfree(priv);
        }
  
@@@ -1024,8 -1185,6 +1025,8 @@@ static int ov6650_remove(struct i2c_cli
  {
        struct ov6650 *priv = to_ov6650(client);
  
 +      v4l2_device_unregister_subdev(&priv->subdev);
 +      v4l2_ctrl_handler_free(&priv->hdl);
        kfree(priv);
        return 0;
  }
@@@ -21,6 -21,7 +21,7 @@@
  #include <linux/errno.h>
  #include <linux/string.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  #include <linux/firmware.h>
  #include <linux/videodev2.h>
  #include <media/v4l2-common.h>
@@@ -2993,13 -2994,6 +2994,13 @@@ static void pvr2_subdev_set_control(str
                pvr2_subdev_set_control(hdw, id, #lab, (hdw)->lab##_val); \
        }
  
 +int pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw, v4l2_std_id *std)
 +{
 +      v4l2_device_call_all(&hdw->v4l2_dev, 0,
 +                           video, querystd, std);
 +      return 0;
 +}
 +
  /* Execute whatever commands are required to update the state of all the
     sub-devices so that they match our current control values. */
  static void pvr2_subdev_update(struct pvr2_hdw *hdw)
@@@ -29,6 -29,7 +29,7 @@@
  #include "pvrusb2-v4l2.h"
  #include "pvrusb2-ioread.h"
  #include <linux/videodev2.h>
+ #include <linux/module.h>
  #include <media/v4l2-dev.h>
  #include <media/v4l2-common.h>
  #include <media/v4l2-ioctl.h>
@@@ -227,14 -228,6 +228,14 @@@ static long pvr2_v4l2_do_ioctl(struct f
                break;
        }
  
 +      case VIDIOC_QUERYSTD:
 +      {
 +              v4l2_std_id *std = arg;
 +              *std = V4L2_STD_ALL;
 +              ret = pvr2_hdw_get_detected_std(hdw, std);
 +              break;
 +      }
 +
        case VIDIOC_G_STD:
        {
                int val = 0;
  #include <linux/delay.h>
  #include <linux/i2c.h>
  #include <linux/slab.h>
 +#include <linux/v4l2-mediabus.h>
  #include <linux/videodev2.h>
+ #include <linux/module.h>
  
  #include <media/rj54n1cb0c.h>
  #include <media/soc_camera.h>
 -#include <media/soc_mediabus.h>
  #include <media/v4l2-subdev.h>
  #include <media/v4l2-chip-ident.h>
 +#include <media/v4l2-ctrls.h>
  
  #define RJ54N1_DEV_CODE                       0x0400
  #define RJ54N1_DEV_CODE2              0x0401
@@@ -149,7 -149,6 +150,7 @@@ struct rj54n1_clock_div 
  
  struct rj54n1 {
        struct v4l2_subdev subdev;
 +      struct v4l2_ctrl_handler hdl;
        struct rj54n1_clock_div clk_div;
        const struct rj54n1_datafmt *fmt;
        struct v4l2_rect rect;  /* Sensor window */
@@@ -501,6 -500,31 +502,6 @@@ static int rj54n1_s_stream(struct v4l2_
        return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
  }
  
 -static int rj54n1_set_bus_param(struct soc_camera_device *icd,
 -                              unsigned long flags)
 -{
 -      struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
 -      struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      /* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
 -
 -      if (flags & SOCAM_PCLK_SAMPLE_RISING)
 -              return reg_write(client, RJ54N1_OUT_SIGPO, 1 << 4);
 -      else
 -              return reg_write(client, RJ54N1_OUT_SIGPO, 0);
 -}
 -
 -static unsigned long rj54n1_query_bus_param(struct soc_camera_device *icd)
 -{
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -      const unsigned long flags =
 -              SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING |
 -              SOCAM_MASTER | SOCAM_DATAWIDTH_8 |
 -              SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
 -              SOCAM_DATA_ACTIVE_HIGH;
 -
 -      return soc_camera_apply_sensor_flags(icl, flags);
 -}
 -
  static int rj54n1_set_rect(struct i2c_client *client,
                           u16 reg_x, u16 reg_y, u16 reg_xy,
                           u32 width, u32 height)
@@@ -1179,51 -1203,134 +1180,51 @@@ static int rj54n1_s_register(struct v4l
  }
  #endif
  
 -static const struct v4l2_queryctrl rj54n1_controls[] = {
 -      {
 -              .id             = V4L2_CID_VFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Vertically",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      }, {
 -              .id             = V4L2_CID_HFLIP,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Flip Horizontally",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 0,
 -      }, {
 -              .id             = V4L2_CID_GAIN,
 -              .type           = V4L2_CTRL_TYPE_INTEGER,
 -              .name           = "Gain",
 -              .minimum        = 0,
 -              .maximum        = 127,
 -              .step           = 1,
 -              .default_value  = 66,
 -              .flags          = V4L2_CTRL_FLAG_SLIDER,
 -      }, {
 -              .id             = V4L2_CID_AUTO_WHITE_BALANCE,
 -              .type           = V4L2_CTRL_TYPE_BOOLEAN,
 -              .name           = "Auto white balance",
 -              .minimum        = 0,
 -              .maximum        = 1,
 -              .step           = 1,
 -              .default_value  = 1,
 -      },
 -};
 -
 -static struct soc_camera_ops rj54n1_ops = {
 -      .set_bus_param          = rj54n1_set_bus_param,
 -      .query_bus_param        = rj54n1_query_bus_param,
 -      .controls               = rj54n1_controls,
 -      .num_controls           = ARRAY_SIZE(rj54n1_controls),
 -};
 -
 -static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 +static int rj54n1_s_ctrl(struct v4l2_ctrl *ctrl)
  {
 +      struct rj54n1 *rj54n1 = container_of(ctrl->handler, struct rj54n1, hdl);
 +      struct v4l2_subdev *sd = &rj54n1->subdev;
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      struct rj54n1 *rj54n1 = to_rj54n1(client);
        int data;
  
        switch (ctrl->id) {
        case V4L2_CID_VFLIP:
 -              data = reg_read(client, RJ54N1_MIRROR_STILL_MODE);
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !(data & 1);
 -              break;
 -      case V4L2_CID_HFLIP:
 -              data = reg_read(client, RJ54N1_MIRROR_STILL_MODE);
 -              if (data < 0)
 -                      return -EIO;
 -              ctrl->value = !(data & 2);
 -              break;
 -      case V4L2_CID_GAIN:
 -              data = reg_read(client, RJ54N1_Y_GAIN);
 -              if (data < 0)
 -                      return -EIO;
 -
 -              ctrl->value = data / 2;
 -              break;
 -      case V4L2_CID_AUTO_WHITE_BALANCE:
 -              ctrl->value = rj54n1->auto_wb;
 -              break;
 -      }
 -
 -      return 0;
 -}
 -
 -static int rj54n1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
 -{
 -      int data;
 -      struct i2c_client *client = v4l2_get_subdevdata(sd);
 -      struct rj54n1 *rj54n1 = to_rj54n1(client);
 -      const struct v4l2_queryctrl *qctrl;
 -
 -      qctrl = soc_camera_find_qctrl(&rj54n1_ops, ctrl->id);
 -      if (!qctrl)
 -              return -EINVAL;
 -
 -      switch (ctrl->id) {
 -      case V4L2_CID_VFLIP:
 -              if (ctrl->value)
 +              if (ctrl->val)
                        data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 1);
                else
                        data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 1, 1);
                if (data < 0)
                        return -EIO;
 -              break;
 +              return 0;
        case V4L2_CID_HFLIP:
 -              if (ctrl->value)
 +              if (ctrl->val)
                        data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 2);
                else
                        data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 2, 2);
                if (data < 0)
                        return -EIO;
 -              break;
 +              return 0;
        case V4L2_CID_GAIN:
 -              if (ctrl->value > qctrl->maximum ||
 -                  ctrl->value < qctrl->minimum)
 -                      return -EINVAL;
 -              else if (reg_write(client, RJ54N1_Y_GAIN, ctrl->value * 2) < 0)
 +              if (reg_write(client, RJ54N1_Y_GAIN, ctrl->val * 2) < 0)
                        return -EIO;
 -              break;
 +              return 0;
        case V4L2_CID_AUTO_WHITE_BALANCE:
                /* Auto WB area - whole image */
 -              if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->value << 7,
 +              if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->val << 7,
                            0x80) < 0)
                        return -EIO;
 -              rj54n1->auto_wb = ctrl->value;
 -              break;
 +              rj54n1->auto_wb = ctrl->val;
 +              return 0;
        }
  
 -      return 0;
 +      return -EINVAL;
  }
  
 +static const struct v4l2_ctrl_ops rj54n1_ctrl_ops = {
 +      .s_ctrl = rj54n1_s_ctrl,
 +};
 +
  static struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
 -      .g_ctrl         = rj54n1_g_ctrl,
 -      .s_ctrl         = rj54n1_s_ctrl,
        .g_chip_ident   = rj54n1_g_chip_ident,
  #ifdef CONFIG_VIDEO_ADV_DEBUG
        .g_register     = rj54n1_g_register,
  #endif
  };
  
 +static int rj54n1_g_mbus_config(struct v4l2_subdev *sd,
 +                              struct v4l2_mbus_config *cfg)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +
 +      cfg->flags =
 +              V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
 +              V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH |
 +              V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH;
 +      cfg->type = V4L2_MBUS_PARALLEL;
 +      cfg->flags = soc_camera_apply_board_flags(icl, cfg);
 +
 +      return 0;
 +}
 +
 +static int rj54n1_s_mbus_config(struct v4l2_subdev *sd,
 +                              const struct v4l2_mbus_config *cfg)
 +{
 +      struct i2c_client *client = v4l2_get_subdevdata(sd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
 +
 +      /* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
 +      if (soc_camera_apply_board_flags(icl, cfg) &
 +          V4L2_MBUS_PCLK_SAMPLE_RISING)
 +              return reg_write(client, RJ54N1_OUT_SIGPO, 1 << 4);
 +      else
 +              return reg_write(client, RJ54N1_OUT_SIGPO, 0);
 +}
 +
  static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
        .s_stream       = rj54n1_s_stream,
        .s_mbus_fmt     = rj54n1_s_fmt,
        .g_crop         = rj54n1_g_crop,
        .s_crop         = rj54n1_s_crop,
        .cropcap        = rj54n1_cropcap,
 +      .g_mbus_config  = rj54n1_g_mbus_config,
 +      .s_mbus_config  = rj54n1_s_mbus_config,
  };
  
  static struct v4l2_subdev_ops rj54n1_subdev_ops = {
   * Interface active, can use i2c. If it fails, it can indeed mean, that
   * this wasn't our capture interface, so, we wait for the right one
   */
 -static int rj54n1_video_probe(struct soc_camera_device *icd,
 -                            struct i2c_client *client,
 +static int rj54n1_video_probe(struct i2c_client *client,
                              struct rj54n1_pdata *priv)
  {
        int data1, data2;
        int ret;
  
 -      /* We must have a parent by now. And it cannot be a wrong one. */
 -      BUG_ON(!icd->parent ||
 -             to_soc_camera_host(icd->parent)->nr != icd->iface);
 -
        /* Read out the chip version register */
        data1 = reg_read(client, RJ54N1_DEV_CODE);
        data2 = reg_read(client, RJ54N1_DEV_CODE2);
@@@ -1316,11 -1396,18 +1317,11 @@@ static int rj54n1_probe(struct i2c_clie
                        const struct i2c_device_id *did)
  {
        struct rj54n1 *rj54n1;
 -      struct soc_camera_device *icd = client->dev.platform_data;
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
        struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
 -      struct soc_camera_link *icl;
        struct rj54n1_pdata *rj54n1_priv;
        int ret;
  
 -      if (!icd) {
 -              dev_err(&client->dev, "RJ54N1CB0C: missing soc-camera data!\n");
 -              return -EINVAL;
 -      }
 -
 -      icl = to_soc_camera_link(icd);
        if (!icl || !icl->priv) {
                dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
                return -EINVAL;
                return -ENOMEM;
  
        v4l2_i2c_subdev_init(&rj54n1->subdev, client, &rj54n1_subdev_ops);
 +      v4l2_ctrl_handler_init(&rj54n1->hdl, 4);
 +      v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
 +                      V4L2_CID_VFLIP, 0, 1, 1, 0);
 +      v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
 +                      V4L2_CID_HFLIP, 0, 1, 1, 0);
 +      v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
 +                      V4L2_CID_GAIN, 0, 127, 1, 66);
 +      v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
 +                      V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
 +      rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
 +      if (rj54n1->hdl.error) {
 +              int err = rj54n1->hdl.error;
  
 -      icd->ops                = &rj54n1_ops;
 +              kfree(rj54n1);
 +              return err;
 +      }
  
        rj54n1->clk_div         = clk_div;
        rj54n1->rect.left       = RJ54N1_COLUMN_SKIP;
        rj54n1->tgclk_mhz       = (rj54n1_priv->mclk_freq / PLL_L * PLL_N) /
                (clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
  
 -      ret = rj54n1_video_probe(icd, client, rj54n1_priv);
 +      ret = rj54n1_video_probe(client, rj54n1_priv);
        if (ret < 0) {
 -              icd->ops = NULL;
 +              v4l2_ctrl_handler_free(&rj54n1->hdl);
                kfree(rj54n1);
                return ret;
        }
 -
 -      return ret;
 +      return v4l2_ctrl_handler_setup(&rj54n1->hdl);
  }
  
  static int rj54n1_remove(struct i2c_client *client)
  {
        struct rj54n1 *rj54n1 = to_rj54n1(client);
 -      struct soc_camera_device *icd = client->dev.platform_data;
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 +      struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
  
 -      icd->ops = NULL;
 +      v4l2_device_unregister_subdev(&rj54n1->subdev);
        if (icl->free_bus)
                icl->free_bus(icl);
 +      v4l2_ctrl_handler_free(&rj54n1->hdl);
        kfree(rj54n1);
  
        return 0;
  #include <linux/pm_runtime.h>
  #include <linux/slab.h>
  #include <linux/videodev2.h>
+ #include <linux/module.h>
  
  #include <media/sh_mobile_ceu.h>
  #include <media/sh_mobile_csi2.h>
  #include <media/soc_camera.h>
 +#include <media/soc_mediabus.h>
  #include <media/v4l2-common.h>
  #include <media/v4l2-dev.h>
  #include <media/v4l2-device.h>
@@@ -36,10 -36,11 +37,10 @@@ struct sh_csi2 
        struct v4l2_subdev              subdev;
        struct list_head                list;
        unsigned int                    irq;
 +      unsigned long                   mipi_flags;
        void __iomem                    *base;
        struct platform_device          *pdev;
        struct sh_csi2_client_config    *client;
 -      unsigned long (*query_bus_param)(struct soc_camera_device *);
 -      int (*set_bus_param)(struct soc_camera_device *, unsigned long);
  };
  
  static int sh_csi2_try_fmt(struct v4l2_subdev *sd,
@@@ -127,34 -128,9 +128,34 @@@ static int sh_csi2_s_fmt(struct v4l2_su
        return 0;
  }
  
 +static int sh_csi2_g_mbus_config(struct v4l2_subdev *sd,
 +                               struct v4l2_mbus_config *cfg)
 +{
 +      cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING |
 +              V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
 +              V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH;
 +      cfg->type = V4L2_MBUS_PARALLEL;
 +
 +      return 0;
 +}
 +
 +static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
 +                               const struct v4l2_mbus_config *cfg)
 +{
 +      struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
 +      struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
 +      struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
 +      struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
 +                                            .flags = priv->mipi_flags};
 +
 +      return v4l2_subdev_call(client_sd, video, s_mbus_config, &client_cfg);
 +}
 +
  static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = {
        .s_mbus_fmt     = sh_csi2_s_fmt,
        .try_mbus_fmt   = sh_csi2_try_fmt,
 +      .g_mbus_config  = sh_csi2_g_mbus_config,
 +      .s_mbus_config  = sh_csi2_s_mbus_config,
  };
  
  static void sh_csi2_hwinit(struct sh_csi2 *priv)
        udelay(5);
        iowrite32(0x00000000, priv->base + SH_CSI2_SRST);
  
 -      if (priv->client->lanes & 3)
 -              tmp |= priv->client->lanes & 3;
 -      else
 -              /* Default - both lanes */
 -              tmp |= 3;
 +      switch (pdata->type) {
 +      case SH_CSI2C:
 +              if (priv->client->lanes == 1)
 +                      tmp |= 1;
 +              else
 +                      /* Default - both lanes */
 +                      tmp |= 3;
 +              break;
 +      case SH_CSI2I:
 +              if (!priv->client->lanes || priv->client->lanes > 4)
 +                      /* Default - all 4 lanes */
 +                      tmp |= 0xf;
 +              else
 +                      tmp |= (1 << priv->client->lanes) - 1;
 +      }
  
        if (priv->client->phy == SH_CSI2_PHY_MAIN)
                tmp |= 0x8000;
        iowrite32(tmp, priv->base + SH_CSI2_CHKSUM);
  }
  
 -static int sh_csi2_set_bus_param(struct soc_camera_device *icd,
 -                               unsigned long flags)
 -{
 -      return 0;
 -}
 -
 -static unsigned long sh_csi2_query_bus_param(struct soc_camera_device *icd)
 -{
 -      struct soc_camera_link *icl = to_soc_camera_link(icd);
 -      const unsigned long flags = SOCAM_PCLK_SAMPLE_RISING |
 -              SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
 -              SOCAM_MASTER | SOCAM_DATAWIDTH_8 | SOCAM_DATA_ACTIVE_HIGH;
 -
 -      return soc_camera_apply_sensor_flags(icl, flags);
 -}
 -
  static int sh_csi2_client_connect(struct sh_csi2 *priv)
  {
        struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
 -      struct v4l2_subdev *sd, *csi2_sd = &priv->subdev;
 -      struct soc_camera_device *icd = NULL;
 +      struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
 +      struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
        struct device *dev = v4l2_get_subdevdata(&priv->subdev);
 -      int i;
 +      struct v4l2_mbus_config cfg;
 +      unsigned long common_flags, csi2_flags;
 +      int i, ret;
  
 -      v4l2_device_for_each_subdev(sd, csi2_sd->v4l2_dev)
 -              if (sd->grp_id) {
 -                      icd = (struct soc_camera_device *)sd->grp_id;
 -                      break;
 -              }
 -
 -      if (!icd)
 -              return -EINVAL;
 +      if (priv->client)
 +              return -EBUSY;
  
        for (i = 0; i < pdata->num_clients; i++)
                if (&pdata->clients[i].pdev->dev == icd->pdev)
        if (i == pdata->num_clients)
                return -ENODEV;
  
 -      priv->client = pdata->clients + i;
 +      /* Check if we can support this camera */
 +      csi2_flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK | V4L2_MBUS_CSI2_1_LANE;
 +
 +      switch (pdata->type) {
 +      case SH_CSI2C:
 +              if (pdata->clients[i].lanes != 1)
 +                      csi2_flags |= V4L2_MBUS_CSI2_2_LANE;
 +              break;
 +      case SH_CSI2I:
 +              switch (pdata->clients[i].lanes) {
 +              default:
 +                      csi2_flags |= V4L2_MBUS_CSI2_4_LANE;
 +              case 3:
 +                      csi2_flags |= V4L2_MBUS_CSI2_3_LANE;
 +              case 2:
 +                      csi2_flags |= V4L2_MBUS_CSI2_2_LANE;
 +              }
 +      }
  
 -      priv->set_bus_param             = icd->ops->set_bus_param;
 -      priv->query_bus_param           = icd->ops->query_bus_param;
 -      icd->ops->set_bus_param         = sh_csi2_set_bus_param;
 -      icd->ops->query_bus_param       = sh_csi2_query_bus_param;
 +      cfg.type = V4L2_MBUS_CSI2;
 +      ret = v4l2_subdev_call(client_sd, video, g_mbus_config, &cfg);
 +      if (ret == -ENOIOCTLCMD)
 +              common_flags = csi2_flags;
 +      else if (!ret)
 +              common_flags = soc_mbus_config_compatible(&cfg,
 +                                                        csi2_flags);
 +      else
 +              common_flags = 0;
  
 -      csi2_sd->grp_id = (long)icd;
 +      if (!common_flags)
 +              return -EINVAL;
 +
 +      /* All good: camera MIPI configuration supported */
 +      priv->mipi_flags = common_flags;
 +      priv->client = pdata->clients + i;
  
        pm_runtime_get_sync(dev);
  
  
  static void sh_csi2_client_disconnect(struct sh_csi2 *priv)
  {
 -      struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
 +      if (!priv->client)
 +              return;
  
        priv->client = NULL;
 -      priv->subdev.grp_id = 0;
 -
 -      /* Driver is about to be unbound */
 -      icd->ops->set_bus_param         = priv->set_bus_param;
 -      icd->ops->query_bus_param       = priv->query_bus_param;
 -      priv->set_bus_param             = NULL;
 -      priv->query_bus_param           = NULL;
  
        pm_runtime_put(v4l2_get_subdevdata(&priv->subdev));
  }
@@@ -19,6 -19,7 +19,7 @@@
  #include <linux/i2c.h>
  #include <linux/delay.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  #include <media/v4l2-device.h>
  #include <media/v4l2-subdev.h>
  #include <media/v4l2-mediabus.h>
@@@ -714,6 -715,11 +715,6 @@@ static int sr030pc30_base_config(struc
        return ret;
  }
  
 -static int sr030pc30_s_stream(struct v4l2_subdev *sd, int enable)
 -{
 -      return 0;
 -}
 -
  static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
  {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
@@@ -759,6 -765,7 +760,6 @@@ static const struct v4l2_subdev_core_op
  };
  
  static const struct v4l2_subdev_video_ops sr030pc30_video_ops = {
 -      .s_stream       = sr030pc30_s_stream,
        .g_mbus_fmt     = sr030pc30_g_fmt,
        .s_mbus_fmt     = sr030pc30_s_fmt,
        .try_mbus_fmt   = sr030pc30_try_fmt,
@@@ -28,6 -28,7 +28,7 @@@
  #include <linux/i2c.h>
  #include <linux/slab.h>
  #include <linux/videodev2.h>
+ #include <linux/module.h>
  #include <media/tvp7002.h>
  #include <media/v4l2-device.h>
  #include <media/v4l2-chip-ident.h>
@@@ -128,7 -129,7 +129,7 @@@ static const struct i2c_reg_value tvp70
        { TVP7002_ADC_SETUP, 0x50, TVP7002_WRITE },
        { TVP7002_COARSE_CLAMP_CTL, 0x00, TVP7002_WRITE },
        { TVP7002_SOG_CLAMP, 0x80, TVP7002_WRITE },
 -      { TVP7002_RGB_COARSE_CLAMP_CTL, 0x00, TVP7002_WRITE },
 +      { TVP7002_RGB_COARSE_CLAMP_CTL, 0x8c, TVP7002_WRITE },
        { TVP7002_SOG_COARSE_CLAMP_CTL, 0x04, TVP7002_WRITE },
        { TVP7002_ALC_PLACEMENT, 0x5a, TVP7002_WRITE },
        { 0x32, 0x18, TVP7002_RESERVED },
@@@ -182,6 -183,7 +183,6 @@@ static const struct i2c_reg_value tvp70
        { TVP7002_HPLL_FDBK_DIV_MSBS, 0x35, TVP7002_WRITE },
        { TVP7002_HPLL_FDBK_DIV_LSBS, 0xa0, TVP7002_WRITE },
        { TVP7002_HPLL_CRTL, 0x02, TVP7002_WRITE },
 -      { TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_LSBS, 0x91, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_MSBS, 0x00, TVP7002_WRITE },
        { TVP7002_AVID_STOP_PIXEL_LSBS, 0x0B, TVP7002_WRITE },
@@@ -203,6 -205,7 +204,6 @@@ static const struct i2c_reg_value tvp70
        { TVP7002_HPLL_FDBK_DIV_MSBS, 0x36, TVP7002_WRITE },
        { TVP7002_HPLL_FDBK_DIV_LSBS, 0x00, TVP7002_WRITE },
        { TVP7002_HPLL_CRTL, 0x18, TVP7002_WRITE },
 -      { TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_LSBS, 0x9B, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_MSBS, 0x00, TVP7002_WRITE },
        { TVP7002_AVID_STOP_PIXEL_LSBS, 0x0F, TVP7002_WRITE },
@@@ -224,6 -227,7 +225,6 @@@ static const struct i2c_reg_value tvp70
        { TVP7002_HPLL_FDBK_DIV_MSBS, 0x89, TVP7002_WRITE },
        { TVP7002_HPLL_FDBK_DIV_LSBS, 0x80, TVP7002_WRITE },
        { TVP7002_HPLL_CRTL, 0x98, TVP7002_WRITE },
 -      { TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_LSBS, 0x06, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_MSBS, 0x01, TVP7002_WRITE },
        { TVP7002_AVID_STOP_PIXEL_LSBS, 0x8a, TVP7002_WRITE },
@@@ -245,6 -249,7 +246,6 @@@ static const struct i2c_reg_value tvp70
        { TVP7002_HPLL_FDBK_DIV_MSBS, 0x89, TVP7002_WRITE },
        { TVP7002_HPLL_FDBK_DIV_LSBS, 0x80, TVP7002_WRITE },
        { TVP7002_HPLL_CRTL, 0xE0, TVP7002_WRITE },
 -      { TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_LSBS, 0x06, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_MSBS, 0x01, TVP7002_WRITE },
        { TVP7002_AVID_STOP_PIXEL_LSBS, 0x8a, TVP7002_WRITE },
@@@ -266,6 -271,7 +267,6 @@@ static const struct i2c_reg_value tvp70
        { TVP7002_HPLL_FDBK_DIV_MSBS, 0xa5, TVP7002_WRITE },
        { TVP7002_HPLL_FDBK_DIV_LSBS, 0x00, TVP7002_WRITE },
        { TVP7002_HPLL_CRTL, 0x98, TVP7002_WRITE },
 -      { TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_LSBS, 0x06, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_MSBS, 0x01, TVP7002_WRITE },
        { TVP7002_AVID_STOP_PIXEL_LSBS, 0x8a, TVP7002_WRITE },
@@@ -287,6 -293,7 +288,6 @@@ static const struct i2c_reg_value tvp70
        { TVP7002_HPLL_FDBK_DIV_MSBS, 0x67, TVP7002_WRITE },
        { TVP7002_HPLL_FDBK_DIV_LSBS, 0x20, TVP7002_WRITE },
        { TVP7002_HPLL_CRTL, 0xa0, TVP7002_WRITE },
 -      { TVP7002_HPLL_PHASE_SEL, 0x16, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_LSBS, 0x47, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_MSBS, 0x01, TVP7002_WRITE },
        { TVP7002_AVID_STOP_PIXEL_LSBS, 0x4B, TVP7002_WRITE },
@@@ -308,6 -315,7 +309,6 @@@ static const struct i2c_reg_value tvp70
        { TVP7002_HPLL_FDBK_DIV_MSBS, 0x7b, TVP7002_WRITE },
        { TVP7002_HPLL_FDBK_DIV_LSBS, 0xc0, TVP7002_WRITE },
        { TVP7002_HPLL_CRTL, 0x98, TVP7002_WRITE },
 -      { TVP7002_HPLL_PHASE_SEL, 0x16, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_LSBS, 0x47, TVP7002_WRITE },
        { TVP7002_AVID_START_PIXEL_MSBS, 0x01, TVP7002_WRITE },
        { TVP7002_AVID_STOP_PIXEL_LSBS, 0x4B, TVP7002_WRITE },
@@@ -680,9 -688,6 +681,9 @@@ static int tvp7002_query_dv_preset(stru
        u8 cpl_msb;
        int index;
  
 +      /* Return invalid preset if no active input is detected */
 +      qpreset->preset = V4L2_DV_INVALID;
 +
        device = to_tvp7002(sd);
  
        /* Read standards from device registers */
        if (index == NUM_PRESETS) {
                v4l2_dbg(1, debug, sd, "detection failed: lpf = %x, cpl = %x\n",
                                                                lpfr, cpln);
 -              /* Could not detect a signal, so return the 'invalid' preset */
 -              qpreset->preset = V4L2_DV_INVALID;
                return 0;
        }
  
@@@ -20,6 -20,7 +20,7 @@@
  
  #include <linux/ctype.h>
  #include <linux/slab.h>
+ #include <linux/export.h>
  #include <media/v4l2-ioctl.h>
  #include <media/v4l2-device.h>
  #include <media/v4l2-ctrls.h>
@@@ -43,7 -44,7 +44,7 @@@ struct v4l2_ctrl_helper 
  };
  
  /* Small helper function to determine if the autocluster is set to manual
 -   mode. In that case the is_volatile flag should be ignored. */
 +   mode. */
  static bool is_cur_manual(const struct v4l2_ctrl *master)
  {
        return master->is_auto && master->cur.val == master->manual_mode_value;
@@@ -210,7 -211,6 +211,7 @@@ const char * const *v4l2_ctrl_get_menu(
                "Disabled",
                "50 Hz",
                "60 Hz",
 +              "Auto",
                NULL
        };
        static const char * const camera_exposure_auto[] = {
@@@ -938,14 -938,9 +939,14 @@@ static void new_to_cur(struct v4l2_fh *
                break;
        }
        if (update_inactive) {
 -              ctrl->flags &= ~V4L2_CTRL_FLAG_INACTIVE;
 -              if (!is_cur_manual(ctrl->cluster[0]))
 +              /* Note: update_inactive can only be true for auto clusters. */
 +              ctrl->flags &=
 +                      ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
 +              if (!is_cur_manual(ctrl->cluster[0])) {
                        ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
 +                      if (ctrl->cluster[0]->has_volatiles)
 +                              ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
 +              }
        }
        if (changed || update_inactive) {
                /* If a control was changed that was not one of the controls
@@@ -1400,8 -1395,10 +1401,8 @@@ struct v4l2_ctrl *v4l2_ctrl_new_custom(
                        type, min, max,
                        is_menu ? cfg->menu_skip_mask : step,
                        def, flags, qmenu, priv);
 -      if (ctrl) {
 +      if (ctrl)
                ctrl->is_private = cfg->is_private;
 -              ctrl->is_volatile = cfg->is_volatile;
 -      }
        return ctrl;
  }
  EXPORT_SYMBOL(v4l2_ctrl_new_custom);
@@@ -1495,7 -1492,6 +1496,7 @@@ EXPORT_SYMBOL(v4l2_ctrl_add_handler)
  /* Cluster controls */
  void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
  {
 +      bool has_volatiles = false;
        int i;
  
        /* The first control is the master control and it must not be NULL */
                if (controls[i]) {
                        controls[i]->cluster = controls;
                        controls[i]->ncontrols = ncontrols;
 +                      if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE)
 +                              has_volatiles = true;
                }
        }
 +      controls[0]->has_volatiles = has_volatiles;
  }
  EXPORT_SYMBOL(v4l2_ctrl_cluster);
  
@@@ -1517,25 -1510,22 +1518,25 @@@ void v4l2_ctrl_auto_cluster(unsigned nc
                            u8 manual_val, bool set_volatile)
  {
        struct v4l2_ctrl *master = controls[0];
 -      u32 flag;
 +      u32 flag = 0;
        int i;
  
        v4l2_ctrl_cluster(ncontrols, controls);
        WARN_ON(ncontrols <= 1);
        WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
 +      WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl));
        master->is_auto = true;
 +      master->has_volatiles = set_volatile;
        master->manual_mode_value = manual_val;
        master->flags |= V4L2_CTRL_FLAG_UPDATE;
 -      flag = is_cur_manual(master) ? 0 : V4L2_CTRL_FLAG_INACTIVE;
 +
 +      if (!is_cur_manual(master))
 +              flag = V4L2_CTRL_FLAG_INACTIVE |
 +                      (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0);
  
        for (i = 1; i < ncontrols; i++)
 -              if (controls[i]) {
 -                      controls[i]->is_volatile = set_volatile;
 +              if (controls[i])
                        controls[i]->flags |= flag;
 -              }
  }
  EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
  
@@@ -1590,6 -1580,9 +1591,6 @@@ EXPORT_SYMBOL(v4l2_ctrl_grab)
  static void log_ctrl(const struct v4l2_ctrl *ctrl,
                     const char *prefix, const char *colon)
  {
 -      int fl_inact = ctrl->flags & V4L2_CTRL_FLAG_INACTIVE;
 -      int fl_grabbed = ctrl->flags & V4L2_CTRL_FLAG_GRABBED;
 -
        if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
                return;
        if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
                printk(KERN_CONT "unknown type %d", ctrl->type);
                break;
        }
 -      if (fl_inact && fl_grabbed)
 -              printk(KERN_CONT " (inactive, grabbed)\n");
 -      else if (fl_inact)
 -              printk(KERN_CONT " (inactive)\n");
 -      else if (fl_grabbed)
 -              printk(KERN_CONT " (grabbed)\n");
 -      else
 -              printk(KERN_CONT "\n");
 +      if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
 +                         V4L2_CTRL_FLAG_GRABBED |
 +                         V4L2_CTRL_FLAG_VOLATILE)) {
 +              if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
 +                      printk(KERN_CONT " inactive");
 +              if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)
 +                      printk(KERN_CONT " grabbed");
 +              if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE)
 +                      printk(KERN_CONT " volatile");
 +      }
 +      printk(KERN_CONT "\n");
  }
  
  /* Log all controls owned by the handler */
@@@ -1970,8 -1960,7 +1971,8 @@@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_h
                v4l2_ctrl_lock(master);
  
                /* g_volatile_ctrl will update the new control values */
 -              if (has_op(master, g_volatile_ctrl) && !is_cur_manual(master)) {
 +              if ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
 +                      (master->has_volatiles && !is_cur_manual(master))) {
                        for (j = 0; j < master->ncontrols; j++)
                                cur_to_new(master->cluster[j]);
                        ret = call_op(master, g_volatile_ctrl);
@@@ -2016,7 -2005,7 +2017,7 @@@ static int get_ctrl(struct v4l2_ctrl *c
  
        v4l2_ctrl_lock(master);
        /* g_volatile_ctrl will update the current control values */
 -      if (ctrl->is_volatile && !is_cur_manual(master)) {
 +      if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
                for (i = 0; i < master->ncontrols; i++)
                        cur_to_new(master->cluster[i]);
                ret = call_op(master, g_volatile_ctrl);
@@@ -2132,20 -2121,6 +2133,20 @@@ static int validate_ctrls(struct v4l2_e
        return 0;
  }
  
 +/* Obtain the current volatile values of an autocluster and mark them
 +   as new. */
 +static void update_from_auto_cluster(struct v4l2_ctrl *master)
 +{
 +      int i;
 +
 +      for (i = 0; i < master->ncontrols; i++)
 +              cur_to_new(master->cluster[i]);
 +      if (!call_op(master, g_volatile_ctrl))
 +              for (i = 1; i < master->ncontrols; i++)
 +                      if (master->cluster[i])
 +                              master->cluster[i]->is_new = 1;
 +}
 +
  /* Try or try-and-set controls */
  static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
                             struct v4l2_ext_controls *cs,
                        if (master->cluster[j])
                                master->cluster[j]->is_new = 0;
  
 +              /* For volatile autoclusters that are currently in auto mode
 +                 we need to discover if it will be set to manual mode.
 +                 If so, then we have to copy the current volatile values
 +                 first since those will become the new manual values (which
 +                 may be overwritten by explicit new values from this set
 +                 of controls). */
 +              if (master->is_auto && master->has_volatiles &&
 +                                              !is_cur_manual(master)) {
 +                      /* Pick an initial non-manual value */
 +                      s32 new_auto_val = master->manual_mode_value + 1;
 +                      u32 tmp_idx = idx;
 +
 +                      do {
 +                              /* Check if the auto control is part of the
 +                                 list, and remember the new value. */
 +                              if (helpers[tmp_idx].ctrl == master)
 +                                      new_auto_val = cs->controls[tmp_idx].value;
 +                              tmp_idx = helpers[tmp_idx].next;
 +                      } while (tmp_idx);
 +                      /* If the new value == the manual value, then copy
 +                         the current volatile values. */
 +                      if (new_auto_val == master->manual_mode_value)
 +                              update_from_auto_cluster(master);
 +              }
 +
                /* Copy the new caller-supplied control values.
                   user_to_new() sets 'is_new' to 1. */
                do {
@@@ -2286,12 -2236,6 +2287,12 @@@ static int set_ctrl(struct v4l2_fh *fh
                if (master->cluster[i])
                        master->cluster[i]->is_new = 0;
  
 +      /* For autoclusters with volatiles that are switched from auto to
 +         manual mode we have to update the current volatile values since
 +         those will become the initial manual values after such a switch. */
 +      if (master->is_auto && master->has_volatiles && ctrl == master &&
 +          !is_cur_manual(master) && *val == master->manual_mode_value)
 +              update_from_auto_cluster(master);
        ctrl->val = *val;
        ctrl->is_new = 1;
        ret = try_or_set_cluster(fh, master, true);
@@@ -20,8 -20,8 +20,9 @@@
  
  #include <linux/types.h>
  #include <linux/ioctl.h>
+ #include <linux/module.h>
  #include <linux/i2c.h>
 +#include <linux/slab.h>
  #if defined(CONFIG_SPI)
  #include <linux/spi/spi.h>
  #endif
@@@ -194,13 -194,6 +195,13 @@@ int v4l2_device_register_subdev(struct 
  }
  EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
  
 +static void v4l2_device_release_subdev_node(struct video_device *vdev)
 +{
 +      struct v4l2_subdev *sd = video_get_drvdata(vdev);
 +      sd->devnode = NULL;
 +      kfree(vdev);
 +}
 +
  int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
  {
        struct video_device *vdev;
                if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
                        continue;
  
 -              vdev = &sd->devnode;
 +              vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 +              if (!vdev) {
 +                      err = -ENOMEM;
 +                      goto clean_up;
 +              }
 +
 +              video_set_drvdata(vdev, sd);
                strlcpy(vdev->name, sd->name, sizeof(vdev->name));
                vdev->v4l2_dev = v4l2_dev;
                vdev->fops = &v4l2_subdev_fops;
 -              vdev->release = video_device_release_empty;
 +              vdev->release = v4l2_device_release_subdev_node;
                vdev->ctrl_handler = sd->ctrl_handler;
                err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
                                              sd->owner);
 -              if (err < 0)
 -                      return err;
 +              if (err < 0) {
 +                      kfree(vdev);
 +                      goto clean_up;
 +              }
  #if defined(CONFIG_MEDIA_CONTROLLER)
                sd->entity.v4l.major = VIDEO_MAJOR;
                sd->entity.v4l.minor = vdev->minor;
  #endif
 +              sd->devnode = vdev;
        }
        return 0;
 +
 +clean_up:
 +      list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
 +              if (!sd->devnode)
 +                      break;
 +              video_unregister_device(sd->devnode);
 +      }
 +
 +      return err;
  }
  EXPORT_SYMBOL_GPL(v4l2_device_register_subdev_nodes);
  
@@@ -273,7 -248,7 +274,7 @@@ void v4l2_device_unregister_subdev(stru
        if (v4l2_dev->mdev)
                media_device_unregister_entity(&sd->entity);
  #endif
 -      video_unregister_device(&sd->devnode);
 +      video_unregister_device(sd->devnode);
        module_put(sd->owner);
  }
  EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
@@@ -24,6 -24,7 +24,7 @@@
  #include <linux/slab.h>
  #include <linux/types.h>
  #include <linux/videodev2.h>
+ #include <linux/export.h>
  
  #include <media/v4l2-ctrls.h>
  #include <media/v4l2-device.h>
@@@ -173,25 -174,6 +174,25 @@@ static long subdev_do_ioctl(struct fil
  
        case VIDIOC_UNSUBSCRIBE_EVENT:
                return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
 +
 +#ifdef CONFIG_VIDEO_ADV_DEBUG
 +      case VIDIOC_DBG_G_REGISTER:
 +      {
 +              struct v4l2_dbg_register *p = arg;
 +
 +              if (!capable(CAP_SYS_ADMIN))
 +                      return -EPERM;
 +              return v4l2_subdev_call(sd, core, g_register, p);
 +      }
 +      case VIDIOC_DBG_S_REGISTER:
 +      {
 +              struct v4l2_dbg_register *p = arg;
 +
 +              if (!capable(CAP_SYS_ADMIN))
 +                      return -EPERM;
 +              return v4l2_subdev_call(sd, core, s_register, p);
 +      }
 +#endif
  #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
        case VIDIOC_SUBDEV_G_FMT: {
                struct v4l2_subdev_format *format = arg;
@@@ -12,6 -12,7 +12,7 @@@
  #include <linux/notifier.h>
  #include <linux/slab.h>
  #include <linux/err.h>
+ #include <linux/module.h>
  #include <linux/platform_device.h>
  #include <linux/device.h>
  #include <linux/interrupt.h>
@@@ -809,7 -810,7 +810,7 @@@ struct ab_family_id 
        char    *name;
  };
  
 -static const struct ab_family_id ids[] __devinitdata = {
 +static const struct ab_family_id ids[] __devinitconst = {
        /* AB3100 */
        {
                .id = 0xc0,
diff --combined drivers/mfd/asic3.c
@@@ -20,6 -20,7 +20,7 @@@
  #include <linux/delay.h>
  #include <linux/irq.h>
  #include <linux/gpio.h>
+ #include <linux/export.h>
  #include <linux/io.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>
@@@ -584,7 -585,7 +585,7 @@@ static int asic3_gpio_remove(struct pla
        return gpiochip_remove(&asic->gpio);
  }
  
 -static int asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
 +static void asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
  {
        unsigned long flags;
        u32 cdex;
                asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex);
        }
        spin_unlock_irqrestore(&asic->lock, flags);
 -
 -      return 0;
  }
  
  static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
@@@ -777,8 -780,6 +778,8 @@@ static struct mfd_cell asic3_cell_mmc 
        .name          = "tmio-mmc",
        .enable        = asic3_mmc_enable,
        .disable       = asic3_mmc_disable,
 +      .suspend       = asic3_mmc_disable,
 +      .resume        = asic3_mmc_enable,
        .platform_data = &asic3_mmc_data,
        .pdata_size    = sizeof(asic3_mmc_data),
        .num_resources = ARRAY_SIZE(asic3_mmc_resources),
@@@ -811,43 -812,24 +812,43 @@@ static int asic3_leds_disable(struct pl
        return 0;
  }
  
 +static int asic3_leds_suspend(struct platform_device *pdev)
 +{
 +      const struct mfd_cell *cell = mfd_get_cell(pdev);
 +      struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
 +
 +      while (asic3_gpio_get(&asic->gpio, ASIC3_GPIO(C, cell->id)) != 0)
 +              msleep(1);
 +
 +      asic3_clk_disable(asic, &asic->clocks[clock_ledn[cell->id]]);
 +
 +      return 0;
 +}
 +
  static struct mfd_cell asic3_cell_leds[ASIC3_NUM_LEDS] = {
        [0] = {
                .name          = "leds-asic3",
                .id            = 0,
                .enable        = asic3_leds_enable,
                .disable       = asic3_leds_disable,
 +              .suspend       = asic3_leds_suspend,
 +              .resume        = asic3_leds_enable,
        },
        [1] = {
                .name          = "leds-asic3",
                .id            = 1,
                .enable        = asic3_leds_enable,
                .disable       = asic3_leds_disable,
 +              .suspend       = asic3_leds_suspend,
 +              .resume        = asic3_leds_enable,
        },
        [2] = {
                .name          = "leds-asic3",
                .id            = 2,
                .enable        = asic3_leds_enable,
                .disable       = asic3_leds_disable,
 +              .suspend       = asic3_leds_suspend,
 +              .resume        = asic3_leds_enable,
        },
  };
  
@@@ -968,7 -950,6 +969,7 @@@ static int __init asic3_probe(struct pl
                goto out_unmap;
        }
  
 +      asic->gpio.label = "asic3";
        asic->gpio.base = pdata->gpio_base;
        asic->gpio.ngpio = ASIC3_NUM_GPIOS;
        asic->gpio.get = asic3_gpio_get;
diff --combined drivers/mfd/max8997.c
@@@ -23,8 -23,8 +23,9 @@@
  
  #include <linux/slab.h>
  #include <linux/i2c.h>
 +#include <linux/interrupt.h>
  #include <linux/pm_runtime.h>
+ #include <linux/module.h>
  #include <linux/mutex.h>
  #include <linux/mfd/core.h>
  #include <linux/mfd/max8997.h>
@@@ -143,6 -143,7 +144,6 @@@ static int max8997_i2c_probe(struct i2c
  
        max8997->irq_base = pdata->irq_base;
        max8997->ono = pdata->ono;
 -      max8997->wakeup = pdata->wakeup;
  
        mutex_init(&max8997->iolock);
  
        if (ret < 0)
                goto err_mfd;
  
 +      /* MAX8997 has a power button input. */
 +      device_init_wakeup(max8997->dev, pdata->wakeup);
 +
        return ret;
  
  err_mfd:
@@@ -401,29 -399,7 +402,29 @@@ static int max8997_restore(struct devic
        return 0;
  }
  
 +static int max8997_suspend(struct device *dev)
 +{
 +      struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
 +      struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
 +
 +      if (device_may_wakeup(dev))
 +              irq_set_irq_wake(max8997->irq, 1);
 +      return 0;
 +}
 +
 +static int max8997_resume(struct device *dev)
 +{
 +      struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
 +      struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
 +
 +      if (device_may_wakeup(dev))
 +              irq_set_irq_wake(max8997->irq, 0);
 +      return max8997_irq_resume(max8997);
 +}
 +
  const struct dev_pm_ops max8997_pm = {
 +      .suspend = max8997_suspend,
 +      .resume = max8997_resume,
        .freeze = max8997_freeze,
        .restore = max8997_restore,
  };
diff --combined drivers/mfd/twl-core.c
@@@ -30,6 -30,7 +30,7 @@@
  
  #include <linux/init.h>
  #include <linux/mutex.h>
+ #include <linux/module.h>
  #include <linux/platform_device.h>
  #include <linux/clk.h>
  #include <linux/err.h>
  #define twl_has_watchdog()        false
  #endif
  
 -#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) ||\
 +#if defined(CONFIG_MFD_TWL4030_AUDIO) || defined(CONFIG_MFD_TWL4030_AUDIO_MODULE) ||\
        defined(CONFIG_TWL6040_CORE) || defined(CONFIG_TWL6040_CORE_MODULE)
  #define twl_has_codec()       true
  #else
   */
  
  #include <linux/init.h>
+ #include <linux/export.h>
  #include <linux/interrupt.h>
  #include <linux/irq.h>
  #include <linux/kthread.h>
  #include <linux/i2c/twl.h>
  #include <linux/platform_device.h>
 +#include <linux/suspend.h>
  
  #include "twl-core.h"
  
@@@ -84,48 -84,8 +85,48 @@@ static int twl6030_interrupt_mapping[24
  /*----------------------------------------------------------------------*/
  
  static unsigned twl6030_irq_base;
 +static int twl_irq;
 +static bool twl_irq_wake_enabled;
  
  static struct completion irq_event;
 +static atomic_t twl6030_wakeirqs = ATOMIC_INIT(0);
 +
 +static int twl6030_irq_pm_notifier(struct notifier_block *notifier,
 +                                 unsigned long pm_event, void *unused)
 +{
 +      int chained_wakeups;
 +
 +      switch (pm_event) {
 +      case PM_SUSPEND_PREPARE:
 +              chained_wakeups = atomic_read(&twl6030_wakeirqs);
 +
 +              if (chained_wakeups && !twl_irq_wake_enabled) {
 +                      if (enable_irq_wake(twl_irq))
 +                              pr_err("twl6030 IRQ wake enable failed\n");
 +                      else
 +                              twl_irq_wake_enabled = true;
 +              } else if (!chained_wakeups && twl_irq_wake_enabled) {
 +                      disable_irq_wake(twl_irq);
 +                      twl_irq_wake_enabled = false;
 +              }
 +
 +              disable_irq(twl_irq);
 +              break;
 +
 +      case PM_POST_SUSPEND:
 +              enable_irq(twl_irq);
 +              break;
 +
 +      default:
 +              break;
 +      }
 +
 +      return NOTIFY_DONE;
 +}
 +
 +static struct notifier_block twl6030_irq_pm_notifier_block = {
 +      .notifier_call = twl6030_irq_pm_notifier,
 +};
  
  /*
   * This thread processes interrupts reported by the Primary Interrupt Handler.
@@@ -228,16 -188,6 +229,16 @@@ static inline void activate_irq(int irq
  #endif
  }
  
 +int twl6030_irq_set_wake(struct irq_data *d, unsigned int on)
 +{
 +      if (on)
 +              atomic_inc(&twl6030_wakeirqs);
 +      else
 +              atomic_dec(&twl6030_wakeirqs);
 +
 +      return 0;
 +}
 +
  /*----------------------------------------------------------------------*/
  
  static unsigned twl6030_irq_next;
@@@ -369,12 -319,10 +370,12 @@@ int twl6030_init_irq(int irq_num, unsig
        twl6030_irq_chip = dummy_irq_chip;
        twl6030_irq_chip.name = "twl6030";
        twl6030_irq_chip.irq_set_type = NULL;
 +      twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
  
        for (i = irq_base; i < irq_end; i++) {
                irq_set_chip_and_handler(i, &twl6030_irq_chip,
                                         handle_simple_irq);
 +              irq_set_chip_data(i, (void *)irq_num);
                activate_irq(i);
        }
  
  
        /* install an irq handler to demultiplex the TWL6030 interrupt */
        init_completion(&irq_event);
 +
 +      status = request_irq(irq_num, handle_twl6030_pih, 0,
 +                              "TWL6030-PIH", &irq_event);
 +      if (status < 0) {
 +              pr_err("twl6030: could not claim irq%d: %d\n", irq_num, status);
 +              goto fail_irq;
 +      }
 +
        task = kthread_run(twl6030_irq_thread, (void *)irq_num, "twl6030-irq");
        if (IS_ERR(task)) {
                pr_err("twl6030: could not create irq %d thread!\n", irq_num);
                goto fail_kthread;
        }
  
 -      status = request_irq(irq_num, handle_twl6030_pih, IRQF_DISABLED,
 -                              "TWL6030-PIH", &irq_event);
 -      if (status < 0) {
 -              pr_err("twl6030: could not claim irq%d: %d\n", irq_num, status);
 -              goto fail_irq;
 -      }
 +      twl_irq = irq_num;
 +      register_pm_notifier(&twl6030_irq_pm_notifier_block);
        return status;
 -fail_irq:
 -      free_irq(irq_num, &irq_event);
  
  fail_kthread:
 +      free_irq(irq_num, &irq_event);
 +
 +fail_irq:
        for (i = irq_base; i < irq_end; i++)
                irq_set_chip_and_handler(i, NULL, NULL);
        return status;
  
  int twl6030_exit_irq(void)
  {
 +      unregister_pm_notifier(&twl6030_irq_pm_notifier_block);
  
        if (twl6030_irq_base) {
                pr_err("twl6030: can't yet clean up IRQs?\n");
@@@ -1,4 -1,5 +1,5 @@@
  #include <linux/proc_fs.h>
+ #include <linux/export.h>
  #include <net/net_namespace.h>
  #include <net/netns/generic.h>
  #include "bonding.h"
@@@ -157,16 -158,8 +158,16 @@@ static void bond_info_show_slave(struc
        seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
        seq_printf(seq, "MII Status: %s\n",
                   (slave->link == BOND_LINK_UP) ?  "up" : "down");
 -      seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
 -      seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
 +      if (slave->speed == -1)
 +              seq_printf(seq, "Speed: %s\n", "Unknown");
 +      else
 +              seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
 +
 +      if (slave->duplex == -1)
 +              seq_printf(seq, "Duplex: %s\n", "Unknown");
 +      else
 +              seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
 +
        seq_printf(seq, "Link Failure Count: %u\n",
                   slave->link_failure_count);
  
@@@ -16,6 -16,7 +16,7 @@@
   */
  
  #include <linux/prefetch.h>
+ #include <linux/module.h>
  #include "be.h"
  #include "be_cmds.h"
  #include <asm/div64.h>
@@@ -229,29 -230,27 +230,29 @@@ static int be_mac_addr_set(struct net_d
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
 +      u8 current_mac[ETH_ALEN];
 +      u32 pmac_id = adapter->pmac_id;
  
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
  
 -      /* MAC addr configuration will be done in hardware for VFs
 -       * by their corresponding PFs. Just copy to netdev addr here
 -       */
 -      if (!be_physfn(adapter))
 -              goto netdev_addr;
 -
 -      status = be_cmd_pmac_del(adapter, adapter->if_handle,
 -                              adapter->pmac_id, 0);
 +      status = be_cmd_mac_addr_query(adapter, current_mac,
 +                      MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
        if (status)
 -              return status;
 +              goto err;
  
 -      status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
 +      if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
 +              status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
 -netdev_addr:
 -      if (!status)
 -              memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 +              if (status)
 +                      goto err;
  
 +              be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
 +      }
 +      memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 +      return 0;
 +err:
 +      dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
  }
  
@@@ -1905,8 -1904,6 +1906,8 @@@ loop_continue
                be_rx_stats_update(rxo, rxcp);
        }
  
 +      be_cq_notify(adapter, rx_cq->id, false, work_done);
 +
        /* Refill the queue */
        if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(rxo, GFP_ATOMIC);
        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
 -              be_cq_notify(adapter, rx_cq->id, true, work_done);
 -      } else {
 -              /* More to be consumed; continue with interrupts disabled */
 -              be_cq_notify(adapter, rx_cq->id, false, work_done);
 +              /* Arm CQ */
 +              be_cq_notify(adapter, rx_cq->id, true, 0);
        }
        return work_done;
  }
@@@ -1977,62 -1976,42 +1978,62 @@@ static int be_poll_tx_mcc(struct napi_s
  
  void be_detect_dump_ue(struct be_adapter *adapter)
  {
 -      u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
 +      u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
 +      u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        u32 i;
  
 -      pci_read_config_dword(adapter->pdev,
 -                              PCICFG_UE_STATUS_LOW, &ue_status_lo);
 -      pci_read_config_dword(adapter->pdev,
 -                              PCICFG_UE_STATUS_HIGH, &ue_status_hi);
 -      pci_read_config_dword(adapter->pdev,
 -                              PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
 -      pci_read_config_dword(adapter->pdev,
 -                              PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
 -
 -      ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
 -      ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
 +      if (lancer_chip(adapter)) {
 +              sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
 +              if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
 +                      sliport_err1 = ioread32(adapter->db +
 +                                      SLIPORT_ERROR1_OFFSET);
 +                      sliport_err2 = ioread32(adapter->db +
 +                                      SLIPORT_ERROR2_OFFSET);
 +              }
 +      } else {
 +              pci_read_config_dword(adapter->pdev,
 +                              PCICFG_UE_STATUS_LOW, &ue_lo);
 +              pci_read_config_dword(adapter->pdev,
 +                              PCICFG_UE_STATUS_HIGH, &ue_hi);
 +              pci_read_config_dword(adapter->pdev,
 +                              PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
 +              pci_read_config_dword(adapter->pdev,
 +                              PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
 +
 +              ue_lo = (ue_lo & (~ue_lo_mask));
 +              ue_hi = (ue_hi & (~ue_hi_mask));
 +      }
  
 -      if (ue_status_lo || ue_status_hi) {
 +      if (ue_lo || ue_hi ||
 +              sliport_status & SLIPORT_STATUS_ERR_MASK) {
                adapter->ue_detected = true;
                adapter->eeh_err = true;
                dev_err(&adapter->pdev->dev, "UE Detected!!\n");
        }
  
 -      if (ue_status_lo) {
 -              for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
 -                      if (ue_status_lo & 1)
 +      if (ue_lo) {
 +              for (i = 0; ue_lo; ue_lo >>= 1, i++) {
 +                      if (ue_lo & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_low_desc[i]);
                }
        }
 -      if (ue_status_hi) {
 -              for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
 -                      if (ue_status_hi & 1)
 +      if (ue_hi) {
 +              for (i = 0; ue_hi; ue_hi >>= 1, i++) {
 +                      if (ue_hi & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_hi_desc[i]);
                }
        }
  
 +      if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
 +              dev_err(&adapter->pdev->dev,
 +                      "sliport status 0x%x\n", sliport_status);
 +              dev_err(&adapter->pdev->dev,
 +                      "sliport error1 0x%x\n", sliport_err1);
 +              dev_err(&adapter->pdev->dev,
 +                      "sliport error2 0x%x\n", sliport_err2);
 +      }
  }
  
  static void be_worker(struct work_struct *work)
        struct be_rx_obj *rxo;
        int i;
  
 -      if (!adapter->ue_detected && !lancer_chip(adapter))
 +      if (!adapter->ue_detected)
                be_detect_dump_ue(adapter);
  
        /* when interrupts are not yet enabled, just reap any pending
@@@ -33,6 -33,7 +33,7 @@@
  
  #include <linux/interrupt.h>
  #include <linux/slab.h>
+ #include <linux/export.h>
  #include <linux/mm.h>
  #include <linux/dma-mapping.h>
  
@@@ -484,7 -485,7 +485,7 @@@ static void mlx4_free_eq(struct mlx4_de
  
        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
 -              pci_free_consistent(dev->pdev, PAGE_SIZE,
 +              dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                    eq->page_list[i].buf,
                                    eq->page_list[i].map);
  
@@@ -33,6 -33,7 +33,7 @@@
   */
  
  #include <linux/mlx4/cmd.h>
+ #include <linux/module.h>
  #include <linux/cache.h>
  
  #include "fw.h"
@@@ -205,8 -206,6 +206,8 @@@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev 
  #define QUERY_DEV_CAP_MAX_MCG_OFFSET          0x63
  #define QUERY_DEV_CAP_RSVD_PD_OFFSET          0x64
  #define QUERY_DEV_CAP_MAX_PD_OFFSET           0x65
 +#define QUERY_DEV_CAP_RSVD_XRC_OFFSET         0x66
 +#define QUERY_DEV_CAP_MAX_XRC_OFFSET          0x67
  #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET     0x68
  #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET  0x80
  #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET     0x82
        dev_cap->reserved_pds = field >> 4;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
        dev_cap->max_pds = 1 << (field & 0x3f);
 +      MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
 +      dev_cap->reserved_xrcds = field >> 4;
 +      MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
 +      dev_cap->max_xrcds = 1 << (field & 0x1f);
  
        MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
        dev_cap->rdmarc_entry_sz = size;
@@@ -33,6 -33,7 +33,7 @@@
   */
  
  #include <linux/errno.h>
+ #include <linux/export.h>
  #include <linux/slab.h>
  
  #include <linux/mlx4/cmd.h>
@@@ -139,7 -140,7 +140,7 @@@ static int mlx4_buddy_init(struct mlx4_
  
        buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
                              GFP_KERNEL);
 -      buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
 +      buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
                                  GFP_KERNEL);
        if (!buddy->bits || !buddy->num_free)
                goto err_out;
@@@ -32,6 -32,7 +32,7 @@@
   */
  
  #include <linux/errno.h>
+ #include <linux/export.h>
  #include <linux/io-mapping.h>
  
  #include <asm/page.h>
@@@ -61,24 -62,6 +62,24 @@@ void mlx4_pd_free(struct mlx4_dev *dev
  }
  EXPORT_SYMBOL_GPL(mlx4_pd_free);
  
 +int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
 +{
 +      struct mlx4_priv *priv = mlx4_priv(dev);
 +
 +      *xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap);
 +      if (*xrcdn == -1)
 +              return -ENOMEM;
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
 +
 +void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
 +{
 +      mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
 +}
 +EXPORT_SYMBOL_GPL(mlx4_xrcd_free);
 +
  int mlx4_init_pd_table(struct mlx4_dev *dev)
  {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@@ -92,18 -75,6 +93,18 @@@ void mlx4_cleanup_pd_table(struct mlx4_
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
  }
  
 +int mlx4_init_xrcd_table(struct mlx4_dev *dev)
 +{
 +      struct mlx4_priv *priv = mlx4_priv(dev);
 +
 +      return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16),
 +                              (1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0);
 +}
 +
 +void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
 +{
 +      mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap);
 +}
  
  int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
  {
@@@ -32,6 -32,7 +32,7 @@@
  
  #include <linux/errno.h>
  #include <linux/if_ether.h>
+ #include <linux/export.h>
  
  #include <linux/mlx4/cmd.h>
  
@@@ -148,26 -149,22 +149,26 @@@ int mlx4_register_mac(struct mlx4_dev *
  
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
                err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
 -              if (!err) {
 -                      entry = kmalloc(sizeof *entry, GFP_KERNEL);
 -                      if (!entry) {
 -                              mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
 -                              return -ENOMEM;
 -                      }
 -                      entry->mac = mac;
 -                      err = radix_tree_insert(&info->mac_tree, *qpn, entry);
 -                      if (err) {
 -                              mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
 -                              return err;
 -                      }
 -              } else
 +              if (err)
                        return err;
 +
 +              entry = kmalloc(sizeof *entry, GFP_KERNEL);
 +              if (!entry) {
 +                      mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
 +                      return -ENOMEM;
 +              }
 +
 +              entry->mac = mac;
 +              err = radix_tree_insert(&info->mac_tree, *qpn, entry);
 +              if (err) {
 +                      kfree(entry);
 +                      mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
 +                      return err;
 +              }
        }
 +
        mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
 +
        mutex_lock(&table->mutex);
        for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
                if (free < 0 && !table->refs[i]) {
@@@ -469,48 -466,6 +470,48 @@@ int mlx4_get_port_ib_caps(struct mlx4_d
        return err;
  }
  
 +int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
 +{
 +      struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
 +      u8 *inbuf, *outbuf;
 +      int err, packet_error;
 +
 +      inmailbox = mlx4_alloc_cmd_mailbox(dev);
 +      if (IS_ERR(inmailbox))
 +              return PTR_ERR(inmailbox);
 +
 +      outmailbox = mlx4_alloc_cmd_mailbox(dev);
 +      if (IS_ERR(outmailbox)) {
 +              mlx4_free_cmd_mailbox(dev, inmailbox);
 +              return PTR_ERR(outmailbox);
 +      }
 +
 +      inbuf = inmailbox->buf;
 +      outbuf = outmailbox->buf;
 +      memset(inbuf, 0, 256);
 +      memset(outbuf, 0, 256);
 +      inbuf[0] = 1;
 +      inbuf[1] = 1;
 +      inbuf[2] = 1;
 +      inbuf[3] = 1;
 +
 +      *(__be16 *) (&inbuf[16]) = MLX4_ATTR_EXTENDED_PORT_INFO;
 +      *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
 +
 +      err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
 +                         MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
 +
 +      packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
 +
 +      dev->caps.ext_port_cap[port] = (!err && !packet_error) ?
 +                                     MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO
 +                                     : 0;
 +
 +      mlx4_free_cmd_mailbox(dev, inmailbox);
 +      mlx4_free_cmd_mailbox(dev, outmailbox);
 +      return err;
 +}
 +
  int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
  {
        struct mlx4_cmd_mailbox *mailbox;
@@@ -34,6 -34,7 +34,7 @@@
   */
  
  #include <linux/gfp.h>
+ #include <linux/export.h>
  #include <linux/mlx4/cmd.h>
  #include <linux/mlx4/qp.h>
  
@@@ -280,9 -281,6 +281,9 @@@ int mlx4_init_qp_table(struct mlx4_dev 
         * We reserve 2 extra QPs per port for the special QPs.  The
         * block of special QPs must be aligned to a multiple of 8, so
         * round up.
 +       *
 +       * We also reserve the MSB of the 24-bit QP number to indicate
 +       * that a QP is an XRC QP.
         */
        dev->caps.sqp_start =
                ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
@@@ -32,6 -32,7 +32,7 @@@
   */
  
  #include <linux/mlx4/cmd.h>
+ #include <linux/export.h>
  #include <linux/gfp.h>
  
  #include "mlx4.h"
  struct mlx4_srq_context {
        __be32                  state_logsize_srqn;
        u8                      logstride;
 -      u8                      reserved1[3];
 -      u8                      pg_offset;
 -      u8                      reserved2[3];
 -      u32                     reserved3;
 +      u8                      reserved1;
 +      __be16                  xrcd;
 +      __be32                  pg_offset_cqn;
 +      u32                     reserved2;
        u8                      log_page_size;
 -      u8                      reserved4[2];
 +      u8                      reserved3[2];
        u8                      mtt_base_addr_h;
        __be32                  mtt_base_addr_l;
        __be32                  pd;
        __be16                  limit_watermark;
        __be16                  wqe_cnt;
 -      u16                     reserved5;
 +      u16                     reserved4;
        __be16                  wqe_counter;
 -      u32                     reserved6;
 +      u32                     reserved5;
        __be64                  db_rec_addr;
  };
  
@@@ -109,8 -110,8 +110,8 @@@ static int mlx4_QUERY_SRQ(struct mlx4_d
                            MLX4_CMD_TIME_CLASS_A);
  }
  
 -int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
 -                 u64 db_rec, struct mlx4_srq *srq)
 +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
 +                 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
  {
        struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
        struct mlx4_cmd_mailbox *mailbox;
        srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
                                                      srq->srqn);
        srq_context->logstride          = srq->wqe_shift - 4;
 +      srq_context->xrcd               = cpu_to_be16(xrcd);
 +      srq_context->pg_offset_cqn      = cpu_to_be32(cqn & 0xffffff);
        srq_context->log_page_size      = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
  
        mtt_addr = mlx4_mtt_addr(dev, mtt);
@@@ -36,6 -36,7 +36,7 @@@
  #include <linux/list.h>
  #include <linux/acpi.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  #include <acpi/acpi_bus.h>
  #include <acpi/acpi_drivers.h>
  
@@@ -754,13 -755,9 +755,13 @@@ static void wmi_free_devices(void
        struct wmi_block *wblock, *next;
  
        /* Delete devices for all the GUIDs */
 -      list_for_each_entry_safe(wblock, next, &wmi_block_list, list)
 +      list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
 +              list_del(&wblock->list);
                if (wblock->dev.class)
                        device_unregister(&wblock->dev);
 +              else
 +                      kfree(wblock);
 +      }
  }
  
  static bool guid_already_parsed(const char *guid_string)
@@@ -16,6 -16,7 +16,7 @@@
  #include <linux/regulator/driver.h>
  #include <linux/regulator/machine.h>
  #include <linux/mfd/88pm860x.h>
+ #include <linux/module.h>
  
  struct pm8607_regulator_info {
        struct regulator_desc   desc;
@@@ -412,7 -413,7 +413,7 @@@ static int __devinit pm8607_regulator_p
                if (info->desc.id == res->start)
                        break;
        }
 -      if ((i < 0) || (i > PM8607_ID_RG_MAX)) {
 +      if (i == ARRAY_SIZE(pm8607_regulator_info)) {
                dev_err(&pdev->dev, "Failed to find regulator %llu\n",
                        (unsigned long long)res->start);
                return -EINVAL;
diff --combined drivers/regulator/core.c
@@@ -28,6 -28,7 +28,7 @@@
  #include <linux/regulator/consumer.h>
  #include <linux/regulator/driver.h>
  #include <linux/regulator/machine.h>
+ #include <linux/module.h>
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/regulator.h>
@@@ -1425,7 -1426,7 +1426,7 @@@ int regulator_enable(struct regulator *
        ret = _regulator_enable(rdev);
        mutex_unlock(&rdev->mutex);
  
 -      if (ret != 0)
 +      if (ret != 0 && rdev->supply)
                regulator_disable(rdev->supply);
  
        return ret;
@@@ -2971,43 -2972,6 +2972,43 @@@ void *regulator_get_init_drvdata(struc
  }
  EXPORT_SYMBOL_GPL(regulator_get_init_drvdata);
  
 +#ifdef CONFIG_DEBUG_FS
 +static ssize_t supply_map_read_file(struct file *file, char __user *user_buf,
 +                                  size_t count, loff_t *ppos)
 +{
 +      char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 +      ssize_t len, ret = 0;
 +      struct regulator_map *map;
 +
 +      if (!buf)
 +              return -ENOMEM;
 +
 +      list_for_each_entry(map, &regulator_map_list, list) {
 +              len = snprintf(buf + ret, PAGE_SIZE - ret,
 +                             "%s -> %s.%s\n",
 +                             rdev_get_name(map->regulator), map->dev_name,
 +                             map->supply);
 +              if (len >= 0)
 +                      ret += len;
 +              if (ret > PAGE_SIZE) {
 +                      ret = PAGE_SIZE;
 +                      break;
 +              }
 +      }
 +
 +      ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 +
 +      kfree(buf);
 +
 +      return ret;
 +}
 +
 +static const struct file_operations supply_map_fops = {
 +      .read = supply_map_read_file,
 +      .llseek = default_llseek,
 +};
 +#endif
 +
  static int __init regulator_init(void)
  {
        int ret;
                pr_warn("regulator: Failed to create debugfs directory\n");
                debugfs_root = NULL;
        }
 +
 +      if (IS_ERR(debugfs_create_file("supply_map", 0444, debugfs_root,
 +                                     NULL, &supply_map_fops)))
 +              pr_warn("regulator: Failed to create supplies debugfs\n");
  #endif
  
        regulator_dummy_init();
  #include <linux/err.h>
  #include <linux/spinlock.h>
  #include <linux/platform_device.h>
 -#include <linux/mfd/db8500-prcmu.h>
 +#include <linux/mfd/dbx500-prcmu.h>
  #include <linux/regulator/driver.h>
  #include <linux/regulator/machine.h>
  #include <linux/regulator/db8500-prcmu.h>
+ #include <linux/module.h>
  
  /*
   * power state reference count
@@@ -18,6 -18,7 +18,7 @@@
  #include <linux/slab.h>
  #include <linux/init.h>
  #include <linux/err.h>
+ #include <linux/module.h>
  #include "mc13xxx.h"
  
  #define MC13783_REG_SWITCHERS5                        29
@@@ -336,9 -337,9 +337,9 @@@ static int __devinit mc13783_regulator_
  {
        struct mc13xxx_regulator_priv *priv;
        struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
 -      struct mc13783_regulator_platform_data *pdata =
 +      struct mc13xxx_regulator_platform_data *pdata =
                dev_get_platdata(&pdev->dev);
 -      struct mc13783_regulator_init_data *init_data;
 +      struct mc13xxx_regulator_init_data *init_data;
        int i, ret;
  
        dev_dbg(&pdev->dev, "%s id %d\n", __func__, pdev->id);
@@@ -381,7 -382,7 +382,7 @@@ err
  static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
  {
        struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
 -      struct mc13783_regulator_platform_data *pdata =
 +      struct mc13xxx_regulator_platform_data *pdata =
                dev_get_platdata(&pdev->dev);
        int i;
  
@@@ -14,6 -14,7 +14,7 @@@
   */
  
  #include <linux/kernel.h>
+ #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/err.h>
  #include <linux/slab.h>
@@@ -332,36 -333,6 +333,36 @@@ static inline int tps6586x_regulator_pr
                                 1 << ri->enable_bit[1]);
  }
  
 +static int tps6586x_regulator_set_slew_rate(struct platform_device *pdev)
 +{
 +      struct device *parent = pdev->dev.parent;
 +      struct regulator_init_data *p = pdev->dev.platform_data;
 +      struct tps6586x_settings *setting = p->driver_data;
 +      uint8_t reg;
 +
 +      if (setting == NULL)
 +              return 0;
 +
 +      if (!(setting->slew_rate & TPS6586X_SLEW_RATE_SET))
 +              return 0;
 +
 +      /* only SM0 and SM1 can have the slew rate settings */
 +      switch (pdev->id) {
 +      case TPS6586X_ID_SM_0:
 +              reg = TPS6586X_SM0SL;
 +              break;
 +      case TPS6586X_ID_SM_1:
 +              reg = TPS6586X_SM1SL;
 +              break;
 +      default:
 +              dev_warn(&pdev->dev, "Only SM0/SM1 can set slew rate\n");
 +              return -EINVAL;
 +      }
 +
 +      return tps6586x_write(parent, reg,
 +                      setting->slew_rate & TPS6586X_SLEW_RATE_MASK);
 +}
 +
  static inline struct tps6586x_regulator *find_regulator_info(int id)
  {
        struct tps6586x_regulator *ri;
@@@ -404,7 -375,7 +405,7 @@@ static int __devinit tps6586x_regulator
  
        platform_set_drvdata(pdev, rdev);
  
 -      return 0;
 +      return tps6586x_regulator_set_slew_rate(pdev);
  }
  
  static int __devexit tps6586x_regulator_remove(struct platform_device *pdev)
diff --combined drivers/s390/char/vmur.c
  #define KMSG_COMPONENT "vmur"
  #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  
 -#include <linux/kernel_stat.h>
  #include <linux/cdev.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  
  #include <asm/uaccess.h>
  #include <asm/cio.h>
@@@ -73,7 -75,6 +74,7 @@@ static struct ccw_driver ur_driver = 
        .set_online     = ur_set_online,
        .set_offline    = ur_set_offline,
        .freeze         = ur_pm_suspend,
 +      .int_class      = IOINT_VMR,
  };
  
  static DEFINE_MUTEX(vmur_mutex);
@@@ -305,6 -306,7 +306,6 @@@ static void ur_int_handler(struct ccw_d
  {
        struct urdev *urd;
  
 -      kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
        TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
              intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
              irb->scsw.cmd.count);
@@@ -7,6 -7,8 +7,8 @@@
   */
  #include <linux/seq_file.h>
  #include <linux/debugfs.h>
+ #include <linux/uaccess.h>
+ #include <linux/export.h>
  #include <asm/debug.h>
  #include "qdio_debug.h"
  #include "qdio.h"
@@@ -54,17 -56,15 +56,17 @@@ static int qstat_show(struct seq_file *
        if (!q)
                return 0;
  
 -      seq_printf(m, "DSCI: %d   nr_used: %d\n",
 -                 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
 -      seq_printf(m, "ftc: %d  last_move: %d\n",
 +      seq_printf(m, "Timestamp: %Lx  Last AI: %Lx\n",
 +                 q->timestamp, last_ai_time);
 +      seq_printf(m, "nr_used: %d  ftc: %d  last_move: %d\n",
 +                 atomic_read(&q->nr_buf_used),
                   q->first_to_check, q->last_move);
        if (q->is_input_q) {
                seq_printf(m, "polling: %d  ack start: %d  ack count: %d\n",
                           q->u.in.polling, q->u.in.ack_start,
                           q->u.in.ack_count);
 -              seq_printf(m, "IRQs disabled: %u\n",
 +              seq_printf(m, "DSCI: %d   IRQs disabled: %u\n",
 +                         *(u32 *)q->irq_ptr->dsci,
                           test_bit(QDIO_QUEUE_IRQS_DISABLED,
                           &q->u.in.queue_irq_state));
        }
@@@ -20,6 -20,7 +20,7 @@@
  #include <linux/virtio_console.h>
  #include <linux/interrupt.h>
  #include <linux/virtio_ring.h>
+ #include <linux/export.h>
  #include <linux/pfn.h>
  #include <asm/io.h>
  #include <asm/kvm_para.h>
@@@ -33,7 -34,7 +34,7 @@@
   * The pointer to our (page) of device descriptions.
   */
  static void *kvm_devices;
 -struct work_struct hotplug_work;
 +static struct work_struct hotplug_work;
  
  struct kvm_device {
        struct virtio_device vdev;
@@@ -334,10 -335,10 +335,10 @@@ static void scan_devices(void
   */
  static int match_desc(struct device *dev, void *data)
  {
 -      if ((ulong)to_kvmdev(dev_to_virtio(dev))->desc == (ulong)data)
 -              return 1;
 +      struct virtio_device *vdev = dev_to_virtio(dev);
 +      struct kvm_device *kdev = to_kvmdev(vdev);
  
 -      return 0;
 +      return kdev->desc == data;
  }
  
  /*
@@@ -22,6 -22,7 +22,7 @@@
   */
  
  #include <linux/slab.h>
+ #include <linux/module.h>
  #include <scsi/scsi_dh.h>
  #include "../scsi_priv.h"
  
@@@ -441,15 -442,7 +442,15 @@@ int scsi_dh_activate(struct request_que
  
        spin_lock_irqsave(q->queue_lock, flags);
        sdev = q->queuedata;
 -      if (sdev && sdev->scsi_dh_data)
 +      if (!sdev) {
 +              spin_unlock_irqrestore(q->queue_lock, flags);
 +              err = SCSI_DH_NOSYS;
 +              if (fn)
 +                      fn(data, err);
 +              return err;
 +      }
 +
 +      if (sdev->scsi_dh_data)
                scsi_dh = sdev->scsi_dh_data->scsi_dh;
        dev = get_device(&sdev->sdev_gendev);
        if (!scsi_dh || !dev ||
@@@ -21,6 -21,7 +21,7 @@@
   */
  #include <linux/slab.h>
  #include <linux/delay.h>
+ #include <linux/module.h>
  #include <scsi/scsi.h>
  #include <scsi/scsi_eh.h>
  #include <scsi/scsi_dh.h>
@@@ -507,7 -508,7 +508,7 @@@ static int alua_rtpg(struct scsi_devic
        int len, k, off, valid_states = 0;
        unsigned char *ucp;
        unsigned err;
 -      unsigned long expiry, interval = 1;
 +      unsigned long expiry, interval = 1000;
  
        expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
   retry:
@@@ -734,7 -735,6 +735,7 @@@ static int alua_bus_attach(struct scsi_
        spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
        sdev->scsi_dh_data = scsi_dh_data;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
 +      sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME);
  
        return 0;
  
@@@ -26,6 -26,7 +26,7 @@@
  #include <linux/timer.h>
  #include <linux/slab.h>
  #include <linux/err.h>
+ #include <linux/export.h>
  
  #include <scsi/fc/fc_fc2.h>
  
@@@ -469,7 -470,6 +470,7 @@@ static int fc_seq_send(struct fc_lport 
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        int error;
        u32 f_ctl;
 +      u8 fh_type = fh->fh_type;
  
        ep = fc_seq_exch(sp);
        WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
         */
        error = lport->tt.frame_send(lport, fp);
  
 -      if (fh->fh_type == FC_TYPE_BLS)
 +      if (fh_type == FC_TYPE_BLS)
                return error;
  
        /*
@@@ -1793,9 -1793,6 +1794,9 @@@ restart
                        goto restart;
                }
        }
 +      pool->next_index = 0;
 +      pool->left = FC_XID_UNKNOWN;
 +      pool->right = FC_XID_UNKNOWN;
        spin_unlock_bh(&pool->lock);
  }
  
@@@ -2284,7 -2281,6 +2285,7 @@@ struct fc_exch_mgr *fc_exch_mgr_alloc(s
                goto free_mempool;
        for_each_possible_cpu(cpu) {
                pool = per_cpu_ptr(mp->pool, cpu);
 +              pool->next_index = 0;
                pool->left = FC_XID_UNKNOWN;
                pool->right = FC_XID_UNKNOWN;
                spin_lock_init(&pool->lock);
@@@ -89,6 -89,7 +89,7 @@@
  
  #include <linux/timer.h>
  #include <linux/delay.h>
+ #include <linux/module.h>
  #include <linux/slab.h>
  #include <asm/unaligned.h>
  
@@@ -1030,8 -1031,16 +1031,8 @@@ static void fc_lport_enter_reset(struc
                           FCH_EVT_LIPRESET, 0);
        fc_vports_linkchange(lport);
        fc_lport_reset_locked(lport);
 -      if (lport->link_up) {
 -              /*
 -               * Wait upto resource allocation time out before
 -               * doing re-login since incomplete FIP exchanged
 -               * from last session may collide with exchanges
 -               * in new session.
 -               */
 -              msleep(lport->r_a_tov);
 +      if (lport->link_up)
                fc_lport_enter_flogi(lport);
 -      }
  }
  
  /**
@@@ -1473,7 -1482,6 +1474,7 @@@ void fc_lport_flogi_resp(struct fc_seq 
                         void *lp_arg)
  {
        struct fc_lport *lport = lp_arg;
 +      struct fc_frame_header *fh;
        struct fc_els_flogi *flp;
        u32 did;
        u16 csp_flags;
                goto err;
        }
  
 +      fh = fc_frame_header_get(fp);
        did = fc_frame_did(fp);
 -      if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
 -              flp = fc_frame_payload_get(fp, sizeof(*flp));
 -              if (flp) {
 -                      mfs = ntohs(flp->fl_csp.sp_bb_data) &
 -                              FC_SP_BB_DATA_MASK;
 -                      if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
 -                          mfs < lport->mfs)
 -                              lport->mfs = mfs;
 -                      csp_flags = ntohs(flp->fl_csp.sp_features);
 -                      r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
 -                      e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
 -                      if (csp_flags & FC_SP_FT_EDTR)
 -                              e_d_tov /= 1000000;
 -
 -                      lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
 -
 -                      if ((csp_flags & FC_SP_FT_FPORT) == 0) {
 -                              if (e_d_tov > lport->e_d_tov)
 -                                      lport->e_d_tov = e_d_tov;
 -                              lport->r_a_tov = 2 * e_d_tov;
 -                              fc_lport_set_port_id(lport, did, fp);
 -                              printk(KERN_INFO "host%d: libfc: "
 -                                     "Port (%6.6x) entered "
 -                                     "point-to-point mode\n",
 -                                     lport->host->host_no, did);
 -                              fc_lport_ptp_setup(lport, fc_frame_sid(fp),
 -                                                 get_unaligned_be64(
 -                                                         &flp->fl_wwpn),
 -                                                 get_unaligned_be64(
 -                                                         &flp->fl_wwnn));
 -                      } else {
 -                              lport->e_d_tov = e_d_tov;
 -                              lport->r_a_tov = r_a_tov;
 -                              fc_host_fabric_name(lport->host) =
 -                                      get_unaligned_be64(&flp->fl_wwnn);
 -                              fc_lport_set_port_id(lport, did, fp);
 -                              fc_lport_enter_dns(lport);
 -                      }
 -              }
 -      } else {
 -              FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
 +      if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
 +          fc_frame_payload_op(fp) != ELS_LS_ACC) {
 +              FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
                fc_lport_error(lport, fp);
 +              goto err;
 +      }
 +
 +      flp = fc_frame_payload_get(fp, sizeof(*flp));
 +      if (!flp) {
 +              FC_LPORT_DBG(lport, "FLOGI bad response\n");
 +              fc_lport_error(lport, fp);
 +              goto err;
 +      }
 +
 +      mfs = ntohs(flp->fl_csp.sp_bb_data) &
 +              FC_SP_BB_DATA_MASK;
 +      if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
 +          mfs < lport->mfs)
 +              lport->mfs = mfs;
 +      csp_flags = ntohs(flp->fl_csp.sp_features);
 +      r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
 +      e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
 +      if (csp_flags & FC_SP_FT_EDTR)
 +              e_d_tov /= 1000000;
 +
 +      lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
 +
 +      if ((csp_flags & FC_SP_FT_FPORT) == 0) {
 +              if (e_d_tov > lport->e_d_tov)
 +                      lport->e_d_tov = e_d_tov;
 +              lport->r_a_tov = 2 * e_d_tov;
 +              fc_lport_set_port_id(lport, did, fp);
 +              printk(KERN_INFO "host%d: libfc: "
 +                     "Port (%6.6x) entered "
 +                     "point-to-point mode\n",
 +                     lport->host->host_no, did);
 +              fc_lport_ptp_setup(lport, fc_frame_sid(fp),
 +                                 get_unaligned_be64(
 +                                         &flp->fl_wwpn),
 +                                 get_unaligned_be64(
 +                                         &flp->fl_wwnn));
 +      } else {
 +              lport->e_d_tov = e_d_tov;
 +              lport->r_a_tov = r_a_tov;
 +              fc_host_fabric_name(lport->host) =
 +                      get_unaligned_be64(&flp->fl_wwnn);
 +              fc_lport_set_port_id(lport, did, fp);
 +              fc_lport_enter_dns(lport);
        }
  
  out:
diff --combined drivers/scsi/scsi_lib.c
@@@ -12,6 -12,7 +12,7 @@@
  #include <linux/blkdev.h>
  #include <linux/completion.h>
  #include <linux/kernel.h>
+ #include <linux/export.h>
  #include <linux/mempool.h>
  #include <linux/slab.h>
  #include <linux/init.h>
@@@ -1698,15 -1699,6 +1699,15 @@@ struct request_queue *scsi_alloc_queue(
  
  void scsi_free_queue(struct request_queue *q)
  {
 +      unsigned long flags;
 +
 +      WARN_ON(q->queuedata);
 +
 +      /* cause scsi_request_fn() to kill all non-finished requests */
 +      spin_lock_irqsave(q->queue_lock, flags);
 +      q->request_fn(q);
 +      spin_unlock_irqrestore(q->queue_lock, flags);
 +
        blk_cleanup_queue(q);
  }
  
@@@ -23,6 -23,7 +23,7 @@@
  #include <linux/kthread.h>
  #include <linux/slab.h>
  #include <linux/freezer.h>
+ #include <linux/export.h>
  #include <sound/core.h>
  #include <sound/initval.h>
  #include <sound/pcm.h>
@@@ -22,6 -22,7 +22,7 @@@
  #include <linux/kthread.h>
  #include <linux/crypto.h>
  #include <linux/completion.h>
+ #include <linux/module.h>
  #include <asm/unaligned.h>
  #include <scsi/scsi_device.h>
  #include <scsi/iscsi_proto.h>
@@@ -1079,9 -1080,7 +1080,9 @@@ attach_cmd
         */
        if (!cmd->immediate_data) {
                cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
 -              if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
 +              if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
 +                      return 0;
 +              else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
                        return iscsit_add_reject_from_cmd(
                                ISCSI_REASON_PROTOCOL_ERROR,
                                1, 0, buf, cmd);
@@@ -1821,16 -1820,17 +1822,16 @@@ attach
                int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
                if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
                        out_of_order_cmdsn = 1;
 -              else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
 +              else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
                        return 0;
 -              } else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */
 +              else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
                        return iscsit_add_reject_from_cmd(
                                        ISCSI_REASON_PROTOCOL_ERROR,
                                        1, 0, buf, cmd);
 -              }
        }
        iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
  
 -      if (out_of_order_cmdsn)
 +      if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
                return 0;
        /*
         * Found the referenced task, send to transport for processing.
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/configfs.h>
+ #include <linux/export.h>
  #include <scsi/scsi.h>
  #include <scsi/scsi_cmnd.h>
  
@@@ -58,9 -59,8 +59,9 @@@ struct t10_alua_lu_gp *default_lu_gp
   *
   * See spc4r17 section 6.27
   */
 -int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 +int target_emulate_report_target_port_groups(struct se_task *task)
  {
 +      struct se_cmd *cmd = task->task_se_cmd;
        struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
        struct se_port *port;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
  
        transport_kunmap_first_data_page(cmd);
  
 +      task->task_scsi_status = GOOD;
 +      transport_complete_task(task, 1);
        return 0;
  }
  
   *
   * See spc4r17 section 6.35
   */
 -int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 +int target_emulate_set_target_port_groups(struct se_task *task)
  {
 +      struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
        struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        struct se_port *port, *l_port = cmd->se_lun->lun_sep;
  
  out:
        transport_kunmap_first_data_page(cmd);
 -
 +      task->task_scsi_status = GOOD;
 +      transport_complete_task(task, 1);
        return 0;
  }
  
@@@ -35,6 -35,7 +35,7 @@@
  #include <linux/spinlock.h>
  #include <linux/kthread.h>
  #include <linux/in.h>
+ #include <linux/export.h>
  #include <net/sock.h>
  #include <net/tcp.h>
  #include <scsi/scsi.h>
@@@ -651,15 -652,23 +652,15 @@@ void core_dev_unexport
        lun->lun_se_dev = NULL;
  }
  
 -int transport_core_report_lun_response(struct se_cmd *se_cmd)
 +int target_report_luns(struct se_task *se_task)
  {
 +      struct se_cmd *se_cmd = se_task->task_se_cmd;
        struct se_dev_entry *deve;
        struct se_lun *se_lun;
        struct se_session *se_sess = se_cmd->se_sess;
 -      struct se_task *se_task;
        unsigned char *buf;
        u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
  
 -      list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
 -              break;
 -
 -      if (!se_task) {
 -              pr_err("Unable to locate struct se_task for struct se_cmd\n");
 -              return PYX_TRANSPORT_LU_COMM_FAILURE;
 -      }
 -
        buf = transport_kmap_first_data_page(se_cmd);
  
        /*
@@@ -705,8 -714,6 +706,8 @@@ done
        buf[2] = ((lun_count >> 8) & 0xff);
        buf[3] = (lun_count & 0xff);
  
 +      se_task->task_scsi_status = GOOD;
 +      transport_complete_task(se_task, 1);
        return PYX_TRANSPORT_SENT_TO_TRANSPORT;
  }
  
@@@ -36,6 -36,7 +36,7 @@@
  #include <linux/genhd.h>
  #include <linux/cdrom.h>
  #include <linux/file.h>
+ #include <linux/module.h>
  #include <scsi/scsi.h>
  #include <scsi/scsi_device.h>
  #include <scsi/scsi_cmnd.h>
@@@ -1091,7 -1092,7 +1092,7 @@@ static int pscsi_do_task(struct se_tas
  
                req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
                                       GFP_KERNEL);
 -              if (!req) {
 +              if (IS_ERR(req)) {
                        pr_err("pSCSI: blk_make_request() failed\n");
                        goto fail;
                }
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/list.h>
+ #include <linux/export.h>
  #include <scsi/scsi.h>
  #include <scsi/scsi_cmnd.h>
  
@@@ -118,7 -119,7 +119,7 @@@ static void core_tmr_drain_tmr_list
                /*
                 * Allow the received TMR to return with FUNCTION_COMPLETE.
                 */
 -              if (tmr && (tmr_p == tmr))
 +              if (tmr_p == tmr)
                        continue;
  
                cmd = tmr_p->task_cmd;
                }
                spin_unlock(&cmd->t_state_lock);
  
 -              list_move_tail(&tmr->tmr_list, &drain_tmr_list);
 +              list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
        }
        spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
  
 -      while (!list_empty(&drain_tmr_list)) {
 -              tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list);
 -              list_del(&tmr->tmr_list);
 +      list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
 +              list_del_init(&tmr_p->tmr_list);
                cmd = tmr_p->task_cmd;
  
                pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
                        " Response: 0x%02x, t_state: %d\n",
 -                      (preempt_and_abort_list) ? "Preempt" : "", tmr,
 -                      tmr->function, tmr->response, cmd->t_state);
 +                      (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
 +                      tmr_p->function, tmr_p->response, cmd->t_state);
  
                transport_cmd_finish_abort(cmd, 1);
        }
@@@ -329,6 -331,16 +330,6 @@@ static void core_tmr_drain_cmd_list
                 */
                if (prout_cmd == cmd)
                        continue;
 -              /*
 -               * Skip direct processing of TRANSPORT_FREE_CMD_INTR for
 -               * HW target mode fabrics.
 -               */
 -              spin_lock(&cmd->t_state_lock);
 -              if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) {
 -                      spin_unlock(&cmd->t_state_lock);
 -                      continue;
 -              }
 -              spin_unlock(&cmd->t_state_lock);
  
                atomic_set(&cmd->t_transport_queue_active, 0);
                atomic_dec(&qobj->queue_cnt);
@@@ -36,6 -36,7 +36,7 @@@
  #include <linux/kthread.h>
  #include <linux/in.h>
  #include <linux/cdrom.h>
+ #include <linux/module.h>
  #include <asm/unaligned.h>
  #include <net/sock.h>
  #include <net/tcp.h>
@@@ -52,7 -53,6 +53,7 @@@
  #include <target/target_core_configfs.h>
  
  #include "target_core_alua.h"
 +#include "target_core_cdb.h"
  #include "target_core_hba.h"
  #include "target_core_pr.h"
  #include "target_core_ua.h"
@@@ -269,9 -269,6 +270,9 @@@ struct se_session *transport_init_sessi
        }
        INIT_LIST_HEAD(&se_sess->sess_list);
        INIT_LIST_HEAD(&se_sess->sess_acl_list);
 +      INIT_LIST_HEAD(&se_sess->sess_cmd_list);
 +      INIT_LIST_HEAD(&se_sess->sess_wait_list);
 +      spin_lock_init(&se_sess->sess_cmd_lock);
  
        return se_sess;
  }
@@@ -518,16 -515,13 +519,16 @@@ static int transport_cmd_check_stop
                         * Some fabric modules like tcm_loop can release
                         * their internally allocated I/O reference now and
                         * struct se_cmd now.
 +                       *
 +                       * Fabric modules are expected to return '1' here if the
 +                       * se_cmd being passed is released at this point,
 +                       * or zero if not being released.
                         */
                        if (cmd->se_tfo->check_stop_free != NULL) {
                                spin_unlock_irqrestore(
                                        &cmd->t_state_lock, flags);
  
 -                              cmd->se_tfo->check_stop_free(cmd);
 -                              return 1;
 +                              return cmd->se_tfo->check_stop_free(cmd);
                        }
                }
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@@ -737,10 -731,6 +738,10 @@@ void transport_complete_task(struct se_
                complete(&task->task_stop_comp);
                return;
        }
 +
 +      if (!success)
 +              cmd->t_tasks_failed = 1;
 +
        /*
         * Decrement the outstanding t_task_cdbs_left count.  The last
         * struct se_task from struct se_cmd will complete itself into the
                return;
        }
  
 -      if (!success || cmd->t_tasks_failed) {
 +      if (cmd->t_tasks_failed) {
                if (!task->task_error_status) {
                        task->task_error_status =
                                PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
@@@ -919,7 -909,7 +920,7 @@@ void transport_remove_task_from_execute
  }
  
  /*
 - * Handle QUEUE_FULL / -EAGAIN status
 + * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
   */
  
  static void target_qf_do_work(struct work_struct *work)
@@@ -1509,12 -1499,11 +1510,12 @@@ void transport_init_se_cmd
        INIT_LIST_HEAD(&cmd->se_ordered_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
        INIT_LIST_HEAD(&cmd->se_queue_node);
 -
 +      INIT_LIST_HEAD(&cmd->se_cmd_list);
        INIT_LIST_HEAD(&cmd->t_task_list);
        init_completion(&cmd->transport_lun_fe_stop_comp);
        init_completion(&cmd->transport_lun_stop_comp);
        init_completion(&cmd->t_transport_stop_comp);
 +      init_completion(&cmd->cmd_wait_comp);
        spin_lock_init(&cmd->t_state_lock);
        atomic_set(&cmd->transport_dev_active, 1);
  
@@@ -1657,7 -1646,9 +1658,7 @@@ int transport_handle_cdb_direct
         * and call transport_generic_request_failure() if necessary..
         */
        ret = transport_generic_new_cmd(cmd);
 -      if (ret == -EAGAIN)
 -              return 0;
 -      else if (ret < 0) {
 +      if (ret < 0) {
                cmd->transport_error_status = ret;
                transport_generic_request_failure(cmd, 0,
                                (cmd->data_direction != DMA_TO_DEVICE));
@@@ -1727,6 -1718,13 +1728,6 @@@ int transport_generic_handle_tmr
  }
  EXPORT_SYMBOL(transport_generic_handle_tmr);
  
 -void transport_generic_free_cmd_intr(
 -      struct se_cmd *cmd)
 -{
 -      transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false);
 -}
 -EXPORT_SYMBOL(transport_generic_free_cmd_intr);
 -
  /*
   * If the task is active, request it to be stopped and sleep until it
   * has completed.
@@@ -1889,7 -1887,7 +1890,7 @@@ static void transport_generic_request_f
                                ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
  
                ret = cmd->se_tfo->queue_status(cmd);
 -              if (ret == -EAGAIN)
 +              if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                goto check_stop;
        case PYX_TRANSPORT_USE_SENSE_REASON:
        else {
                ret = transport_send_check_condition_and_sense(cmd,
                                cmd->scsi_sense_reason, 0);
 -              if (ret == -EAGAIN)
 +              if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
        }
  
@@@ -2156,20 -2154,62 +2157,20 @@@ check_depth
                atomic_set(&cmd->t_transport_sent, 1);
  
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 -      /*
 -       * The struct se_cmd->transport_emulate_cdb() function pointer is used
 -       * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
 -       * struct se_subsystem_api->do_task() caller below.
 -       */
 -      if (cmd->transport_emulate_cdb) {
 -              error = cmd->transport_emulate_cdb(cmd);
 -              if (error != 0) {
 -                      cmd->transport_error_status = error;
 -                      spin_lock_irqsave(&cmd->t_state_lock, flags);
 -                      task->task_flags &= ~TF_ACTIVE;
 -                      spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 -                      atomic_set(&cmd->t_transport_sent, 0);
 -                      transport_stop_tasks_for_cmd(cmd);
 -                      atomic_inc(&dev->depth_left);
 -                      transport_generic_request_failure(cmd, 0, 1);
 -                      goto check_depth;
 -              }
 -              /*
 -               * Handle the successful completion for transport_emulate_cdb()
 -               * for synchronous operation, following SCF_EMULATE_CDB_ASYNC
 -               * Otherwise the caller is expected to complete the task with
 -               * proper status.
 -               */
 -              if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
 -                      cmd->scsi_status = SAM_STAT_GOOD;
 -                      task->task_scsi_status = GOOD;
 -                      transport_complete_task(task, 1);
 -              }
 -      } else {
 -              /*
 -               * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
 -               * RAMDISK we use the internal transport_emulate_control_cdb() logic
 -               * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
 -               * LUN emulation code.
 -               *
 -               * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
 -               * call ->do_task() directly and let the underlying TCM subsystem plugin
 -               * code handle the CDB emulation.
 -               */
 -              if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
 -                  (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
 -                      error = transport_emulate_control_cdb(task);
 -              else
 -                      error = dev->transport->do_task(task);
  
 -              if (error != 0) {
 -                      cmd->transport_error_status = error;
 -                      spin_lock_irqsave(&cmd->t_state_lock, flags);
 -                      task->task_flags &= ~TF_ACTIVE;
 -                      spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 -                      atomic_set(&cmd->t_transport_sent, 0);
 -                      transport_stop_tasks_for_cmd(cmd);
 -                      atomic_inc(&dev->depth_left);
 -                      transport_generic_request_failure(cmd, 0, 1);
 -              }
 +      if (cmd->execute_task)
 +              error = cmd->execute_task(task);
 +      else
 +              error = dev->transport->do_task(task);
 +      if (error != 0) {
 +              cmd->transport_error_status = error;
 +              spin_lock_irqsave(&cmd->t_state_lock, flags);
 +              task->task_flags &= ~TF_ACTIVE;
 +              spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 +              atomic_set(&cmd->t_transport_sent, 0);
 +              transport_stop_tasks_for_cmd(cmd);
 +              atomic_inc(&dev->depth_left);
 +              transport_generic_request_failure(cmd, 0, 1);
        }
  
        goto check_depth;
@@@ -2603,13 -2643,6 +2604,13 @@@ static int transport_generic_cmd_sequen
                 */
        }
  
 +      /*
 +       * If we operate in passthrough mode we skip most CDB emulation and
 +       * instead hand the commands down to the physical SCSI device.
 +       */
 +      passthrough =
 +              (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
 +
        switch (cdb[0]) {
        case READ_6:
                sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
                cmd->t_task_lba = transport_lba_32(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
  
 -              if (dev->transport->transport_type ==
 -                              TRANSPORT_PLUGIN_PHBA_PDEV)
 +              /*
 +               * Do now allow BIDI commands for passthrough mode.
 +               */
 +              if (passthrough)
                        goto out_unsupported_cdb;
 +
                /*
                 * Setup BIDI XOR callback to be run after I/O completion.
                 */
                break;
        case VARIABLE_LENGTH_CMD:
                service_action = get_unaligned_be16(&cdb[8]);
 -              /*
 -               * Determine if this is TCM/PSCSI device and we should disable
 -               * internal emulation for this CDB.
 -               */
 -              passthrough = (dev->transport->transport_type ==
 -                                      TRANSPORT_PLUGIN_PHBA_PDEV);
 -
                switch (service_action) {
                case XDWRITEREAD_32:
                        sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
                        cmd->t_task_lba = transport_lba_64_ext(cdb);
                        cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
  
 +                      /*
 +                       * Do now allow BIDI commands for passthrough mode.
 +                       */
                        if (passthrough)
                                goto out_unsupported_cdb;
 +
                        /*
                         * Setup BIDI XOR callback to be run during after I/O
                         * completion.
  
                        if (target_check_write_same_discard(&cdb[10], dev) < 0)
                                goto out_invalid_cdb_field;
 -
 +                      if (!passthrough)
 +                              cmd->execute_task = target_emulate_write_same;
                        break;
                default:
                        pr_err("VARIABLE_LENGTH_CMD service action"
                        /*
                         * Check for emulated MI_REPORT_TARGET_PGS.
                         */
 -                      if (cdb[1] == MI_REPORT_TARGET_PGS) {
 -                              cmd->transport_emulate_cdb =
 -                              (su_dev->t10_alua.alua_type ==
 -                               SPC3_ALUA_EMULATED) ?
 -                              core_emulate_report_target_port_groups :
 -                              NULL;
 +                      if (cdb[1] == MI_REPORT_TARGET_PGS &&
 +                          su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
 +                              cmd->execute_task =
 +                                      target_emulate_report_target_port_groups;
                        }
                        size = (cdb[6] << 24) | (cdb[7] << 16) |
                               (cdb[8] << 8) | cdb[9];
        case MODE_SENSE:
                size = cdb[4];
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 +              if (!passthrough)
 +                      cmd->execute_task = target_emulate_modesense;
                break;
        case MODE_SENSE_10:
 +              size = (cdb[7] << 8) + cdb[8];
 +              cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 +              if (!passthrough)
 +                      cmd->execute_task = target_emulate_modesense;
 +              break;
        case GPCMD_READ_BUFFER_CAPACITY:
        case GPCMD_SEND_OPC:
        case LOG_SELECT:
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
                break;
        case PERSISTENT_RESERVE_IN:
 +              if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
 +                      cmd->execute_task = target_scsi3_emulate_pr_in;
 +              size = (cdb[7] << 8) + cdb[8];
 +              cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 +              break;
        case PERSISTENT_RESERVE_OUT:
 -              cmd->transport_emulate_cdb =
 -                      (su_dev->t10_pr.res_type ==
 -                       SPC3_PERSISTENT_RESERVATIONS) ?
 -                      core_scsi3_emulate_pr : NULL;
 +              if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
 +                      cmd->execute_task = target_scsi3_emulate_pr_out;
                size = (cdb[7] << 8) + cdb[8];
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
                break;
                         *
                         * Check for emulated MO_SET_TARGET_PGS.
                         */
 -                      if (cdb[1] == MO_SET_TARGET_PGS) {
 -                              cmd->transport_emulate_cdb =
 -                              (su_dev->t10_alua.alua_type ==
 -                                      SPC3_ALUA_EMULATED) ?
 -                              core_emulate_set_target_port_groups :
 -                              NULL;
 +                      if (cdb[1] == MO_SET_TARGET_PGS &&
 +                          su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
 +                              cmd->execute_task =
 +                                      target_emulate_set_target_port_groups;
                        }
  
                        size = (cdb[6] << 24) | (cdb[7] << 16) |
                if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
                        cmd->sam_task_attr = MSG_HEAD_TAG;
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 +              if (!passthrough)
 +                      cmd->execute_task = target_emulate_inquiry;
                break;
        case READ_BUFFER:
                size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
        case READ_CAPACITY:
                size = READ_CAP_LEN;
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 +              if (!passthrough)
 +                      cmd->execute_task = target_emulate_readcapacity;
                break;
        case READ_MEDIA_SERIAL_NUMBER:
        case SECURITY_PROTOCOL_IN:
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
                break;
        case SERVICE_ACTION_IN:
 +              switch (cmd->t_task_cdb[1] & 0x1f) {
 +              case SAI_READ_CAPACITY_16:
 +                      if (!passthrough)
 +                              cmd->execute_task =
 +                                      target_emulate_readcapacity_16;
 +                      break;
 +              default:
 +                      if (passthrough)
 +                              break;
 +
 +                      pr_err("Unsupported SA: 0x%02x\n",
 +                              cmd->t_task_cdb[1] & 0x1f);
 +                      goto out_unsupported_cdb;
 +              }
 +              /*FALLTHROUGH*/
        case ACCESS_CONTROL_IN:
        case ACCESS_CONTROL_OUT:
        case EXTENDED_COPY:
        case REQUEST_SENSE:
                size = cdb[4];
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 +              if (!passthrough)
 +                      cmd->execute_task = target_emulate_request_sense;
                break;
        case READ_ELEMENT_STATUS:
                size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
                 * is running in SPC_PASSTHROUGH, and wants reservations
                 * emulation disabled.
                 */
 -              cmd->transport_emulate_cdb =
 -                              (su_dev->t10_pr.res_type !=
 -                               SPC_PASSTHROUGH) ?
 -                              core_scsi2_emulate_crh : NULL;
 +              if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
 +                      cmd->execute_task = target_scsi2_reservation_reserve;
                cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
                break;
        case RELEASE:
                else
                        size = cmd->data_length;
  
 -              cmd->transport_emulate_cdb =
 -                              (su_dev->t10_pr.res_type !=
 -                               SPC_PASSTHROUGH) ?
 -                              core_scsi2_emulate_crh : NULL;
 +              if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
 +                      cmd->execute_task = target_scsi2_reservation_release;
                cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
                break;
        case SYNCHRONIZE_CACHE:
                size = transport_get_size(sectors, cdb, cmd);
                cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
  
 -              /*
 -               * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
 -               */
 -              if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
 +              if (passthrough)
                        break;
 -              /*
 -               * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
 -               * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
 -               */
 -              cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
 +
                /*
                 * Check to ensure that LBA + Range does not exceed past end of
                 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
                        if (transport_cmd_get_valid_sectors(cmd) < 0)
                                goto out_invalid_cdb_field;
                }
 +              cmd->execute_task = target_emulate_synchronize_cache;
                break;
        case UNMAP:
                size = get_unaligned_be16(&cdb[7]);
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 +              if (!passthrough)
 +                      cmd->execute_task = target_emulate_unmap;
                break;
        case WRITE_SAME_16:
                sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
  
                if (target_check_write_same_discard(&cdb[1], dev) < 0)
                        goto out_invalid_cdb_field;
 +              if (!passthrough)
 +                      cmd->execute_task = target_emulate_write_same;
                break;
        case WRITE_SAME:
                sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
                 */
                if (target_check_write_same_discard(&cdb[1], dev) < 0)
                        goto out_invalid_cdb_field;
 +              if (!passthrough)
 +                      cmd->execute_task = target_emulate_write_same;
                break;
        case ALLOW_MEDIUM_REMOVAL:
 -      case GPCMD_CLOSE_TRACK:
        case ERASE:
 -      case INITIALIZE_ELEMENT_STATUS:
 -      case GPCMD_LOAD_UNLOAD:
        case REZERO_UNIT:
        case SEEK_10:
 -      case GPCMD_SET_SPEED:
        case SPACE:
        case START_STOP:
        case TEST_UNIT_READY:
        case VERIFY:
        case WRITE_FILEMARKS:
 +              cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
 +              if (!passthrough)
 +                      cmd->execute_task = target_emulate_noop;
 +              break;
 +      case GPCMD_CLOSE_TRACK:
 +      case INITIALIZE_ELEMENT_STATUS:
 +      case GPCMD_LOAD_UNLOAD:
 +      case GPCMD_SET_SPEED:
        case MOVE_MEDIUM:
                cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
                break;
        case REPORT_LUNS:
 -              cmd->transport_emulate_cdb =
 -                              transport_core_report_lun_response;
 +              cmd->execute_task = target_report_luns;
                size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
                /*
                 * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
                cmd->data_length = size;
        }
  
 +      /* reject any command that we don't have a handler for */
 +      if (!(passthrough || cmd->execute_task ||
 +           (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
 +              goto out_unsupported_cdb;
 +
        /* Let's limit control cdbs to a page, for simplicity's sake. */
        if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
            size > PAGE_SIZE)
@@@ -3308,7 -3309,7 +3309,7 @@@ static void target_complete_ok_work(str
                if (cmd->scsi_status) {
                        ret = transport_send_check_condition_and_sense(
                                        cmd, reason, 1);
 -                      if (ret == -EAGAIN)
 +                      if (ret == -EAGAIN || ret == -ENOMEM)
                                goto queue_full;
  
                        transport_lun_remove_cmd(cmd);
                spin_unlock(&cmd->se_lun->lun_sep_lock);
  
                ret = cmd->se_tfo->queue_data_in(cmd);
 -              if (ret == -EAGAIN)
 +              if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                break;
        case DMA_TO_DEVICE:
                        }
                        spin_unlock(&cmd->se_lun->lun_sep_lock);
                        ret = cmd->se_tfo->queue_data_in(cmd);
 -                      if (ret == -EAGAIN)
 +                      if (ret == -EAGAIN || ret == -ENOMEM)
                                goto queue_full;
                        break;
                }
                /* Fall through for DMA_TO_DEVICE */
        case DMA_NONE:
                ret = cmd->se_tfo->queue_status(cmd);
 -              if (ret == -EAGAIN)
 +              if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                break;
        default:
@@@ -3890,10 -3891,7 +3891,10 @@@ EXPORT_SYMBOL(transport_generic_process
  
  static void transport_write_pending_qf(struct se_cmd *cmd)
  {
 -      if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) {
 +      int ret;
 +
 +      ret = cmd->se_tfo->write_pending(cmd);
 +      if (ret == -EAGAIN || ret == -ENOMEM) {
                pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
                         cmd);
                transport_handle_queue_full(cmd, cmd->se_dev);
@@@ -3923,7 -3921,7 +3924,7 @@@ static int transport_generic_write_pend
         * frontend know that WRITE buffers are ready.
         */
        ret = cmd->se_tfo->write_pending(cmd);
 -      if (ret == -EAGAIN)
 +      if (ret == -EAGAIN || ret == -ENOMEM)
                goto queue_full;
        else if (ret < 0)
                return ret;
@@@ -3934,7 -3932,7 +3935,7 @@@ queue_full
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
        cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
        transport_handle_queue_full(cmd, cmd->se_dev);
 -      return ret;
 +      return 0;
  }
  
  /**
@@@ -3952,14 -3950,6 +3953,14 @@@ void transport_release_cmd(struct se_cm
                core_tmr_release_req(cmd->se_tmr_req);
        if (cmd->t_task_cdb != cmd->__t_task_cdb)
                kfree(cmd->t_task_cdb);
 +      /*
 +       * Check if target_wait_for_sess_cmds() is expecting to
 +       * release se_cmd directly here..
 +       */
 +      if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
 +              if (cmd->se_tfo->check_release_cmd(cmd) != 0)
 +                      return;
 +
        cmd->se_tfo->release_cmd(cmd);
  }
  EXPORT_SYMBOL(transport_release_cmd);
@@@ -3987,114 -3977,6 +3988,114 @@@ void transport_generic_free_cmd(struct 
  }
  EXPORT_SYMBOL(transport_generic_free_cmd);
  
 +/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 + * @se_sess:  session to reference
 + * @se_cmd:   command descriptor to add
 + */
 +void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 +      list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
 +      se_cmd->check_release = 1;
 +      spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 +}
 +EXPORT_SYMBOL(target_get_sess_cmd);
 +
 +/* target_put_sess_cmd - Check for active I/O shutdown or list delete
 + * @se_sess:  session to reference
 + * @se_cmd:   command descriptor to drop
 + */
 +int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 +      if (list_empty(&se_cmd->se_cmd_list)) {
 +              spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 +              WARN_ON(1);
 +              return 0;
 +      }
 +
 +      if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
 +              spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 +              complete(&se_cmd->cmd_wait_comp);
 +              return 1;
 +      }
 +      list_del(&se_cmd->se_cmd_list);
 +      spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL(target_put_sess_cmd);
 +
 +/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
 + * @se_sess:  session to split
 + */
 +void target_splice_sess_cmd_list(struct se_session *se_sess)
 +{
 +      struct se_cmd *se_cmd;
 +      unsigned long flags;
 +
 +      WARN_ON(!list_empty(&se_sess->sess_wait_list));
 +      INIT_LIST_HEAD(&se_sess->sess_wait_list);
 +
 +      spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 +      se_sess->sess_tearing_down = 1;
 +
 +      list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
 +
 +      list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
 +              se_cmd->cmd_wait_set = 1;
 +
 +      spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 +}
 +EXPORT_SYMBOL(target_splice_sess_cmd_list);
 +
 +/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 + * @se_sess:    session to wait for active I/O
 + * @wait_for_tasks:   Make extra transport_wait_for_tasks call
 + */
 +void target_wait_for_sess_cmds(
 +      struct se_session *se_sess,
 +      int wait_for_tasks)
 +{
 +      struct se_cmd *se_cmd, *tmp_cmd;
 +      bool rc = false;
 +
 +      list_for_each_entry_safe(se_cmd, tmp_cmd,
 +                              &se_sess->sess_wait_list, se_cmd_list) {
 +              list_del(&se_cmd->se_cmd_list);
 +
 +              pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
 +                      " %d\n", se_cmd, se_cmd->t_state,
 +                      se_cmd->se_tfo->get_cmd_state(se_cmd));
 +
 +              if (wait_for_tasks) {
 +                      pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
 +                              " fabric state: %d\n", se_cmd, se_cmd->t_state,
 +                              se_cmd->se_tfo->get_cmd_state(se_cmd));
 +
 +                      rc = transport_wait_for_tasks(se_cmd);
 +
 +                      pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
 +                              " fabric state: %d\n", se_cmd, se_cmd->t_state,
 +                              se_cmd->se_tfo->get_cmd_state(se_cmd));
 +              }
 +
 +              if (!rc) {
 +                      wait_for_completion(&se_cmd->cmd_wait_comp);
 +                      pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
 +                              " fabric state: %d\n", se_cmd, se_cmd->t_state,
 +                              se_cmd->se_tfo->get_cmd_state(se_cmd));
 +              }
 +
 +              se_cmd->se_tfo->release_cmd(se_cmd);
 +      }
 +}
 +EXPORT_SYMBOL(target_wait_for_sess_cmds);
 +
  /*    transport_lun_wait_for_tasks():
   *
   *    Called from ConfigFS context to stop the passed struct se_cmd to allow
@@@ -4271,14 -4153,14 +4272,14 @@@ int transport_clear_lun_from_sessions(s
   * Called from frontend fabric context to wait for storage engine
   * to pause and/or release frontend generated struct se_cmd.
   */
 -void transport_wait_for_tasks(struct se_cmd *cmd)
 +bool transport_wait_for_tasks(struct se_cmd *cmd)
  {
        unsigned long flags;
  
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 -              return;
 +              return false;
        }
        /*
         * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
         */
        if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 -              return;
 +              return false;
        }
        /*
         * If we are already stopped due to an external event (ie: LUN shutdown)
        if (!atomic_read(&cmd->t_transport_active) ||
             atomic_read(&cmd->t_transport_aborted)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 -              return;
 +              return false;
        }
  
        atomic_set(&cmd->t_transport_stop, 1);
                cmd->se_tfo->get_task_tag(cmd));
  
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 +
 +      return true;
  }
  EXPORT_SYMBOL(transport_wait_for_tasks);
  
@@@ -4704,7 -4584,9 +4705,7 @@@ get_cmd
                                break;
                        }
                        ret = transport_generic_new_cmd(cmd);
 -                      if (ret == -EAGAIN)
 -                              break;
 -                      else if (ret < 0) {
 +                      if (ret < 0) {
                                cmd->transport_error_status = ret;
                                transport_generic_request_failure(cmd,
                                        0, (cmd->data_direction !=
                case TRANSPORT_PROCESS_WRITE:
                        transport_generic_process_write(cmd);
                        break;
 -              case TRANSPORT_FREE_CMD_INTR:
 -                      transport_generic_free_cmd(cmd, 0);
 -                      break;
                case TRANSPORT_PROCESS_TMR:
                        transport_generic_do_tmr(cmd);
                        break;
@@@ -24,6 -24,7 +24,7 @@@
  
  #include <linux/device.h>
  #include <linux/slab.h>
+ #include <linux/export.h>
  #include <linux/usb.h>
  #include <linux/usb/quirks.h>
  #include <linux/usb/hcd.h>
@@@ -1667,11 -1668,6 +1668,11 @@@ int usb_runtime_suspend(struct device *
                return -EAGAIN;
  
        status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
 +
 +      /* Allow a retry if autosuspend failed temporarily */
 +      if (status == -EAGAIN || status == -EBUSY)
 +              usb_mark_last_busy(udev);
 +
        /* The PM core reacts badly unless the return code is 0,
         * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
         */
@@@ -14,6 -14,7 +14,7 @@@
  #include <linux/device.h>
  #include <linux/kernel.h>
  #include <linux/delay.h>
+ #include <linux/module.h>
  #include <linux/gpio.h>
  #include <linux/lcd.h>
  #include <linux/slab.h>
@@@ -52,11 -53,15 +53,11 @@@ static void l4f00242t03_lcd_init(struc
  
        dev_dbg(&spi->dev, "initializing LCD\n");
  
 -      if (priv->io_reg) {
 -              regulator_set_voltage(priv->io_reg, 1800000, 1800000);
 -              regulator_enable(priv->io_reg);
 -      }
 +      regulator_set_voltage(priv->io_reg, 1800000, 1800000);
 +      regulator_enable(priv->io_reg);
  
 -      if (priv->core_reg) {
 -              regulator_set_voltage(priv->core_reg, 2800000, 2800000);
 -              regulator_enable(priv->core_reg);
 -      }
 +      regulator_set_voltage(priv->core_reg, 2800000, 2800000);
 +      regulator_enable(priv->core_reg);
  
        l4f00242t03_reset(pdata->reset_gpio);
  
@@@ -74,8 -79,11 +75,8 @@@ static void l4f00242t03_lcd_powerdown(s
  
        gpio_set_value(pdata->data_enable_gpio, 0);
  
 -      if (priv->io_reg)
 -              regulator_disable(priv->io_reg);
 -
 -      if (priv->core_reg)
 -              regulator_disable(priv->core_reg);
 +      regulator_disable(priv->io_reg);
 +      regulator_disable(priv->core_reg);
  }
  
  static int l4f00242t03_lcd_power_get(struct lcd_device *ld)
@@@ -171,34 -179,47 +172,34 @@@ static int __devinit l4f00242t03_probe(
  
        priv->spi = spi;
  
 -      ret = gpio_request(pdata->reset_gpio, "lcd l4f00242t03 reset");
 +      ret = gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH,
 +                                              "lcd l4f00242t03 reset");
        if (ret) {
                dev_err(&spi->dev,
                        "Unable to get the lcd l4f00242t03 reset gpio.\n");
                goto err;
        }
  
 -      ret = gpio_direction_output(pdata->reset_gpio, 1);
 -      if (ret)
 -              goto err2;
 -
 -      ret = gpio_request(pdata->data_enable_gpio,
 -                              "lcd l4f00242t03 data enable");
 +      ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
 +                                              "lcd l4f00242t03 data enable");
        if (ret) {
                dev_err(&spi->dev,
                        "Unable to get the lcd l4f00242t03 data en gpio.\n");
                goto err2;
        }
  
 -      ret = gpio_direction_output(pdata->data_enable_gpio, 0);
 -      if (ret)
 +      priv->io_reg = regulator_get(&spi->dev, "vdd");
 +      if (IS_ERR(priv->io_reg)) {
 +              dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
 +                     __func__);
                goto err3;
 -
 -      if (pdata->io_supply) {
 -              priv->io_reg = regulator_get(NULL, pdata->io_supply);
 -
 -              if (IS_ERR(priv->io_reg)) {
 -                      pr_err("%s: Unable to get the IO regulator\n",
 -                                                              __func__);
 -                      goto err3;
 -              }
        }
  
 -      if (pdata->core_supply) {
 -              priv->core_reg = regulator_get(NULL, pdata->core_supply);
 -
 -              if (IS_ERR(priv->core_reg)) {
 -                      pr_err("%s: Unable to get the core regulator\n",
 -                                                              __func__);
 -                      goto err4;
 -              }
 +      priv->core_reg = regulator_get(&spi->dev, "vcore");
 +      if (IS_ERR(priv->core_reg)) {
 +              dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
 +                     __func__);
 +              goto err4;
        }
  
        priv->ld = lcd_device_register("l4f00242t03",
        return 0;
  
  err5:
 -      if (priv->core_reg)
 -              regulator_put(priv->core_reg);
 +      regulator_put(priv->core_reg);
  err4:
 -      if (priv->io_reg)
 -              regulator_put(priv->io_reg);
 +      regulator_put(priv->io_reg);
  err3:
        gpio_free(pdata->data_enable_gpio);
  err2:
@@@ -244,8 -267,10 +245,8 @@@ static int __devexit l4f00242t03_remove
        gpio_free(pdata->data_enable_gpio);
        gpio_free(pdata->reset_gpio);
  
 -      if (priv->io_reg)
 -              regulator_put(priv->io_reg);
 -      if (priv->core_reg)
 -              regulator_put(priv->core_reg);
 +      regulator_put(priv->io_reg);
 +      regulator_put(priv->core_reg);
  
        kfree(priv);
  
diff --combined drivers/w1/w1_int.c
@@@ -24,6 -24,8 +24,8 @@@
  #include <linux/delay.h>
  #include <linux/kthread.h>
  #include <linux/slab.h>
+ #include <linux/export.h>
+ #include <linux/moduleparam.h>
  
  #include "w1.h"
  #include "w1_log.h"
@@@ -78,7 -80,6 +80,7 @@@ static struct w1_master * w1_alloc_dev(
        memcpy(&dev->dev, device, sizeof(struct device));
        dev_set_name(&dev->dev, "w1_bus_master%u", dev->id);
        snprintf(dev->name, sizeof(dev->name), "w1_bus_master%u", dev->id);
 +      dev->dev.init_name = dev->name;
  
        dev->driver = driver;
  
@@@ -33,6 -33,7 +33,7 @@@
  #include <linux/slab.h>
  #include <linux/types.h>
  #include <linux/vmalloc.h>
+ #include <linux/export.h>
  #include <asm/xen/hypervisor.h>
  #include <xen/interface/xen.h>
  #include <xen/interface/event_channel.h>
@@@ -443,7 -444,7 +444,7 @@@ int xenbus_map_ring_valloc(struct xenbu
  
        *vaddr = NULL;
  
 -      area = xen_alloc_vm_area(PAGE_SIZE);
 +      area = alloc_vm_area(PAGE_SIZE);
        if (!area)
                return -ENOMEM;
  
                BUG();
  
        if (op.status != GNTST_okay) {
 -              xen_free_vm_area(area);
 +              free_vm_area(area);
                xenbus_dev_fatal(dev, op.status,
                                 "mapping in shared page %d from domain %d",
                                 gnt_ref, dev->otherend_id);
@@@ -552,7 -553,7 +553,7 @@@ int xenbus_unmap_ring_vfree(struct xenb
                BUG();
  
        if (op.status == GNTST_okay)
 -              xen_free_vm_area(area);
 +              free_vm_area(area);
        else
                xenbus_dev_error(dev, op.status,
                                 "unmapping page at handle %d error %d",
diff --combined fs/cifs/connect.c
@@@ -37,6 -37,7 +37,7 @@@
  #include <asm/uaccess.h>
  #include <asm/processor.h>
  #include <linux/inet.h>
+ #include <linux/module.h>
  #include <net/ipv6.h>
  #include "cifspdu.h"
  #include "cifsglob.h"
@@@ -3452,7 -3453,7 +3453,7 @@@ CIFSTCon(unsigned int xid, struct cifs_
                else
  #endif /* CIFS_WEAK_PW_HASH */
                rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
 -                                      bcc_ptr);
 +                                      bcc_ptr, nls_codepage);
  
                bcc_ptr += CIFS_AUTH_RESP_SIZE;
                if (ses->capabilities & CAP_UNICODE) {
diff --combined fs/logfs/super.c
@@@ -13,6 -13,7 +13,7 @@@
  #include <linux/bio.h>
  #include <linux/slab.h>
  #include <linux/blkdev.h>
+ #include <linux/module.h>
  #include <linux/mtd/mtd.h>
  #include <linux/statfs.h>
  #include <linux/buffer_head.h>
@@@ -90,6 -91,28 +91,6 @@@ void logfs_crash_dump(struct super_bloc
        dump_segfile(sb);
  }
  
 -/*
 - * TODO: move to lib/string.c
 - */
 -/**
 - * memchr_inv - Find a character in an area of memory.
 - * @s: The memory area
 - * @c: The byte to search for
 - * @n: The size of the area.
 - *
 - * returns the address of the first character other than @c, or %NULL
 - * if the whole buffer contains just @c.
 - */
 -void *memchr_inv(const void *s, int c, size_t n)
 -{
 -      const unsigned char *p = s;
 -      while (n-- != 0)
 -              if ((unsigned char)c != *p++)
 -                      return (void *)(p - 1);
 -
 -      return NULL;
 -}
 -
  /*
   * FIXME: There should be a reserve for root, similar to ext2.
   */
diff --combined fs/nfs/nfs4filelayout.c
@@@ -31,6 -31,7 +31,7 @@@
  
  #include <linux/nfs_fs.h>
  #include <linux/nfs_page.h>
+ #include <linux/module.h>
  
  #include "internal.h"
  #include "nfs4filelayout.h"
@@@ -449,8 -450,9 +450,8 @@@ filelayout_check_layout(struct pnfs_lay
  
        fl->dsaddr = dsaddr;
  
 -      if (fl->first_stripe_index < 0 ||
 -          fl->first_stripe_index >= dsaddr->stripe_count) {
 -              dprintk("%s Bad first_stripe_index %d\n",
 +      if (fl->first_stripe_index >= dsaddr->stripe_count) {
 +              dprintk("%s Bad first_stripe_index %u\n",
                                __func__, fl->first_stripe_index);
                goto out_put;
        }
@@@ -551,7 -553,7 +552,7 @@@ filelayout_decode_layout(struct pnfs_la
  
        /* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
         * Futher checking is done in filelayout_check_layout */
 -      if (fl->num_fh < 0 || fl->num_fh >
 +      if (fl->num_fh >
            max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
                goto out_err;
  
diff --combined fs/nfs/pagelist.c
@@@ -18,6 -18,7 +18,7 @@@
  #include <linux/nfs_page.h>
  #include <linux/nfs_fs.h>
  #include <linux/nfs_mount.h>
+ #include <linux/export.h>
  
  #include "internal.h"
  #include "pnfs.h"
@@@ -41,7 -42,7 +42,7 @@@ nfs_page_free(struct nfs_page *p
  
  /**
   * nfs_create_request - Create an NFS read/write request.
 - * @file: file descriptor to use
 + * @ctx: open context to use
   * @inode: inode to which the request is attached
   * @page: page to write
   * @offset: starting offset within the page for the write
diff --combined fs/nfs/pnfs.c
@@@ -29,6 -29,7 +29,7 @@@
  
  #include <linux/nfs_fs.h>
  #include <linux/nfs_page.h>
+ #include <linux/module.h>
  #include "internal.h"
  #include "pnfs.h"
  #include "iostat.h"
@@@ -1443,31 -1444,17 +1444,31 @@@ pnfs_layoutcommit_inode(struct inode *i
        /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
        data = kzalloc(sizeof(*data), GFP_NOFS);
        if (!data) {
 -              mark_inode_dirty_sync(inode);
                status = -ENOMEM;
                goto out;
        }
  
 +      if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
 +              goto out_free;
 +
 +      if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
 +              if (!sync) {
 +                      status = -EAGAIN;
 +                      goto out_free;
 +              }
 +              status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
 +                                      nfs_wait_bit_killable, TASK_KILLABLE);
 +              if (status)
 +                      goto out_free;
 +      }
 +
        INIT_LIST_HEAD(&data->lseg_list);
        spin_lock(&inode->i_lock);
        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
 +              clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
                spin_unlock(&inode->i_lock);
 -              kfree(data);
 -              goto out;
 +              wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
 +              goto out_free;
        }
  
        pnfs_list_write_lseg(inode, &data->lseg_list);
  
        status = nfs4_proc_layoutcommit(data, sync);
  out:
 +      if (status)
 +              mark_inode_dirty_sync(inode);
        dprintk("<-- %s status %d\n", __func__, status);
        return status;
 +out_free:
 +      kfree(data);
 +      goto out;
  }
diff --combined fs/nfs/write.c
@@@ -20,6 -20,7 +20,7 @@@
  #include <linux/nfs_mount.h>
  #include <linux/nfs_page.h>
  #include <linux/backing-dev.h>
+ #include <linux/export.h>
  
  #include <asm/uaccess.h>
  
@@@ -1243,6 -1244,7 +1244,6 @@@ void nfs_writeback_done(struct rpc_tas
  {
        struct nfs_writeargs    *argp = &data->args;
        struct nfs_writeres     *resp = &data->res;
 -      struct nfs_server       *server = NFS_SERVER(data->inode);
        int status;
  
        dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
                if (time_before(complain, jiffies)) {
                        dprintk("NFS:       faulty NFS server %s:"
                                " (committed = %d) != (stable = %d)\n",
 -                              server->nfs_client->cl_hostname,
 +                              NFS_SERVER(data->inode)->nfs_client->cl_hostname,
                                resp->verf->committed, argp->stable);
                        complain = jiffies + 300 * HZ;
                }
diff --combined fs/nfsd/nfssvc.c
@@@ -8,6 -8,7 +8,7 @@@
  
  #include <linux/sched.h>
  #include <linux/freezer.h>
+ #include <linux/module.h>
  #include <linux/fs_struct.h>
  #include <linux/swap.h>
  
@@@ -256,8 -257,6 +257,8 @@@ static void nfsd_last_thread(struct svc
        nfsd_serv = NULL;
        nfsd_shutdown();
  
 +      svc_rpcb_cleanup(serv);
 +
        printk(KERN_WARNING "nfsd: last server has exited, flushing export "
                            "cache\n");
        nfsd_export_flush();
diff --combined include/drm/drmP.h
@@@ -42,7 -42,6 +42,6 @@@
   * can build the DRM (part of PI DRI). 4/21/2000 S + B */
  #include <asm/current.h>
  #endif                                /* __alpha__ */
- #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/miscdevice.h>
  #include <linux/fs.h>
@@@ -80,6 -79,8 +79,8 @@@
  #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
  #define __OS_HAS_MTRR (defined(CONFIG_MTRR))
  
+ struct module;
  struct drm_file;
  struct drm_device;
  
   * using the DRM_DEBUG_KMS and DRM_DEBUG.
   */
  
 -extern __attribute__((format (printf, 4, 5)))
 +extern __printf(4, 5)
  void drm_ut_debug_printk(unsigned int request_level,
 -                              const char *prefix,
 -                              const char *function_name,
 -                              const char *format, ...);
 -extern __attribute__((format (printf, 2, 3)))
 +                       const char *prefix,
 +                       const char *function_name,
 +                       const char *format, ...);
 +extern __printf(2, 3)
  int drm_err(const char *func, const char *format, ...);
  
  /***********************************************************************/
diff --combined include/linux/blkdev.h
@@@ -14,7 -14,6 +14,6 @@@
  #include <linux/wait.h>
  #include <linux/mempool.h>
  #include <linux/bio.h>
- #include <linux/module.h>
  #include <linux/stringify.h>
  #include <linux/gfp.h>
  #include <linux/bsg.h>
@@@ -22,6 -21,7 +21,7 @@@
  
  #include <asm/scatterlist.h>
  
+ struct module;
  struct scsi_ioctl_command;
  
  struct request_queue;
@@@ -195,7 -195,7 +195,7 @@@ struct request_pm_stat
  #include <linux/elevator.h>
  
  typedef void (request_fn_proc) (struct request_queue *q);
 -typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 +typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
  typedef int (prep_rq_fn) (struct request_queue *, struct request *);
  typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
  
@@@ -680,8 -680,6 +680,8 @@@ extern int scsi_cmd_ioctl(struct reques
  extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
  
 +extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
 +
  /*
   * A queue has just exitted congestion.  Note this in the global counter of
   * congested queues, and wake up anyone who was waiting for requests to be
@@@ -865,22 -863,16 +865,22 @@@ struct request_queue *blk_alloc_queue_n
  extern void blk_put_queue(struct request_queue *);
  
  /*
 - * Note: Code in between changing the blk_plug list/cb_list or element of such
 - * lists is preemptable, but such code can't do sleep (or be very careful),
 - * otherwise data is corrupted. For details, please check schedule() where
 - * blk_schedule_flush_plug() is called.
 + * blk_plug permits building a queue of related requests by holding the I/O
 + * fragments for a short period. This allows merging of sequential requests
 + * into single larger request. As the requests are moved from a per-task list to
 + * the device's request_queue in a batch, this results in improved scalability
 + * as the lock contention for request_queue lock is reduced.
 + *
 + * It is ok not to disable preemption when adding the request to the plug list
 + * or when attempting a merge, because blk_schedule_flush_list() will only flush
 + * the plug list when the task sleeps by itself. For details, please see
 + * schedule() where blk_schedule_flush_plug() is called.
   */
  struct blk_plug {
 -      unsigned long magic;
 -      struct list_head list;
 -      struct list_head cb_list;
 -      unsigned int should_sort;
 +      unsigned long magic; /* detect uninitialized use-cases */
 +      struct list_head list; /* requests */
 +      struct list_head cb_list; /* md requires an unplug callback */
 +      unsigned int should_sort; /* list to be sorted before flushing? */
  };
  #define BLK_MAX_REQUEST_COUNT 16
  
@@@ -1197,6 -1189,20 +1197,6 @@@ static inline uint64_t rq_io_start_time
  }
  #endif
  
 -#ifdef CONFIG_BLK_DEV_THROTTLING
 -extern int blk_throtl_init(struct request_queue *q);
 -extern void blk_throtl_exit(struct request_queue *q);
 -extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
 -#else /* CONFIG_BLK_DEV_THROTTLING */
 -static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
 -{
 -      return 0;
 -}
 -
 -static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 -static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
 -#endif /* CONFIG_BLK_DEV_THROTTLING */
 -
  #define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
  #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
diff --combined include/linux/crypto.h
@@@ -18,7 -18,6 +18,6 @@@
  #define _LINUX_CRYPTO_H
  
  #include <linux/atomic.h>
- #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/list.h>
  #include <linux/slab.h>
  
  #define CRYPTO_ALG_TESTED             0x00000400
  
 +/*
 + * Set if the algorithm is an instance that is build from templates.
 + */
 +#define CRYPTO_ALG_INSTANCE           0x00000800
 +
  /*
   * Transform masks and values (for crt_flags).
   */
@@@ -510,11 -504,6 +509,6 @@@ static inline int crypto_tfm_alg_priori
        return tfm->__crt_alg->cra_priority;
  }
  
- static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
- {
-       return module_name(tfm->__crt_alg->cra_module);
- }
  static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
  {
        return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
diff --combined include/linux/device.h
@@@ -20,7 -20,7 +20,7 @@@
  #include <linux/lockdep.h>
  #include <linux/compiler.h>
  #include <linux/types.h>
- #include <linux/module.h>
+ #include <linux/mutex.h>
  #include <linux/pm.h>
  #include <linux/atomic.h>
  #include <asm/device.h>
@@@ -29,6 -29,7 +29,7 @@@ struct device
  struct device_private;
  struct device_driver;
  struct driver_private;
+ struct module;
  struct class;
  struct subsys_private;
  struct bus_type;
@@@ -622,8 -623,8 +623,8 @@@ static inline const char *dev_name(cons
        return kobject_name(&dev->kobj);
  }
  
 -extern int dev_set_name(struct device *dev, const char *name, ...)
 -                      __attribute__((format(printf, 2, 3)));
 +extern __printf(2, 3)
 +int dev_set_name(struct device *dev, const char *name, ...);
  
  #ifdef CONFIG_NUMA
  static inline int dev_to_node(struct device *dev)
@@@ -723,10 -724,14 +724,14 @@@ extern int dev_set_drvdata(struct devic
   */
  extern struct device *__root_device_register(const char *name,
                                             struct module *owner);
- static inline struct device *root_device_register(const char *name)
- {
-       return __root_device_register(name, THIS_MODULE);
- }
+ /*
+  * This is a macro to avoid include problems with THIS_MODULE,
+  * just as per what is done for device_schedule_callback() above.
+  */
+ #define root_device_register(name) \
+       __root_device_register(name, THIS_MODULE)
  extern void root_device_unregister(struct device *root);
  
  static inline void *dev_get_platdata(const struct device *dev)
@@@ -753,10 -758,10 +758,10 @@@ extern struct device *device_create_var
                                          void *drvdata,
                                          const char *fmt,
                                          va_list vargs);
 -extern struct device *device_create(struct class *cls, struct device *parent,
 -                                  dev_t devt, void *drvdata,
 -                                  const char *fmt, ...)
 -                                  __attribute__((format(printf, 5, 6)));
 +extern __printf(5, 6)
 +struct device *device_create(struct class *cls, struct device *parent,
 +                           dev_t devt, void *drvdata,
 +                           const char *fmt, ...);
  extern void device_destroy(struct class *cls, dev_t devt);
  
  /*
@@@ -800,56 -805,64 +805,56 @@@ extern const char *dev_driver_string(co
  
  extern int __dev_printk(const char *level, const struct device *dev,
                        struct va_format *vaf);
 -extern int dev_printk(const char *level, const struct device *dev,
 -                    const char *fmt, ...)
 -      __attribute__ ((format (printf, 3, 4)));
 -extern int dev_emerg(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -extern int dev_alert(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -extern int dev_crit(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -extern int dev_err(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -extern int dev_warn(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -extern int dev_notice(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -extern int _dev_info(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 +extern __printf(3, 4)
 +int dev_printk(const char *level, const struct device *dev,
 +             const char *fmt, ...)
 +      ;
 +extern __printf(2, 3)
 +int dev_emerg(const struct device *dev, const char *fmt, ...);
 +extern __printf(2, 3)
 +int dev_alert(const struct device *dev, const char *fmt, ...);
 +extern __printf(2, 3)
 +int dev_crit(const struct device *dev, const char *fmt, ...);
 +extern __printf(2, 3)
 +int dev_err(const struct device *dev, const char *fmt, ...);
 +extern __printf(2, 3)
 +int dev_warn(const struct device *dev, const char *fmt, ...);
 +extern __printf(2, 3)
 +int dev_notice(const struct device *dev, const char *fmt, ...);
 +extern __printf(2, 3)
 +int _dev_info(const struct device *dev, const char *fmt, ...);
  
  #else
  
  static inline int __dev_printk(const char *level, const struct device *dev,
                               struct va_format *vaf)
 -       { return 0; }
 -static inline int dev_printk(const char *level, const struct device *dev,
 -                    const char *fmt, ...)
 -      __attribute__ ((format (printf, 3, 4)));
 -static inline int dev_printk(const char *level, const struct device *dev,
 -                    const char *fmt, ...)
 -       { return 0; }
 -
 -static inline int dev_emerg(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -static inline int dev_emerg(const struct device *dev, const char *fmt, ...)
 -      { return 0; }
 -static inline int dev_crit(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -static inline int dev_crit(const struct device *dev, const char *fmt, ...)
 -      { return 0; }
 -static inline int dev_alert(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -static inline int dev_alert(const struct device *dev, const char *fmt, ...)
 -      { return 0; }
 -static inline int dev_err(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -static inline int dev_err(const struct device *dev, const char *fmt, ...)
 -      { return 0; }
 -static inline int dev_warn(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -static inline int dev_warn(const struct device *dev, const char *fmt, ...)
 -      { return 0; }
 -static inline int dev_notice(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -static inline int dev_notice(const struct device *dev, const char *fmt, ...)
 -      { return 0; }
 -static inline int _dev_info(const struct device *dev, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 -static inline int _dev_info(const struct device *dev, const char *fmt, ...)
 -      { return 0; }
 +{ return 0; }
 +static inline __printf(3, 4)
 +int dev_printk(const char *level, const struct device *dev,
 +             const char *fmt, ...)
 +{ return 0; }
 +
 +static inline __printf(2, 3)
 +int dev_emerg(const struct device *dev, const char *fmt, ...)
 +{ return 0; }
 +static inline __printf(2, 3)
 +int dev_crit(const struct device *dev, const char *fmt, ...)
 +{ return 0; }
 +static inline __printf(2, 3)
 +int dev_alert(const struct device *dev, const char *fmt, ...)
 +{ return 0; }
 +static inline __printf(2, 3)
 +int dev_err(const struct device *dev, const char *fmt, ...)
 +{ return 0; }
 +static inline __printf(2, 3)
 +int dev_warn(const struct device *dev, const char *fmt, ...)
 +{ return 0; }
 +static inline __printf(2, 3)
 +int dev_notice(const struct device *dev, const char *fmt, ...)
 +{ return 0; }
 +static inline __printf(2, 3)
 +int _dev_info(const struct device *dev, const char *fmt, ...)
 +{ return 0; }
  
  #endif
  
  #include <linux/device.h>
  #include <linux/uio.h>
  #include <linux/dma-direction.h>
 +#include <linux/scatterlist.h>
+ #include <linux/bitmap.h>
+ #include <asm/page.h>
  
 -struct scatterlist;
 -
  /**
   * typedef dma_cookie_t - an opaque DMA cookie
   *
@@@ -518,16 -521,6 +520,16 @@@ static inline int dmaengine_slave_confi
                        (unsigned long)config);
  }
  
 +static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
 +      struct dma_chan *chan, void *buf, size_t len,
 +      enum dma_data_direction dir, unsigned long flags)
 +{
 +      struct scatterlist sg;
 +      sg_init_one(&sg, buf, len);
 +
 +      return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags);
 +}
 +
  static inline int dmaengine_terminate_all(struct dma_chan *chan)
  {
        return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
diff --combined include/linux/gameport.h
@@@ -71,15 -71,14 +71,14 @@@ void gameport_close(struct gameport *ga
  #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
  
  void __gameport_register_port(struct gameport *gameport, struct module *owner);
- static inline void gameport_register_port(struct gameport *gameport)
- {
-       __gameport_register_port(gameport, THIS_MODULE);
- }
+ /* use a define to avoid include chaining to get THIS_MODULE */
+ #define gameport_register_port(gameport) \
+       __gameport_register_port(gameport, THIS_MODULE)
  
  void gameport_unregister_port(struct gameport *gameport);
  
 -void gameport_set_phys(struct gameport *gameport, const char *fmt, ...)
 -      __attribute__ ((format (printf, 2, 3)));
 +__printf(2, 3)
 +void gameport_set_phys(struct gameport *gameport, const char *fmt, ...);
  
  #else
  
@@@ -93,8 -92,8 +92,8 @@@ static inline void gameport_unregister_
        return;
  }
  
 -static inline void gameport_set_phys(struct gameport *gameport,
 -                                   const char *fmt, ...)
 +static inline __printf(2, 3)
 +void gameport_set_phys(struct gameport *gameport, const char *fmt, ...)
  {
        return;
  }
@@@ -145,12 -144,12 +144,12 @@@ static inline void gameport_unpin_drive
        mutex_unlock(&gameport->drv_mutex);
  }
  
- int __gameport_register_driver(struct gameport_driver *drv,
+ int __must_check __gameport_register_driver(struct gameport_driver *drv,
                                struct module *owner, const char *mod_name);
- static inline int __must_check gameport_register_driver(struct gameport_driver *drv)
- {
-       return __gameport_register_driver(drv, THIS_MODULE, KBUILD_MODNAME);
- }
+ /* use a define to avoid include chaining to get THIS_MODULE & friends */
+ #define gameport_register_driver(drv) \
+       __gameport_register_driver(drv, THIS_MODULE, KBUILD_MODNAME)
  
  void gameport_unregister_driver(struct gameport_driver *drv);
  
diff --combined include/linux/of.h
@@@ -23,6 -23,7 +23,7 @@@
  #include <linux/spinlock.h>
  
  #include <asm/byteorder.h>
+ #include <asm/errno.h>
  
  typedef u32 phandle;
  typedef u32 ihandle;
@@@ -207,11 -208,6 +208,11 @@@ extern int of_property_read_u64(const s
  extern int of_property_read_string(struct device_node *np,
                                   const char *propname,
                                   const char **out_string);
 +extern int of_property_read_string_index(struct device_node *np,
 +                                       const char *propname,
 +                                       int index, const char **output);
 +extern int of_property_count_strings(struct device_node *np,
 +                                   const char *propname);
  extern int of_device_is_compatible(const struct device_node *device,
                                   const char *);
  extern int of_device_is_available(const struct device_node *device);
@@@ -288,19 -284,6 +289,19 @@@ static inline int of_property_read_stri
        return -ENOSYS;
  }
  
 +static inline int of_property_read_string_index(struct device_node *np,
 +                                              const char *propname, int index,
 +                                              const char **out_string)
 +{
 +      return -ENOSYS;
 +}
 +
 +static inline int of_property_count_strings(struct device_node *np,
 +                                          const char *propname)
 +{
 +      return -ENOSYS;
 +}
 +
  static inline const void *of_get_property(const struct device_node *node,
                                const char *name,
                                int *lenp)
@@@ -321,16 -304,6 +322,16 @@@ static inline struct device_node *of_pa
        return NULL;
  }
  
 +static inline int of_alias_get_id(struct device_node *np, const char *stem)
 +{
 +      return -ENOSYS;
 +}
 +
 +static inline int of_machine_is_compatible(const char *compat)
 +{
 +      return 0;
 +}
 +
  #define of_match_ptr(_ptr)    NULL
  #define of_match_node(_matches, _node)        NULL
  #endif /* CONFIG_OF */
diff --combined include/media/saa7146.h
@@@ -1,7 -1,6 +1,6 @@@
  #ifndef __SAA7146__
  #define __SAA7146__
  
- #include <linux/module.h>     /* for module-version */
  #include <linux/delay.h>      /* for delay-stuff */
  #include <linux/slab.h>               /* for kmalloc/kfree */
  #include <linux/pci.h>                /* for pci-config-stuff, vendor ids etc. */
  
  extern unsigned int saa7146_debug;
  
 -//#define DEBUG_PROLOG printk("(0x%08x)(0x%08x) %s: %s(): ",(dev==0?-1:(dev->mem==0?-1:saa7146_read(dev,RPS_ADDR0))),(dev==0?-1:(dev->mem==0?-1:saa7146_read(dev,IER))),KBUILD_MODNAME,__func__)
 -
  #ifndef DEBUG_VARIABLE
        #define DEBUG_VARIABLE saa7146_debug
  #endif
  
 -#define DEBUG_PROLOG printk("%s: %s(): ",KBUILD_MODNAME, __func__)
 -#define INFO(x) { printk("%s: ",KBUILD_MODNAME); printk x; }
 -
 -#define ERR(x) { DEBUG_PROLOG; printk x; }
 -
 -#define DEB_S(x)    if (0!=(DEBUG_VARIABLE&0x01)) { DEBUG_PROLOG; printk x; } /* simple debug messages */
 -#define DEB_D(x)    if (0!=(DEBUG_VARIABLE&0x02)) { DEBUG_PROLOG; printk x; } /* more detailed debug messages */
 -#define DEB_EE(x)   if (0!=(DEBUG_VARIABLE&0x04)) { DEBUG_PROLOG; printk x; } /* print enter and exit of functions */
 -#define DEB_I2C(x)  if (0!=(DEBUG_VARIABLE&0x08)) { DEBUG_PROLOG; printk x; } /* i2c debug messages */
 -#define DEB_VBI(x)  if (0!=(DEBUG_VARIABLE&0x10)) { DEBUG_PROLOG; printk x; } /* vbi debug messages */
 -#define DEB_INT(x)  if (0!=(DEBUG_VARIABLE&0x20)) { DEBUG_PROLOG; printk x; } /* interrupt debug messages */
 -#define DEB_CAP(x)  if (0!=(DEBUG_VARIABLE&0x40)) { DEBUG_PROLOG; printk x; } /* capture debug messages */
 +#define ERR(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
 +
 +#define _DBG(mask, fmt, ...)                                          \
 +do {                                                                  \
 +      if (DEBUG_VARIABLE & mask)                                      \
 +              pr_debug("%s(): " fmt, __func__, ##__VA_ARGS__);        \
 +} while (0)
 +
 +/* simple debug messages */
 +#define DEB_S(fmt, ...)               _DBG(0x01, fmt, ##__VA_ARGS__)
 +/* more detailed debug messages */
 +#define DEB_D(fmt, ...)               _DBG(0x02, fmt, ##__VA_ARGS__)
 +/* print enter and exit of functions */
 +#define DEB_EE(fmt, ...)      _DBG(0x04, fmt, ##__VA_ARGS__)
 +/* i2c debug messages */
 +#define DEB_I2C(fmt, ...)     _DBG(0x08, fmt, ##__VA_ARGS__)
 +/* vbi debug messages */
 +#define DEB_VBI(fmt, ...)     _DBG(0x10, fmt, ##__VA_ARGS__)
 +/* interrupt debug messages */
 +#define DEB_INT(fmt, ...)     _DBG(0x20, fmt, ##__VA_ARGS__)
 +/* capture debug messages */
 +#define DEB_CAP(fmt, ...)     _DBG(0x40, fmt, ##__VA_ARGS__)
  
  #define SAA7146_ISR_CLEAR(x,y) \
        saa7146_write(x, ISR, (y));
  
+ struct module;
  struct saa7146_dev;
  struct saa7146_extension;
  struct saa7146_vv;
@@@ -18,7 -18,6 +18,6 @@@
  
  #include <linux/kmemcheck.h>
  #include <linux/list.h>
- #include <linux/module.h>
  #include <linux/timer.h>
  #include <linux/types.h>
  #include <linux/workqueue.h>
@@@ -134,7 -133,6 +133,7 @@@ struct inet_timewait_sock 
        struct inet_bind_bucket *tw_tb;
        struct hlist_node       tw_death_node;
  };
 +#define tw_tclass tw_tos
  
  static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
                                      struct hlist_nulls_head *list)
diff --combined include/net/ip_vs.h
@@@ -425,9 -425,9 +425,9 @@@ struct ip_vs_protocol 
  
        const char *(*state_name)(int state);
  
 -      int (*state_transition)(struct ip_vs_conn *cp, int direction,
 -                              const struct sk_buff *skb,
 -                              struct ip_vs_proto_data *pd);
 +      void (*state_transition)(struct ip_vs_conn *cp, int direction,
 +                               const struct sk_buff *skb,
 +                               struct ip_vs_proto_data *pd);
  
        int (*register_app)(struct net *net, struct ip_vs_app *inc);
  
@@@ -1126,17 -1126,16 +1126,16 @@@ int unregister_ip_vs_pe(struct ip_vs_p
  struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
  struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
  
- static inline void ip_vs_pe_get(const struct ip_vs_pe *pe)
- {
-       if (pe && pe->module)
+ /*
+  * Use a #define to avoid all of module.h just for these trivial ops
+  */
+ #define ip_vs_pe_get(pe)                      \
+       if (pe && pe->module)                   \
                __module_get(pe->module);
- }
  
- static inline void ip_vs_pe_put(const struct ip_vs_pe *pe)
- {
-       if (pe && pe->module)
+ #define ip_vs_pe_put(pe)                      \
+       if (pe && pe->module)                   \
                module_put(pe->module);
- }
  
  /*
   *    IPVS protocol functions (from ip_vs_proto.c)
@@@ -1378,7 -1377,7 +1377,7 @@@ static inline int ip_vs_conntrack_enabl
  
  extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
                                   int outin);
 -extern int ip_vs_confirm_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp);
 +extern int ip_vs_confirm_conntrack(struct sk_buff *skb);
  extern void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
                                      struct ip_vs_conn *cp, u_int8_t proto,
                                      const __be16 port, int from_rs);
@@@ -1396,7 -1395,8 +1395,7 @@@ static inline void ip_vs_update_conntra
  {
  }
  
 -static inline int ip_vs_confirm_conntrack(struct sk_buff *skb,
 -                                        struct ip_vs_conn *cp)
 +static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
  {
        return NF_ACCEPT;
  }
diff --combined include/net/sock.h
@@@ -46,7 -46,6 +46,6 @@@
  #include <linux/list_nulls.h>
  #include <linux/timer.h>
  #include <linux/cache.h>
- #include <linux/module.h>
  #include <linux/lockdep.h>
  #include <linux/netdevice.h>
  #include <linux/skbuff.h>     /* struct sk_buff */
@@@ -76,8 -75,8 +75,8 @@@
                                        printk(KERN_DEBUG msg); } while (0)
  #else
  /* Validate arguments and do nothing */
 -static inline void __attribute__ ((format (printf, 2, 3)))
 -SOCK_DEBUG(struct sock *sk, const char *msg, ...)
 +static inline __printf(2, 3)
 +void SOCK_DEBUG(struct sock *sk, const char *msg, ...)
  {
  }
  #endif
@@@ -729,6 -728,7 +728,7 @@@ struct request_sock_ops
  struct timewait_sock_ops;
  struct inet_hashinfo;
  struct raw_hashinfo;
+ struct module;
  
  /* Networking protocol blocks we attach to sockets.
   * socket layer -> transport layer interface
diff --combined include/sound/core.h
@@@ -22,7 -22,6 +22,6 @@@
   *
   */
  
- #include <linux/module.h>
  #include <linux/sched.h>              /* wake_up() */
  #include <linux/mutex.h>              /* struct mutex */
  #include <linux/rwsem.h>              /* struct rw_semaphore */
@@@ -43,6 -42,7 +42,7 @@@
  #ifdef CONFIG_PCI
  struct pci_dev;
  #endif
+ struct module;
  
  /* device allocation stuff */
  
@@@ -326,9 -326,9 +326,9 @@@ void release_and_free_resource(struct r
  /* --- */
  
  #if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
 +__printf(4, 5)
  void __snd_printk(unsigned int level, const char *file, int line,
 -                const char *format, ...)
 -     __attribute__ ((format (printf, 4, 5)));
 +                const char *format, ...);
  #else
  #define __snd_printk(level, file, line, format, args...) \
        printk(format, ##args)
diff --combined include/xen/xenbus.h
@@@ -37,6 -37,7 +37,7 @@@
  #include <linux/device.h>
  #include <linux/notifier.h>
  #include <linux/mutex.h>
+ #include <linux/export.h>
  #include <linux/completion.h>
  #include <linux/init.h>
  #include <linux/slab.h>
@@@ -156,9 -157,9 +157,9 @@@ int xenbus_scanf(struct xenbus_transact
        __attribute__((format(scanf, 4, 5)));
  
  /* Single printf and write: returns -errno or 0. */
 +__printf(4, 5)
  int xenbus_printf(struct xenbus_transaction t,
 -                const char *dir, const char *node, const char *fmt, ...)
 -      __attribute__((format(printf, 4, 5)));
 +                const char *dir, const char *node, const char *fmt, ...);
  
  /* Generic read function: NULL-terminated triples of name,
   * sprintf-style type string, and pointer. Returns 0 or errno.*/
@@@ -200,11 -201,11 +201,11 @@@ int xenbus_watch_path(struct xenbus_dev
                      struct xenbus_watch *watch,
                      void (*callback)(struct xenbus_watch *,
                                       const char **, unsigned int));
 +__printf(4, 5)
  int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
                         void (*callback)(struct xenbus_watch *,
                                          const char **, unsigned int),
 -                       const char *pathfmt, ...)
 -      __attribute__ ((format (printf, 4, 5)));
 +                       const char *pathfmt, ...);
  
  int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
  int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
@@@ -223,9 -224,9 +224,9 @@@ int xenbus_free_evtchn(struct xenbus_de
  
  enum xenbus_state xenbus_read_driver_state(const char *path);
  
 -__attribute__((format(printf, 3, 4)))
 +__printf(3, 4)
  void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...);
 -__attribute__((format(printf, 3, 4)))
 +__printf(3, 4)
  void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...);
  
  const char *xenbus_strstate(enum xenbus_state state);
diff --combined kernel/cpu.c
  #include <linux/sched.h>
  #include <linux/unistd.h>
  #include <linux/cpu.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/kthread.h>
  #include <linux/stop_machine.h>
  #include <linux/mutex.h>
  #include <linux/gfp.h>
 +#include <linux/suspend.h>
  
  #ifdef CONFIG_SMP
  /* Serializes the updates to cpu_online_mask, cpu_present_mask */
@@@ -477,79 -476,6 +477,79 @@@ static int alloc_frozen_cpus(void
        return 0;
  }
  core_initcall(alloc_frozen_cpus);
 +
 +/*
 + * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 + * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 + * to continue until any currently running CPU hotplug operation gets
 + * completed.
 + * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 + * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 + * CPU hotplug path and released only after it is complete. Thus, we
 + * (and hence the freezer) will block here until any currently running CPU
 + * hotplug operation gets completed.
 + */
 +void cpu_hotplug_disable_before_freeze(void)
 +{
 +      cpu_maps_update_begin();
 +      cpu_hotplug_disabled = 1;
 +      cpu_maps_update_done();
 +}
 +
 +
 +/*
 + * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 + * disabled while beginning to freeze tasks).
 + */
 +void cpu_hotplug_enable_after_thaw(void)
 +{
 +      cpu_maps_update_begin();
 +      cpu_hotplug_disabled = 0;
 +      cpu_maps_update_done();
 +}
 +
 +/*
 + * When callbacks for CPU hotplug notifications are being executed, we must
 + * ensure that the state of the system with respect to the tasks being frozen
 + * or not, as reported by the notification, remains unchanged *throughout the
 + * duration* of the execution of the callbacks.
 + * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 + *
 + * This synchronization is implemented by mutually excluding regular CPU
 + * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 + * Hibernate notifications.
 + */
 +static int
 +cpu_hotplug_pm_callback(struct notifier_block *nb,
 +                      unsigned long action, void *ptr)
 +{
 +      switch (action) {
 +
 +      case PM_SUSPEND_PREPARE:
 +      case PM_HIBERNATION_PREPARE:
 +              cpu_hotplug_disable_before_freeze();
 +              break;
 +
 +      case PM_POST_SUSPEND:
 +      case PM_POST_HIBERNATION:
 +              cpu_hotplug_enable_after_thaw();
 +              break;
 +
 +      default:
 +              return NOTIFY_DONE;
 +      }
 +
 +      return NOTIFY_OK;
 +}
 +
 +
 +int cpu_hotplug_pm_sync_init(void)
 +{
 +      pm_notifier(cpu_hotplug_pm_callback, 0);
 +      return 0;
 +}
 +core_initcall(cpu_hotplug_pm_sync_init);
 +
  #endif /* CONFIG_PM_SLEEP_SMP */
  
  /**
diff --combined kernel/cpuset.c
@@@ -37,7 -37,7 +37,7 @@@
  #include <linux/mempolicy.h>
  #include <linux/mm.h>
  #include <linux/memory.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/mount.h>
  #include <linux/namei.h>
  #include <linux/pagemap.h>
@@@ -949,8 -949,6 +949,8 @@@ static void cpuset_migrate_mm(struct mm
  static void cpuset_change_task_nodemask(struct task_struct *tsk,
                                        nodemask_t *newmems)
  {
 +      bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
 +
  repeat:
        /*
         * Allow tasks that have access to memory reserves because they have
        nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
  
 -
        /*
         * ensure checking ->mems_allowed_change_disable after setting all new
         * allowed nodes.
  
        /*
         * Allocation of memory is very fast, we needn't sleep when waiting
 -       * for the read-side.
 +       * for the read-side.  No wait is necessary, however, if at least one
 +       * node remains unchanged.
         */
 -      while (ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
 +      while (masks_disjoint &&
 +                      ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
                task_unlock(tsk);
                if (!task_curr(tsk))
                        yield();
diff --combined kernel/crash_dump.c
@@@ -2,7 -2,7 +2,7 @@@
  #include <linux/crash_dump.h>
  #include <linux/init.h>
  #include <linux/errno.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  
  /*
   * If we have booted due to a crash, max_pfn will be a very low value. We need
@@@ -19,16 -19,9 +19,16 @@@ unsigned long saved_max_pfn
   */
  unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
  
 +/*
 + * stores the size of elf header of crash image
 + */
 +unsigned long long elfcorehdr_size;
 +
  /*
   * elfcorehdr= specifies the location of elf core header stored by the crashed
   * kernel. This option will be passed by kexec loader to the capture kernel.
 + *
 + * Syntax: elfcorehdr=[size[KMG]@]offset[KMG]
   */
  static int __init setup_elfcorehdr(char *arg)
  {
        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
 +      if (*end == '@') {
 +              elfcorehdr_size = elfcorehdr_addr;
 +              elfcorehdr_addr = memparse(end + 1, &end);
 +      }
        return end > arg ? 0 : -EINVAL;
  }
  early_param("elfcorehdr", setup_elfcorehdr);
diff --combined kernel/events/core.c
  #include <linux/reboot.h>
  #include <linux/vmstat.h>
  #include <linux/device.h>
+ #include <linux/export.h>
  #include <linux/vmalloc.h>
  #include <linux/hardirq.h>
  #include <linux/rculist.h>
  #include <linux/uaccess.h>
 -#include <linux/suspend.h>
  #include <linux/syscalls.h>
  #include <linux/anon_inodes.h>
  #include <linux/kernel_stat.h>
@@@ -3543,7 -3545,7 +3544,7 @@@ static void perf_mmap_close(struct vm_a
                struct ring_buffer *rb = event->rb;
  
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
 -              vma->vm_mm->locked_vm -= event->mmap_locked;
 +              vma->vm_mm->pinned_vm -= event->mmap_locked;
                rcu_assign_pointer(event->rb, NULL);
                mutex_unlock(&event->mmap_mutex);
  
@@@ -3624,7 -3626,7 +3625,7 @@@ static int perf_mmap(struct file *file
  
        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
 -      locked = vma->vm_mm->locked_vm + extra;
 +      locked = vma->vm_mm->pinned_vm + extra;
  
        if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
                !capable(CAP_IPC_LOCK)) {
        atomic_long_add(user_extra, &user->locked_vm);
        event->mmap_locked = extra;
        event->mmap_user = get_current_user();
 -      vma->vm_mm->locked_vm += event->mmap_locked;
 +      vma->vm_mm->pinned_vm += event->mmap_locked;
  
  unlock:
        if (!ret)
@@@ -6852,7 -6854,7 +6853,7 @@@ static void __cpuinit perf_event_init_c
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  
        mutex_lock(&swhash->hlist_mutex);
 -      if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
 +      if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
  
                hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@@ -6941,7 -6943,14 +6942,7 @@@ perf_cpu_notify(struct notifier_block *
  {
        unsigned int cpu = (long)hcpu;
  
 -      /*
 -       * Ignore suspend/resume action, the perf_pm_notifier will
 -       * take care of that.
 -       */
 -      if (action & CPU_TASKS_FROZEN)
 -              return NOTIFY_OK;
 -
 -      switch (action) {
 +      switch (action & ~CPU_TASKS_FROZEN) {
  
        case CPU_UP_PREPARE:
        case CPU_DOWN_FAILED:
        return NOTIFY_OK;
  }
  
 -static void perf_pm_resume_cpu(void *unused)
 -{
 -      struct perf_cpu_context *cpuctx;
 -      struct perf_event_context *ctx;
 -      struct pmu *pmu;
 -      int idx;
 -
 -      idx = srcu_read_lock(&pmus_srcu);
 -      list_for_each_entry_rcu(pmu, &pmus, entry) {
 -              cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 -              ctx = cpuctx->task_ctx;
 -
 -              perf_ctx_lock(cpuctx, ctx);
 -              perf_pmu_disable(cpuctx->ctx.pmu);
 -
 -              cpu_ctx_sched_out(cpuctx, EVENT_ALL);
 -              if (ctx)
 -                      ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 -
 -              perf_pmu_enable(cpuctx->ctx.pmu);
 -              perf_ctx_unlock(cpuctx, ctx);
 -      }
 -      srcu_read_unlock(&pmus_srcu, idx);
 -}
 -
 -static void perf_pm_suspend_cpu(void *unused)
 -{
 -      struct perf_cpu_context *cpuctx;
 -      struct perf_event_context *ctx;
 -      struct pmu *pmu;
 -      int idx;
 -
 -      idx = srcu_read_lock(&pmus_srcu);
 -      list_for_each_entry_rcu(pmu, &pmus, entry) {
 -              cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 -              ctx = cpuctx->task_ctx;
 -
 -              perf_ctx_lock(cpuctx, ctx);
 -              perf_pmu_disable(cpuctx->ctx.pmu);
 -
 -              perf_event_sched_in(cpuctx, ctx, current);
 -
 -              perf_pmu_enable(cpuctx->ctx.pmu);
 -              perf_ctx_unlock(cpuctx, ctx);
 -      }
 -      srcu_read_unlock(&pmus_srcu, idx);
 -}
 -
 -static int perf_resume(void)
 -{
 -      get_online_cpus();
 -      smp_call_function(perf_pm_resume_cpu, NULL, 1);
 -      put_online_cpus();
 -
 -      return NOTIFY_OK;
 -}
 -
 -static int perf_suspend(void)
 -{
 -      get_online_cpus();
 -      smp_call_function(perf_pm_suspend_cpu, NULL, 1);
 -      put_online_cpus();
 -
 -      return NOTIFY_OK;
 -}
 -
 -static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
 -{
 -      switch (action) {
 -      case PM_POST_HIBERNATION:
 -      case PM_POST_SUSPEND:
 -              return perf_resume();
 -      case PM_HIBERNATION_PREPARE:
 -      case PM_SUSPEND_PREPARE:
 -              return perf_suspend();
 -      default:
 -              return NOTIFY_DONE;
 -      }
 -}
 -
 -static struct notifier_block perf_pm_notifier = {
 -      .notifier_call = perf_pm,
 -};
 -
  void __init perf_event_init(void)
  {
        int ret;
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
        register_reboot_notifier(&perf_reboot_notifier);
 -      register_pm_notifier(&perf_pm_notifier);
  
        ret = init_hw_breakpoint();
        WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
diff --combined kernel/freezer.c
@@@ -6,7 -6,7 +6,7 @@@
  
  #include <linux/interrupt.h>
  #include <linux/suspend.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/syscalls.h>
  #include <linux/freezer.h>
  
@@@ -67,7 -67,7 +67,7 @@@ static void fake_signal_wake_up(struct 
        unsigned long flags;
  
        spin_lock_irqsave(&p->sighand->siglock, flags);
 -      signal_wake_up(p, 1);
 +      signal_wake_up(p, 0);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
  }
  
diff --combined kernel/module.c
@@@ -16,7 -16,7 +16,7 @@@
      along with this program; if not, write to the Free Software
      Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/moduleloader.h>
  #include <linux/ftrace_event.h>
  #include <linux/init.h>
@@@ -2487,9 -2487,6 +2487,9 @@@ static int check_modinfo(struct module 
                return -ENOEXEC;
        }
  
 +      if (!get_modinfo(info, "intree"))
 +              add_taint_module(mod, TAINT_OOT_MODULE);
 +
        if (get_modinfo(info, "staging")) {
                add_taint_module(mod, TAINT_CRAP);
                printk(KERN_WARNING "%s: module is from the staging directory,"
@@@ -2881,7 -2878,8 +2881,7 @@@ static struct module *load_module(void 
        }
  
        /* This has to be done once we're sure module name is unique. */
 -      if (!mod->taints || mod->taints == (1U<<TAINT_CRAP))
 -              dynamic_debug_setup(info.debug, info.num_debug);
 +      dynamic_debug_setup(info.debug, info.num_debug);
  
        /* Find duplicate symbols */
        err = verify_export_symbols(mod);
        module_bug_cleanup(mod);
  
   ddebug:
 -      if (!mod->taints || mod->taints == (1U<<TAINT_CRAP))
 -              dynamic_debug_remove(info.debug);
 +      dynamic_debug_remove(info.debug);
   unlock:
        mutex_unlock(&module_mutex);
        synchronize_sched();
@@@ -3258,8 -3257,6 +3258,8 @@@ static char *module_flags(struct modul
                buf[bx++] = '(';
                if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
                        buf[bx++] = 'P';
 +              else if (mod->taints & (1 << TAINT_OOT_MODULE))
 +                      buf[bx++] = 'O';
                if (mod->taints & (1 << TAINT_FORCED_MODULE))
                        buf[bx++] = 'F';
                if (mod->taints & (1 << TAINT_CRAP))
diff --combined kernel/power/qos.c
@@@ -43,6 -43,7 +43,7 @@@
  #include <linux/kernel.h>
  
  #include <linux/uaccess.h>
+ #include <linux/export.h>
  
  /*
   * locking rule: all changes to constraints or notifiers lists
@@@ -386,7 -387,8 +387,7 @@@ static int pm_qos_power_open(struct ino
                pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
                filp->private_data = req;
  
 -              if (filp->private_data)
 -                      return 0;
 +              return 0;
        }
        return -EPERM;
  }
diff --combined kernel/stop_machine.c
@@@ -12,7 -12,7 +12,7 @@@
  #include <linux/cpu.h>
  #include <linux/init.h>
  #include <linux/kthread.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/percpu.h>
  #include <linux/sched.h>
  #include <linux/stop_machine.h>
@@@ -41,7 -41,6 +41,7 @@@ struct cpu_stopper 
  };
  
  static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
 +static bool stop_machine_initialized = false;
  
  static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
  {
@@@ -387,8 -386,6 +387,8 @@@ static int __init cpu_stop_init(void
        cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
        register_cpu_notifier(&cpu_stop_cpu_notifier);
  
 +      stop_machine_initialized = true;
 +
        return 0;
  }
  early_initcall(cpu_stop_init);
@@@ -488,25 -485,6 +488,25 @@@ int __stop_machine(int (*fn)(void *), v
                                            .num_threads = num_online_cpus(),
                                            .active_cpus = cpus };
  
 +      if (!stop_machine_initialized) {
 +              /*
 +               * Handle the case where stop_machine() is called
 +               * early in boot before stop_machine() has been
 +               * initialized.
 +               */
 +              unsigned long flags;
 +              int ret;
 +
 +              WARN_ON_ONCE(smdata.num_threads != 1);
 +
 +              local_irq_save(flags);
 +              hard_irq_disable();
 +              ret = (*fn)(data);
 +              local_irq_restore(flags);
 +
 +              return ret;
 +      }
 +
        /* Set the initial state and stop all online cpus. */
        set_state(&smdata, STOPMACHINE_PREPARE);
        return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
diff --combined kernel/sys.c
@@@ -4,7 -4,7 +4,7 @@@
   *  Copyright (C) 1991, 1992  Linus Torvalds
   */
  
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/mm.h>
  #include <linux/utsname.h>
  #include <linux/mman.h>
@@@ -12,6 -12,7 +12,7 @@@
  #include <linux/prctl.h>
  #include <linux/highuid.h>
  #include <linux/fs.h>
+ #include <linux/kmod.h>
  #include <linux/perf_event.h>
  #include <linux/resource.h>
  #include <linux/kernel.h>
@@@ -1286,7 -1287,6 +1287,7 @@@ SYSCALL_DEFINE2(sethostname, char __use
                memset(u->nodename + len, 0, sizeof(u->nodename) - len);
                errno = 0;
        }
 +      uts_proc_notify(UTS_PROC_HOSTNAME);
        up_write(&uts_sem);
        return errno;
  }
@@@ -1337,7 -1337,6 +1338,7 @@@ SYSCALL_DEFINE2(setdomainname, char __u
                memset(u->domainname + len, 0, sizeof(u->domainname) - len);
                errno = 0;
        }
 +      uts_proc_notify(UTS_PROC_DOMAINNAME);
        up_write(&uts_sem);
        return errno;
  }
diff --combined kernel/utsname_sysctl.c
@@@ -9,11 -9,10 +9,11 @@@
   *  License.
   */
  
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/uts.h>
  #include <linux/utsname.h>
  #include <linux/sysctl.h>
 +#include <linux/wait.h>
  
  static void *get_uts(ctl_table *table, int write)
  {
@@@ -52,19 -51,12 +52,19 @@@ static int proc_do_uts_string(ctl_tabl
        uts_table.data = get_uts(table, write);
        r = proc_dostring(&uts_table,write,buffer,lenp, ppos);
        put_uts(table, write, uts_table.data);
 +
 +      if (write)
 +              proc_sys_poll_notify(table->poll);
 +
        return r;
  }
  #else
  #define proc_do_uts_string NULL
  #endif
  
 +static DEFINE_CTL_TABLE_POLL(hostname_poll);
 +static DEFINE_CTL_TABLE_POLL(domainname_poll);
 +
  static struct ctl_table uts_kern_table[] = {
        {
                .procname       = "ostype",
@@@ -93,7 -85,6 +93,7 @@@
                .maxlen         = sizeof(init_uts_ns.name.nodename),
                .mode           = 0644,
                .proc_handler   = proc_do_uts_string,
 +              .poll           = &hostname_poll,
        },
        {
                .procname       = "domainname",
                .maxlen         = sizeof(init_uts_ns.name.domainname),
                .mode           = 0644,
                .proc_handler   = proc_do_uts_string,
 +              .poll           = &domainname_poll,
        },
        {}
  };
@@@ -115,19 -105,6 +115,19 @@@ static struct ctl_table uts_root_table[
        {}
  };
  
 +#ifdef CONFIG_PROC_SYSCTL
 +/*
 + * Notify userspace about a change in a certain entry of uts_kern_table,
 + * identified by the parameter proc.
 + */
 +void uts_proc_notify(enum uts_proc proc)
 +{
 +      struct ctl_table *table = &uts_kern_table[proc];
 +
 +      proc_sys_poll_notify(table->poll);
 +}
 +#endif
 +
  static int __init utsname_sysctl_init(void)
  {
        register_sysctl_table(uts_root_table);
diff --combined mm/bounce.c
@@@ -4,7 -4,7 +4,7 @@@
   */
  
  #include <linux/mm.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/swap.h>
  #include <linux/gfp.h>
  #include <linux/bio.h>
@@@ -14,7 -14,6 +14,7 @@@
  #include <linux/init.h>
  #include <linux/hash.h>
  #include <linux/highmem.h>
 +#include <linux/bootmem.h>
  #include <asm/tlbflush.h>
  
  #include <trace/events/block.h>
@@@ -27,10 -26,12 +27,10 @@@ static mempool_t *page_pool, *isa_page_
  #ifdef CONFIG_HIGHMEM
  static __init int init_emergency_pool(void)
  {
 -      struct sysinfo i;
 -      si_meminfo(&i);
 -      si_swapinfo(&i);
 -
 -      if (!i.totalhigh)
 +#ifndef CONFIG_MEMORY_HOTPLUG
 +      if (max_pfn <= max_low_pfn)
                return 0;
 +#endif
  
        page_pool = mempool_create_page_pool(POOL_SIZE, 0);
        BUG_ON(!page_pool);
diff --combined mm/highmem.c
@@@ -17,7 -17,7 +17,7 @@@
   */
  
  #include <linux/mm.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/swap.h>
  #include <linux/bio.h>
  #include <linux/pagemap.h>
@@@ -250,7 -250,7 +250,7 @@@ void *kmap_high_get(struct page *page
  #endif
  
  /**
 - * kunmap_high - map a highmem page into memory
 + * kunmap_high - unmap a highmem page into memory
   * @page: &struct page to unmap
   *
   * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
diff --combined mm/memcontrol.c
@@@ -33,6 -33,7 +33,7 @@@
  #include <linux/bit_spinlock.h>
  #include <linux/rcupdate.h>
  #include <linux/limits.h>
+ #include <linux/export.h>
  #include <linux/mutex.h>
  #include <linux/rbtree.h>
  #include <linux/slab.h>
@@@ -201,8 -202,8 +202,8 @@@ struct mem_cgroup_eventfd_list 
        struct eventfd_ctx *eventfd;
  };
  
 -static void mem_cgroup_threshold(struct mem_cgroup *mem);
 -static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
 +static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 +static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
  
  /*
   * The memory controller data structure. The memory controller controls both
@@@ -362,29 -363,29 +363,29 @@@ enum charge_type 
  #define MEM_CGROUP_RECLAIM_SOFT_BIT   0x2
  #define MEM_CGROUP_RECLAIM_SOFT               (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
  
 -static void mem_cgroup_get(struct mem_cgroup *mem);
 -static void mem_cgroup_put(struct mem_cgroup *mem);
 -static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
 -static void drain_all_stock_async(struct mem_cgroup *mem);
 +static void mem_cgroup_get(struct mem_cgroup *memcg);
 +static void mem_cgroup_put(struct mem_cgroup *memcg);
 +static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
 +static void drain_all_stock_async(struct mem_cgroup *memcg);
  
  static struct mem_cgroup_per_zone *
 -mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
 +mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
  {
 -      return &mem->info.nodeinfo[nid]->zoneinfo[zid];
 +      return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
  }
  
 -struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
 +struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
  {
 -      return &mem->css;
 +      return &memcg->css;
  }
  
  static struct mem_cgroup_per_zone *
 -page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
 +page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
  {
        int nid = page_to_nid(page);
        int zid = page_zonenum(page);
  
 -      return mem_cgroup_zoneinfo(mem, nid, zid);
 +      return mem_cgroup_zoneinfo(memcg, nid, zid);
  }
  
  static struct mem_cgroup_tree_per_zone *
@@@ -403,7 -404,7 +404,7 @@@ soft_limit_tree_from_page(struct page *
  }
  
  static void
 -__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
 +__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz,
                                unsigned long long new_usage_in_excess)
  }
  
  static void
 -__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
 +__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz)
  {
  }
  
  static void
 -mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
 +mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz)
  {
        spin_lock(&mctz->lock);
 -      __mem_cgroup_remove_exceeded(mem, mz, mctz);
 +      __mem_cgroup_remove_exceeded(memcg, mz, mctz);
        spin_unlock(&mctz->lock);
  }
  
  
 -static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
 +static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
  {
        unsigned long long excess;
        struct mem_cgroup_per_zone *mz;
         * Necessary to update all ancestors when hierarchy is used.
         * because their event counter is not touched.
         */
 -      for (; mem; mem = parent_mem_cgroup(mem)) {
 -              mz = mem_cgroup_zoneinfo(mem, nid, zid);
 -              excess = res_counter_soft_limit_excess(&mem->res);
 +      for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 +              mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 +              excess = res_counter_soft_limit_excess(&memcg->res);
                /*
                 * We have to update the tree if mz is on RB-tree or
                 * mem is over its softlimit.
                        spin_lock(&mctz->lock);
                        /* if on-tree, remove it */
                        if (mz->on_tree)
 -                              __mem_cgroup_remove_exceeded(mem, mz, mctz);
 +                              __mem_cgroup_remove_exceeded(memcg, mz, mctz);
                        /*
                         * Insert again. mz->usage_in_excess will be updated.
                         * If excess is 0, no tree ops.
                         */
 -                      __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
 +                      __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
                        spin_unlock(&mctz->lock);
                }
        }
  }
  
 -static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
 +static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
  {
        int node, zone;
        struct mem_cgroup_per_zone *mz;
  
        for_each_node_state(node, N_POSSIBLE) {
                for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 -                      mz = mem_cgroup_zoneinfo(mem, node, zone);
 +                      mz = mem_cgroup_zoneinfo(memcg, node, zone);
                        mctz = soft_limit_tree_node_zone(node, zone);
 -                      mem_cgroup_remove_exceeded(mem, mz, mctz);
 +                      mem_cgroup_remove_exceeded(memcg, mz, mctz);
                }
        }
  }
@@@ -564,7 -565,7 +565,7 @@@ mem_cgroup_largest_soft_limit_node(stru
   * common workload, threashold and synchonization as vmstat[] should be
   * implemented.
   */
 -static long mem_cgroup_read_stat(struct mem_cgroup *mem,
 +static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
                                 enum mem_cgroup_stat_index idx)
  {
        long val = 0;
  
        get_online_cpus();
        for_each_online_cpu(cpu)
 -              val += per_cpu(mem->stat->count[idx], cpu);
 +              val += per_cpu(memcg->stat->count[idx], cpu);
  #ifdef CONFIG_HOTPLUG_CPU
 -      spin_lock(&mem->pcp_counter_lock);
 -      val += mem->nocpu_base.count[idx];
 -      spin_unlock(&mem->pcp_counter_lock);
 +      spin_lock(&memcg->pcp_counter_lock);
 +      val += memcg->nocpu_base.count[idx];
 +      spin_unlock(&memcg->pcp_counter_lock);
  #endif
        put_online_cpus();
        return val;
  }
  
 -static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
 +static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
                                         bool charge)
  {
        int val = (charge) ? 1 : -1;
 -      this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
 +      this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
  }
  
 -void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
 +void mem_cgroup_pgfault(struct mem_cgroup *memcg, int val)
  {
 -      this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
 +      this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
  }
  
 -void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
 +void mem_cgroup_pgmajfault(struct mem_cgroup *memcg, int val)
  {
 -      this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
 +      this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
  }
  
 -static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
 +static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
                                            enum mem_cgroup_events_index idx)
  {
        unsigned long val = 0;
        int cpu;
  
        for_each_online_cpu(cpu)
 -              val += per_cpu(mem->stat->events[idx], cpu);
 +              val += per_cpu(memcg->stat->events[idx], cpu);
  #ifdef CONFIG_HOTPLUG_CPU
 -      spin_lock(&mem->pcp_counter_lock);
 -      val += mem->nocpu_base.events[idx];
 -      spin_unlock(&mem->pcp_counter_lock);
 +      spin_lock(&memcg->pcp_counter_lock);
 +      val += memcg->nocpu_base.events[idx];
 +      spin_unlock(&memcg->pcp_counter_lock);
  #endif
        return val;
  }
  
 -static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
 +static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
                                         bool file, int nr_pages)
  {
        preempt_disable();
  
        if (file)
 -              __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
 +              __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 +                              nr_pages);
        else
 -              __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
 +              __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 +                              nr_pages);
  
        /* pagein of a big page is an event. So, ignore page size */
        if (nr_pages > 0)
 -              __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
 +              __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
        else {
 -              __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
 +              __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
                nr_pages = -nr_pages; /* for event */
        }
  
 -      __this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
 +      __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
  
        preempt_enable();
  }
  
  unsigned long
 -mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid,
 +mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
                        unsigned int lru_mask)
  {
        struct mem_cgroup_per_zone *mz;
        enum lru_list l;
        unsigned long ret = 0;
  
 -      mz = mem_cgroup_zoneinfo(mem, nid, zid);
 +      mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  
        for_each_lru(l) {
                if (BIT(l) & lru_mask)
  }
  
  static unsigned long
 -mem_cgroup_node_nr_lru_pages(struct mem_cgroup *mem,
 +mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                        int nid, unsigned int lru_mask)
  {
        u64 total = 0;
        int zid;
  
        for (zid = 0; zid < MAX_NR_ZONES; zid++)
 -              total += mem_cgroup_zone_nr_lru_pages(mem, nid, zid, lru_mask);
 +              total += mem_cgroup_zone_nr_lru_pages(memcg,
 +                                              nid, zid, lru_mask);
  
        return total;
  }
  
 -static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *mem,
 +static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
                        unsigned int lru_mask)
  {
        int nid;
        u64 total = 0;
  
        for_each_node_state(nid, N_HIGH_MEMORY)
 -              total += mem_cgroup_node_nr_lru_pages(mem, nid, lru_mask);
 +              total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
        return total;
  }
  
 -static bool __memcg_event_check(struct mem_cgroup *mem, int target)
 +static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
  {
        unsigned long val, next;
  
 -      val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
 -      next = this_cpu_read(mem->stat->targets[target]);
 +      val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
 +      next = __this_cpu_read(memcg->stat->targets[target]);
        /* from time_after() in jiffies.h */
        return ((long)next - (long)val < 0);
  }
  
 -static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
 +static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
  {
        unsigned long val, next;
  
 -      val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
 +      val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
  
        switch (target) {
        case MEM_CGROUP_TARGET_THRESH:
                return;
        }
  
 -      this_cpu_write(mem->stat->targets[target], next);
 +      __this_cpu_write(memcg->stat->targets[target], next);
  }
  
  /*
   * Check events in order.
   *
   */
 -static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
 +static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
  {
 +      preempt_disable();
        /* threshold event is triggered in finer grain than soft limit */
 -      if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
 -              mem_cgroup_threshold(mem);
 -              __mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
 -              if (unlikely(__memcg_event_check(mem,
 +      if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
 +              mem_cgroup_threshold(memcg);
 +              __mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH);
 +              if (unlikely(__memcg_event_check(memcg,
                             MEM_CGROUP_TARGET_SOFTLIMIT))) {
 -                      mem_cgroup_update_tree(mem, page);
 -                      __mem_cgroup_target_update(mem,
 +                      mem_cgroup_update_tree(memcg, page);
 +                      __mem_cgroup_target_update(memcg,
                                                   MEM_CGROUP_TARGET_SOFTLIMIT);
                }
  #if MAX_NUMNODES > 1
 -              if (unlikely(__memcg_event_check(mem,
 +              if (unlikely(__memcg_event_check(memcg,
                        MEM_CGROUP_TARGET_NUMAINFO))) {
 -                      atomic_inc(&mem->numainfo_events);
 -                      __mem_cgroup_target_update(mem,
 +                      atomic_inc(&memcg->numainfo_events);
 +                      __mem_cgroup_target_update(memcg,
                                MEM_CGROUP_TARGET_NUMAINFO);
                }
  #endif
        }
 +      preempt_enable();
  }
  
  static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
@@@ -767,7 -763,7 +768,7 @@@ struct mem_cgroup *mem_cgroup_from_task
  
  struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
  {
 -      struct mem_cgroup *mem = NULL;
 +      struct mem_cgroup *memcg = NULL;
  
        if (!mm)
                return NULL;
         */
        rcu_read_lock();
        do {
 -              mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
 -              if (unlikely(!mem))
 +              memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 +              if (unlikely(!memcg))
                        break;
 -      } while (!css_tryget(&mem->css));
 +      } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
 -      return mem;
 +      return memcg;
  }
  
  /* The caller has to guarantee "mem" exists before calling this */
 -static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
 +static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *memcg)
  {
        struct cgroup_subsys_state *css;
        int found;
  
 -      if (!mem) /* ROOT cgroup has the smallest ID */
 +      if (!memcg) /* ROOT cgroup has the smallest ID */
                return root_mem_cgroup; /*css_put/get against root is ignored*/
 -      if (!mem->use_hierarchy) {
 -              if (css_tryget(&mem->css))
 -                      return mem;
 +      if (!memcg->use_hierarchy) {
 +              if (css_tryget(&memcg->css))
 +                      return memcg;
                return NULL;
        }
        rcu_read_lock();
         * searching a memory cgroup which has the smallest ID under given
         * ROOT cgroup. (ID >= 1)
         */
 -      css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
 +      css = css_get_next(&mem_cgroup_subsys, 1, &memcg->css, &found);
        if (css && css_tryget(css))
 -              mem = container_of(css, struct mem_cgroup, css);
 +              memcg = container_of(css, struct mem_cgroup, css);
        else
 -              mem = NULL;
 +              memcg = NULL;
        rcu_read_unlock();
 -      return mem;
 +      return memcg;
  }
  
  static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
        for_each_mem_cgroup_tree_cond(iter, NULL, true)
  
  
 -static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
 +static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
  {
 -      return (mem == root_mem_cgroup);
 +      return (memcg == root_mem_cgroup);
  }
  
  void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
  {
 -      struct mem_cgroup *mem;
 +      struct mem_cgroup *memcg;
  
        if (!mm)
                return;
  
        rcu_read_lock();
 -      mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
 -      if (unlikely(!mem))
 +      memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 +      if (unlikely(!memcg))
                goto out;
  
        switch (idx) {
        case PGMAJFAULT:
 -              mem_cgroup_pgmajfault(mem, 1);
 +              mem_cgroup_pgmajfault(memcg, 1);
                break;
        case PGFAULT:
 -              mem_cgroup_pgfault(mem, 1);
 +              mem_cgroup_pgfault(memcg, 1);
                break;
        default:
                BUG();
@@@ -995,16 -991,6 +996,16 @@@ void mem_cgroup_add_lru_list(struct pag
                return;
        pc = lookup_page_cgroup(page);
        VM_BUG_ON(PageCgroupAcctLRU(pc));
 +      /*
 +       * putback:                             charge:
 +       * SetPageLRU                           SetPageCgroupUsed
 +       * smp_mb                               smp_mb
 +       * PageCgroupUsed && add to memcg LRU   PageLRU && add to memcg LRU
 +       *
 +       * Ensure that one of the two sides adds the page to the memcg
 +       * LRU during a race.
 +       */
 +      smp_mb();
        if (!PageCgroupUsed(pc))
                return;
        /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
@@@ -1056,16 -1042,7 +1057,16 @@@ static void mem_cgroup_lru_add_after_co
        unsigned long flags;
        struct zone *zone = page_zone(page);
        struct page_cgroup *pc = lookup_page_cgroup(page);
 -
 +      /*
 +       * putback:                             charge:
 +       * SetPageLRU                           SetPageCgroupUsed
 +       * smp_mb                               smp_mb
 +       * PageCgroupUsed && add to memcg LRU   PageLRU && add to memcg LRU
 +       *
 +       * Ensure that one of the two sides adds the page to the memcg
 +       * LRU during a race.
 +       */
 +      smp_mb();
        /* taking care of that the page is added to LRU while we commit it */
        if (likely(!PageLRU(page)))
                return;
@@@ -1087,21 -1064,21 +1088,21 @@@ void mem_cgroup_move_lists(struct page 
  }
  
  /*
 - * Checks whether given mem is same or in the root_mem's
 + * Checks whether given mem is same or in the root_mem_cgroup's
   * hierarchy subtree
   */
 -static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_mem,
 -              struct mem_cgroup *mem)
 +static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
 +              struct mem_cgroup *memcg)
  {
 -      if (root_mem != mem) {
 -              return (root_mem->use_hierarchy &&
 -                      css_is_ancestor(&mem->css, &root_mem->css));
 +      if (root_memcg != memcg) {
 +              return (root_memcg->use_hierarchy &&
 +                      css_is_ancestor(&memcg->css, &root_memcg->css));
        }
  
        return true;
  }
  
 -int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 +int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
  {
        int ret;
        struct mem_cgroup *curr = NULL;
        if (!curr)
                return 0;
        /*
 -       * We should check use_hierarchy of "mem" not "curr". Because checking
 +       * We should check use_hierarchy of "memcg" not "curr". Because checking
         * use_hierarchy of "curr" here make this function true if hierarchy is
 -       * enabled in "curr" and "curr" is a child of "mem" in *cgroup*
 -       * hierarchy(even if use_hierarchy is disabled in "mem").
 +       * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
 +       * hierarchy(even if use_hierarchy is disabled in "memcg").
         */
 -      ret = mem_cgroup_same_or_subtree(mem, curr);
 +      ret = mem_cgroup_same_or_subtree(memcg, curr);
        css_put(&curr->css);
        return ret;
  }
  
 -static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
 +int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
  {
 -      unsigned long active;
 +      unsigned long inactive_ratio;
 +      int nid = zone_to_nid(zone);
 +      int zid = zone_idx(zone);
        unsigned long inactive;
 +      unsigned long active;
        unsigned long gb;
 -      unsigned long inactive_ratio;
  
 -      inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
 -      active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
 +      inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
 +                                              BIT(LRU_INACTIVE_ANON));
 +      active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
 +                                            BIT(LRU_ACTIVE_ANON));
  
        gb = (inactive + active) >> (30 - PAGE_SHIFT);
        if (gb)
        else
                inactive_ratio = 1;
  
 -      if (present_pages) {
 -              present_pages[0] = inactive;
 -              present_pages[1] = active;
 -      }
 -
 -      return inactive_ratio;
 +      return inactive * inactive_ratio < active;
  }
  
 -int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
 -{
 -      unsigned long active;
 -      unsigned long inactive;
 -      unsigned long present_pages[2];
 -      unsigned long inactive_ratio;
 -
 -      inactive_ratio = calc_inactive_ratio(memcg, present_pages);
 -
 -      inactive = present_pages[0];
 -      active = present_pages[1];
 -
 -      if (inactive * inactive_ratio < active)
 -              return 1;
 -
 -      return 0;
 -}
 -
 -int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
 +int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
  {
        unsigned long active;
        unsigned long inactive;
 +      int zid = zone_idx(zone);
 +      int nid = zone_to_nid(zone);
  
 -      inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
 -      active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
 +      inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
 +                                              BIT(LRU_INACTIVE_FILE));
 +      active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
 +                                            BIT(LRU_ACTIVE_FILE));
  
        return (active > inactive);
  }
@@@ -1194,8 -1186,7 +1195,8 @@@ mem_cgroup_get_reclaim_stat_from_page(s
  unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
 -                                      int mode, struct zone *z,
 +                                      isolate_mode_t mode,
 +                                      struct zone *z,
                                        struct mem_cgroup *mem_cont,
                                        int active, int file)
  {
   * Returns the maximum amount of memory @mem can be charged with, in
   * pages.
   */
 -static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
 +static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
  {
        unsigned long long margin;
  
 -      margin = res_counter_margin(&mem->res);
 +      margin = res_counter_margin(&memcg->res);
        if (do_swap_account)
 -              margin = min(margin, res_counter_margin(&mem->memsw));
 +              margin = min(margin, res_counter_margin(&memcg->memsw));
        return margin >> PAGE_SHIFT;
  }
  
@@@ -1284,33 -1275,33 +1285,33 @@@ int mem_cgroup_swappiness(struct mem_cg
        return memcg->swappiness;
  }
  
 -static void mem_cgroup_start_move(struct mem_cgroup *mem)
 +static void mem_cgroup_start_move(struct mem_cgroup *memcg)
  {
        int cpu;
  
        get_online_cpus();
 -      spin_lock(&mem->pcp_counter_lock);
 +      spin_lock(&memcg->pcp_counter_lock);
        for_each_online_cpu(cpu)
 -              per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
 -      mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
 -      spin_unlock(&mem->pcp_counter_lock);
 +              per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
 +      memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
 +      spin_unlock(&memcg->pcp_counter_lock);
        put_online_cpus();
  
        synchronize_rcu();
  }
  
 -static void mem_cgroup_end_move(struct mem_cgroup *mem)
 +static void mem_cgroup_end_move(struct mem_cgroup *memcg)
  {
        int cpu;
  
 -      if (!mem)
 +      if (!memcg)
                return;
        get_online_cpus();
 -      spin_lock(&mem->pcp_counter_lock);
 +      spin_lock(&memcg->pcp_counter_lock);
        for_each_online_cpu(cpu)
 -              per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
 -      mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
 -      spin_unlock(&mem->pcp_counter_lock);
 +              per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
 +      memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
 +      spin_unlock(&memcg->pcp_counter_lock);
        put_online_cpus();
  }
  /*
   *                      waiting at hith-memory prressure caused by "move".
   */
  
 -static bool mem_cgroup_stealed(struct mem_cgroup *mem)
 +static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
  {
        VM_BUG_ON(!rcu_read_lock_held());
 -      return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
 +      return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
  }
  
 -static bool mem_cgroup_under_move(struct mem_cgroup *mem)
 +static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
  {
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        if (!from)
                goto unlock;
  
 -      ret = mem_cgroup_same_or_subtree(mem, from)
 -              || mem_cgroup_same_or_subtree(mem, to);
 +      ret = mem_cgroup_same_or_subtree(memcg, from)
 +              || mem_cgroup_same_or_subtree(memcg, to);
  unlock:
        spin_unlock(&mc.lock);
        return ret;
  }
  
 -static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
 +static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
  {
        if (mc.moving_task && current != mc.moving_task) {
 -              if (mem_cgroup_under_move(mem)) {
 +              if (mem_cgroup_under_move(memcg)) {
                        DEFINE_WAIT(wait);
                        prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
                        /* moving charge context might have finished. */
@@@ -1440,12 -1431,12 +1441,12 @@@ done
   * This function returns the number of memcg under hierarchy tree. Returns
   * 1(self count) if no children.
   */
 -static int mem_cgroup_count_children(struct mem_cgroup *mem)
 +static int mem_cgroup_count_children(struct mem_cgroup *memcg)
  {
        int num = 0;
        struct mem_cgroup *iter;
  
 -      for_each_mem_cgroup_tree(iter, mem)
 +      for_each_mem_cgroup_tree(iter, memcg)
                num++;
        return num;
  }
@@@ -1475,21 -1466,21 +1476,21 @@@ u64 mem_cgroup_get_limit(struct mem_cgr
   * that to reclaim free pages from.
   */
  static struct mem_cgroup *
 -mem_cgroup_select_victim(struct mem_cgroup *root_mem)
 +mem_cgroup_select_victim(struct mem_cgroup *root_memcg)
  {
        struct mem_cgroup *ret = NULL;
        struct cgroup_subsys_state *css;
        int nextid, found;
  
 -      if (!root_mem->use_hierarchy) {
 -              css_get(&root_mem->css);
 -              ret = root_mem;
 +      if (!root_memcg->use_hierarchy) {
 +              css_get(&root_memcg->css);
 +              ret = root_memcg;
        }
  
        while (!ret) {
                rcu_read_lock();
 -              nextid = root_mem->last_scanned_child + 1;
 -              css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
 +              nextid = root_memcg->last_scanned_child + 1;
 +              css = css_get_next(&mem_cgroup_subsys, nextid, &root_memcg->css,
                                   &found);
                if (css && css_tryget(css))
                        ret = container_of(css, struct mem_cgroup, css);
                /* Updates scanning parameter */
                if (!css) {
                        /* this means start scan from ID:1 */
 -                      root_mem->last_scanned_child = 0;
 +                      root_memcg->last_scanned_child = 0;
                } else
 -                      root_mem->last_scanned_child = found;
 +                      root_memcg->last_scanned_child = found;
        }
  
        return ret;
   * reclaimable pages on a node. Returns true if there are any reclaimable
   * pages in the node.
   */
 -static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
 +static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
                int nid, bool noswap)
  {
 -      if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_FILE))
 +      if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
                return true;
        if (noswap || !total_swap_pages)
                return false;
 -      if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_ANON))
 +      if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
                return true;
        return false;
  
   * nodes based on the zonelist. So update the list loosely once per 10 secs.
   *
   */
 -static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
 +static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
  {
        int nid;
        /*
         * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
         * pagein/pageout changes since the last update.
         */
 -      if (!atomic_read(&mem->numainfo_events))
 +      if (!atomic_read(&memcg->numainfo_events))
                return;
 -      if (atomic_inc_return(&mem->numainfo_updating) > 1)
 +      if (atomic_inc_return(&memcg->numainfo_updating) > 1)
                return;
  
        /* make a nodemask where this memcg uses memory from */
 -      mem->scan_nodes = node_states[N_HIGH_MEMORY];
 +      memcg->scan_nodes = node_states[N_HIGH_MEMORY];
  
        for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
  
 -              if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
 -                      node_clear(nid, mem->scan_nodes);
 +              if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
 +                      node_clear(nid, memcg->scan_nodes);
        }
  
 -      atomic_set(&mem->numainfo_events, 0);
 -      atomic_set(&mem->numainfo_updating, 0);
 +      atomic_set(&memcg->numainfo_events, 0);
 +      atomic_set(&memcg->numainfo_updating, 0);
  }
  
  /*
   *
   * Now, we use round-robin. Better algorithm is welcomed.
   */
 -int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
 +int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  {
        int node;
  
 -      mem_cgroup_may_update_nodemask(mem);
 -      node = mem->last_scanned_node;
 +      mem_cgroup_may_update_nodemask(memcg);
 +      node = memcg->last_scanned_node;
  
 -      node = next_node(node, mem->scan_nodes);
 +      node = next_node(node, memcg->scan_nodes);
        if (node == MAX_NUMNODES)
 -              node = first_node(mem->scan_nodes);
 +              node = first_node(memcg->scan_nodes);
        /*
         * We call this when we hit limit, not when pages are added to LRU.
         * No LRU may hold pages because all pages are UNEVICTABLE or
        if (unlikely(node == MAX_NUMNODES))
                node = numa_node_id();
  
 -      mem->last_scanned_node = node;
 +      memcg->last_scanned_node = node;
        return node;
  }
  
   * unused nodes. But scan_nodes is lazily updated and may not cotain
   * enough new information. We need to do double check.
   */
 -bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
 +bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
  {
        int nid;
  
         * quick check...making use of scan_node.
         * We can skip unused nodes.
         */
 -      if (!nodes_empty(mem->scan_nodes)) {
 -              for (nid = first_node(mem->scan_nodes);
 +      if (!nodes_empty(memcg->scan_nodes)) {
 +              for (nid = first_node(memcg->scan_nodes);
                     nid < MAX_NUMNODES;
 -                   nid = next_node(nid, mem->scan_nodes)) {
 +                   nid = next_node(nid, memcg->scan_nodes)) {
  
 -                      if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
 +                      if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
                                return true;
                }
        }
         * Check rest of nodes.
         */
        for_each_node_state(nid, N_HIGH_MEMORY) {
 -              if (node_isset(nid, mem->scan_nodes))
 +              if (node_isset(nid, memcg->scan_nodes))
                        continue;
 -              if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
 +              if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
                        return true;
        }
        return false;
  }
  
  #else
 -int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
 +int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  {
        return 0;
  }
  
 -bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
 +bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
  {
 -      return test_mem_cgroup_node_reclaimable(mem, 0, noswap);
 +      return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
  }
  #endif
  
   * we reclaimed from, so that we don't end up penalizing one child extensively
   * based on its position in the children list.
   *
 - * root_mem is the original ancestor that we've been reclaim from.
 + * root_memcg is the original ancestor that we've been reclaim from.
   *
 - * We give up and return to the caller when we visit root_mem twice.
 + * We give up and return to the caller when we visit root_memcg twice.
   * (other groups can be removed while we're walking....)
   *
   * If shrink==true, for avoiding to free too much, this returns immedieately.
   */
 -static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 +static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_memcg,
                                                struct zone *zone,
                                                gfp_t gfp_mask,
                                                unsigned long reclaim_options,
        unsigned long excess;
        unsigned long nr_scanned;
  
 -      excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
 +      excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
  
        /* If memsw_is_minimum==1, swap-out is of-no-use. */
 -      if (!check_soft && !shrink && root_mem->memsw_is_minimum)
 +      if (!check_soft && !shrink && root_memcg->memsw_is_minimum)
                noswap = true;
  
        while (1) {
 -              victim = mem_cgroup_select_victim(root_mem);
 -              if (victim == root_mem) {
 +              victim = mem_cgroup_select_victim(root_memcg);
 +              if (victim == root_memcg) {
                        loop++;
                        /*
                         * We are not draining per cpu cached charges during
                         * charges will not give any.
                         */
                        if (!check_soft && loop >= 1)
 -                              drain_all_stock_async(root_mem);
 +                              drain_all_stock_async(root_memcg);
                        if (loop >= 2) {
                                /*
                                 * If we have not been able to reclaim
                        return ret;
                total += ret;
                if (check_soft) {
 -                      if (!res_counter_soft_limit_excess(&root_mem->res))
 +                      if (!res_counter_soft_limit_excess(&root_memcg->res))
                                return total;
 -              } else if (mem_cgroup_margin(root_mem))
 +              } else if (mem_cgroup_margin(root_memcg))
                        return total;
        }
        return total;
   * If someone is running, return false.
   * Has to be called with memcg_oom_lock
   */
 -static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
 +static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
  {
        struct mem_cgroup *iter, *failed = NULL;
        bool cond = true;
  
 -      for_each_mem_cgroup_tree_cond(iter, mem, cond) {
 +      for_each_mem_cgroup_tree_cond(iter, memcg, cond) {
                if (iter->oom_lock) {
                        /*
                         * this subtree of our hierarchy is already locked
         * what we set up to the failing subtree
         */
        cond = true;
 -      for_each_mem_cgroup_tree_cond(iter, mem, cond) {
 +      for_each_mem_cgroup_tree_cond(iter, memcg, cond) {
                if (iter == failed) {
                        cond = false;
                        continue;
  /*
   * Has to be called with memcg_oom_lock
   */
 -static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
 +static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
  {
        struct mem_cgroup *iter;
  
 -      for_each_mem_cgroup_tree(iter, mem)
 +      for_each_mem_cgroup_tree(iter, memcg)
                iter->oom_lock = false;
        return 0;
  }
  
 -static void mem_cgroup_mark_under_oom(struct mem_cgroup *mem)
 +static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
  {
        struct mem_cgroup *iter;
  
 -      for_each_mem_cgroup_tree(iter, mem)
 +      for_each_mem_cgroup_tree(iter, memcg)
                atomic_inc(&iter->under_oom);
  }
  
 -static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
 +static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
  {
        struct mem_cgroup *iter;
  
         * mem_cgroup_oom_lock() may not be called. We have to use
         * atomic_add_unless() here.
         */
 -      for_each_mem_cgroup_tree(iter, mem)
 +      for_each_mem_cgroup_tree(iter, memcg)
                atomic_add_unless(&iter->under_oom, -1, 0);
  }
  
@@@ -1826,85 -1817,85 +1827,85 @@@ struct oom_wait_info 
  static int memcg_oom_wake_function(wait_queue_t *wait,
        unsigned mode, int sync, void *arg)
  {
 -      struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg,
 -                        *oom_wait_mem;
 +      struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg,
 +                        *oom_wait_memcg;
        struct oom_wait_info *oom_wait_info;
  
        oom_wait_info = container_of(wait, struct oom_wait_info, wait);
 -      oom_wait_mem = oom_wait_info->mem;
 +      oom_wait_memcg = oom_wait_info->mem;
  
        /*
         * Both of oom_wait_info->mem and wake_mem are stable under us.
         * Then we can use css_is_ancestor without taking care of RCU.
         */
 -      if (!mem_cgroup_same_or_subtree(oom_wait_mem, wake_mem)
 -                      && !mem_cgroup_same_or_subtree(wake_mem, oom_wait_mem))
 +      if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
 +              && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
                return 0;
        return autoremove_wake_function(wait, mode, sync, arg);
  }
  
 -static void memcg_wakeup_oom(struct mem_cgroup *mem)
 +static void memcg_wakeup_oom(struct mem_cgroup *memcg)
  {
 -      /* for filtering, pass "mem" as argument. */
 -      __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
 +      /* for filtering, pass "memcg" as argument. */
 +      __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
  }
  
 -static void memcg_oom_recover(struct mem_cgroup *mem)
 +static void memcg_oom_recover(struct mem_cgroup *memcg)
  {
 -      if (mem && atomic_read(&mem->under_oom))
 -              memcg_wakeup_oom(mem);
 +      if (memcg && atomic_read(&memcg->under_oom))
 +              memcg_wakeup_oom(memcg);
  }
  
  /*
   * try to call OOM killer. returns false if we should exit memory-reclaim loop.
   */
 -bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 +bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask)
  {
        struct oom_wait_info owait;
        bool locked, need_to_kill;
  
 -      owait.mem = mem;
 +      owait.mem = memcg;
        owait.wait.flags = 0;
        owait.wait.func = memcg_oom_wake_function;
        owait.wait.private = current;
        INIT_LIST_HEAD(&owait.wait.task_list);
        need_to_kill = true;
 -      mem_cgroup_mark_under_oom(mem);
 +      mem_cgroup_mark_under_oom(memcg);
  
 -      /* At first, try to OOM lock hierarchy under mem.*/
 +      /* At first, try to OOM lock hierarchy under memcg.*/
        spin_lock(&memcg_oom_lock);
 -      locked = mem_cgroup_oom_lock(mem);
 +      locked = mem_cgroup_oom_lock(memcg);
        /*
         * Even if signal_pending(), we can't quit charge() loop without
         * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
         * under OOM is always welcomed, use TASK_KILLABLE here.
         */
        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
 -      if (!locked || mem->oom_kill_disable)
 +      if (!locked || memcg->oom_kill_disable)
                need_to_kill = false;
        if (locked)
 -              mem_cgroup_oom_notify(mem);
 +              mem_cgroup_oom_notify(memcg);
        spin_unlock(&memcg_oom_lock);
  
        if (need_to_kill) {
                finish_wait(&memcg_oom_waitq, &owait.wait);
 -              mem_cgroup_out_of_memory(mem, mask);
 +              mem_cgroup_out_of_memory(memcg, mask);
        } else {
                schedule();
                finish_wait(&memcg_oom_waitq, &owait.wait);
        }
        spin_lock(&memcg_oom_lock);
        if (locked)
 -              mem_cgroup_oom_unlock(mem);
 -      memcg_wakeup_oom(mem);
 +              mem_cgroup_oom_unlock(memcg);
 +      memcg_wakeup_oom(memcg);
        spin_unlock(&memcg_oom_lock);
  
 -      mem_cgroup_unmark_under_oom(mem);
 +      mem_cgroup_unmark_under_oom(memcg);
  
        if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
                return false;
        /* Give chance to dying process */
 -      schedule_timeout(1);
 +      schedule_timeout_uninterruptible(1);
        return true;
  }
  
  void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_page_stat_item idx, int val)
  {
 -      struct mem_cgroup *mem;
 +      struct mem_cgroup *memcg;
        struct page_cgroup *pc = lookup_page_cgroup(page);
        bool need_unlock = false;
        unsigned long uninitialized_var(flags);
                return;
  
        rcu_read_lock();
 -      mem = pc->mem_cgroup;
 -      if (unlikely(!mem || !PageCgroupUsed(pc)))
 +      memcg = pc->mem_cgroup;
 +      if (unlikely(!memcg || !PageCgroupUsed(pc)))
                goto out;
        /* pc->mem_cgroup is unstable ? */
 -      if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
 +      if (unlikely(mem_cgroup_stealed(memcg)) || PageTransHuge(page)) {
                /* take a lock against to access pc->mem_cgroup */
                move_lock_page_cgroup(pc, &flags);
                need_unlock = true;
 -              mem = pc->mem_cgroup;
 -              if (!mem || !PageCgroupUsed(pc))
 +              memcg = pc->mem_cgroup;
 +              if (!memcg || !PageCgroupUsed(pc))
                        goto out;
        }
  
                BUG();
        }
  
 -      this_cpu_add(mem->stat->count[idx], val);
 +      this_cpu_add(memcg->stat->count[idx], val);
  
  out:
        if (unlikely(need_unlock))
@@@ -2000,13 -1991,13 +2001,13 @@@ static DEFINE_MUTEX(percpu_charge_mutex
   * cgroup which is not current target, returns false. This stock will be
   * refilled.
   */
 -static bool consume_stock(struct mem_cgroup *mem)
 +static bool consume_stock(struct mem_cgroup *memcg)
  {
        struct memcg_stock_pcp *stock;
        bool ret = true;
  
        stock = &get_cpu_var(memcg_stock);
 -      if (mem == stock->cached && stock->nr_pages)
 +      if (memcg == stock->cached && stock->nr_pages)
                stock->nr_pages--;
        else /* need to call res_counter_charge */
                ret = false;
@@@ -2047,24 -2038,24 +2048,24 @@@ static void drain_local_stock(struct wo
   * Cache charges(val) which is from res_counter, to local per_cpu area.
   * This will be consumed by consume_stock() function, later.
   */
 -static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
 +static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
  {
        struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
  
 -      if (stock->cached != mem) { /* reset if necessary */
 +      if (stock->cached != memcg) { /* reset if necessary */
                drain_stock(stock);
 -              stock->cached = mem;
 +              stock->cached = memcg;
        }
        stock->nr_pages += nr_pages;
        put_cpu_var(memcg_stock);
  }
  
  /*
 - * Drains all per-CPU charge caches for given root_mem resp. subtree
 + * Drains all per-CPU charge caches for given root_memcg resp. subtree
   * of the hierarchy under it. sync flag says whether we should block
   * until the work is done.
   */
 -static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 +static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
  {
        int cpu, curcpu;
  
        curcpu = get_cpu();
        for_each_online_cpu(cpu) {
                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 -              struct mem_cgroup *mem;
 +              struct mem_cgroup *memcg;
  
 -              mem = stock->cached;
 -              if (!mem || !stock->nr_pages)
 +              memcg = stock->cached;
 +              if (!memcg || !stock->nr_pages)
                        continue;
 -              if (!mem_cgroup_same_or_subtree(root_mem, mem))
 +              if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
                        continue;
                if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
                        if (cpu == curcpu)
   * expects some charges will be back to res_counter later but cannot wait for
   * it.
   */
 -static void drain_all_stock_async(struct mem_cgroup *root_mem)
 +static void drain_all_stock_async(struct mem_cgroup *root_memcg)
  {
        /*
         * If someone calls draining, avoid adding more kworker runs.
         */
        if (!mutex_trylock(&percpu_charge_mutex))
                return;
 -      drain_all_stock(root_mem, false);
 +      drain_all_stock(root_memcg, false);
        mutex_unlock(&percpu_charge_mutex);
  }
  
  /* This is a synchronous drain interface. */
 -static void drain_all_stock_sync(struct mem_cgroup *root_mem)
 +static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
  {
        /* called when force_empty is called */
        mutex_lock(&percpu_charge_mutex);
 -      drain_all_stock(root_mem, true);
 +      drain_all_stock(root_memcg, true);
        mutex_unlock(&percpu_charge_mutex);
  }
  
   * This function drains percpu counter value from DEAD cpu and
   * move it to local cpu. Note that this function can be preempted.
   */
 -static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
 +static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
  {
        int i;
  
 -      spin_lock(&mem->pcp_counter_lock);
 +      spin_lock(&memcg->pcp_counter_lock);
        for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
 -              long x = per_cpu(mem->stat->count[i], cpu);
 +              long x = per_cpu(memcg->stat->count[i], cpu);
  
 -              per_cpu(mem->stat->count[i], cpu) = 0;
 -              mem->nocpu_base.count[i] += x;
 +              per_cpu(memcg->stat->count[i], cpu) = 0;
 +              memcg->nocpu_base.count[i] += x;
        }
        for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
 -              unsigned long x = per_cpu(mem->stat->events[i], cpu);
 +              unsigned long x = per_cpu(memcg->stat->events[i], cpu);
  
 -              per_cpu(mem->stat->events[i], cpu) = 0;
 -              mem->nocpu_base.events[i] += x;
 +              per_cpu(memcg->stat->events[i], cpu) = 0;
 +              memcg->nocpu_base.events[i] += x;
        }
        /* need to clear ON_MOVE value, works as a kind of lock. */
 -      per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
 -      spin_unlock(&mem->pcp_counter_lock);
 +      per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
 +      spin_unlock(&memcg->pcp_counter_lock);
  }
  
 -static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
 +static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu)
  {
        int idx = MEM_CGROUP_ON_MOVE;
  
 -      spin_lock(&mem->pcp_counter_lock);
 -      per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
 -      spin_unlock(&mem->pcp_counter_lock);
 +      spin_lock(&memcg->pcp_counter_lock);
 +      per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx];
 +      spin_unlock(&memcg->pcp_counter_lock);
  }
  
  static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
@@@ -2197,7 -2188,7 +2198,7 @@@ enum 
        CHARGE_OOM_DIE,         /* the current is killed because of OOM */
  };
  
 -static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
 +static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                unsigned int nr_pages, bool oom_check)
  {
        unsigned long csize = nr_pages * PAGE_SIZE;
        unsigned long flags = 0;
        int ret;
  
 -      ret = res_counter_charge(&mem->res, csize, &fail_res);
 +      ret = res_counter_charge(&memcg->res, csize, &fail_res);
  
        if (likely(!ret)) {
                if (!do_swap_account)
                        return CHARGE_OK;
 -              ret = res_counter_charge(&mem->memsw, csize, &fail_res);
 +              ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
                if (likely(!ret))
                        return CHARGE_OK;
  
 -              res_counter_uncharge(&mem->res, csize);
 +              res_counter_uncharge(&memcg->res, csize);
                mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
                flags |= MEM_CGROUP_RECLAIM_NOSWAP;
        } else
  static int __mem_cgroup_try_charge(struct mm_struct *mm,
                                   gfp_t gfp_mask,
                                   unsigned int nr_pages,
 -                                 struct mem_cgroup **memcg,
 +                                 struct mem_cgroup **ptr,
                                   bool oom)
  {
        unsigned int batch = max(CHARGE_BATCH, nr_pages);
        int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
 -      struct mem_cgroup *mem = NULL;
 +      struct mem_cgroup *memcg = NULL;
        int ret;
  
        /*
         * thread group leader migrates. It's possible that mm is not
         * set, if so charge the init_mm (happens for pagecache usage).
         */
 -      if (!*memcg && !mm)
 +      if (!*ptr && !mm)
                goto bypass;
  again:
 -      if (*memcg) { /* css should be a valid one */
 -              mem = *memcg;
 -              VM_BUG_ON(css_is_removed(&mem->css));
 -              if (mem_cgroup_is_root(mem))
 +      if (*ptr) { /* css should be a valid one */
 +              memcg = *ptr;
 +              VM_BUG_ON(css_is_removed(&memcg->css));
 +              if (mem_cgroup_is_root(memcg))
                        goto done;
 -              if (nr_pages == 1 && consume_stock(mem))
 +              if (nr_pages == 1 && consume_stock(memcg))
                        goto done;
 -              css_get(&mem->css);
 +              css_get(&memcg->css);
        } else {
                struct task_struct *p;
  
                p = rcu_dereference(mm->owner);
                /*
                 * Because we don't have task_lock(), "p" can exit.
 -               * In that case, "mem" can point to root or p can be NULL with
 +               * In that case, "memcg" can point to root or p can be NULL with
                 * race with swapoff. Then, we have small risk of mis-accouning.
                 * But such kind of mis-account by race always happens because
                 * we don't have cgroup_mutex(). It's overkill and we allo that
                 * (*) swapoff at el will charge against mm-struct not against
                 * task-struct. So, mm->owner can be NULL.
                 */
 -              mem = mem_cgroup_from_task(p);
 -              if (!mem || mem_cgroup_is_root(mem)) {
 +              memcg = mem_cgroup_from_task(p);
 +              if (!memcg || mem_cgroup_is_root(memcg)) {
                        rcu_read_unlock();
                        goto done;
                }
 -              if (nr_pages == 1 && consume_stock(mem)) {
 +              if (nr_pages == 1 && consume_stock(memcg)) {
                        /*
                         * It seems dagerous to access memcg without css_get().
                         * But considering how consume_stok works, it's not
                        goto done;
                }
                /* after here, we may be blocked. we need to get refcnt */
 -              if (!css_tryget(&mem->css)) {
 +              if (!css_tryget(&memcg->css)) {
                        rcu_read_unlock();
                        goto again;
                }
  
                /* If killed, bypass charge */
                if (fatal_signal_pending(current)) {
 -                      css_put(&mem->css);
 +                      css_put(&memcg->css);
                        goto bypass;
                }
  
                        nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
                }
  
 -              ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
 +              ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
                switch (ret) {
                case CHARGE_OK:
                        break;
                case CHARGE_RETRY: /* not in OOM situation but retry */
                        batch = nr_pages;
 -                      css_put(&mem->css);
 -                      mem = NULL;
 +                      css_put(&memcg->css);
 +                      memcg = NULL;
                        goto again;
                case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
 -                      css_put(&mem->css);
 +                      css_put(&memcg->css);
                        goto nomem;
                case CHARGE_NOMEM: /* OOM routine works */
                        if (!oom) {
 -                              css_put(&mem->css);
 +                              css_put(&memcg->css);
                                goto nomem;
                        }
                        /* If oom, we never return -ENOMEM */
                        nr_oom_retries--;
                        break;
                case CHARGE_OOM_DIE: /* Killed by OOM Killer */
 -                      css_put(&mem->css);
 +                      css_put(&memcg->css);
                        goto bypass;
                }
        } while (ret != CHARGE_OK);
  
        if (batch > nr_pages)
 -              refill_stock(mem, batch - nr_pages);
 -      css_put(&mem->css);
 +              refill_stock(memcg, batch - nr_pages);
 +      css_put(&memcg->css);
  done:
 -      *memcg = mem;
 +      *ptr = memcg;
        return 0;
  nomem:
 -      *memcg = NULL;
 +      *ptr = NULL;
        return -ENOMEM;
  bypass:
 -      *memcg = NULL;
 +      *ptr = NULL;
        return 0;
  }
  
   * This function is for that and do uncharge, put css's refcnt.
   * gotten by try_charge().
   */
 -static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
 +static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
                                       unsigned int nr_pages)
  {
 -      if (!mem_cgroup_is_root(mem)) {
 +      if (!mem_cgroup_is_root(memcg)) {
                unsigned long bytes = nr_pages * PAGE_SIZE;
  
 -              res_counter_uncharge(&mem->res, bytes);
 +              res_counter_uncharge(&memcg->res, bytes);
                if (do_swap_account)
 -                      res_counter_uncharge(&mem->memsw, bytes);
 +                      res_counter_uncharge(&memcg->memsw, bytes);
        }
  }
  
@@@ -2440,7 -2431,7 +2441,7 @@@ static struct mem_cgroup *mem_cgroup_lo
  
  struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
  {
 -      struct mem_cgroup *mem = NULL;
 +      struct mem_cgroup *memcg = NULL;
        struct page_cgroup *pc;
        unsigned short id;
        swp_entry_t ent;
        pc = lookup_page_cgroup(page);
        lock_page_cgroup(pc);
        if (PageCgroupUsed(pc)) {
 -              mem = pc->mem_cgroup;
 -              if (mem && !css_tryget(&mem->css))
 -                      mem = NULL;
 +              memcg = pc->mem_cgroup;
 +              if (memcg && !css_tryget(&memcg->css))
 +                      memcg = NULL;
        } else if (PageSwapCache(page)) {
                ent.val = page_private(page);
                id = lookup_swap_cgroup(ent);
                rcu_read_lock();
 -              mem = mem_cgroup_lookup(id);
 -              if (mem && !css_tryget(&mem->css))
 -                      mem = NULL;
 +              memcg = mem_cgroup_lookup(id);
 +              if (memcg && !css_tryget(&memcg->css))
 +                      memcg = NULL;
                rcu_read_unlock();
        }
        unlock_page_cgroup(pc);
 -      return mem;
 +      return memcg;
  }
  
 -static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 +static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                                       struct page *page,
                                       unsigned int nr_pages,
                                       struct page_cgroup *pc,
        lock_page_cgroup(pc);
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
 -              __mem_cgroup_cancel_charge(mem, nr_pages);
 +              __mem_cgroup_cancel_charge(memcg, nr_pages);
                return;
        }
        /*
         * we don't need page_cgroup_lock about tail pages, becase they are not
         * accessed by any other context at this point.
         */
 -      pc->mem_cgroup = mem;
 +      pc->mem_cgroup = memcg;
        /*
         * We access a page_cgroup asynchronously without lock_page_cgroup().
         * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
                break;
        }
  
 -      mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
 +      mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
        unlock_page_cgroup(pc);
        /*
         * "charge_statistics" updated event counter. Then, check it.
         * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
         * if they exceeds softlimit.
         */
 -      memcg_check_events(mem, page);
 +      memcg_check_events(memcg, page);
  }
  
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@@ -2699,7 -2690,7 +2700,7 @@@ out
  static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask, enum charge_type ctype)
  {
 -      struct mem_cgroup *mem = NULL;
 +      struct mem_cgroup *memcg = NULL;
        unsigned int nr_pages = 1;
        struct page_cgroup *pc;
        bool oom = true;
        pc = lookup_page_cgroup(page);
        BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
  
 -      ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
 -      if (ret || !mem)
 +      ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 +      if (ret || !memcg)
                return ret;
  
 -      __mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
 +      __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
        return 0;
  }
  
@@@ -2751,7 -2742,7 +2752,7 @@@ __mem_cgroup_commit_charge_swapin(struc
                                        enum charge_type ctype);
  
  static void
 -__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
 +__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
                                        enum charge_type ctype)
  {
        struct page_cgroup *pc = lookup_page_cgroup(page);
         * LRU. Take care of it.
         */
        mem_cgroup_lru_del_before_commit(page);
 -      __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
 +      __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
        mem_cgroup_lru_add_after_commit(page);
        return;
  }
  int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
  {
 -      struct mem_cgroup *mem = NULL;
 +      struct mem_cgroup *memcg = NULL;
        int ret;
  
        if (mem_cgroup_disabled())
                mm = &init_mm;
  
        if (page_is_file_cache(page)) {
 -              ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
 -              if (ret || !mem)
 +              ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
 +              if (ret || !memcg)
                        return ret;
  
                /*
                 * put that would remove them from the LRU list, make
                 * sure that they get relinked properly.
                 */
 -              __mem_cgroup_commit_charge_lrucare(page, mem,
 +              __mem_cgroup_commit_charge_lrucare(page, memcg,
                                        MEM_CGROUP_CHARGE_TYPE_CACHE);
                return ret;
        }
        /* shmem */
        if (PageSwapCache(page)) {
 -              ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
 +              ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
                if (!ret)
 -                      __mem_cgroup_commit_charge_swapin(page, mem,
 +                      __mem_cgroup_commit_charge_swapin(page, memcg,
                                        MEM_CGROUP_CHARGE_TYPE_SHMEM);
        } else
                ret = mem_cgroup_charge_common(page, mm, gfp_mask,
@@@ -2817,7 -2808,7 +2818,7 @@@ int mem_cgroup_try_charge_swapin(struc
                                 struct page *page,
                                 gfp_t mask, struct mem_cgroup **ptr)
  {
 -      struct mem_cgroup *mem;
 +      struct mem_cgroup *memcg;
        int ret;
  
        *ptr = NULL;
         */
        if (!PageSwapCache(page))
                goto charge_cur_mm;
 -      mem = try_get_mem_cgroup_from_page(page);
 -      if (!mem)
 +      memcg = try_get_mem_cgroup_from_page(page);
 +      if (!memcg)
                goto charge_cur_mm;
 -      *ptr = mem;
 +      *ptr = memcg;
        ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
 -      css_put(&mem->css);
 +      css_put(&memcg->css);
        return ret;
  charge_cur_mm:
        if (unlikely(!mm))
@@@ -2900,16 -2891,16 +2901,16 @@@ void mem_cgroup_commit_charge_swapin(st
                                        MEM_CGROUP_CHARGE_TYPE_MAPPED);
  }
  
 -void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 +void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
  {
        if (mem_cgroup_disabled())
                return;
 -      if (!mem)
 +      if (!memcg)
                return;
 -      __mem_cgroup_cancel_charge(mem, 1);
 +      __mem_cgroup_cancel_charge(memcg, 1);
  }
  
 -static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
 +static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
                                   unsigned int nr_pages,
                                   const enum charge_type ctype)
  {
         * uncharges. Then, it's ok to ignore memcg's refcnt.
         */
        if (!batch->memcg)
 -              batch->memcg = mem;
 +              batch->memcg = memcg;
        /*
         * do_batch > 0 when unmapping pages or inode invalidate/truncate.
         * In those cases, all pages freed continuously can be expected to be in
         * merge a series of uncharges to an uncharge of res_counter.
         * If not, we uncharge res_counter ony by one.
         */
 -      if (batch->memcg != mem)
 +      if (batch->memcg != memcg)
                goto direct_uncharge;
        /* remember freed charge and uncharge it later */
        batch->nr_pages++;
                batch->memsw_nr_pages++;
        return;
  direct_uncharge:
 -      res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
 +      res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
        if (uncharge_memsw)
 -              res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
 -      if (unlikely(batch->memcg != mem))
 -              memcg_oom_recover(mem);
 +              res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
 +      if (unlikely(batch->memcg != memcg))
 +              memcg_oom_recover(memcg);
        return;
  }
  
  static struct mem_cgroup *
  __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
  {
 -      struct mem_cgroup *mem = NULL;
 +      struct mem_cgroup *memcg = NULL;
        unsigned int nr_pages = 1;
        struct page_cgroup *pc;
  
  
        lock_page_cgroup(pc);
  
 -      mem = pc->mem_cgroup;
 +      memcg = pc->mem_cgroup;
  
        if (!PageCgroupUsed(pc))
                goto unlock_out;
                break;
        }
  
 -      mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);
 +      mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages);
  
        ClearPageCgroupUsed(pc);
        /*
  
        unlock_page_cgroup(pc);
        /*
 -       * even after unlock, we have mem->res.usage here and this memcg
 +       * even after unlock, we have memcg->res.usage here and this memcg
         * will never be freed.
         */
 -      memcg_check_events(mem, page);
 +      memcg_check_events(memcg, page);
        if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
 -              mem_cgroup_swap_statistics(mem, true);
 -              mem_cgroup_get(mem);
 +              mem_cgroup_swap_statistics(memcg, true);
 +              mem_cgroup_get(memcg);
        }
 -      if (!mem_cgroup_is_root(mem))
 -              mem_cgroup_do_uncharge(mem, nr_pages, ctype);
 +      if (!mem_cgroup_is_root(memcg))
 +              mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
  
 -      return mem;
 +      return memcg;
  
  unlock_out:
        unlock_page_cgroup(pc);
@@@ -3228,7 -3219,7 +3229,7 @@@ static inline int mem_cgroup_move_swap_
  int mem_cgroup_prepare_migration(struct page *page,
        struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
  {
 -      struct mem_cgroup *mem = NULL;
 +      struct mem_cgroup *memcg = NULL;
        struct page_cgroup *pc;
        enum charge_type ctype;
        int ret = 0;
        pc = lookup_page_cgroup(page);
        lock_page_cgroup(pc);
        if (PageCgroupUsed(pc)) {
 -              mem = pc->mem_cgroup;
 -              css_get(&mem->css);
 +              memcg = pc->mem_cgroup;
 +              css_get(&memcg->css);
                /*
                 * At migrating an anonymous page, its mapcount goes down
                 * to 0 and uncharge() will be called. But, even if it's fully
         * If the page is not charged at this point,
         * we return here.
         */
 -      if (!mem)
 +      if (!memcg)
                return 0;
  
 -      *ptr = mem;
 +      *ptr = memcg;
        ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
 -      css_put(&mem->css);/* drop extra refcnt */
 +      css_put(&memcg->css);/* drop extra refcnt */
        if (ret || *ptr == NULL) {
                if (PageAnon(page)) {
                        lock_page_cgroup(pc);
                ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
        else
                ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
 -      __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
 +      __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
        return ret;
  }
  
  /* remove redundant charge if migration failed*/
 -void mem_cgroup_end_migration(struct mem_cgroup *mem,
 +void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok)
  {
        struct page *used, *unused;
        struct page_cgroup *pc;
  
 -      if (!mem)
 +      if (!memcg)
                return;
        /* blocks rmdir() */
 -      cgroup_exclude_rmdir(&mem->css);
 +      cgroup_exclude_rmdir(&memcg->css);
        if (!migration_ok) {
                used = oldpage;
                unused = newpage;
         * So, rmdir()->pre_destroy() can be called while we do this charge.
         * In that case, we need to call pre_destroy() again. check it here.
         */
 -      cgroup_release_and_wakeup_rmdir(&mem->css);
 +      cgroup_release_and_wakeup_rmdir(&memcg->css);
  }
  
  #ifdef CONFIG_DEBUG_VM
@@@ -3441,7 -3432,7 +3442,7 @@@ static int mem_cgroup_resize_limit(stru
                /*
                 * Rather than hide all in some function, I do this in
                 * open coded manner. You see what this really does.
 -               * We have to guarantee mem->res.limit < mem->memsw.limit.
 +               * We have to guarantee memcg->res.limit < memcg->memsw.limit.
                 */
                mutex_lock(&set_limit_mutex);
                memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
@@@ -3503,7 -3494,7 +3504,7 @@@ static int mem_cgroup_resize_memsw_limi
                /*
                 * Rather than hide all in some function, I do this in
                 * open coded manner. You see what this really does.
 -               * We have to guarantee mem->res.limit < mem->memsw.limit.
 +               * We have to guarantee memcg->res.limit < memcg->memsw.limit.
                 */
                mutex_lock(&set_limit_mutex);
                memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
@@@ -3641,7 -3632,7 +3642,7 @@@ unsigned long mem_cgroup_soft_limit_rec
   * This routine traverse page_cgroup in given list and drop them all.
   * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
   */
 -static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 +static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
                                int node, int zid, enum lru_list lru)
  {
        struct zone *zone;
        int ret = 0;
  
        zone = &NODE_DATA(node)->node_zones[zid];
 -      mz = mem_cgroup_zoneinfo(mem, node, zid);
 +      mz = mem_cgroup_zoneinfo(memcg, node, zid);
        list = &mz->lists[lru];
  
        loop = MEM_CGROUP_ZSTAT(mz, lru);
  
                page = lookup_cgroup_page(pc);
  
 -              ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
 +              ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
                if (ret == -ENOMEM)
                        break;
  
   * make mem_cgroup's charge to be 0 if there is no task.
   * This enables deleting this mem_cgroup.
   */
 -static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
 +static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
  {
        int ret;
        int node, zid, shrink;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
 -      struct cgroup *cgrp = mem->css.cgroup;
 +      struct cgroup *cgrp = memcg->css.cgroup;
  
 -      css_get(&mem->css);
 +      css_get(&memcg->css);
  
        shrink = 0;
        /* should free all ? */
@@@ -3723,14 -3714,14 +3724,14 @@@ move_account
                        goto out;
                /* This is for making all *used* pages to be on LRU. */
                lru_add_drain_all();
 -              drain_all_stock_sync(mem);
 +              drain_all_stock_sync(memcg);
                ret = 0;
 -              mem_cgroup_start_move(mem);
 +              mem_cgroup_start_move(memcg);
                for_each_node_state(node, N_HIGH_MEMORY) {
                        for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
                                enum lru_list l;
                                for_each_lru(l) {
 -                                      ret = mem_cgroup_force_empty_list(mem,
 +                                      ret = mem_cgroup_force_empty_list(memcg,
                                                        node, zid, l);
                                        if (ret)
                                                break;
                        if (ret)
                                break;
                }
 -              mem_cgroup_end_move(mem);
 -              memcg_oom_recover(mem);
 +              mem_cgroup_end_move(memcg);
 +              memcg_oom_recover(memcg);
                /* it seems parent cgroup doesn't have enough mem */
                if (ret == -ENOMEM)
                        goto try_to_free;
                cond_resched();
        /* "ret" should also be checked to ensure all lists are empty. */
 -      } while (mem->res.usage > 0 || ret);
 +      } while (memcg->res.usage > 0 || ret);
  out:
 -      css_put(&mem->css);
 +      css_put(&memcg->css);
        return ret;
  
  try_to_free:
        lru_add_drain_all();
        /* try to free all pages in this cgroup */
        shrink = 1;
 -      while (nr_retries && mem->res.usage > 0) {
 +      while (nr_retries && memcg->res.usage > 0) {
                int progress;
  
                if (signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }
 -              progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
 +              progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
                                                false);
                if (!progress) {
                        nr_retries--;
@@@ -3797,12 -3788,12 +3798,12 @@@ static int mem_cgroup_hierarchy_write(s
                                        u64 val)
  {
        int retval = 0;
 -      struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
 +      struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
        struct cgroup *parent = cont->parent;
 -      struct mem_cgroup *parent_mem = NULL;
 +      struct mem_cgroup *parent_memcg = NULL;
  
        if (parent)
 -              parent_mem = mem_cgroup_from_cont(parent);
 +              parent_memcg = mem_cgroup_from_cont(parent);
  
        cgroup_lock();
        /*
         * For the root cgroup, parent_mem is NULL, we allow value to be
         * set if there are no children.
         */
 -      if ((!parent_mem || !parent_mem->use_hierarchy) &&
 +      if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
                                (val == 1 || val == 0)) {
                if (list_empty(&cont->children))
 -                      mem->use_hierarchy = val;
 +                      memcg->use_hierarchy = val;
                else
                        retval = -EBUSY;
        } else
  }
  
  
 -static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem,
 +static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
                                               enum mem_cgroup_stat_index idx)
  {
        struct mem_cgroup *iter;
        long val = 0;
  
        /* Per-cpu values can be negative, use a signed accumulator */
 -      for_each_mem_cgroup_tree(iter, mem)
 +      for_each_mem_cgroup_tree(iter, memcg)
                val += mem_cgroup_read_stat(iter, idx);
  
        if (val < 0) /* race ? */
        return val;
  }
  
 -static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
 +static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
  {
        u64 val;
  
 -      if (!mem_cgroup_is_root(mem)) {
 +      if (!mem_cgroup_is_root(memcg)) {
                if (!swap)
 -                      return res_counter_read_u64(&mem->res, RES_USAGE);
 +                      return res_counter_read_u64(&memcg->res, RES_USAGE);
                else
 -                      return res_counter_read_u64(&mem->memsw, RES_USAGE);
 +                      return res_counter_read_u64(&memcg->memsw, RES_USAGE);
        }
  
 -      val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE);
 -      val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS);
 +      val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
 +      val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
  
        if (swap)
 -              val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
 +              val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
  
        return val << PAGE_SHIFT;
  }
  
  static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
  {
 -      struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
 +      struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
        u64 val;
        int type, name;
  
        switch (type) {
        case _MEM:
                if (name == RES_USAGE)
 -                      val = mem_cgroup_usage(mem, false);
 +                      val = mem_cgroup_usage(memcg, false);
                else
 -                      val = res_counter_read_u64(&mem->res, name);
 +                      val = res_counter_read_u64(&memcg->res, name);
                break;
        case _MEMSWAP:
                if (name == RES_USAGE)
 -                      val = mem_cgroup_usage(mem, true);
 +                      val = mem_cgroup_usage(memcg, true);
                else
 -                      val = res_counter_read_u64(&mem->memsw, name);
 +                      val = res_counter_read_u64(&memcg->memsw, name);
                break;
        default:
                BUG();
  
  static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
  {
 -      struct mem_cgroup *mem;
 +      struct mem_cgroup *memcg;
        int type, name;
  
 -      mem = mem_cgroup_from_cont(cont);
 +      memcg = mem_cgroup_from_cont(cont);
        type = MEMFILE_TYPE(event);
        name = MEMFILE_ATTR(event);
        switch (name) {
        case RES_MAX_USAGE:
                if (type == _MEM)
 -                      res_counter_reset_max(&mem->res);
 +                      res_counter_reset_max(&memcg->res);
                else
 -                      res_counter_reset_max(&mem->memsw);
 +                      res_counter_reset_max(&memcg->memsw);
                break;
        case RES_FAILCNT:
                if (type == _MEM)
 -                      res_counter_reset_failcnt(&mem->res);
 +                      res_counter_reset_failcnt(&memcg->res);
                else
 -                      res_counter_reset_failcnt(&mem->memsw);
 +                      res_counter_reset_failcnt(&memcg->memsw);
                break;
        }
  
@@@ -4003,7 -3994,7 +4004,7 @@@ static u64 mem_cgroup_move_charge_read(
  static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
                                        struct cftype *cft, u64 val)
  {
 -      struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
 +      struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  
        if (val >= (1 << NR_MOVE_TYPE))
                return -EINVAL;
         * inconsistent.
         */
        cgroup_lock();
 -      mem->move_charge_at_immigrate = val;
 +      memcg->move_charge_at_immigrate = val;
        cgroup_unlock();
  
        return 0;
@@@ -4070,49 -4061,49 +4071,49 @@@ struct 
  
  
  static void
 -mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
 +mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
  {
        s64 val;
  
        /* per cpu stat */
 -      val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
 +      val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
        s->stat[MCS_CACHE] += val * PAGE_SIZE;
 -      val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
 +      val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
        s->stat[MCS_RSS] += val * PAGE_SIZE;
 -      val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
 +      val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
        s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
 -      val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
 +      val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
        s->stat[MCS_PGPGIN] += val;
 -      val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
 +      val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
        s->stat[MCS_PGPGOUT] += val;
        if (do_swap_account) {
 -              val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
 +              val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
                s->stat[MCS_SWAP] += val * PAGE_SIZE;
        }
 -      val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
 +      val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
        s->stat[MCS_PGFAULT] += val;
 -      val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
 +      val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
        s->stat[MCS_PGMAJFAULT] += val;
  
        /* per zone stat */
 -      val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_ANON));
 +      val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
        s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
 -      val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_ANON));
 +      val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
        s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
 -      val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_FILE));
 +      val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
        s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
 -      val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_FILE));
 +      val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
        s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
 -      val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_UNEVICTABLE));
 +      val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
        s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
  }
  
  static void
 -mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
 +mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
  {
        struct mem_cgroup *iter;
  
 -      for_each_mem_cgroup_tree(iter, mem)
 +      for_each_mem_cgroup_tree(iter, memcg)
                mem_cgroup_get_local_stat(iter, s);
  }
  
@@@ -4198,6 -4189,8 +4199,6 @@@ static int mem_control_stat_show(struc
        }
  
  #ifdef CONFIG_DEBUG_VM
 -      cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
 -
        {
                int nid, zid;
                struct mem_cgroup_per_zone *mz;
@@@ -4334,20 -4327,20 +4335,20 @@@ static int compare_thresholds(const voi
        return _a->threshold - _b->threshold;
  }
  
 -static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
 +static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
  {
        struct mem_cgroup_eventfd_list *ev;
  
 -      list_for_each_entry(ev, &mem->oom_notify, list)
 +      list_for_each_entry(ev, &memcg->oom_notify, list)
                eventfd_signal(ev->eventfd, 1);
        return 0;
  }
  
 -static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
 +static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
  {
        struct mem_cgroup *iter;
  
 -      for_each_mem_cgroup_tree(iter, mem)
 +      for_each_mem_cgroup_tree(iter, memcg)
                mem_cgroup_oom_notify_cb(iter);
  }
  
@@@ -4537,7 -4530,7 +4538,7 @@@ static int mem_cgroup_oom_register_even
  static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
        struct cftype *cft, struct eventfd_ctx *eventfd)
  {
 -      struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
 +      struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
        struct mem_cgroup_eventfd_list *ev, *tmp;
        int type = MEMFILE_TYPE(cft->private);
  
  
        spin_lock(&memcg_oom_lock);
  
 -      list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
 +      list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
                if (ev->eventfd == eventfd) {
                        list_del(&ev->list);
                        kfree(ev);
  static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
        struct cftype *cft,  struct cgroup_map_cb *cb)
  {
 -      struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
 +      struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  
 -      cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
 +      cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
  
 -      if (atomic_read(&mem->under_oom))
 +      if (atomic_read(&memcg->under_oom))
                cb->fill(cb, "under_oom", 1);
        else
                cb->fill(cb, "under_oom", 0);
  static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
        struct cftype *cft, u64 val)
  {
 -      struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
 +      struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
        struct mem_cgroup *parent;
  
        /* cannot set to root cgroup and only 0 and 1 are allowed */
        cgroup_lock();
        /* oom-kill-disable is a flag for subhierarchy. */
        if ((parent->use_hierarchy) ||
 -          (mem->use_hierarchy && !list_empty(&cgrp->children))) {
 +          (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
                cgroup_unlock();
                return -EINVAL;
        }
 -      mem->oom_kill_disable = val;
 +      memcg->oom_kill_disable = val;
        if (!val)
 -              memcg_oom_recover(mem);
 +              memcg_oom_recover(memcg);
        cgroup_unlock();
        return 0;
  }
@@@ -4726,7 -4719,7 +4727,7 @@@ static int register_memsw_files(struct 
  }
  #endif
  
 -static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 +static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
  {
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup_per_zone *mz;
        if (!pn)
                return 1;
  
 -      mem->info.nodeinfo[node] = pn;
        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
                for_each_lru(l)
                        INIT_LIST_HEAD(&mz->lists[l]);
                mz->usage_in_excess = 0;
                mz->on_tree = false;
 -              mz->mem = mem;
 +              mz->mem = memcg;
        }
 +      memcg->info.nodeinfo[node] = pn;
        return 0;
  }
  
 -static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 +static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
  {
 -      kfree(mem->info.nodeinfo[node]);
 +      kfree(memcg->info.nodeinfo[node]);
  }
  
  static struct mem_cgroup *mem_cgroup_alloc(void)
@@@ -4802,51 -4795,51 +4803,51 @@@ out_free
   * Removal of cgroup itself succeeds regardless of refs from swap.
   */
  
 -static void __mem_cgroup_free(struct mem_cgroup *mem)
 +static void __mem_cgroup_free(struct mem_cgroup *memcg)
  {
        int node;
  
 -      mem_cgroup_remove_from_trees(mem);
 -      free_css_id(&mem_cgroup_subsys, &mem->css);
 +      mem_cgroup_remove_from_trees(memcg);
 +      free_css_id(&mem_cgroup_subsys, &memcg->css);
  
        for_each_node_state(node, N_POSSIBLE)
 -              free_mem_cgroup_per_zone_info(mem, node);
 +              free_mem_cgroup_per_zone_info(memcg, node);
  
 -      free_percpu(mem->stat);
 +      free_percpu(memcg->stat);
        if (sizeof(struct mem_cgroup) < PAGE_SIZE)
 -              kfree(mem);
 +              kfree(memcg);
        else
 -              vfree(mem);
 +              vfree(memcg);
  }
  
 -static void mem_cgroup_get(struct mem_cgroup *mem)
 +static void mem_cgroup_get(struct mem_cgroup *memcg)
  {
 -      atomic_inc(&mem->refcnt);
 +      atomic_inc(&memcg->refcnt);
  }
  
 -static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
 +static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
  {
 -      if (atomic_sub_and_test(count, &mem->refcnt)) {
 -              struct mem_cgroup *parent = parent_mem_cgroup(mem);
 -              __mem_cgroup_free(mem);
 +      if (atomic_sub_and_test(count, &memcg->refcnt)) {
 +              struct mem_cgroup *parent = parent_mem_cgroup(memcg);
 +              __mem_cgroup_free(memcg);
                if (parent)
                        mem_cgroup_put(parent);
        }
  }
  
 -static void mem_cgroup_put(struct mem_cgroup *mem)
 +static void mem_cgroup_put(struct mem_cgroup *memcg)
  {
 -      __mem_cgroup_put(mem, 1);
 +      __mem_cgroup_put(memcg, 1);
  }
  
  /*
   * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
   */
 -static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
 +static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
  {
 -      if (!mem->res.parent)
 +      if (!memcg->res.parent)
                return NULL;
 -      return mem_cgroup_from_res_counter(mem->res.parent, res);
 +      return mem_cgroup_from_res_counter(memcg->res.parent, res);
  }
  
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@@ -4889,16 -4882,16 +4890,16 @@@ static int mem_cgroup_soft_limit_tree_i
  static struct cgroup_subsys_state * __ref
  mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
  {
 -      struct mem_cgroup *mem, *parent;
 +      struct mem_cgroup *memcg, *parent;
        long error = -ENOMEM;
        int node;
  
 -      mem = mem_cgroup_alloc();
 -      if (!mem)
 +      memcg = mem_cgroup_alloc();
 +      if (!memcg)
                return ERR_PTR(error);
  
        for_each_node_state(node, N_POSSIBLE)
 -              if (alloc_mem_cgroup_per_zone_info(mem, node))
 +              if (alloc_mem_cgroup_per_zone_info(memcg, node))
                        goto free_out;
  
        /* root ? */
                int cpu;
                enable_swap_cgroup();
                parent = NULL;
 -              root_mem_cgroup = mem;
 +              root_mem_cgroup = memcg;
                if (mem_cgroup_soft_limit_tree_init())
                        goto free_out;
                for_each_possible_cpu(cpu) {
                hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
 -              mem->use_hierarchy = parent->use_hierarchy;
 -              mem->oom_kill_disable = parent->oom_kill_disable;
 +              memcg->use_hierarchy = parent->use_hierarchy;
 +              memcg->oom_kill_disable = parent->oom_kill_disable;
        }
  
        if (parent && parent->use_hierarchy) {
 -              res_counter_init(&mem->res, &parent->res);
 -              res_counter_init(&mem->memsw, &parent->memsw);
 +              res_counter_init(&memcg->res, &parent->res);
 +              res_counter_init(&memcg->memsw, &parent->memsw);
                /*
                 * We increment refcnt of the parent to ensure that we can
                 * safely access it on res_counter_charge/uncharge.
                 */
                mem_cgroup_get(parent);
        } else {
 -              res_counter_init(&mem->res, NULL);
 -              res_counter_init(&mem->memsw, NULL);
 +              res_counter_init(&memcg->res, NULL);
 +              res_counter_init(&memcg->memsw, NULL);
        }
 -      mem->last_scanned_child = 0;
 -      mem->last_scanned_node = MAX_NUMNODES;
 -      INIT_LIST_HEAD(&mem->oom_notify);
 +      memcg->last_scanned_child = 0;
 +      memcg->last_scanned_node = MAX_NUMNODES;
 +      INIT_LIST_HEAD(&memcg->oom_notify);
  
        if (parent)
 -              mem->swappiness = mem_cgroup_swappiness(parent);
 -      atomic_set(&mem->refcnt, 1);
 -      mem->move_charge_at_immigrate = 0;
 -      mutex_init(&mem->thresholds_lock);
 -      return &mem->css;
 +              memcg->swappiness = mem_cgroup_swappiness(parent);
 +      atomic_set(&memcg->refcnt, 1);
 +      memcg->move_charge_at_immigrate = 0;
 +      mutex_init(&memcg->thresholds_lock);
 +      return &memcg->css;
  free_out:
 -      __mem_cgroup_free(mem);
 +      __mem_cgroup_free(memcg);
        root_mem_cgroup = NULL;
        return ERR_PTR(error);
  }
  static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
                                        struct cgroup *cont)
  {
 -      struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
 +      struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  
 -      return mem_cgroup_force_empty(mem, false);
 +      return mem_cgroup_force_empty(memcg, false);
  }
  
  static void mem_cgroup_destroy(struct cgroup_subsys *ss,
                                struct cgroup *cont)
  {
 -      struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
 +      struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
  
 -      mem_cgroup_put(mem);
 +      mem_cgroup_put(memcg);
  }
  
  static int mem_cgroup_populate(struct cgroup_subsys *ss,
@@@ -4987,9 -4980,9 +4988,9 @@@ static int mem_cgroup_do_precharge(unsi
  {
        int ret = 0;
        int batch_count = PRECHARGE_COUNT_AT_ONCE;
 -      struct mem_cgroup *mem = mc.to;
 +      struct mem_cgroup *memcg = mc.to;
  
 -      if (mem_cgroup_is_root(mem)) {
 +      if (mem_cgroup_is_root(memcg)) {
                mc.precharge += count;
                /* we don't need css_get for root */
                return ret;
        if (count > 1) {
                struct res_counter *dummy;
                /*
 -               * "mem" cannot be under rmdir() because we've already checked
 +               * "memcg" cannot be under rmdir() because we've already checked
                 * by cgroup_lock_live_cgroup() that it is not removed and we
                 * are still under the same cgroup_mutex. So we can postpone
                 * css_get().
                 */
 -              if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
 +              if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
                        goto one_by_one;
 -              if (do_swap_account && res_counter_charge(&mem->memsw,
 +              if (do_swap_account && res_counter_charge(&memcg->memsw,
                                                PAGE_SIZE * count, &dummy)) {
 -                      res_counter_uncharge(&mem->res, PAGE_SIZE * count);
 +                      res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
                        goto one_by_one;
                }
                mc.precharge += count;
@@@ -5024,9 -5017,8 +5025,9 @@@ one_by_one
                        batch_count = PRECHARGE_COUNT_AT_ONCE;
                        cond_resched();
                }
 -              ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
 -              if (ret || !mem)
 +              ret = __mem_cgroup_try_charge(NULL,
 +                                      GFP_KERNEL, 1, &memcg, false);
 +              if (ret || !memcg)
                        /* mem_cgroup_clear_mc() will do uncharge later */
                        return -ENOMEM;
                mc.precharge++;
@@@ -5300,13 -5292,13 +5301,13 @@@ static int mem_cgroup_can_attach(struc
                                struct task_struct *p)
  {
        int ret = 0;
 -      struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
 +      struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
  
 -      if (mem->move_charge_at_immigrate) {
 +      if (memcg->move_charge_at_immigrate) {
                struct mm_struct *mm;
                struct mem_cgroup *from = mem_cgroup_from_task(p);
  
 -              VM_BUG_ON(from == mem);
 +              VM_BUG_ON(from == memcg);
  
                mm = get_task_mm(p);
                if (!mm)
                        mem_cgroup_start_move(from);
                        spin_lock(&mc.lock);
                        mc.from = from;
 -                      mc.to = mem;
 +                      mc.to = memcg;
                        spin_unlock(&mc.lock);
                        /* We set mc.moving_task later */
  
diff --combined mm/memory-failure.c
@@@ -42,6 -42,7 +42,7 @@@
  #include <linux/sched.h>
  #include <linux/ksm.h>
  #include <linux/rmap.h>
+ #include <linux/export.h>
  #include <linux/pagemap.h>
  #include <linux/swap.h>
  #include <linux/backing-dev.h>
@@@ -1310,7 -1311,7 +1311,7 @@@ int unpoison_memory(unsigned long pfn
                 * to the end.
                 */
                if (PageHuge(page)) {
 -                      pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
 +                      pr_info("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
                        return 0;
                }
                if (TestClearPageHWPoison(p))
@@@ -1419,7 -1420,7 +1420,7 @@@ static int soft_offline_huge_page(struc
  
        if (PageHWPoison(hpage)) {
                put_page(hpage);
 -              pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn);
 +              pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
                return -EBUSY;
        }
  
                list_for_each_entry_safe(page1, page2, &pagelist, lru)
                        put_page(page1);
  
 -              pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
 -                       pfn, ret, page->flags);
 +              pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
 +                      pfn, ret, page->flags);
                if (ret > 0)
                        ret = -EIO;
                return ret;
@@@ -1505,7 -1506,7 +1506,7 @@@ int soft_offline_page(struct page *page
        }
        if (!PageLRU(page)) {
                pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
 -                              pfn, page->flags);
 +                      pfn, page->flags);
                return -EIO;
        }
  
                }
        } else {
                pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
 -                              pfn, ret, page_count(page), page->flags);
 +                      pfn, ret, page_count(page), page->flags);
        }
        if (ret)
                return ret;
diff --combined mm/memory.c
@@@ -47,7 -47,7 +47,7 @@@
  #include <linux/pagemap.h>
  #include <linux/ksm.h>
  #include <linux/rmap.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/delayacct.h>
  #include <linux/init.h>
  #include <linux/writeback.h>
@@@ -1503,7 -1503,7 +1503,7 @@@ split_fallthrough
        }
  
        if (flags & FOLL_GET)
 -              get_page(page);
 +              get_page_foll(page);
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
diff --combined mm/mempolicy.c
@@@ -75,7 -75,7 +75,7 @@@
  #include <linux/cpuset.h>
  #include <linux/slab.h>
  #include <linux/string.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/nsproxy.h>
  #include <linux/interrupt.h>
  #include <linux/init.h>
@@@ -111,7 -111,7 +111,7 @@@ enum zone_type policy_zone = 0
  /*
   * run-time system-wide default policy => local allocation
   */
 -struct mempolicy default_policy = {
 +static struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .mode = MPOL_PREFERRED,
        .flags = MPOL_F_LOCAL,
diff --combined mm/migrate.c
@@@ -13,7 -13,7 +13,7 @@@
   */
  
  #include <linux/migrate.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/swap.h>
  #include <linux/swapops.h>
  #include <linux/pagemap.h>
@@@ -621,18 -621,38 +621,18 @@@ static int move_to_new_page(struct pag
        return rc;
  }
  
 -/*
 - * Obtain the lock on page, remove all ptes and migrate the page
 - * to the newly allocated page in newpage.
 - */
 -static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 -                      struct page *page, int force, bool offlining, bool sync)
 +static int __unmap_and_move(struct page *page, struct page *newpage,
 +                              int force, bool offlining, bool sync)
  {
 -      int rc = 0;
 -      int *result = NULL;
 -      struct page *newpage = get_new_page(page, private, &result);
 +      int rc = -EAGAIN;
        int remap_swapcache = 1;
        int charge = 0;
        struct mem_cgroup *mem;
        struct anon_vma *anon_vma = NULL;
  
 -      if (!newpage)
 -              return -ENOMEM;
 -
 -      if (page_count(page) == 1) {
 -              /* page was freed from under us. So we are done. */
 -              goto move_newpage;
 -      }
 -      if (unlikely(PageTransHuge(page)))
 -              if (unlikely(split_huge_page(page)))
 -                      goto move_newpage;
 -
 -      /* prepare cgroup just returns 0 or -ENOMEM */
 -      rc = -EAGAIN;
 -
        if (!trylock_page(page)) {
                if (!force || !sync)
 -                      goto move_newpage;
 +                      goto out;
  
                /*
                 * It's not safe for direct compaction to call lock_page.
                 * altogether.
                 */
                if (current->flags & PF_MEMALLOC)
 -                      goto move_newpage;
 +                      goto out;
  
                lock_page(page);
        }
@@@ -765,52 -785,27 +765,52 @@@ uncharge
                mem_cgroup_end_migration(mem, page, newpage, rc == 0);
  unlock:
        unlock_page(page);
 +out:
 +      return rc;
 +}
  
 -move_newpage:
 +/*
 + * Obtain the lock on page, remove all ptes and migrate the page
 + * to the newly allocated page in newpage.
 + */
 +static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 +                      struct page *page, int force, bool offlining, bool sync)
 +{
 +      int rc = 0;
 +      int *result = NULL;
 +      struct page *newpage = get_new_page(page, private, &result);
 +
 +      if (!newpage)
 +              return -ENOMEM;
 +
 +      if (page_count(page) == 1) {
 +              /* page was freed from under us. So we are done. */
 +              goto out;
 +      }
 +
 +      if (unlikely(PageTransHuge(page)))
 +              if (unlikely(split_huge_page(page)))
 +                      goto out;
 +
 +      rc = __unmap_and_move(page, newpage, force, offlining, sync);
 +out:
        if (rc != -EAGAIN) {
 -              /*
 -               * A page that has been migrated has all references
 -               * removed and will be freed. A page that has not been
 -               * migrated will have kepts its references and be
 -               * restored.
 -               */
 -              list_del(&page->lru);
 +              /*
 +               * A page that has been migrated has all references
 +               * removed and will be freed. A page that has not been
 +               * migrated will have kepts its references and be
 +               * restored.
 +               */
 +              list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
                putback_lru_page(page);
        }
 -
        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
         */
        putback_lru_page(newpage);
 -
        if (result) {
                if (rc)
                        *result = rc;
diff --combined mm/mlock.c
@@@ -14,7 -14,7 +14,7 @@@
  #include <linux/mempolicy.h>
  #include <linux/syscalls.h>
  #include <linux/sched.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/rmap.h>
  #include <linux/mmzone.h>
  #include <linux/hugetlb.h>
@@@ -110,15 -110,7 +110,15 @@@ void munlock_vma_page(struct page *page
        if (TestClearPageMlocked(page)) {
                dec_zone_page_state(page, NR_MLOCK);
                if (!isolate_lru_page(page)) {
 -                      int ret = try_to_munlock(page);
 +                      int ret = SWAP_AGAIN;
 +
 +                      /*
 +                       * Optimization: if the page was mapped just once,
 +                       * that's our mapping and we don't need to check all the
 +                       * other vmas.
 +                       */
 +                      if (page_mapcount(page) > 1)
 +                              ret = try_to_munlock(page);
                        /*
                         * did try_to_unlock() succeed or punt?
                         */
@@@ -557,8 -549,7 +557,8 @@@ SYSCALL_DEFINE1(mlockall, int, flags
        if (!can_do_mlock())
                goto out;
  
 -      lru_add_drain_all();    /* flush pagevec */
 +      if (flags & MCL_CURRENT)
 +              lru_add_drain_all();    /* flush pagevec */
  
        down_write(&current->mm->mmap_sem);
  
diff --combined mm/mmap.c
+++ b/mm/mmap.c
@@@ -22,7 -22,7 +22,7 @@@
  #include <linux/security.h>
  #include <linux/hugetlb.h>
  #include <linux/profile.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/mount.h>
  #include <linux/mempolicy.h>
  #include <linux/rmap.h>
@@@ -2558,6 -2558,7 +2558,6 @@@ int mm_take_all_locks(struct mm_struct 
  {
        struct vm_area_struct *vma;
        struct anon_vma_chain *avc;
 -      int ret = -EINTR;
  
        BUG_ON(down_read_trylock(&mm->mmap_sem));
  
                                vm_lock_anon_vma(mm, avc->anon_vma);
        }
  
 -      ret = 0;
 +      return 0;
  
  out_unlock:
 -      if (ret)
 -              mm_drop_all_locks(mm);
 -
 -      return ret;
 +      mm_drop_all_locks(mm);
 +      return -EINTR;
  }
  
  static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
diff --combined mm/oom_kill.c
  #include <linux/timex.h>
  #include <linux/jiffies.h>
  #include <linux/cpuset.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/notifier.h>
  #include <linux/memcontrol.h>
  #include <linux/mempolicy.h>
  #include <linux/security.h>
  #include <linux/ptrace.h>
 +#include <linux/freezer.h>
  
  int sysctl_panic_on_oom;
  int sysctl_oom_kill_allocating_task;
  int sysctl_oom_dump_tasks = 1;
  static DEFINE_SPINLOCK(zone_scan_lock);
  
 +/*
 + * compare_swap_oom_score_adj() - compare and swap current's oom_score_adj
 + * @old_val: old oom_score_adj for compare
 + * @new_val: new oom_score_adj for swap
 + *
 + * Sets the oom_score_adj value for current to @new_val iff its present value is
 + * @old_val.  Usually used to reinstate a previous value to prevent racing with
 + * userspacing tuning the value in the interim.
 + */
 +void compare_swap_oom_score_adj(int old_val, int new_val)
 +{
 +      struct sighand_struct *sighand = current->sighand;
 +
 +      spin_lock_irq(&sighand->siglock);
 +      if (current->signal->oom_score_adj == old_val)
 +              current->signal->oom_score_adj = new_val;
 +      spin_unlock_irq(&sighand->siglock);
 +}
 +
  /**
   * test_set_oom_score_adj() - set current's oom_score_adj and return old value
   * @new_val: new oom_score_adj value
@@@ -73,7 -53,13 +73,7 @@@ int test_set_oom_score_adj(int new_val
  
        spin_lock_irq(&sighand->siglock);
        old_val = current->signal->oom_score_adj;
 -      if (new_val != old_val) {
 -              if (new_val == OOM_SCORE_ADJ_MIN)
 -                      atomic_inc(&current->mm->oom_disable_count);
 -              else if (old_val == OOM_SCORE_ADJ_MIN)
 -                      atomic_dec(&current->mm->oom_disable_count);
 -              current->signal->oom_score_adj = new_val;
 -      }
 +      current->signal->oom_score_adj = new_val;
        spin_unlock_irq(&sighand->siglock);
  
        return old_val;
@@@ -185,6 -171,16 +185,6 @@@ unsigned int oom_badness(struct task_st
        if (!p)
                return 0;
  
 -      /*
 -       * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
 -       * so the entire heuristic doesn't need to be executed for something
 -       * that cannot be killed.
 -       */
 -      if (atomic_read(&p->mm->oom_disable_count)) {
 -              task_unlock(p);
 -              return 0;
 -      }
 -
        /*
         * The memory controller may have a limit of 0 bytes, so avoid a divide
         * by zero, if necessary.
@@@ -321,11 -317,8 +321,11 @@@ static struct task_struct *select_bad_p
                 * blocked waiting for another task which itself is waiting
                 * for memory. Is there a better alternative?
                 */
 -              if (test_tsk_thread_flag(p, TIF_MEMDIE))
 +              if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
 +                      if (unlikely(frozen(p)))
 +                              thaw_process(p);
                        return ERR_PTR(-1UL);
 +              }
                if (!p->mm)
                        continue;
  
@@@ -442,7 -435,7 +442,7 @@@ static int oom_kill_task(struct task_st
        task_unlock(p);
  
        /*
 -       * Kill all processes sharing p->mm in other thread groups, if any.
 +       * Kill all user processes sharing p->mm in other thread groups, if any.
         * They don't get access to memory reserves or a higher scheduler
         * priority, though, to avoid depletion of all memory or task
         * starvation.  This prevents mm->mmap_sem livelock when an oom killed
         * signal.
         */
        for_each_process(q)
 -              if (q->mm == mm && !same_thread_group(q, p)) {
 +              if (q->mm == mm && !same_thread_group(q, p) &&
 +                  !(q->flags & PF_KTHREAD)) {
 +                      if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
 +                              continue;
 +
                        task_lock(q);   /* Protect ->comm from prctl() */
                        pr_err("Kill process %d (%s) sharing same memory\n",
                                task_pid_nr(q), q->comm);
@@@ -733,7 -722,7 +733,7 @@@ void out_of_memory(struct zonelist *zon
        read_lock(&tasklist_lock);
        if (sysctl_oom_kill_allocating_task &&
            !oom_unkillable_task(current, NULL, nodemask) &&
 -          current->mm && !atomic_read(&current->mm->oom_disable_count)) {
 +          current->mm) {
                /*
                 * oom_kill_process() needs tasklist_lock held.  If it returns
                 * non-zero, current could not be killed so we must fallback to
diff --combined mm/page-writeback.c
@@@ -12,7 -12,7 +12,7 @@@
   */
  
  #include <linux/kernel.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/spinlock.h>
  #include <linux/fs.h>
  #include <linux/mm.h>
   */
  #define BANDWIDTH_INTERVAL    max(HZ/5, 1)
  
 +#define RATELIMIT_CALC_SHIFT  10
 +
  /*
   * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
   * will look to see if it needs to force writeback or throttling.
   */
  static long ratelimit_pages = 32;
  
 -/*
 - * When balance_dirty_pages decides that the caller needs to perform some
 - * non-background writeback, this is how many pages it will attempt to write.
 - * It should be somewhat larger than dirtied pages to ensure that reasonably
 - * large amounts of I/O are submitted.
 - */
 -static inline long sync_writeback_pages(unsigned long dirtied)
 -{
 -      if (dirtied < ratelimit_pages)
 -              dirtied = ratelimit_pages;
 -
 -      return dirtied + dirtied / 2;
 -}
 -
  /* The following parameters are exported via /proc/sys/vm */
  
  /*
@@@ -155,8 -167,6 +155,8 @@@ static void update_completion_period(vo
        int shift = calc_period_shift();
        prop_change_shift(&vm_completions, shift);
        prop_change_shift(&vm_dirties, shift);
 +
 +      writeback_set_ratelimit();
  }
  
  int dirty_background_ratio_handler(struct ctl_table *table, int write,
@@@ -250,10 -260,52 +250,10 @@@ static void bdi_writeout_fraction(struc
                                numerator, denominator);
  }
  
 -static inline void task_dirties_fraction(struct task_struct *tsk,
 -              long *numerator, long *denominator)
 -{
 -      prop_fraction_single(&vm_dirties, &tsk->dirties,
 -                              numerator, denominator);
 -}
 -
  /*
 - * task_dirty_limit - scale down dirty throttling threshold for one task
 - *
 - * task specific dirty limit:
 - *
 - *   dirty -= (dirty/8) * p_{t}
 - *
 - * To protect light/slow dirtying tasks from heavier/fast ones, we start
 - * throttling individual tasks before reaching the bdi dirty limit.
 - * Relatively low thresholds will be allocated to heavy dirtiers. So when
 - * dirty pages grow large, heavy dirtiers will be throttled first, which will
 - * effectively curb the growth of dirty pages. Light dirtiers with high enough
 - * dirty threshold may never get throttled.
 - */
 -#define TASK_LIMIT_FRACTION 8
 -static unsigned long task_dirty_limit(struct task_struct *tsk,
 -                                     unsigned long bdi_dirty)
 -{
 -      long numerator, denominator;
 -      unsigned long dirty = bdi_dirty;
 -      u64 inv = dirty / TASK_LIMIT_FRACTION;
 -
 -      task_dirties_fraction(tsk, &numerator, &denominator);
 -      inv *= numerator;
 -      do_div(inv, denominator);
 -
 -      dirty -= inv;
 -
 -      return max(dirty, bdi_dirty/2);
 -}
 -
 -/* Minimum limit for any task */
 -static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
 -{
 -      return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
 -}
 -
 -/*
 - *
 + * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 + * registered backing devices, which, for obvious reasons, can not
 + * exceed 100%.
   */
  static unsigned int bdi_min_ratio;
  
@@@ -359,12 -411,6 +359,12 @@@ unsigned long determine_dirtyable_memor
        return x + 1;   /* Ensure that we never return 0 */
  }
  
 +static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 +                                         unsigned long bg_thresh)
 +{
 +      return (thresh + bg_thresh) / 2;
 +}
 +
  static unsigned long hard_dirty_limit(unsigned long thresh)
  {
        return max(thresh, global_dirty_limit);
@@@ -449,198 -495,6 +449,198 @@@ unsigned long bdi_dirty_limit(struct ba
        return bdi_dirty;
  }
  
 +/*
 + * Dirty position control.
 + *
 + * (o) global/bdi setpoints
 + *
 + * We want the dirty pages be balanced around the global/bdi setpoints.
 + * When the number of dirty pages is higher/lower than the setpoint, the
 + * dirty position control ratio (and hence task dirty ratelimit) will be
 + * decreased/increased to bring the dirty pages back to the setpoint.
 + *
 + *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 + *
 + *     if (dirty < setpoint) scale up   pos_ratio
 + *     if (dirty > setpoint) scale down pos_ratio
 + *
 + *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
 + *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
 + *
 + *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 + *
 + * (o) global control line
 + *
 + *     ^ pos_ratio
 + *     |
 + *     |            |<===== global dirty control scope ======>|
 + * 2.0 .............*
 + *     |            .*
 + *     |            . *
 + *     |            .   *
 + *     |            .     *
 + *     |            .        *
 + *     |            .            *
 + * 1.0 ................................*
 + *     |            .                  .     *
 + *     |            .                  .          *
 + *     |            .                  .              *
 + *     |            .                  .                 *
 + *     |            .                  .                    *
 + *   0 +------------.------------------.----------------------*------------->
 + *           freerun^          setpoint^                 limit^   dirty pages
 + *
 + * (o) bdi control line
 + *
 + *     ^ pos_ratio
 + *     |
 + *     |            *
 + *     |              *
 + *     |                *
 + *     |                  *
 + *     |                    * |<=========== span ============>|
 + * 1.0 .......................*
 + *     |                      . *
 + *     |                      .   *
 + *     |                      .     *
 + *     |                      .       *
 + *     |                      .         *
 + *     |                      .           *
 + *     |                      .             *
 + *     |                      .               *
 + *     |                      .                 *
 + *     |                      .                   *
 + *     |                      .                     *
 + * 1/4 ...............................................* * * * * * * * * * * *
 + *     |                      .                         .
 + *     |                      .                           .
 + *     |                      .                             .
 + *   0 +----------------------.-------------------------------.------------->
 + *                bdi_setpoint^                    x_intercept^
 + *
 + * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
 + * be smoothly throttled down to normal if it starts high in situations like
 + * - start writing to a slow SD card and a fast disk at the same time. The SD
 + *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
 + * - the bdi dirty thresh drops quickly due to change of JBOD workload
 + */
 +static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
 +                                      unsigned long thresh,
 +                                      unsigned long bg_thresh,
 +                                      unsigned long dirty,
 +                                      unsigned long bdi_thresh,
 +                                      unsigned long bdi_dirty)
 +{
 +      unsigned long write_bw = bdi->avg_write_bandwidth;
 +      unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
 +      unsigned long limit = hard_dirty_limit(thresh);
 +      unsigned long x_intercept;
 +      unsigned long setpoint;         /* dirty pages' target balance point */
 +      unsigned long bdi_setpoint;
 +      unsigned long span;
 +      long long pos_ratio;            /* for scaling up/down the rate limit */
 +      long x;
 +
 +      if (unlikely(dirty >= limit))
 +              return 0;
 +
 +      /*
 +       * global setpoint
 +       *
 +       *                           setpoint - dirty 3
 +       *        f(dirty) := 1.0 + (----------------)
 +       *                           limit - setpoint
 +       *
 +       * it's a 3rd order polynomial that subjects to
 +       *
 +       * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 +       * (2) f(setpoint) = 1.0 => the balance point
 +       * (3) f(limit)    = 0   => the hard limit
 +       * (4) df/dx      <= 0   => negative feedback control
 +       * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 +       *     => fast response on large errors; small oscillation near setpoint
 +       */
 +      setpoint = (freerun + limit) / 2;
 +      x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
 +                  limit - setpoint + 1);
 +      pos_ratio = x;
 +      pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 +      pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 +      pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
 +
 +      /*
 +       * We have computed basic pos_ratio above based on global situation. If
 +       * the bdi is over/under its share of dirty pages, we want to scale
 +       * pos_ratio further down/up. That is done by the following mechanism.
 +       */
 +
 +      /*
 +       * bdi setpoint
 +       *
 +       *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
 +       *
 +       *                        x_intercept - bdi_dirty
 +       *                     := --------------------------
 +       *                        x_intercept - bdi_setpoint
 +       *
 +       * The main bdi control line is a linear function that subjects to
 +       *
 +       * (1) f(bdi_setpoint) = 1.0
 +       * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
 +       *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
 +       *
 +       * For single bdi case, the dirty pages are observed to fluctuate
 +       * regularly within range
 +       *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
 +       * for various filesystems, where (2) can yield in a reasonable 12.5%
 +       * fluctuation range for pos_ratio.
 +       *
 +       * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
 +       * own size, so move the slope over accordingly and choose a slope that
 +       * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
 +       */
 +      if (unlikely(bdi_thresh > thresh))
 +              bdi_thresh = thresh;
 +      bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
 +      /*
 +       * scale global setpoint to bdi's:
 +       *      bdi_setpoint = setpoint * bdi_thresh / thresh
 +       */
 +      x = div_u64((u64)bdi_thresh << 16, thresh + 1);
 +      bdi_setpoint = setpoint * (u64)x >> 16;
 +      /*
 +       * Use span=(8*write_bw) in single bdi case as indicated by
 +       * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
 +       *
 +       *        bdi_thresh                    thresh - bdi_thresh
 +       * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
 +       *          thresh                            thresh
 +       */
 +      span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
 +      x_intercept = bdi_setpoint + span;
 +
 +      if (bdi_dirty < x_intercept - span / 4) {
 +              pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
 +                                  x_intercept - bdi_setpoint + 1);
 +      } else
 +              pos_ratio /= 4;
 +
 +      /*
 +       * bdi reserve area, safeguard against dirty pool underrun and disk idle
 +       * It may push the desired control point of global dirty pages higher
 +       * than setpoint.
 +       */
 +      x_intercept = bdi_thresh / 2;
 +      if (bdi_dirty < x_intercept) {
 +              if (bdi_dirty > x_intercept / 8)
 +                      pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
 +              else
 +                      pos_ratio *= 8;
 +      }
 +
 +      return pos_ratio;
 +}
 +
  static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
                                       unsigned long elapsed,
                                       unsigned long written)
@@@ -737,153 -591,8 +737,153 @@@ static void global_update_bandwidth(uns
        spin_unlock(&dirty_lock);
  }
  
 +/*
 + * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
 + *
 + * Normal bdi tasks will be curbed at or below it in long term.
 + * Obviously it should be around (write_bw / N) when there are N dd tasks.
 + */
 +static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
 +                                     unsigned long thresh,
 +                                     unsigned long bg_thresh,
 +                                     unsigned long dirty,
 +                                     unsigned long bdi_thresh,
 +                                     unsigned long bdi_dirty,
 +                                     unsigned long dirtied,
 +                                     unsigned long elapsed)
 +{
 +      unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
 +      unsigned long limit = hard_dirty_limit(thresh);
 +      unsigned long setpoint = (freerun + limit) / 2;
 +      unsigned long write_bw = bdi->avg_write_bandwidth;
 +      unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
 +      unsigned long dirty_rate;
 +      unsigned long task_ratelimit;
 +      unsigned long balanced_dirty_ratelimit;
 +      unsigned long pos_ratio;
 +      unsigned long step;
 +      unsigned long x;
 +
 +      /*
 +       * The dirty rate will match the writeout rate in long term, except
 +       * when dirty pages are truncated by userspace or re-dirtied by FS.
 +       */
 +      dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;
 +
 +      pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
 +                                     bdi_thresh, bdi_dirty);
 +      /*
 +       * task_ratelimit reflects each dd's dirty rate for the past 200ms.
 +       */
 +      task_ratelimit = (u64)dirty_ratelimit *
 +                                      pos_ratio >> RATELIMIT_CALC_SHIFT;
 +      task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
 +
 +      /*
 +       * A linear estimation of the "balanced" throttle rate. The theory is,
 +       * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
 +       * dirty_rate will be measured to be (N * task_ratelimit). So the below
 +       * formula will yield the balanced rate limit (write_bw / N).
 +       *
 +       * Note that the expanded form is not a pure rate feedback:
 +       *      rate_(i+1) = rate_(i) * (write_bw / dirty_rate)              (1)
 +       * but also takes pos_ratio into account:
 +       *      rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
 +       *
 +       * (1) is not realistic because pos_ratio also takes part in balancing
 +       * the dirty rate.  Consider the state
 +       *      pos_ratio = 0.5                                              (3)
 +       *      rate = 2 * (write_bw / N)                                    (4)
 +       * If (1) is used, it will stuck in that state! Because each dd will
 +       * be throttled at
 +       *      task_ratelimit = pos_ratio * rate = (write_bw / N)           (5)
 +       * yielding
 +       *      dirty_rate = N * task_ratelimit = write_bw                   (6)
 +       * put (6) into (1) we get
 +       *      rate_(i+1) = rate_(i)                                        (7)
 +       *
 +       * So we end up using (2) to always keep
 +       *      rate_(i+1) ~= (write_bw / N)                                 (8)
 +       * regardless of the value of pos_ratio. As long as (8) is satisfied,
 +       * pos_ratio is able to drive itself to 1.0, which is not only where
 +       * the dirty count meet the setpoint, but also where the slope of
 +       * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
 +       */
 +      balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
 +                                         dirty_rate | 1);
 +
 +      /*
 +       * We could safely do this and return immediately:
 +       *
 +       *      bdi->dirty_ratelimit = balanced_dirty_ratelimit;
 +       *
 +       * However to get a more stable dirty_ratelimit, the below elaborated
 +       * code makes use of task_ratelimit to filter out sigular points and
 +       * limit the step size.
 +       *
 +       * The below code essentially only uses the relative value of
 +       *
 +       *      task_ratelimit - dirty_ratelimit
 +       *      = (pos_ratio - 1) * dirty_ratelimit
 +       *
 +       * which reflects the direction and size of dirty position error.
 +       */
 +
 +      /*
 +       * dirty_ratelimit will follow balanced_dirty_ratelimit iff
 +       * task_ratelimit is on the same side of dirty_ratelimit, too.
 +       * For example, when
 +       * - dirty_ratelimit > balanced_dirty_ratelimit
 +       * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
 +       * lowering dirty_ratelimit will help meet both the position and rate
 +       * control targets. Otherwise, don't update dirty_ratelimit if it will
 +       * only help meet the rate target. After all, what the users ultimately
 +       * feel and care are stable dirty rate and small position error.
 +       *
 +       * |task_ratelimit - dirty_ratelimit| is used to limit the step size
 +       * and filter out the sigular points of balanced_dirty_ratelimit. Which
 +       * keeps jumping around randomly and can even leap far away at times
 +       * due to the small 200ms estimation period of dirty_rate (we want to
 +       * keep that period small to reduce time lags).
 +       */
 +      step = 0;
 +      if (dirty < setpoint) {
 +              x = min(bdi->balanced_dirty_ratelimit,
 +                       min(balanced_dirty_ratelimit, task_ratelimit));
 +              if (dirty_ratelimit < x)
 +                      step = x - dirty_ratelimit;
 +      } else {
 +              x = max(bdi->balanced_dirty_ratelimit,
 +                       max(balanced_dirty_ratelimit, task_ratelimit));
 +              if (dirty_ratelimit > x)
 +                      step = dirty_ratelimit - x;
 +      }
 +
 +      /*
 +       * Don't pursue 100% rate matching. It's impossible since the balanced
 +       * rate itself is constantly fluctuating. So decrease the track speed
 +       * when it gets close to the target. Helps eliminate pointless tremors.
 +       */
 +      step >>= dirty_ratelimit / (2 * step + 1);
 +      /*
 +       * Limit the tracking speed to avoid overshooting.
 +       */
 +      step = (step + 7) / 8;
 +
 +      if (dirty_ratelimit < balanced_dirty_ratelimit)
 +              dirty_ratelimit += step;
 +      else
 +              dirty_ratelimit -= step;
 +
 +      bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
 +      bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
 +
 +      trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
 +}
 +
  void __bdi_update_bandwidth(struct backing_dev_info *bdi,
                            unsigned long thresh,
 +                          unsigned long bg_thresh,
                            unsigned long dirty,
                            unsigned long bdi_thresh,
                            unsigned long bdi_dirty,
  {
        unsigned long now = jiffies;
        unsigned long elapsed = now - bdi->bw_time_stamp;
 +      unsigned long dirtied;
        unsigned long written;
  
        /*
        if (elapsed < BANDWIDTH_INTERVAL)
                return;
  
 +      dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
        written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
  
        /*
        if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
                goto snapshot;
  
 -      if (thresh)
 +      if (thresh) {
                global_update_bandwidth(thresh, dirty, now);
 -
 +              bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
 +                                         bdi_thresh, bdi_dirty,
 +                                         dirtied, elapsed);
 +      }
        bdi_update_write_bandwidth(bdi, elapsed, written);
  
  snapshot:
 +      bdi->dirtied_stamp = dirtied;
        bdi->written_stamp = written;
        bdi->bw_time_stamp = now;
  }
  
  static void bdi_update_bandwidth(struct backing_dev_info *bdi,
                                 unsigned long thresh,
 +                               unsigned long bg_thresh,
                                 unsigned long dirty,
                                 unsigned long bdi_thresh,
                                 unsigned long bdi_dirty,
        if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
                return;
        spin_lock(&bdi->wb.list_lock);
 -      __bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty,
 -                             start_time);
 +      __bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
 +                             bdi_thresh, bdi_dirty, start_time);
        spin_unlock(&bdi->wb.list_lock);
  }
  
 +/*
 + * After a task dirtied this many pages, balance_dirty_pages_ratelimited_nr()
 + * will look to see if it needs to start dirty throttling.
 + *
 + * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 + * global_page_state() too often. So scale it near-sqrt to the safety margin
 + * (the number of pages we may dirty without exceeding the dirty limits).
 + */
 +static unsigned long dirty_poll_interval(unsigned long dirty,
 +                                       unsigned long thresh)
 +{
 +      if (thresh > dirty)
 +              return 1UL << (ilog2(thresh - dirty) >> 1);
 +
 +      return 1;
 +}
 +
 +static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
 +                                 unsigned long bdi_dirty)
 +{
 +      unsigned long bw = bdi->avg_write_bandwidth;
 +      unsigned long hi = ilog2(bw);
 +      unsigned long lo = ilog2(bdi->dirty_ratelimit);
 +      unsigned long t;
 +
 +      /* target for 20ms max pause on 1-dd case */
 +      t = HZ / 50;
 +
 +      /*
 +       * Scale up pause time for concurrent dirtiers in order to reduce CPU
 +       * overheads.
 +       *
 +       * (N * 20ms) on 2^N concurrent tasks.
 +       */
 +      if (hi > lo)
 +              t += (hi - lo) * (20 * HZ) / 1024;
 +
 +      /*
 +       * Limit pause time for small memory systems. If sleeping for too long
 +       * time, a small pool of dirty/writeback pages may go empty and disk go
 +       * idle.
 +       *
 +       * 8 serves as the safety ratio.
 +       */
 +      if (bdi_dirty)
 +              t = min(t, bdi_dirty * HZ / (8 * bw + 1));
 +
 +      /*
 +       * The pause time will be settled within range (max_pause/4, max_pause).
 +       * Apply a minimal value of 4 to get a non-zero max_pause/4.
 +       */
 +      return clamp_val(t, 4, MAX_PAUSE);
 +}
 +
  /*
   * balance_dirty_pages() must be called by processes which are generating dirty
   * data.  It looks at the number of dirty pages in the machine and will force
 - * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 + * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
   * If we're over `background_thresh' then the writeback threads are woken to
   * perform some writeout.
   */
  static void balance_dirty_pages(struct address_space *mapping,
 -                              unsigned long write_chunk)
 +                              unsigned long pages_dirtied)
  {
 -      unsigned long nr_reclaimable, bdi_nr_reclaimable;
 +      unsigned long nr_reclaimable;   /* = file_dirty + unstable_nfs */
 +      unsigned long bdi_reclaimable;
        unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
        unsigned long bdi_dirty;
 +      unsigned long freerun;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
 -      unsigned long task_bdi_thresh;
 -      unsigned long min_task_bdi_thresh;
 -      unsigned long pages_written = 0;
 -      unsigned long pause = 1;
 +      long pause = 0;
 +      long uninitialized_var(max_pause);
        bool dirty_exceeded = false;
 -      bool clear_dirty_exceeded = true;
 +      unsigned long task_ratelimit;
 +      unsigned long uninitialized_var(dirty_ratelimit);
 +      unsigned long pos_ratio;
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        unsigned long start_time = jiffies;
  
        for (;;) {
 +              /*
 +               * Unstable writes are a feature of certain networked
 +               * filesystems (i.e. NFS) in which data may have been
 +               * written to the server's write cache, but has not yet
 +               * been flushed to permanent storage.
 +               */
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
                 * catch-up. This avoids (excessively) small writeouts
                 * when the bdi limits are ramping up.
                 */
 -              if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
 +              freerun = dirty_freerun_ceiling(dirty_thresh,
 +                                              background_thresh);
 +              if (nr_dirty <= freerun)
                        break;
  
 +              if (unlikely(!writeback_in_progress(bdi)))
 +                      bdi_start_background_writeback(bdi);
 +
 +              /*
 +               * bdi_thresh is not treated as some limiting factor as
 +               * dirty_thresh, due to reasons
 +               * - in JBOD setup, bdi_thresh can fluctuate a lot
 +               * - in a system with HDD and USB key, the USB key may somehow
 +               *   go into state (bdi_dirty >> bdi_thresh) either because
 +               *   bdi_dirty starts high, or because bdi_thresh drops low.
 +               *   In this case we don't want to hard throttle the USB key
 +               *   dirtiers for 100 seconds until bdi_dirty drops under
 +               *   bdi_thresh. Instead the auxiliary bdi control line in
 +               *   bdi_position_ratio() will let the dirtier task progress
 +               *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
 +               */
                bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 -              min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh);
 -              task_bdi_thresh = task_dirty_limit(current, bdi_thresh);
  
                /*
                 * In order to avoid the stacked BDI deadlock we need
                 * actually dirty; with m+n sitting in the percpu
                 * deltas.
                 */
 -              if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) {
 -                      bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
 -                      bdi_dirty = bdi_nr_reclaimable +
 +              if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
 +                      bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
 +                      bdi_dirty = bdi_reclaimable +
                                    bdi_stat_sum(bdi, BDI_WRITEBACK);
                } else {
 -                      bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 -                      bdi_dirty = bdi_nr_reclaimable +
 +                      bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 +                      bdi_dirty = bdi_reclaimable +
                                    bdi_stat(bdi, BDI_WRITEBACK);
                }
  
 -              /*
 -               * The bdi thresh is somehow "soft" limit derived from the
 -               * global "hard" limit. The former helps to prevent heavy IO
 -               * bdi or process from holding back light ones; The latter is
 -               * the last resort safeguard.
 -               */
 -              dirty_exceeded = (bdi_dirty > task_bdi_thresh) ||
 +              dirty_exceeded = (bdi_dirty > bdi_thresh) ||
                                  (nr_dirty > dirty_thresh);
 -              clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) &&
 -                                      (nr_dirty <= dirty_thresh);
 -
 -              if (!dirty_exceeded)
 -                      break;
 -
 -              if (!bdi->dirty_exceeded)
 +              if (dirty_exceeded && !bdi->dirty_exceeded)
                        bdi->dirty_exceeded = 1;
  
 -              bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty,
 -                                   bdi_thresh, bdi_dirty, start_time);
 +              bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
 +                                   nr_dirty, bdi_thresh, bdi_dirty,
 +                                   start_time);
  
 -              /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
 -               * Unstable writes are a feature of certain networked
 -               * filesystems (i.e. NFS) in which data may have been
 -               * written to the server's write cache, but has not yet
 -               * been flushed to permanent storage.
 -               * Only move pages to writeback if this bdi is over its
 -               * threshold otherwise wait until the disk writes catch
 -               * up.
 -               */
 -              trace_balance_dirty_start(bdi);
 -              if (bdi_nr_reclaimable > task_bdi_thresh) {
 -                      pages_written += writeback_inodes_wb(&bdi->wb,
 -                                                           write_chunk);
 -                      trace_balance_dirty_written(bdi, pages_written);
 -                      if (pages_written >= write_chunk)
 -                              break;          /* We've done our duty */
 +              max_pause = bdi_max_pause(bdi, bdi_dirty);
 +
 +              dirty_ratelimit = bdi->dirty_ratelimit;
 +              pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
 +                                             background_thresh, nr_dirty,
 +                                             bdi_thresh, bdi_dirty);
 +              if (unlikely(pos_ratio == 0)) {
 +                      pause = max_pause;
 +                      goto pause;
 +              }
 +              task_ratelimit = (u64)dirty_ratelimit *
 +                                      pos_ratio >> RATELIMIT_CALC_SHIFT;
 +              pause = (HZ * pages_dirtied) / (task_ratelimit | 1);
 +              if (unlikely(pause <= 0)) {
 +                      trace_balance_dirty_pages(bdi,
 +                                                dirty_thresh,
 +                                                background_thresh,
 +                                                nr_dirty,
 +                                                bdi_thresh,
 +                                                bdi_dirty,
 +                                                dirty_ratelimit,
 +                                                task_ratelimit,
 +                                                pages_dirtied,
 +                                                pause,
 +                                                start_time);
 +                      pause = 1; /* avoid resetting nr_dirtied_pause below */
 +                      break;
                }
 +              pause = min(pause, max_pause);
 +
 +pause:
 +              trace_balance_dirty_pages(bdi,
 +                                        dirty_thresh,
 +                                        background_thresh,
 +                                        nr_dirty,
 +                                        bdi_thresh,
 +                                        bdi_dirty,
 +                                        dirty_ratelimit,
 +                                        task_ratelimit,
 +                                        pages_dirtied,
 +                                        pause,
 +                                        start_time);
                __set_current_state(TASK_UNINTERRUPTIBLE);
                io_schedule_timeout(pause);
 -              trace_balance_dirty_wait(bdi);
  
                dirty_thresh = hard_dirty_limit(dirty_thresh);
                /*
                 * 200ms is typically more than enough to curb heavy dirtiers;
                 * (b) the pause time limit makes the dirtiers more responsive.
                 */
 -              if (nr_dirty < dirty_thresh &&
 -                  bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
 -                  time_after(jiffies, start_time + MAX_PAUSE))
 +              if (nr_dirty < dirty_thresh)
                        break;
 -
 -              /*
 -               * Increase the delay for each loop, up to our previous
 -               * default of taking a 100ms nap.
 -               */
 -              pause <<= 1;
 -              if (pause > HZ / 10)
 -                      pause = HZ / 10;
        }
  
 -      /* Clear dirty_exceeded flag only when no task can exceed the limit */
 -      if (clear_dirty_exceeded && bdi->dirty_exceeded)
 +      if (!dirty_exceeded && bdi->dirty_exceeded)
                bdi->dirty_exceeded = 0;
  
 +      current->nr_dirtied = 0;
 +      if (pause == 0) { /* in freerun area */
 +              current->nr_dirtied_pause =
 +                              dirty_poll_interval(nr_dirty, dirty_thresh);
 +      } else if (pause <= max_pause / 4 &&
 +                 pages_dirtied >= current->nr_dirtied_pause) {
 +              current->nr_dirtied_pause = clamp_val(
 +                                      dirty_ratelimit * (max_pause / 2) / HZ,
 +                                      pages_dirtied + pages_dirtied / 8,
 +                                      pages_dirtied * 4);
 +      } else if (pause >= max_pause) {
 +              current->nr_dirtied_pause = 1 | clamp_val(
 +                                      dirty_ratelimit * (max_pause / 2) / HZ,
 +                                      pages_dirtied / 4,
 +                                      pages_dirtied - pages_dirtied / 8);
 +      }
 +
        if (writeback_in_progress(bdi))
                return;
  
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
 -      if ((laptop_mode && pages_written) ||
 -          (!laptop_mode && (nr_reclaimable > background_thresh)))
 +      if (laptop_mode)
 +              return;
 +
 +      if (nr_reclaimable > background_thresh)
                bdi_start_background_writeback(bdi);
  }
  
@@@ -1195,7 -798,7 +1195,7 @@@ void set_page_dirty_balance(struct pag
        }
  }
  
 -static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
 +static DEFINE_PER_CPU(int, bdp_ratelimits);
  
  /**
   * balance_dirty_pages_ratelimited_nr - balance dirty memory state
@@@ -1215,39 -818,31 +1215,39 @@@ void balance_dirty_pages_ratelimited_nr
                                        unsigned long nr_pages_dirtied)
  {
        struct backing_dev_info *bdi = mapping->backing_dev_info;
 -      unsigned long ratelimit;
 -      unsigned long *p;
 +      int ratelimit;
 +      int *p;
  
        if (!bdi_cap_account_dirty(bdi))
                return;
  
 -      ratelimit = ratelimit_pages;
 -      if (mapping->backing_dev_info->dirty_exceeded)
 -              ratelimit = 8;
 +      ratelimit = current->nr_dirtied_pause;
 +      if (bdi->dirty_exceeded)
 +              ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
 +
 +      current->nr_dirtied += nr_pages_dirtied;
  
 +      preempt_disable();
        /*
 -       * Check the rate limiting. Also, we do not want to throttle real-time
 -       * tasks in balance_dirty_pages(). Period.
 +       * This prevents one CPU to accumulate too many dirtied pages without
 +       * calling into balance_dirty_pages(), which can happen when there are
 +       * 1000+ tasks, all of them start dirtying pages at exactly the same
 +       * time, hence all honoured too large initial task->nr_dirtied_pause.
         */
 -      preempt_disable();
        p =  &__get_cpu_var(bdp_ratelimits);
 -      *p += nr_pages_dirtied;
 -      if (unlikely(*p >= ratelimit)) {
 -              ratelimit = sync_writeback_pages(*p);
 +      if (unlikely(current->nr_dirtied >= ratelimit))
                *p = 0;
 -              preempt_enable();
 -              balance_dirty_pages(mapping, ratelimit);
 -              return;
 +      else {
 +              *p += nr_pages_dirtied;
 +              if (unlikely(*p >= ratelimit_pages)) {
 +                      *p = 0;
 +                      ratelimit = 0;
 +              }
        }
        preempt_enable();
 +
 +      if (unlikely(current->nr_dirtied >= ratelimit))
 +              balance_dirty_pages(mapping, current->nr_dirtied);
  }
  EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
  
@@@ -1303,8 -898,7 +1303,8 @@@ void laptop_mode_timer_fn(unsigned lon
         * threshold
         */
        if (bdi_has_dirty_io(&q->backing_dev_info))
 -              bdi_start_writeback(&q->backing_dev_info, nr_pages);
 +              bdi_start_writeback(&q->backing_dev_info, nr_pages,
 +                                      WB_REASON_LAPTOP_TIMER);
  }
  
  /*
@@@ -1343,17 -937,22 +1343,17 @@@ void laptop_sync_completion(void
   *
   * Here we set ratelimit_pages to a level which ensures that when all CPUs are
   * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 - * thresholds before writeback cuts in.
 - *
 - * But the limit should not be set too high.  Because it also controls the
 - * amount of memory which the balance_dirty_pages() caller has to write back.
 - * If this is too large then the caller will block on the IO queue all the
 - * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 - * will write six megabyte chunks, max.
 + * thresholds.
   */
  
  void writeback_set_ratelimit(void)
  {
 -      ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
 +      unsigned long background_thresh;
 +      unsigned long dirty_thresh;
 +      global_dirty_limits(&background_thresh, &dirty_thresh);
 +      ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
 -      if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
 -              ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
  }
  
  static int __cpuinit
@@@ -1723,7 -1322,6 +1723,7 @@@ void account_page_dirtied(struct page *
                __inc_zone_page_state(page, NR_FILE_DIRTY);
                __inc_zone_page_state(page, NR_DIRTIED);
                __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 +              __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
                task_dirty_inc(current);
                task_io_account_write(PAGE_CACHE_SIZE);
        }
diff --combined mm/rmap.c
+++ b/mm/rmap.c
@@@ -51,7 -51,7 +51,7 @@@
  #include <linux/ksm.h>
  #include <linux/rmap.h>
  #include <linux/rcupdate.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/memcontrol.h>
  #include <linux/mmu_notifier.h>
  #include <linux/migrate.h>
@@@ -1164,7 -1164,7 +1164,7 @@@ void page_remove_rmap(struct page *page
  
  /*
   * Subfunctions of try_to_unmap: try_to_unmap_one called
 - * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 + * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
   */
  int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                     unsigned long address, enum ttu_flags flags)
diff --combined mm/shmem.c
@@@ -28,7 -28,7 +28,7 @@@
  #include <linux/pagemap.h>
  #include <linux/file.h>
  #include <linux/mm.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/swap.h>
  
  static struct vfsmount *shm_mnt;
@@@ -1068,12 -1068,6 +1068,12 @@@ int shmem_lock(struct file *file, int l
                user_shm_unlock(inode->i_size, user);
                info->flags &= ~VM_LOCKED;
                mapping_clear_unevictable(file->f_mapping);
 +              /*
 +               * Ensure that a racing putback_lru_page() can see
 +               * the pages of this mapping are evictable when we
 +               * skip them due to !PageLRU during the scan.
 +               */
 +              smp_mb__after_clear_bit();
                scan_mapping_unevictable_pages(file->f_mapping);
        }
        retval = 0;
@@@ -2503,7 -2497,7 +2503,7 @@@ struct file *shmem_file_setup(const cha
  
        d_instantiate(path.dentry, inode);
        inode->i_size = size;
 -      inode->i_nlink = 0;     /* It is unlinked */
 +      clear_nlink(inode);     /* It is unlinked */
  #ifndef CONFIG_MMU
        error = ramfs_nommu_expand_for_mapping(inode, size);
        if (error)
diff --combined mm/swap.c
+++ b/mm/swap.c
@@@ -21,7 -21,7 +21,7 @@@
  #include <linux/pagemap.h>
  #include <linux/pagevec.h>
  #include <linux/init.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/mm_inline.h>
  #include <linux/buffer_head.h>        /* for try_to_release_page() */
  #include <linux/percpu_counter.h>
@@@ -78,22 -78,39 +78,22 @@@ static void put_compound_page(struct pa
  {
        if (unlikely(PageTail(page))) {
                /* __split_huge_page_refcount can run under us */
 -              struct page *page_head = page->first_page;
 -              smp_rmb();
 -              /*
 -               * If PageTail is still set after smp_rmb() we can be sure
 -               * that the page->first_page we read wasn't a dangling pointer.
 -               * See __split_huge_page_refcount() smp_wmb().
 -               */
 -              if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
 +              struct page *page_head = compound_trans_head(page);
 +
 +              if (likely(page != page_head &&
 +                         get_page_unless_zero(page_head))) {
                        unsigned long flags;
                        /*
 -                       * Verify that our page_head wasn't converted
 -                       * to a a regular page before we got a
 -                       * reference on it.
 -                       */
 -                      if (unlikely(!PageHead(page_head))) {
 -                              /* PageHead is cleared after PageTail */
 -                              smp_rmb();
 -                              VM_BUG_ON(PageTail(page));
 -                              goto out_put_head;
 -                      }
 -                      /*
 -                       * Only run compound_lock on a valid PageHead,
 -                       * after having it pinned with
 -                       * get_page_unless_zero() above.
 +                       * page_head wasn't a dangling pointer but it
 +                       * may not be a head page anymore by the time
 +                       * we obtain the lock. That is ok as long as it
 +                       * can't be freed from under us.
                         */
 -                      smp_mb();
 -                      /* page_head wasn't a dangling pointer */
                        flags = compound_lock_irqsave(page_head);
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount run before us */
                                compound_unlock_irqrestore(page_head, flags);
                                VM_BUG_ON(PageHead(page_head));
 -                      out_put_head:
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
                        out_put_single:
                        VM_BUG_ON(page_head != page->first_page);
                        /*
                         * We can release the refcount taken by
 -                       * get_page_unless_zero now that
 -                       * split_huge_page_refcount is blocked on the
 -                       * compound_lock.
 +                       * get_page_unless_zero() now that
 +                       * __split_huge_page_refcount() is blocked on
 +                       * the compound_lock.
                         */
                        if (put_page_testzero(page_head))
                                VM_BUG_ON(1);
                        /* __split_huge_page_refcount will wait now */
 -                      VM_BUG_ON(atomic_read(&page->_count) <= 0);
 -                      atomic_dec(&page->_count);
 +                      VM_BUG_ON(page_mapcount(page) <= 0);
 +                      atomic_dec(&page->_mapcount);
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
 +                      VM_BUG_ON(atomic_read(&page->_count) != 0);
                        compound_unlock_irqrestore(page_head, flags);
                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
@@@ -144,45 -160,6 +144,45 @@@ void put_page(struct page *page
  }
  EXPORT_SYMBOL(put_page);
  
 +/*
 + * This function is exported but must not be called by anything other
 + * than get_page(). It implements the slow path of get_page().
 + */
 +bool __get_page_tail(struct page *page)
 +{
 +      /*
 +       * This takes care of get_page() if run on a tail page
 +       * returned by one of the get_user_pages/follow_page variants.
 +       * get_user_pages/follow_page itself doesn't need the compound
 +       * lock because it runs __get_page_tail_foll() under the
 +       * proper PT lock that already serializes against
 +       * split_huge_page().
 +       */
 +      unsigned long flags;
 +      bool got = false;
 +      struct page *page_head = compound_trans_head(page);
 +
 +      if (likely(page != page_head && get_page_unless_zero(page_head))) {
 +              /*
 +               * page_head wasn't a dangling pointer but it
 +               * may not be a head page anymore by the time
 +               * we obtain the lock. That is ok as long as it
 +               * can't be freed from under us.
 +               */
 +              flags = compound_lock_irqsave(page_head);
 +              /* here __split_huge_page_refcount won't run anymore */
 +              if (likely(PageTail(page))) {
 +                      __get_page_tail_foll(page, false);
 +                      got = true;
 +              }
 +              compound_unlock_irqrestore(page_head, flags);
 +              if (unlikely(!got))
 +                      put_page(page_head);
 +      }
 +      return got;
 +}
 +EXPORT_SYMBOL(__get_page_tail);
 +
  /**
   * put_pages_list() - release a list of pages
   * @pages: list of pages threaded on page->lru
diff --combined mm/swapfile.c
@@@ -21,7 -21,6 +21,6 @@@
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  #include <linux/init.h>
- #include <linux/module.h>
  #include <linux/ksm.h>
  #include <linux/rmap.h>
  #include <linux/security.h>
@@@ -1617,7 -1616,7 +1616,7 @@@ SYSCALL_DEFINE1(swapoff, const char __u
  
        oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
        err = try_to_unuse(type);
 -      test_set_oom_score_adj(oom_score_adj);
 +      compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj);
  
        if (err) {
                /*
diff --combined net/8021q/vlan_core.c
@@@ -2,9 -2,10 +2,10 @@@
  #include <linux/netdevice.h>
  #include <linux/if_vlan.h>
  #include <linux/netpoll.h>
+ #include <linux/export.h>
  #include "vlan.h"
  
 -bool vlan_do_receive(struct sk_buff **skbp)
 +bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
  {
        struct sk_buff *skb = *skbp;
        u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
  
        vlan_dev = vlan_find_dev(skb->dev, vlan_id);
        if (!vlan_dev) {
 -              if (vlan_id)
 +              /* Only the last call to vlan_do_receive() should change
 +               * pkt_type to PACKET_OTHERHOST
 +               */
 +              if (vlan_id && last_handler)
                        skb->pkt_type = PACKET_OTHERHOST;
                return false;
        }
diff --combined net/ipv4/udplite.c
@@@ -10,6 -10,7 +10,7 @@@
   *            as published by the Free Software Foundation; either version
   *            2 of the License, or (at your option) any later version.
   */
+ #include <linux/export.h>
  #include "udp_impl.h"
  
  struct udp_table      udplite_table __read_mostly;
@@@ -71,20 -72,13 +72,20 @@@ static struct inet_protosw udplite4_pro
  };
  
  #ifdef CONFIG_PROC_FS
 +
 +static const struct file_operations udplite_afinfo_seq_fops = {
 +      .owner    = THIS_MODULE,
 +      .open     = udp_seq_open,
 +      .read     = seq_read,
 +      .llseek   = seq_lseek,
 +      .release  = seq_release_net
 +};
 +
  static struct udp_seq_afinfo udplite4_seq_afinfo = {
        .name           = "udplite",
        .family         = AF_INET,
        .udp_table      = &udplite_table,
 -      .seq_fops       = {
 -              .owner  =       THIS_MODULE,
 -      },
 +      .seq_fops       = &udplite_afinfo_seq_fops,
        .seq_ops        = {
                .show           = udp4_seq_show,
        },
diff --combined net/ipv6/addrconf.c
@@@ -87,6 -87,7 +87,7 @@@
  
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
+ #include <linux/export.h>
  
  /* Set to 3 to get tracing... */
  #define ACONF_DEBUG 2
@@@ -1713,40 -1714,6 +1714,40 @@@ addrconf_prefix_route(struct in6_addr *
        ip6_route_add(&cfg);
  }
  
 +
 +static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
 +                                                int plen,
 +                                                const struct net_device *dev,
 +                                                u32 flags, u32 noflags)
 +{
 +      struct fib6_node *fn;
 +      struct rt6_info *rt = NULL;
 +      struct fib6_table *table;
 +
 +      table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
 +      if (table == NULL)
 +              return NULL;
 +
 +      write_lock_bh(&table->tb6_lock);
 +      fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
 +      if (!fn)
 +              goto out;
 +      for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
 +              if (rt->rt6i_dev->ifindex != dev->ifindex)
 +                      continue;
 +              if ((rt->rt6i_flags & flags) != flags)
 +                      continue;
 +              if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
 +                      continue;
 +              dst_hold(&rt->dst);
 +              break;
 +      }
 +out:
 +      write_unlock_bh(&table->tb6_lock);
 +      return rt;
 +}
 +
 +
  /* Create "default" multicast route to the interface */
  
  static void addrconf_add_mroute(struct net_device *dev)
@@@ -1876,13 -1843,10 +1877,13 @@@ void addrconf_prefix_rcv(struct net_dev
                if (addrconf_finite_timeout(rt_expires))
                        rt_expires *= HZ;
  
 -              rt = rt6_lookup(net, &pinfo->prefix, NULL,
 -                              dev->ifindex, 1);
 +              rt = addrconf_get_prefix_route(&pinfo->prefix,
 +                                             pinfo->prefix_len,
 +                                             dev,
 +                                             RTF_ADDRCONF | RTF_PREFIX_RT,
 +                                             RTF_GATEWAY | RTF_DEFAULT);
  
 -              if (rt && addrconf_is_prefix_route(rt)) {
 +              if (rt) {
                        /* Autoconf prefix route */
                        if (valid_lft == 0) {
                                ip6_del_rt(rt);
diff --combined net/ipv6/netfilter.c
@@@ -3,6 -3,7 +3,7 @@@
  #include <linux/ipv6.h>
  #include <linux/netfilter.h>
  #include <linux/netfilter_ipv6.h>
+ #include <linux/export.h>
  #include <net/dst.h>
  #include <net/ipv6.h>
  #include <net/ip6_route.h>
@@@ -100,16 -101,9 +101,16 @@@ static int nf_ip6_route(struct net *net
                .pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
        };
        const void *sk = strict ? &fake_sk : NULL;
 -
 -      *dst = ip6_route_output(net, sk, &fl->u.ip6);
 -      return (*dst)->error;
 +      struct dst_entry *result;
 +      int err;
 +
 +      result = ip6_route_output(net, sk, &fl->u.ip6);
 +      err = result->error;
 +      if (err)
 +              dst_release(result);
 +      else
 +              *dst = result;
 +      return err;
  }
  
  __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
diff --combined net/ipv6/route.c
@@@ -26,6 -26,7 +26,7 @@@
  
  #include <linux/capability.h>
  #include <linux/errno.h>
+ #include <linux/export.h>
  #include <linux/types.h>
  #include <linux/times.h>
  #include <linux/socket.h>
@@@ -1086,10 -1087,11 +1087,10 @@@ struct dst_entry *icmp6_dst_alloc(struc
        rt->dst.output  = ip6_output;
        dst_set_neighbour(&rt->dst, neigh);
        atomic_set(&rt->dst.__refcnt, 1);
 -      dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
 -
        ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
        rt->rt6i_dst.plen = 128;
        rt->rt6i_idev     = idev;
 +      dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
  
        spin_lock_bh(&icmp6_dst_lock);
        rt->dst.next = icmp6_dst_gc_list;
diff --combined net/ipv6/udplite.c
@@@ -11,6 -11,7 +11,7 @@@
   *            as published by the Free Software Foundation; either version
   *            2 of the License, or (at your option) any later version.
   */
+ #include <linux/export.h>
  #include "udp_impl.h"
  
  static int udplitev6_rcv(struct sk_buff *skb)
@@@ -93,20 -94,13 +94,20 @@@ void udplitev6_exit(void
  }
  
  #ifdef CONFIG_PROC_FS
 +
 +static const struct file_operations udplite6_afinfo_seq_fops = {
 +      .owner    = THIS_MODULE,
 +      .open     = udp_seq_open,
 +      .read     = seq_read,
 +      .llseek   = seq_lseek,
 +      .release  = seq_release_net
 +};
 +
  static struct udp_seq_afinfo udplite6_seq_afinfo = {
        .name           = "udplite6",
        .family         = AF_INET6,
        .udp_table      = &udplite_table,
 -      .seq_fops       = {
 -              .owner  =       THIS_MODULE,
 -      },
 +      .seq_fops       = &udplite6_afinfo_seq_fops,
        .seq_ops        = {
                .show           = udp6_seq_show,
        },
diff --combined sound/core/control.c
@@@ -21,6 -21,7 +21,7 @@@
  
  #include <linux/threads.h>
  #include <linux/interrupt.h>
+ #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/vmalloc.h>
  #include <linux/time.h>
@@@ -1081,12 -1082,12 +1082,12 @@@ static int snd_ctl_elem_init_enum_names
        char *names, *p;
        size_t buf_len, name_len;
        unsigned int i;
 +      const uintptr_t user_ptrval = ue->info.value.enumerated.names_ptr;
  
        if (ue->info.value.enumerated.names_length > 64 * 1024)
                return -EINVAL;
  
 -      names = memdup_user(
 -              (const void __user *)ue->info.value.enumerated.names_ptr,
 +      names = memdup_user((const void __user *)user_ptrval,
                ue->info.value.enumerated.names_length);
        if (IS_ERR(names))
                return PTR_ERR(names);
diff --combined sound/core/hwdep.c
@@@ -24,6 -24,7 +24,7 @@@
  #include <linux/slab.h>
  #include <linux/time.h>
  #include <linux/mutex.h>
+ #include <linux/module.h>
  #include <sound/core.h>
  #include <sound/control.h>
  #include <sound/minors.h>
@@@ -272,14 -273,7 +273,14 @@@ static int snd_hwdep_control_ioctl(stru
                        if (get_user(device, (int __user *)arg))
                                return -EFAULT;
                        mutex_lock(&register_mutex);
 -                      device = device < 0 ? 0 : device + 1;
 +
 +                      if (device < 0)
 +                              device = 0;
 +                      else if (device < SNDRV_MINOR_HWDEPS)
 +                              device++;
 +                      else
 +                              device = SNDRV_MINOR_HWDEPS;
 +
                        while (device < SNDRV_MINOR_HWDEPS) {
                                if (snd_hwdep_search(card, device))
                                        break;
@@@ -26,6 -26,7 +26,7 @@@
  #include <linux/ctype.h>
  #include <linux/string.h>
  #include <linux/firmware.h>
+ #include <linux/export.h>
  #include <sound/core.h>
  #include "hda_codec.h"
  #include "hda_local.h"
@@@ -756,6 -757,8 +757,6 @@@ static int get_line_from_fw(char *buf, 
        }
        if (!fw->size)
                return 0;
 -      if (size < fw->size)
 -              size = fw->size;
  
        for (len = 0; len < fw->size; len++) {
                if (!*p)
@@@ -31,7 -31,7 +31,7 @@@
  #include <linux/init.h>
  #include <linux/delay.h>
  #include <linux/slab.h>
- #include <linux/moduleparam.h>
+ #include <linux/module.h>
  #include <sound/core.h>
  #include <sound/jack.h>
  #include "hda_codec.h"
@@@ -1006,6 -1006,7 +1006,6 @@@ static int hdmi_add_pin(struct hda_code
        unsigned int caps, config;
        int pin_idx;
        struct hdmi_spec_per_pin *per_pin;
 -      struct hdmi_eld *eld;
        int err;
  
        caps = snd_hda_param_read(codec, pin_nid, AC_PAR_PIN_CAP);
  
        pin_idx = spec->num_pins;
        per_pin = &spec->pins[pin_idx];
 -      eld = &per_pin->sink_eld;
  
        per_pin->pin_nid = pin_nid;
  
@@@ -1574,7 -1576,7 +1574,7 @@@ static int nvhdmi_8ch_7x_pcm_prepare(st
                                     struct snd_pcm_substream *substream)
  {
        int chs;
 -      unsigned int dataDCC1, dataDCC2, channel_id;
 +      unsigned int dataDCC2, channel_id;
        int i;
        struct hdmi_spec *spec = codec->spec;
        struct hda_spdif_out *spdif =
  
        chs = substream->runtime->channels;
  
 -      dataDCC1 = AC_DIG1_ENABLE | AC_DIG1_COPYRIGHT;
        dataDCC2 = 0x2;
  
        /* turn off SPDIF once; otherwise the IEC958 bits won't be updated */
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/delay.h>
  #include <linux/slab.h>
  #include <linux/pci.h>
+ #include <linux/module.h>
  #include <sound/core.h>
  #include <sound/jack.h>
  #include "hda_codec.h"
@@@ -1604,29 -1605,27 +1605,29 @@@ static void alc_auto_init_digital(struc
  static void alc_auto_parse_digital(struct hda_codec *codec)
  {
        struct alc_spec *spec = codec->spec;
 -      int i, err;
 +      int i, err, nums;
        hda_nid_t dig_nid;
  
        /* support multiple SPDIFs; the secondary is set up as a slave */
 +      nums = 0;
        for (i = 0; i < spec->autocfg.dig_outs; i++) {
                hda_nid_t conn[4];
                err = snd_hda_get_connections(codec,
                                              spec->autocfg.dig_out_pins[i],
                                              conn, ARRAY_SIZE(conn));
 -              if (err < 0)
 +              if (err <= 0)
                        continue;
                dig_nid = conn[0]; /* assume the first element is audio-out */
 -              if (!i) {
 +              if (!nums) {
                        spec->multiout.dig_out_nid = dig_nid;
                        spec->dig_out_type = spec->autocfg.dig_out_type[0];
                } else {
                        spec->multiout.slave_dig_outs = spec->slave_dig_outs;
 -                      if (i >= ARRAY_SIZE(spec->slave_dig_outs) - 1)
 +                      if (nums >= ARRAY_SIZE(spec->slave_dig_outs) - 1)
                                break;
 -                      spec->slave_dig_outs[i - 1] = dig_nid;
 +                      spec->slave_dig_outs[nums - 1] = dig_nid;
                }
 +              nums++;
        }
  
        if (spec->autocfg.dig_in_pin) {
@@@ -2272,7 -2271,6 +2273,7 @@@ static int alc_build_pcms(struct hda_co
        struct alc_spec *spec = codec->spec;
        struct hda_pcm *info = spec->pcm_rec;
        const struct hda_pcm_stream *p;
 +      bool have_multi_adcs;
        int i;
  
        codec->num_pcms = 1;
        /* If the use of more than one ADC is requested for the current
         * model, configure a second analog capture-only PCM.
         */
 +      have_multi_adcs = (spec->num_adc_nids > 1) &&
 +              !spec->dyn_adc_switch && !spec->auto_mic &&
 +              (!spec->input_mux || spec->input_mux->num_items > 1);
        /* Additional Analaog capture for index #2 */
 -      if (spec->alt_dac_nid || spec->num_adc_nids > 1) {
 +      if (spec->alt_dac_nid || have_multi_adcs) {
                codec->num_pcms = 3;
                info = spec->pcm_rec + 2;
                info->name = spec->stream_name_analog;
                                alc_pcm_null_stream;
                        info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = 0;
                }
 -              if (spec->num_adc_nids > 1) {
 +              if (have_multi_adcs) {
                        p = spec->stream_analog_alt_capture;
                        if (!p)
                                p = &alc_pcm_analog_alt_capture;
@@@ -2663,6 -2658,7 +2664,6 @@@ static int alc_auto_fill_adc_caps(struc
        hda_nid_t *adc_nids = spec->private_adc_nids;
        hda_nid_t *cap_nids = spec->private_capsrc_nids;
        int max_nums = ARRAY_SIZE(spec->private_adc_nids);
 -      bool indep_capsrc = false;
        int i, nums = 0;
  
        nid = codec->start_nid;
                                break;
                        if (type == AC_WID_AUD_SEL) {
                                cap_nids[nums] = src;
 -                              indep_capsrc = true;
                                break;
                        }
                        n = snd_hda_get_conn_list(codec, src, &list);
                        if (n > 1) {
                                cap_nids[nums] = src;
 -                              indep_capsrc = true;
                                break;
                        } else if (n != 1)
                                break;
@@@ -3329,12 -3327,6 +3330,12 @@@ static void alc_auto_set_output_and_unm
        if (nid)
                snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
                                    AMP_OUT_ZERO);
 +
 +      /* unmute DAC if it's not assigned to a mixer */
 +      nid = alc_look_for_out_mute_nid(codec, pin, dac);
 +      if (nid == mix && nid_has_mute(codec, dac, HDA_OUTPUT))
 +              snd_hda_codec_write(codec, dac, 0, AC_VERB_SET_AMP_GAIN_MUTE,
 +                                  AMP_OUT_ZERO);
  }
  
  static void alc_auto_init_multi_out(struct hda_codec *codec)
@@@ -29,6 -29,7 +29,7 @@@
  #include <linux/slab.h>
  #include <linux/pci.h>
  #include <linux/dmi.h>
+ #include <linux/module.h>
  #include <sound/core.h>
  #include <sound/asoundef.h>
  #include <sound/jack.h>
@@@ -3791,10 -3792,9 +3792,10 @@@ static int is_dual_headphones(struct hd
  }
  
  
 -static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out, hda_nid_t dig_in)
 +static int stac92xx_parse_auto_config(struct hda_codec *codec)
  {
        struct sigmatel_spec *spec = codec->spec;
 +      hda_nid_t dig_out = 0, dig_in = 0;
        int hp_swap = 0;
        int i, err;
  
        if (spec->multiout.max_channels > 2)
                spec->surr_switch = 1;
  
 +      /* find digital out and in converters */
 +      for (i = codec->start_nid; i < codec->start_nid + codec->num_nodes; i++) {
 +              unsigned int wid_caps = get_wcaps(codec, i);
 +              if (wid_caps & AC_WCAP_DIGITAL) {
 +                      switch (get_wcaps_type(wid_caps)) {
 +                      case AC_WID_AUD_OUT:
 +                              if (!dig_out)
 +                                      dig_out = i;
 +                              break;
 +                      case AC_WID_AUD_IN:
 +                              if (!dig_in)
 +                                      dig_in = i;
 +                              break;
 +                      }
 +              }
 +      }
        if (spec->autocfg.dig_outs)
                spec->multiout.dig_out_nid = dig_out;
        if (dig_in && spec->autocfg.dig_in_pin)
@@@ -5296,7 -5280,7 +5297,7 @@@ static int patch_stac925x(struct hda_co
        spec->capvols = stac925x_capvols;
        spec->capsws = stac925x_capsws;
  
 -      err = stac92xx_parse_auto_config(codec, 0x8, 0x7);
 +      err = stac92xx_parse_auto_config(codec);
        if (!err) {
                if (spec->board_config < 0) {
                        printk(KERN_WARNING "hda_codec: No auto-config is "
@@@ -5437,7 -5421,7 +5438,7 @@@ again
        spec->num_pwrs = ARRAY_SIZE(stac92hd73xx_pwr_nids);
        spec->pwr_nids = stac92hd73xx_pwr_nids;
  
 -      err = stac92xx_parse_auto_config(codec, 0x25, 0x27);
 +      err = stac92xx_parse_auto_config(codec);
  
        if (!err) {
                if (spec->board_config < 0) {
@@@ -5646,8 -5630,26 +5647,8 @@@ again
                stac92xx_set_config_regs(codec,
                                stac92hd83xxx_brd_tbl[spec->board_config]);
  
 -      switch (codec->vendor_id) {
 -      case 0x111d76d1:
 -      case 0x111d76d9:
 -      case 0x111d76df:
 -      case 0x111d76e5:
 -      case 0x111d7666:
 -      case 0x111d7667:
 -      case 0x111d7668:
 -      case 0x111d7669:
 -      case 0x111d76e3:
 -      case 0x111d7604:
 -      case 0x111d76d4:
 -      case 0x111d7605:
 -      case 0x111d76d5:
 -      case 0x111d76e7:
 -              if (spec->board_config == STAC_92HD83XXX_PWR_REF)
 -                      break;
 +      if (spec->board_config != STAC_92HD83XXX_PWR_REF)
                spec->num_pwrs = 0;
 -              break;
 -      }
  
        codec->patch_ops = stac92xx_patch_ops;
  
        }
  #endif        
  
 -      err = stac92xx_parse_auto_config(codec, 0x1d, 0);
 +      err = stac92xx_parse_auto_config(codec);
        if (!err) {
                if (spec->board_config < 0) {
                        printk(KERN_WARNING "hda_codec: No auto-config is "
@@@ -5995,7 -5997,7 +5996,7 @@@ again
  
        spec->multiout.dac_nids = spec->dac_nids;
  
 -      err = stac92xx_parse_auto_config(codec, 0x21, 0);
 +      err = stac92xx_parse_auto_config(codec);
        if (!err) {
                if (spec->board_config < 0) {
                        printk(KERN_WARNING "hda_codec: No auto-config is "
@@@ -6104,7 -6106,7 +6105,7 @@@ static int patch_stac922x(struct hda_co
  
        spec->multiout.dac_nids = spec->dac_nids;
        
 -      err = stac92xx_parse_auto_config(codec, 0x08, 0x09);
 +      err = stac92xx_parse_auto_config(codec);
        if (!err) {
                if (spec->board_config < 0) {
                        printk(KERN_WARNING "hda_codec: No auto-config is "
@@@ -6229,7 -6231,7 +6230,7 @@@ static int patch_stac927x(struct hda_co
        spec->aloopback_shift = 0;
        spec->eapd_switch = 1;
  
 -      err = stac92xx_parse_auto_config(codec, 0x1e, 0x20);
 +      err = stac92xx_parse_auto_config(codec);
        if (!err) {
                if (spec->board_config < 0) {
                        printk(KERN_WARNING "hda_codec: No auto-config is "
@@@ -6354,7 -6356,7 +6355,7 @@@ static int patch_stac9205(struct hda_co
                break;
        }
  
 -      err = stac92xx_parse_auto_config(codec, 0x1f, 0x20);
 +      err = stac92xx_parse_auto_config(codec);
        if (!err) {
                if (spec->board_config < 0) {
                        printk(KERN_WARNING "hda_codec: No auto-config is "
@@@ -6459,7 -6461,7 +6460,7 @@@ static int patch_stac9872(struct hda_co
        spec->capvols = stac9872_capvols;
        spec->capsws = stac9872_capsws;
  
 -      err = stac92xx_parse_auto_config(codec, 0x10, 0x12);
 +      err = stac92xx_parse_auto_config(codec);
        if (err < 0) {
                stac92xx_free(codec);
                return -EINVAL;
@@@ -6564,18 -6566,6 +6565,18 @@@ static const struct hda_codec_preset sn
        { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76e8, .name = "92HD66B1X5", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76e9, .name = "92HD66B2X5", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76ea, .name = "92HD66B3X5", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76eb, .name = "92HD66C1X5", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76ec, .name = "92HD66C2X5", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76ed, .name = "92HD66C3X5", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76ee, .name = "92HD66B1X3", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76ef, .name = "92HD66B2X3", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76f0, .name = "92HD66B3X3", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76f1, .name = "92HD66C1X3", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76f2, .name = "92HD66C2X3", .patch = patch_stac92hd83xxx},
 +      { .id = 0x111d76f3, .name = "92HD66C3/65", .patch = patch_stac92hd83xxx},
        {} /* terminator */
  };
  
@@@ -49,6 -49,7 +49,7 @@@
  #include <linux/init.h>
  #include <linux/delay.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  #include <sound/core.h>
  #include <sound/asoundef.h>
  #include "hda_codec.h"
@@@ -3700,8 -3701,13 +3701,8 @@@ static const struct hda_verb vt1812_ini
  static void set_widgets_power_state_vt1812(struct hda_codec *codec)
  {
        struct via_spec *spec = codec->spec;
 -      int imux_is_smixer =
 -      snd_hda_codec_read(codec, 0x13, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 3;
        unsigned int parm;
        unsigned int present;
 -      /* MUX10 (1eh) = stereo mixer */
 -      imux_is_smixer =
 -      snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 5;
        /* inputs */
        /* PW 5/6/7 (29h/2ah/2bh) */
        parm = AC_PWRST_D3;
diff --combined sound/pci/intel8x0.c
@@@ -32,7 -32,7 +32,7 @@@
  #include <linux/init.h>
  #include <linux/pci.h>
  #include <linux/slab.h>
- #include <linux/moduleparam.h>
+ #include <linux/module.h>
  #include <sound/core.h>
  #include <sound/pcm.h>
  #include <sound/ac97_codec.h>
  #include <asm/pgtable.h>
  #include <asm/cacheflush.h>
  
 +#ifdef CONFIG_KVM_GUEST
 +#include <linux/kvm_para.h>
 +#else
 +#define kvm_para_available() (0)
 +#endif
 +
  MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
  MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455");
  MODULE_LICENSE("GPL");
@@@ -83,7 -77,6 +83,7 @@@ static int buggy_semaphore
  static int buggy_irq = -1; /* auto-check */
  static int xbox;
  static int spdif_aclink = -1;
 +static int inside_vm = -1;
  
  module_param(index, int, 0444);
  MODULE_PARM_DESC(index, "Index value for Intel i8x0 soundcard.");
@@@ -101,8 -94,6 +101,8 @@@ module_param(xbox, bool, 0444)
  MODULE_PARM_DESC(xbox, "Set to 1 for Xbox, if you have problems with the AC'97 codec detection.");
  module_param(spdif_aclink, int, 0444);
  MODULE_PARM_DESC(spdif_aclink, "S/PDIF over AC-link.");
 +module_param(inside_vm, bool, 0444);
 +MODULE_PARM_DESC(inside_vm, "KVM/Parallels optimization.");
  
  /* just for backward compatibility */
  static int enable;
@@@ -409,7 -400,6 +409,7 @@@ struct intel8x0 
        unsigned buggy_irq: 1;          /* workaround for buggy mobos */
        unsigned xbox: 1;               /* workaround for Xbox AC'97 detection */
        unsigned buggy_semaphore: 1;    /* workaround for buggy codec semaphore */
 +      unsigned inside_vm: 1;          /* enable VM optimization */
  
        int spdif_idx;  /* SPDIF BAR index; *_SPBAR or -1 if use PCMOUT */
        unsigned int sdm_saved; /* SDM reg value */
@@@ -1075,11 -1065,8 +1075,11 @@@ static snd_pcm_uframes_t snd_intel8x0_p
                        udelay(10);
                        continue;
                }
 -              if (civ == igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV) &&
 -                  ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
 +              if (civ != igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV))
 +                      continue;
 +              if (chip->inside_vm)
 +                      break;
 +              if (ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
                        break;
        } while (timeout--);
        ptr = ichdev->last_pos;
@@@ -2997,10 -2984,6 +2997,10 @@@ static int __devinit snd_intel8x0_creat
        if (xbox)
                chip->xbox = 1;
  
 +      chip->inside_vm = inside_vm;
 +      if (inside_vm)
 +              printk(KERN_INFO "intel8x0: enable KVM optimization\n");
 +
        if (pci->vendor == PCI_VENDOR_ID_INTEL &&
            pci->device == PCI_DEVICE_ID_INTEL_440MX)
                chip->fix_nocache = 1; /* enable workaround */
@@@ -3243,14 -3226,6 +3243,14 @@@ static int __devinit snd_intel8x0_probe
                        buggy_irq = 0;
        }
  
 +      if (inside_vm < 0) {
 +              /* detect KVM and Parallels virtual environments */
 +              inside_vm = kvm_para_available();
 +#if defined(__i386__) || defined(__x86_64__)
 +              inside_vm = inside_vm || boot_cpu_has(X86_FEATURE_HYPERVISOR);
 +#endif
 +      }
 +
        if ((err = snd_intel8x0_create(card, pci, pci_id->driver_data,
                                       &chip)) < 0) {
                snd_card_free(card);
diff --combined sound/pci/rme9652/hdsp.c
@@@ -26,7 -26,7 +26,7 @@@
  #include <linux/interrupt.h>
  #include <linux/pci.h>
  #include <linux/firmware.h>
- #include <linux/moduleparam.h>
+ #include <linux/module.h>
  #include <linux/math64.h>
  
  #include <sound/core.h>
@@@ -151,7 -151,7 +151,7 @@@ MODULE_FIRMWARE("digiface_firmware_rev1
  #define HDSP_PROGRAM          0x020
  #define HDSP_CONFIG_MODE_0    0x040
  #define HDSP_CONFIG_MODE_1    0x080
 -#define HDSP_VERSION_BIT      0x100
 +#define HDSP_VERSION_BIT      (0x100 | HDSP_S_LOAD)
  #define HDSP_BIGENDIAN_MODE     0x200
  #define HDSP_RD_MULTIPLE        0x400
  #define HDSP_9652_ENABLE_MIXER  0x800
@@@ -41,7 -41,7 +41,7 @@@
  #include <linux/init.h>
  #include <linux/delay.h>
  #include <linux/interrupt.h>
- #include <linux/moduleparam.h>
+ #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/pci.h>
  #include <linux/math64.h>
@@@ -520,9 -520,16 +520,9 @@@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MA
  #define HDSPM_DMA_AREA_BYTES (HDSPM_MAX_CHANNELS * HDSPM_CHANNEL_BUFFER_BYTES)
  #define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024)
  
 -/* revisions >= 230 indicate AES32 card */
 -#define HDSPM_MADI_ANCIENT_REV        204
 -#define HDSPM_MADI_OLD_REV    207
 -#define HDSPM_MADI_REV                210
  #define HDSPM_RAYDAT_REV      211
  #define HDSPM_AIO_REV         212
  #define HDSPM_MADIFACE_REV    213
 -#define HDSPM_AES_REV         240
 -#define HDSPM_AES32_REV               234
 -#define HDSPM_AES32_OLD_REV   233
  
  /* speed factor modes */
  #define HDSPM_SPEED_SINGLE 0
@@@ -6246,7 -6253,7 +6246,7 @@@ static int snd_hdspm_hwdep_ioctl(struc
                        status.card_specific.madi.madi_input =
                                (statusregister & HDSPM_AB_int) ? 1 : 0;
                        status.card_specific.madi.channel_format =
 -                              (statusregister & HDSPM_TX_64ch) ? 1 : 0;
 +                              (statusregister & HDSPM_RX_64ch) ? 1 : 0;
                        /* TODO: Mac driver sets it when f_s>48kHz */
                        status.card_specific.madi.frame_format = 0;
  
@@@ -6496,6 -6503,13 +6496,6 @@@ static int __devinit snd_hdspm_create(s
        strcpy(card->driver, "HDSPM");
  
        switch (hdspm->firmware_rev) {
 -      case HDSPM_MADI_REV:
 -      case HDSPM_MADI_OLD_REV:
 -      case HDSPM_MADI_ANCIENT_REV:
 -              hdspm->io_type = MADI;
 -              hdspm->card_name = "RME MADI";
 -              hdspm->midiPorts = 3;
 -              break;
        case HDSPM_RAYDAT_REV:
                hdspm->io_type = RayDAT;
                hdspm->card_name = "RME RayDAT";
                hdspm->card_name = "RME MADIface";
                hdspm->midiPorts = 1;
                break;
 -      case HDSPM_AES_REV:
 -      case HDSPM_AES32_REV:
 -      case HDSPM_AES32_OLD_REV:
 -              hdspm->io_type = AES32;
 -              hdspm->card_name = "RME AES32";
 -              hdspm->midiPorts = 2;
 -              break;
        default:
 -              snd_printk(KERN_ERR "HDSPM: unknown firmware revision %x\n",
 +              if ((hdspm->firmware_rev == 0xf0) ||
 +                      ((hdspm->firmware_rev >= 0xe6) &&
 +                                      (hdspm->firmware_rev <= 0xea))) {
 +                      hdspm->io_type = AES32;
 +                      hdspm->card_name = "RME AES32";
 +                      hdspm->midiPorts = 2;
 +              } else if ((hdspm->firmware_rev == 0xd5) ||
 +                      ((hdspm->firmware_rev >= 0xc8)  &&
 +                              (hdspm->firmware_rev <= 0xcf))) {
 +                      hdspm->io_type = MADI;
 +                      hdspm->card_name = "RME MADI";
 +                      hdspm->midiPorts = 3;
 +              } else {
 +                      snd_printk(KERN_ERR
 +                              "HDSPM: unknown firmware revision %x\n",
                                hdspm->firmware_rev);
 -              return -ENODEV;
 +                      return -ENODEV;
 +              }
        }
  
        err = pci_enable_device(pci);
diff --combined sound/soc/samsung/ac97.c
@@@ -15,6 -15,7 +15,7 @@@
  #include <linux/io.h>
  #include <linux/delay.h>
  #include <linux/clk.h>
+ #include <linux/module.h>
  
  #include <sound/soc.h>
  
@@@ -271,10 -272,7 +272,10 @@@ static int s3c_ac97_trigger(struct snd_
  
        writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
  
 -      s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
 +      if (!dma_data->ops)
 +              dma_data->ops = samsung_dma_get_ops();
 +
 +      dma_data->ops->started(dma_data->channel);
  
        return 0;
  }
@@@ -320,10 -318,7 +321,10 @@@ static int s3c_ac97_mic_trigger(struct 
  
        writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
  
 -      s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
 +      if (!dma_data->ops)
 +              dma_data->ops = samsung_dma_get_ops();
 +
 +      dma_data->ops->started(dma_data->channel);
  
        return 0;
  }
diff --combined sound/soc/samsung/dma.c
@@@ -16,6 -16,7 +16,7 @@@
  
  #include <linux/slab.h>
  #include <linux/dma-mapping.h>
+ #include <linux/module.h>
  
  #include <sound/soc.h>
  #include <sound/pcm_params.h>
@@@ -54,6 -55,7 +55,6 @@@ struct runtime_data 
        spinlock_t lock;
        int state;
        unsigned int dma_loaded;
 -      unsigned int dma_limit;
        unsigned int dma_period;
        dma_addr_t dma_start;
        dma_addr_t dma_pos;
        struct s3c_dma_params *params;
  };
  
 +static void audio_buffdone(void *data);
 +
  /* dma_enqueue
   *
   * place a dma buffer onto the queue for the dma system
   * to handle.
 -*/
 + */
  static void dma_enqueue(struct snd_pcm_substream *substream)
  {
        struct runtime_data *prtd = substream->runtime->private_data;
        dma_addr_t pos = prtd->dma_pos;
        unsigned int limit;
 -      int ret;
 +      struct samsung_dma_prep_info dma_info;
  
        pr_debug("Entered %s\n", __func__);
  
 -      if (s3c_dma_has_circular())
 -              limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;
 -      else
 -              limit = prtd->dma_limit;
 +      limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;
  
        pr_debug("%s: loaded %d, limit %d\n",
                                __func__, prtd->dma_loaded, limit);
  
 -      while (prtd->dma_loaded < limit) {
 -              unsigned long len = prtd->dma_period;
 +      dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE);
 +      dma_info.direction =
 +              (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
 +              ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 +      dma_info.fp = audio_buffdone;
 +      dma_info.fp_param = substream;
 +      dma_info.period = prtd->dma_period;
 +      dma_info.len = prtd->dma_period*limit;
  
 +      while (prtd->dma_loaded < limit) {
                pr_debug("dma_loaded: %d\n", prtd->dma_loaded);
  
 -              if ((pos + len) > prtd->dma_end) {
 -                      len  = prtd->dma_end - pos;
 -                      pr_debug("%s: corrected dma len %ld\n", __func__, len);
 +              if ((pos + dma_info.period) > prtd->dma_end) {
 +                      dma_info.period  = prtd->dma_end - pos;
 +                      pr_debug("%s: corrected dma len %ld\n",
 +                                      __func__, dma_info.period);
                }
  
 -              ret = s3c2410_dma_enqueue(prtd->params->channel,
 -                      substream, pos, len);
 +              dma_info.buf = pos;
 +              prtd->params->ops->prepare(prtd->params->ch, &dma_info);
  
 -              if (ret == 0) {
 -                      prtd->dma_loaded++;
 -                      pos += prtd->dma_period;
 -                      if (pos >= prtd->dma_end)
 -                              pos = prtd->dma_start;
 -              } else
 -                      break;
 +              prtd->dma_loaded++;
 +              pos += prtd->dma_period;
 +              if (pos >= prtd->dma_end)
 +                      pos = prtd->dma_start;
        }
  
        prtd->dma_pos = pos;
  }
  
 -static void audio_buffdone(struct s3c2410_dma_chan *channel,
 -                              void *dev_id, int size,
 -                              enum s3c2410_dma_buffresult result)
 +static void audio_buffdone(void *data)
  {
 -      struct snd_pcm_substream *substream = dev_id;
 -      struct runtime_data *prtd;
 +      struct snd_pcm_substream *substream = data;
 +      struct runtime_data *prtd = substream->runtime->private_data;
  
        pr_debug("Entered %s\n", __func__);
  
 -      if (result == S3C2410_RES_ABORT || result == S3C2410_RES_ERR)
 -              return;
 -
 -      prtd = substream->runtime->private_data;
 +      if (prtd->state & ST_RUNNING) {
 +              prtd->dma_pos += prtd->dma_period;
 +              if (prtd->dma_pos >= prtd->dma_end)
 +                      prtd->dma_pos = prtd->dma_start;
  
 -      if (substream)
 -              snd_pcm_period_elapsed(substream);
 +              if (substream)
 +                      snd_pcm_period_elapsed(substream);
  
 -      spin_lock(&prtd->lock);
 -      if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) {
 -              prtd->dma_loaded--;
 -              dma_enqueue(substream);
 +              spin_lock(&prtd->lock);
 +              if (!samsung_dma_has_circular()) {
 +                      prtd->dma_loaded--;
 +                      dma_enqueue(substream);
 +              }
 +              spin_unlock(&prtd->lock);
        }
 -
 -      spin_unlock(&prtd->lock);
  }
  
  static int dma_hw_params(struct snd_pcm_substream *substream,
        unsigned long totbytes = params_buffer_bytes(params);
        struct s3c_dma_params *dma =
                snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
 -      int ret = 0;
 -
 +      struct samsung_dma_info dma_info;
  
        pr_debug("Entered %s\n", __func__);
  
                pr_debug("params %p, client %p, channel %d\n", prtd->params,
                        prtd->params->client, prtd->params->channel);
  
 -              ret = s3c2410_dma_request(prtd->params->channel,
 -                                        prtd->params->client, NULL);
 -
 -              if (ret < 0) {
 -                      printk(KERN_ERR "failed to get dma channel\n");
 -                      return ret;
 -              }
 -
 -              /* use the circular buffering if we have it available. */
 -              if (s3c_dma_has_circular())
 -                      s3c2410_dma_setflags(prtd->params->channel,
 -                                           S3C2410_DMAF_CIRCULAR);
 +              prtd->params->ops = samsung_dma_get_ops();
 +
 +              dma_info.cap = (samsung_dma_has_circular() ?
 +                      DMA_CYCLIC : DMA_SLAVE);
 +              dma_info.client = prtd->params->client;
 +              dma_info.direction =
 +                      (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
 +                      ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 +              dma_info.width = prtd->params->dma_size;
 +              dma_info.fifo = prtd->params->dma_addr;
 +              prtd->params->ch = prtd->params->ops->request(
 +                              prtd->params->channel, &dma_info);
        }
  
 -      s3c2410_dma_set_buffdone_fn(prtd->params->channel,
 -                                  audio_buffdone);
 -
        snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
  
        runtime->dma_bytes = totbytes;
  
        spin_lock_irq(&prtd->lock);
        prtd->dma_loaded = 0;
 -      prtd->dma_limit = runtime->hw.periods_min;
        prtd->dma_period = params_period_bytes(params);
        prtd->dma_start = runtime->dma_addr;
        prtd->dma_pos = prtd->dma_start;
@@@ -198,12 -203,11 +199,12 @@@ static int dma_hw_free(struct snd_pcm_s
  
        pr_debug("Entered %s\n", __func__);
  
 -      /* TODO - do we need to ensure DMA flushed */
        snd_pcm_set_runtime_buffer(substream, NULL);
  
        if (prtd->params) {
 -              s3c2410_dma_free(prtd->params->channel, prtd->params->client);
 +              prtd->params->ops->flush(prtd->params->ch);
 +              prtd->params->ops->release(prtd->params->ch,
 +                                      prtd->params->client);
                prtd->params = NULL;
        }
  
@@@ -222,9 -226,23 +223,9 @@@ static int dma_prepare(struct snd_pcm_s
        if (!prtd->params)
                return 0;
  
 -      /* channel needs configuring for mem=>device, increment memory addr,
 -       * sync to pclk, half-word transfers to the IIS-FIFO. */
 -      if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 -              s3c2410_dma_devconfig(prtd->params->channel,
 -                                    S3C2410_DMASRC_MEM,
 -                                    prtd->params->dma_addr);
 -      } else {
 -              s3c2410_dma_devconfig(prtd->params->channel,
 -                                    S3C2410_DMASRC_HW,
 -                                    prtd->params->dma_addr);
 -      }
 -
 -      s3c2410_dma_config(prtd->params->channel,
 -                         prtd->params->dma_size);
 -
        /* flush the DMA channel */
 -      s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_FLUSH);
 +      prtd->params->ops->flush(prtd->params->ch);
 +
        prtd->dma_loaded = 0;
        prtd->dma_pos = prtd->dma_start;
  
@@@ -248,14 -266,14 +249,14 @@@ static int dma_trigger(struct snd_pcm_s
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                prtd->state |= ST_RUNNING;
 -              s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START);
 +              prtd->params->ops->trigger(prtd->params->ch);
                break;
  
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                prtd->state &= ~ST_RUNNING;
 -              s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STOP);
 +              prtd->params->ops->stop(prtd->params->ch);
                break;
  
        default:
@@@ -274,12 -292,21 +275,12 @@@ dma_pointer(struct snd_pcm_substream *s
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct runtime_data *prtd = runtime->private_data;
        unsigned long res;
 -      dma_addr_t src, dst;
  
        pr_debug("Entered %s\n", __func__);
  
 -      spin_lock(&prtd->lock);
 -      s3c2410_dma_getposition(prtd->params->channel, &src, &dst);
 -
 -      if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 -              res = dst - prtd->dma_start;
 -      else
 -              res = src - prtd->dma_start;
 -
 -      spin_unlock(&prtd->lock);
 +      res = prtd->dma_pos - prtd->dma_start;
  
 -      pr_debug("Pointer %x %x\n", src, dst);
 +      pr_debug("Pointer offset: %lu\n", res);
  
        /* we seem to be getting the odd error from the pcm library due
         * to out-of-bounds pointers. this is maybe due to the dma engine