Merge branch 'nf' of git://1984.lsi.us.es/net
author David S. Miller <davem@davemloft.net>
Sat, 24 Dec 2011 21:10:26 +0000 (16:10 -0500)
committer David S. Miller <davem@davemloft.net>
Sat, 24 Dec 2011 21:10:26 +0000 (16:10 -0500)
439 files changed:
CREDITS
Documentation/ABI/testing/sysfs-bus-rbd
Documentation/kernel-parameters.txt
Documentation/sound/alsa/soc/machine.txt
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/configs/omap1_defconfig
arch/arm/include/asm/unwind.h
arch/arm/kernel/perf_event.c
arch/arm/kernel/setup.c
arch/arm/kernel/unwind.c
arch/arm/mach-at91/at91rm9200_devices.c
arch/arm/mach-at91/at91sam9260.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/include/mach/system_rev.h
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-davinci/include/mach/psc.h
arch/arm/mach-davinci/psc.c
arch/arm/mach-exynos/mct.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-msm/devices-iommu.c
arch/arm/mach-mx5/board-mx51_babbage.c
arch/arm/mach-mx5/board-mx53_evk.c
arch/arm/mach-mx5/board-mx53_loco.c
arch/arm/mach-mx5/board-mx53_smd.c
arch/arm/mach-mx5/imx51-dt.c
arch/arm/mach-mx5/imx53-dt.c
arch/arm/mach-mxs/include/mach/mx28.h
arch/arm/mach-mxs/include/mach/mxs.h
arch/arm/mach-mxs/mach-m28evk.c
arch/arm/mach-mxs/mach-stmp378x_devb.c
arch/arm/mach-mxs/module-tx28.c
arch/arm/mach-omap1/clock_data.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/mcbsp.c
arch/arm/mach-prima2/pm.c
arch/arm/mach-prima2/prima2.c
arch/arm/mach-s3c64xx/dev-spi.c
arch/arm/mach-s3c64xx/s3c6400.c
arch/arm/mach-s3c64xx/setup-fb-24bpp.c
arch/arm/mach-s5pv210/mach-smdkv210.c
arch/arm/mach-sa1100/Makefile.boot
arch/arm/mach-shmobile/board-ag5evm.c
arch/arm/mach-shmobile/board-kota2.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/plat-mxc/cpufreq.c
arch/arm/plat-mxc/pwm.c
arch/arm/plat-samsung/dev-backlight.c
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/s390/oprofile/init.c
arch/sh/boards/board-sh7757lcr.c
arch/sparc/kernel/ds.c
arch/sparc/kernel/pci_sun4v.c
arch/sparc/kernel/prom_common.c
arch/sparc/mm/btfixup.c
arch/tile/include/asm/irq.h
arch/tile/kernel/irq.c
arch/tile/kernel/pci-dma.c
arch/tile/kernel/pci.c
arch/tile/kernel/sysfs.c
arch/tile/lib/exports.c
arch/tile/mm/homecache.c
arch/x86/Kconfig
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/hpet.c
arch/x86/mm/gup.c
arch/x86/net/bpf_jit_comp.c
arch/x86/platform/efi/efi_32.c
arch/x86/xen/setup.c
block/blk-core.c
block/cfq-iosched.c
drivers/ata/Kconfig
drivers/base/core.c
drivers/block/cciss.c
drivers/block/loop.c
drivers/block/rbd.c
drivers/block/swim3.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/firmware/iscsi_ibft.c
drivers/firmware/iscsi_ibft_find.c
drivers/gpio/gpio-da9052.c
drivers/gpio/gpio-ml-ioh.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-pl061.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/hwmon/jz4740-hwmon.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/input/misc/cma3000_d0x.c
drivers/input/mouse/synaptics.c
drivers/input/tablet/wacom_wac.c
drivers/iommu/intel-iommu.c
drivers/md/bitmap.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/raid5.c
drivers/media/common/tuners/mxl5007t.c
drivers/media/common/tuners/tda18218.c
drivers/media/rc/ati_remote.c
drivers/media/rc/keymaps/rc-ati-x10.c
drivers/media/rc/keymaps/rc-medion-x10.c
drivers/media/rc/keymaps/rc-snapstream-firefly.c
drivers/media/video/au0828/au0828-cards.c
drivers/media/video/m5mols/m5mols.h
drivers/media/video/m5mols/m5mols_core.c
drivers/media/video/mt9m111.c
drivers/media/video/mt9t112.c
drivers/media/video/omap/omap_vout.c
drivers/media/video/omap1_camera.c
drivers/media/video/omap24xxcam-dma.c
drivers/media/video/omap3isp/ispccdc.c
drivers/media/video/omap3isp/ispstat.c
drivers/media/video/omap3isp/ispvideo.c
drivers/media/video/ov6650.c
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-core.h
drivers/media/video/s5p-fimc/fimc-mdevice.c
drivers/media/video/s5p-fimc/fimc-reg.c
drivers/media/video/s5p-mfc/s5p_mfc_enc.c
drivers/media/video/s5p-tv/mixer_video.c
drivers/media/video/sh_mobile_ceu_camera.c
drivers/media/video/sh_mobile_csi2.c
drivers/media/video/soc_camera.c
drivers/mfd/ab5500-debugfs.c
drivers/mfd/ab8500-core.c
drivers/mfd/adp5520.c
drivers/mfd/da903x.c
drivers/mfd/jz4740-adc.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65910.c
drivers/mfd/twl-core.c
drivers/mfd/twl4030-irq.c
drivers/mfd/wm8994-core.c
drivers/mmc/card/block.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-cns3xxx.c
drivers/mmc/host/sdhci-dove.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-of-hlwd.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci-pltfm.c
drivers/mmc/host/sdhci-pltfm.h
drivers/mmc/host/sdhci-pxav2.c
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/tmio_mmc_pio.c
drivers/mmc/host/vub300.c
drivers/mtd/maps/plat-ram.c
drivers/mtd/maps/pxa2xx-flash.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/ndfc.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/usb/asix.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/of/irq.c
drivers/oprofile/oprofile_files.c
drivers/oprofile/oprofilefs.c
drivers/pci/ats.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/iov.c
drivers/pci/pci.c
drivers/ptp/ptp_clock.c
drivers/rapidio/devices/tsi721.c
drivers/rapidio/devices/tsi721.h
drivers/rtc/interface.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-s3c.c
drivers/s390/scsi/zfcp_scsi.c
drivers/sbus/char/bbc_i2c.c
drivers/sbus/char/display7seg.c
drivers/sbus/char/envctrl.c
drivers/sbus/char/flash.c
drivers/sbus/char/uctrl.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/spi/Kconfig
drivers/spi/spi-ath79.c
drivers/spi/spi-gpio.c
drivers/spi/spi-nuc900.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/tidspbridge/core/dsp-clock.c
drivers/staging/tidspbridge/rmgr/drv_interface.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_alua.c
drivers/target/target_core_cdb.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_rd.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/usb/class/cdc-acm.c
drivers/usb/dwc3/core.c
drivers/usb/gadget/epautoconf.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/host/isp1760-if.c
drivers/usb/musb/musb_host.c
drivers/usb/renesas_usbhs/mod.c
drivers/usb/renesas_usbhs/mod_host.c
drivers/usb/serial/option.c
drivers/xen/swiotlb-xen.c
drivers/xen/xenbus/xenbus_xs.c
firmware/README.AddingFirmware
fs/btrfs/async-thread.c
fs/btrfs/async-thread.h
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/readdir.c
fs/cifs/smbencrypt.c
fs/configfs/inode.c
fs/configfs/mount.c
fs/dcache.c
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/inode.c
fs/namespace.c
fs/ncpfs/inode.c
fs/nfs/file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nilfs2/ioctl.c
fs/proc/meminfo.c
fs/proc/root.c
fs/proc/stat.c
fs/seq_file.c
fs/ubifs/super.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_export.c
fs/xfs/xfs_log.c
fs/xfs/xfs_trace.h
include/asm-generic/unistd.h
include/drm/drm_pciids.h
include/linux/blkdev.h
include/linux/clocksource.h
include/linux/compat.h
include/linux/dcache.h
include/linux/dma_remapping.h
include/linux/fs.h
include/linux/lglock.h
include/linux/log2.h
include/linux/mm.h
include/linux/mmc/card.h
include/linux/shrinker.h
include/media/soc_camera.h
include/net/dst.h
include/net/flow.h
include/net/sctp/structs.h
include/net/sock.h
include/scsi/libfcoe.h
include/target/target_core_base.h
include/target/target_core_transport.h
include/trace/events/writeback.h
include/xen/interface/io/xs_wire.h
ipc/mqueue.c
ipc/msgutil.c
kernel/cgroup.c
kernel/cpuset.c
kernel/events/core.c
kernel/lockdep.c
kernel/printk.c
kernel/sched_fair.c
kernel/sysctl_binary.c
kernel/time/alarmtimer.c
kernel/time/clocksource.c
kernel/timer.c
lib/dma-debug.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/migrate.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/percpu.c
mm/vmalloc.c
mm/vmscan.c
net/bluetooth/hci_conn.c
net/bluetooth/l2cap_core.c
net/bluetooth/rfcomm/core.c
net/bridge/br_netfilter.c
net/ceph/crush/mapper.c
net/core/flow.c
net/core/net-sysfs.c
net/core/sock.c
net/ipv4/ipconfig.c
net/ipv4/route.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/llc/af_llc.c
net/netfilter/xt_connbytes.c
net/nfc/nci/core.c
net/packet/af_packet.c
net/sched/sch_mqprio.c
net/sched/sch_netem.c
net/sctp/associola.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sunrpc/sched.c
net/sunrpc/xprt.c
net/xfrm/xfrm_policy.c
scripts/kconfig/Makefile
security/apparmor/path.c
security/integrity/evm/evm_crypto.c
security/selinux/netport.c
security/tomoyo/realpath.c
sound/atmel/ac97c.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/sis7019.c
sound/soc/codecs/Kconfig
sound/soc/codecs/jz4740.c
sound/soc/codecs/uda1380.c
sound/soc/codecs/wm8958-dsp2.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8996.c
sound/soc/imx/Kconfig
sound/soc/kirkwood/Kconfig
sound/soc/mxs/mxs-pcm.c
sound/soc/mxs/mxs-sgtl5000.c
sound/soc/pxa/Kconfig
sound/soc/pxa/hx4700.c
sound/soc/samsung/jive_wm8750.c
sound/soc/samsung/smdk2443_wm9710.c
sound/soc/soc-utils.c
tools/perf/builtin-stat.c
tools/perf/util/header.c

diff --git a/CREDITS b/CREDITS
index 07e32a8..44fce98 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -688,10 +688,13 @@ S: Oxfordshire, UK.
 
 N: Kees Cook
 E: kees@outflux.net
-W: http://outflux.net/
-P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30  1975 1FFF 4BA9 1706 3E6D
-D: Minor updates to SCSI types, added /proc/pid/maps protection
+E: kees@ubuntu.com
+E: keescook@chromium.org
+W: http://outflux.net/blog/
+P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E  6E13 8972 F4DF DC6D C026
+D: Various security things, bug fixes, and documentation.
 S: (ask for current address)
+S: Portland, Oregon
 S: USA
 
 N: Robin Cornelius
index fa72ccb..dbedafb 100644 (file)
@@ -57,13 +57,6 @@ create_snap
 
         $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
 
-rollback_snap
-
-       Rolls back data to the specified snapshot. This goes over the entire
-       list of rados blocks and sends a rollback command to each.
-
-        $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
-
 snap_*
 
        A directory per each snapshot
index 5e22c3f..81c287f 100644 (file)
@@ -320,7 +320,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        on: enable for both 32- and 64-bit processes
                        off: disable for both 32- and 64-bit processes
 
-       amd_iommu=      [HW,X86-84]
+       amd_iommu=      [HW,X86-64]
                        Pass parameters to the AMD IOMMU driver in the system.
                        Possible values are:
                        fullflush - enable flushing of IO/TLB entries when
index 3e2ec9c..d50c14d 100644 (file)
@@ -50,8 +50,7 @@ Machine DAI Configuration
 The machine DAI configuration glues all the codec and CPU DAIs together. It can
 also be used to set up the DAI system clock and for any machine related DAI
 initialisation e.g. the machine audio map can be connected to the codec audio
-map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c
-for examples.
+map, unconnected codec pins can be set as such.
 
 struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
 
@@ -83,8 +82,7 @@ Machine Power Map
 The machine driver can optionally extend the codec power map and to become an
 audio power map of the audio subsystem. This allows for automatic power up/down
 of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
-sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for
-details.
+sockets in the machine init function.
 
 
 Machine Controls
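
The machine.txt hunks above still rely on struct snd_soc_dai_link to glue a CPU DAI to a codec DAI; only the in-tree corgi/spitz example references were dropped. A minimal, illustrative link definition in the style of that document follows. Every name in it ("board-codec", board_audio_init, the DAI and codec device strings) is a placeholder for illustration, not something taken from this patch.

#include <sound/soc.h>

/* Illustrative only: machine-specific setup run when the link is probed,
 * e.g. connecting codec pins to the board's jack widgets. */
static int board_audio_init(struct snd_soc_pcm_runtime *rtd)
{
	return 0;
}

static struct snd_soc_dai_link board_dai_link = {
	.name		= "board-codec",	/* link name, placeholder */
	.stream_name	= "board-codec HiFi",
	.cpu_dai_name	= "cpu-dai.0",		/* placeholder CPU DAI */
	.codec_dai_name	= "codec-hifi",		/* placeholder codec DAI */
	.platform_name	= "cpu-pcm-audio",	/* placeholder PCM/DMA platform */
	.codec_name	= "codec.0-001a",	/* placeholder codec device */
	.init		= board_audio_init,
};

The .init callback is where the machine audio map and jack wiring described in the Machine Power Map section would normally be set up; the link itself is then referenced from the machine driver's snd_soc_card.
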
index 4475602..6afba60 100644 (file)
@@ -511,8 +511,8 @@ M:  Joerg Roedel <joerg.roedel@amd.com>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
 S:     Supported
-F:     arch/x86/kernel/amd_iommu*.c
-F:     arch/x86/include/asm/amd_iommu*.h
+F:     drivers/iommu/amd_iommu*.[ch]
+F:     include/linux/amd-iommu.h
 
 AMD MICROCODE UPDATE SUPPORT
 M:     Andreas Herrmann <andreas.herrmann3@amd.com>
@@ -1054,35 +1054,18 @@ ARM/SAMSUNG ARM ARCHITECTURES
 M:     Ben Dooks <ben-linux@fluff.org>
 M:     Kukjin Kim <kgene.kim@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 W:     http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/plat-samsung/
 F:     arch/arm/plat-s3c24xx/
 F:     arch/arm/plat-s5p/
+F:     arch/arm/mach-s3c24*/
+F:     arch/arm/mach-s3c64xx/
 F:     drivers/*/*s3c2410*
 F:     drivers/*/*/*s3c2410*
-
-ARM/S3C2410 ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2410/
-
-ARM/S3C244x ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2440/
-F:     arch/arm/mach-s3c2443/
-
-ARM/S3C64xx ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c64xx/
+F:     drivers/spi/spi-s3c*
+F:     sound/soc/samsung/*
 
 ARM/S5P EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene.kim@samsung.com>
@@ -3118,6 +3101,7 @@ F:        include/linux/hid*
 
 HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Maintained
 F:     Documentation/timers/
 F:     kernel/hrtimer.c
@@ -3627,7 +3611,7 @@ F:        net/irda/
 IRQ SUBSYSTEM
 M:     Thomas Gleixner <tglx@linutronix.de>
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:     kernel/irq/
 
 ISAPNP
@@ -4115,7 +4099,7 @@ F:        drivers/hwmon/lm90.c
 LOCKDEP AND LOCKSTAT
 M:     Peter Zijlstra <peterz@infradead.org>
 M:     Ingo Molnar <mingo@redhat.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
 S:     Maintained
 F:     Documentation/lockdep*.txt
 F:     Documentation/lockstat.txt
@@ -4297,7 +4281,9 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:     Maintained
 F:     Documentation/dvb/
 F:     Documentation/video4linux/
+F:     Documentation/DocBook/media/
 F:     drivers/media/
+F:     drivers/staging/media/
 F:     include/media/
 F:     include/linux/dvb/
 F:     include/linux/videodev*.h
@@ -4319,8 +4305,9 @@ F:        include/linux/mm.h
 F:     mm/
 
 MEMORY RESOURCE CONTROLLER
+M:     Johannes Weiner <hannes@cmpxchg.org>
+M:     Michal Hocko <mhocko@suse.cz>
 M:     Balbir Singh <bsingharora@gmail.com>
-M:     Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
 M:     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
@@ -5102,6 +5089,7 @@ M:        Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:     Paul Mackerras <paulus@samba.org>
 M:     Ingo Molnar <mingo@elte.hu>
 M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Supported
 F:     kernel/events/*
 F:     include/linux/perf_event.h
@@ -5181,6 +5169,7 @@ F:        drivers/scsi/pm8001/
 
 POSIX CLOCKS and TIMERS
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Supported
 F:     fs/timerfd.c
 F:     include/linux/timer*
@@ -5696,6 +5685,7 @@ F:        drivers/dma/dw_dmac.c
 TIMEKEEPING, NTP
 M:     John Stultz <johnstul@us.ibm.com>
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Supported
 F:     include/linux/clocksource.h
 F:     include/linux/time.h
@@ -5720,6 +5710,7 @@ F:        drivers/watchdog/sc1200wdt.c
 SCHEDULER
 M:     Ingo Molnar <mingo@elte.hu>
 M:     Peter Zijlstra <peterz@infradead.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:     Maintained
 F:     kernel/sched*
 F:     include/linux/sched.h
@@ -6647,7 +6638,7 @@ TRACING
 M:     Steven Rostedt <rostedt@goodmis.org>
 M:     Frederic Weisbecker <fweisbec@gmail.com>
 M:     Ingo Molnar <mingo@redhat.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Maintained
 F:     Documentation/trace/ftrace.txt
 F:     arch/*/*/*/ftrace.h
@@ -7397,7 +7388,7 @@ M:        Thomas Gleixner <tglx@linutronix.de>
 M:     Ingo Molnar <mingo@redhat.com>
 M:     "H. Peter Anvin" <hpa@zytor.com>
 M:     x86@kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:     Maintained
 F:     Documentation/x86/
 F:     arch/x86/
index 12aafc2..ea51081 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
index e084b7e..776d76b 100644 (file)
@@ -220,8 +220,9 @@ config NEED_MACH_MEMORY_H
          be avoided when possible.
 
 config PHYS_OFFSET
-       hex "Physical address of main memory"
+       hex "Physical address of main memory" if MMU
        depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+       default DRAM_BASE if !MMU
        help
          Please provide the physical address corresponding to the
          location of main memory in your system.
index a7e7775..945a34f 100644 (file)
@@ -48,12 +48,7 @@ CONFIG_MACH_SX1=y
 CONFIG_MACH_NOKIA770=y
 CONFIG_MACH_AMS_DELTA=y
 CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_ARM_216MHZ=y
-CONFIG_OMAP_ARM_195MHZ=y
-CONFIG_OMAP_ARM_192MHZ=y
 CONFIG_OMAP_ARM_182MHZ=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
 # CONFIG_ARM_THUMB is not set
 CONFIG_PCCARD=y
 CONFIG_OMAP_CF=y
index a5edf42..d1c3f3a 100644 (file)
@@ -30,14 +30,15 @@ enum unwind_reason_code {
 };
 
 struct unwind_idx {
-       unsigned long addr;
+       unsigned long addr_offset;
        unsigned long insn;
 };
 
 struct unwind_table {
        struct list_head list;
-       struct unwind_idx *start;
-       struct unwind_idx *stop;
+       const struct unwind_idx *start;
+       const struct unwind_idx *origin;
+       const struct unwind_idx *stop;
        unsigned long begin_addr;
        unsigned long end_addr;
 };
@@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
 extern void unwind_table_del(struct unwind_table *tab);
 extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 
-#ifdef CONFIG_ARM_UNWIND
-extern int __init unwind_init(void);
-#else
-static inline int __init unwind_init(void)
-{
-       return 0;
-}
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARM_UNWIND
index 8e9c98e..88b0941 100644 (file)
@@ -640,6 +640,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
 
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 {
+       if (!cpu_pmu)
+               return -ENODEV;
+
        cpu_pmu->plat_device = pdev;
        return 0;
 }
index 3448a3f..8fc2c8f 100644 (file)
@@ -895,8 +895,6 @@ void __init setup_arch(char **cmdline_p)
 {
        struct machine_desc *mdesc;
 
-       unwind_init();
-
        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
@@ -904,6 +902,12 @@ void __init setup_arch(char **cmdline_p)
        machine_desc = mdesc;
        machine_name = mdesc->name;
 
+#ifdef CONFIG_ZONE_DMA
+       if (mdesc->dma_zone_size) {
+               extern unsigned long arm_dma_zone_size;
+               arm_dma_zone_size = mdesc->dma_zone_size;
+       }
+#endif
        if (mdesc->soft_reboot)
                reboot_setup("s");
 
@@ -934,12 +938,6 @@ void __init setup_arch(char **cmdline_p)
 
        tcm_init();
 
-#ifdef CONFIG_ZONE_DMA
-       if (mdesc->dma_zone_size) {
-               extern unsigned long arm_dma_zone_size;
-               arm_dma_zone_size = mdesc->dma_zone_size;
-       }
-#endif
 #ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
 #endif
index e7e8365..00df012 100644 (file)
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
 
 struct unwind_ctrl_block {
        unsigned long vrs[16];          /* virtual register set */
-       unsigned long *insn;            /* pointer to the current instructions word */
+       const unsigned long *insn;      /* pointer to the current instructions word */
        int entries;                    /* number of entries left to interpret */
        int byte;                       /* current byte number in the instructions word */
 };
@@ -83,8 +83,9 @@ enum regs {
        PC = 15
 };
 
-extern struct unwind_idx __start_unwind_idx[];
-extern struct unwind_idx __stop_unwind_idx[];
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
 
 static DEFINE_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
@@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
 })
 
 /*
- * Binary search in the unwind index. The entries entries are
+ * Binary search in the unwind index. The entries are
  * guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
  */
-static struct unwind_idx *search_index(unsigned long addr,
-                                      struct unwind_idx *first,
-                                      struct unwind_idx *last)
+static const struct unwind_idx *search_index(unsigned long addr,
+                                      const struct unwind_idx *start,
+                                      const struct unwind_idx *origin,
+                                      const struct unwind_idx *stop)
 {
-       pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+       unsigned long addr_prel31;
+
+       pr_debug("%s(%08lx, %p, %p, %p)\n",
+                       __func__, addr, start, origin, stop);
+
+       /*
+        * only search in the section with the matching sign. This way the
+        * prel31 numbers can be compared as unsigned longs.
+        */
+       if (addr < (unsigned long)start)
+               /* negative offsets: [start; origin) */
+               stop = origin;
+       else
+               /* positive offsets: [origin; stop) */
+               start = origin;
+
+       /* prel31 for address relavive to start */
+       addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
 
-       if (addr < first->addr) {
+       while (start < stop - 1) {
+               const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+               /*
+                * As addr_prel31 is relative to start an offset is needed to
+                * make it relative to mid.
+                */
+               if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+                               mid->addr_offset)
+                       stop = mid;
+               else {
+                       /* keep addr_prel31 relative to start */
+                       addr_prel31 -= ((unsigned long)mid -
+                                       (unsigned long)start);
+                       start = mid;
+               }
+       }
+
+       if (likely(start->addr_offset <= addr_prel31))
+               return start;
+       else {
                pr_warning("unwind: Unknown symbol address %08lx\n", addr);
                return NULL;
-       } else if (addr >= last->addr)
-               return last;
+       }
+}
 
-       while (first < last - 1) {
-               struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+static const struct unwind_idx *unwind_find_origin(
+               const struct unwind_idx *start, const struct unwind_idx *stop)
+{
+       pr_debug("%s(%p, %p)\n", __func__, start, stop);
+       while (start < stop) {
+               const struct unwind_idx *mid = start + ((stop - start) >> 1);
 
-               if (addr < mid->addr)
-                       last = mid;
+               if (mid->addr_offset >= 0x40000000)
+                       /* negative offset */
+                       start = mid + 1;
                else
-                       first = mid;
+                       /* positive offset */
+                       stop = mid;
        }
-
-       return first;
+       pr_debug("%s -> %p\n", __func__, stop);
+       return stop;
 }
 
-static struct unwind_idx *unwind_find_idx(unsigned long addr)
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
 {
-       struct unwind_idx *idx = NULL;
+       const struct unwind_idx *idx = NULL;
        unsigned long flags;
 
        pr_debug("%s(%08lx)\n", __func__, addr);
 
-       if (core_kernel_text(addr))
+       if (core_kernel_text(addr)) {
+               if (unlikely(!__origin_unwind_idx))
+                       __origin_unwind_idx =
+                               unwind_find_origin(__start_unwind_idx,
+                                               __stop_unwind_idx);
+
                /* main unwind table */
                idx = search_index(addr, __start_unwind_idx,
-                                  __stop_unwind_idx - 1);
-       else {
+                                  __origin_unwind_idx,
+                                  __stop_unwind_idx);
+       } else {
                /* module unwind tables */
                struct unwind_table *table;
 
@@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
                        if (addr >= table->begin_addr &&
                            addr < table->end_addr) {
                                idx = search_index(addr, table->start,
-                                                  table->stop - 1);
+                                                  table->origin,
+                                                  table->stop);
                                /* Move-to-front to exploit common traces */
                                list_move(&table->list, &unwind_tables);
                                break;
@@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 int unwind_frame(struct stackframe *frame)
 {
        unsigned long high, low;
-       struct unwind_idx *idx;
+       const struct unwind_idx *idx;
        struct unwind_ctrl_block ctrl;
 
        /* only go to a higher address on the stack */
@@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
                                      unsigned long text_size)
 {
        unsigned long flags;
-       struct unwind_idx *idx;
        struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
 
        pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
        if (!tab)
                return tab;
 
-       tab->start = (struct unwind_idx *)start;
-       tab->stop = (struct unwind_idx *)(start + size);
+       tab->start = (const struct unwind_idx *)start;
+       tab->stop = (const struct unwind_idx *)(start + size);
+       tab->origin = unwind_find_origin(tab->start, tab->stop);
        tab->begin_addr = text_addr;
        tab->end_addr = text_addr + text_size;
 
-       /* Convert the symbol addresses to absolute values */
-       for (idx = tab->start; idx < tab->stop; idx++)
-               idx->addr = prel31_to_addr(&idx->addr);
-
        spin_lock_irqsave(&unwind_lock, flags);
        list_add_tail(&tab->list, &unwind_tables);
        spin_unlock_irqrestore(&unwind_lock, flags);
@@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
 
        kfree(tab);
 }
-
-int __init unwind_init(void)
-{
-       struct unwind_idx *idx;
-
-       /* Convert the symbol addresses to absolute values */
-       for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
-               idx->addr = prel31_to_addr(&idx->addr);
-
-       pr_debug("unwind: ARM stack unwinding initialised\n");
-
-       return 0;
-}
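
The reworked search_index()/unwind_find_origin() above depend on the ARM EHABI prel31 encoding: each unwind index entry stores a 31-bit offset relative to the entry's own location (decoded in-kernel by the prel31_to_addr() macro that the removed conversion loops used), so raw values at or above 0x40000000 are negative offsets and sort before the origin, while smaller values are positive and sort after it. A rough, self-contained sketch of that decoding step, assuming nothing beyond standard C (the function name is illustrative, not from the patch):

#include <stdint.h>

/* Illustrative prel31 decode, not copied from the patch: an unwind index
 * entry holds a 31-bit signed offset from the entry's own address, so
 * sign-extend bit 30 and add the result back to the entry address. */
static uintptr_t prel31_to_addr_sketch(const uint32_t *entry)
{
	uint32_t prel31 = *entry & 0x7fffffffU;		/* low 31 bits */

	/* bit 30 is the sign bit of the 31-bit offset */
	int64_t offset = (int64_t)prel31 -
			 ((prel31 & 0x40000000U) ? INT64_C(0x80000000) : 0);

	return (uintptr_t)((intptr_t)entry + (intptr_t)offset);
}

Because the offsets stay position-relative, the index tables can remain const and read-only, which is why the boot-time patching previously done by unwind_init() and unwind_table_add() could be removed above.
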
index 66591fa..ad93068 100644 (file)
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index b84a9f6..0d20677 100644 (file)
@@ -195,9 +195,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
-       CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
-       CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
-       CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+       CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+       CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+       CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
        CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
        /* more usart lookup table for DT entries */
        CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
index 25e3464..629fa97 100644 (file)
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index ae78f4d..a178b58 100644 (file)
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index ad017eb..d5fbac9 100644 (file)
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index 8f48660..ec164a4 100644 (file)
@@ -19,7 +19,7 @@
 #define BOARD_HAVE_NAND_16BIT  (1 << 31)
 static inline int board_have_nand_16bit(void)
 {
-       return system_rev & BOARD_HAVE_NAND_16BIT;
+       return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
 }
 
 #endif /* __ARCH_SYSTEM_REV_H__ */
index 1d7d249..6659a90 100644 (file)
@@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
        .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
        .tdm_slots      = 2,
        .serial_dir     = da850_iis_serializer_direction,
-       .asp_chan_q     = EVENTQ_1,
+       .asp_chan_q     = EVENTQ_0,
        .version        = MCASP_VERSION_2,
        .txnumevt       = 1,
        .rxnumevt       = 1,
index 1918ae7..46e1f41 100644 (file)
@@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
                /* UBL (a few copies) plus U-Boot */
                .name           = "bootloader",
                .offset         = 0,
-               .size           = 28 * NAND_BLOCK_SIZE,
+               .size           = 30 * NAND_BLOCK_SIZE,
                .mask_flags     = MTD_WRITEABLE, /* force read-only */
        }, {
                /* U-Boot environment */
index e574d7f..635bf77 100644 (file)
@@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
        int val;
        u32 value;
 
-       if (!vpif_vsclkdis_reg || !cpld_client)
+       if (!vpif_vidclkctl_reg || !cpld_client)
                return -ENXIO;
 
        val = i2c_smbus_read_byte(cpld_client);
@@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                return val;
 
        spin_lock_irqsave(&vpif_reg_lock, flags);
-       value = __raw_readl(vpif_vsclkdis_reg);
+       value = __raw_readl(vpif_vidclkctl_reg);
        if (mux_mode) {
                val &= VPIF_INPUT_TWO_CHANNEL;
                value |= VIDCH1CLK;
@@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                val |= VPIF_INPUT_ONE_CHANNEL;
                value &= ~VIDCH1CLK;
        }
-       __raw_writel(value, vpif_vsclkdis_reg);
+       __raw_writel(value, vpif_vidclkctl_reg);
        spin_unlock_irqrestore(&vpif_reg_lock, flags);
 
        err = i2c_smbus_write_byte(cpld_client, val);
index 0b68ed5..af27c13 100644 (file)
@@ -161,7 +161,6 @@ static struct clk dsp_clk = {
        .name = "dsp",
        .parent = &pll1_sysclk1,
        .lpsc = DM646X_LPSC_C64X_CPU,
-       .flags = PSC_DSP,
        .usecount = 1,                  /* REVISIT how to disable? */
 };
 
index fa59c09..8bc3fc2 100644 (file)
 #define PTCMD          0x120
 #define PTSTAT         0x128
 #define PDSTAT         0x200
-#define PDCTL1         0x304
+#define PDCTL          0x300
 #define MDSTAT         0x800
 #define MDCTL          0xA00
 
 #define PSC_STATE_ENABLE       3
 
 #define MDSTAT_STATE_MASK      0x3f
+#define PDSTAT_STATE_MASK      0x1f
 #define MDCTL_FORCE            BIT(31)
+#define PDCTL_NEXT             BIT(1)
+#define PDCTL_EPCGOOD          BIT(8)
 
 #ifndef __ASSEMBLER__
 
index 1fb6bdf..d7e210f 100644 (file)
@@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
 void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                unsigned int id, bool enable, u32 flags)
 {
-       u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
+       u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
        void __iomem *psc_base;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
        u32 next_state = PSC_STATE_ENABLE;
@@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                mdctl |= MDCTL_FORCE;
        __raw_writel(mdctl, psc_base + MDCTL + 4 * id);
 
-       pdstat = __raw_readl(psc_base + PDSTAT);
-       if ((pdstat & 0x00000001) == 0) {
-               pdctl1 = __raw_readl(psc_base + PDCTL1);
-               pdctl1 |= 0x1;
-               __raw_writel(pdctl1, psc_base + PDCTL1);
+       pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
+       if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+               pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+               pdctl |= PDCTL_NEXT;
+               __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
 
                ptcmd = 1 << domain;
                __raw_writel(ptcmd, psc_base + PTCMD);
@@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                        epcpr = __raw_readl(psc_base + EPCPR);
                } while ((((epcpr >> domain) & 1) == 0));
 
-               pdctl1 = __raw_readl(psc_base + PDCTL1);
-               pdctl1 |= 0x100;
-               __raw_writel(pdctl1, psc_base + PDCTL1);
+               pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+               pdctl |= PDCTL_EPCGOOD;
+               __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
        } else {
                ptcmd = 1 << domain;
                __raw_writel(ptcmd, psc_base + PTCMD);
index 97343df..85b5527 100644 (file)
@@ -44,8 +44,6 @@ struct mct_clock_event_device {
        char name[10];
 };
 
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
 static void exynos4_mct_write(unsigned int value, void *addr)
 {
        void __iomem *stat_addr;
@@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
 /* Clock event handling */
 static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
 {
@@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
 void local_timer_stop(struct clock_event_device *evt)
 {
+       unsigned int cpu = smp_processor_id();
        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
        if (mct_int_type == MCT_INT_SPI)
-               disable_irq(evt->irq);
+               if (cpu == 0)
+                       remove_irq(evt->irq, &mct_tick0_event_irq);
+               else
+                       remove_irq(evt->irq, &mct_tick1_event_irq);
        else
                disable_percpu_irq(IRQ_MCT_LOCALTIMER);
 }
@@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
 
        clk_rate = clk_get_rate(mct_clk);
 
+#ifdef CONFIG_LOCAL_TIMERS
        if (mct_int_type == MCT_INT_PPI) {
                int err;
 
@@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     IRQ_MCT_LOCALTIMER, err);
        }
+#endif /* CONFIG_LOCAL_TIMERS */
 }
 
 static void __init exynos4_timer_init(void)
index 9cd860a..8deb012 100644 (file)
@@ -37,14 +37,15 @@ static void __init imx6q_map_io(void)
        imx6q_clock_map_io();
 }
 
-static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
+static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 7; /* imx6q gets 7 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx6q_irq_match[] __initconst = {
index 24030d0..0fb7a17 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/bootmem.h>
+#include <linux/module.h>
 #include <mach/irqs.h>
 #include <mach/iommu.h>
 
index 5c83760..24994bb 100644 (file)
@@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
 {
        iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
        iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
-               PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+               PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
 
        imx51_soc_init();
 
index 6bea31a..64bbfce 100644 (file)
@@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
        gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_evk_fec_pdata = {
+static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 7678f77..237bdec 100644 (file)
@@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
        gpio_set_value(LOCO_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_loco_fec_data = {
+static const struct fec_platform_data mx53_loco_fec_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 59c0845..d42132a 100644 (file)
@@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
        gpio_set_value(SMD_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_smd_fec_data = {
+static const struct fec_platform_data mx53_smd_fec_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index ccc6158..596edd9 100644 (file)
@@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
        { /* sentinel */ }
 };
 
-static void __init imx51_tzic_add_irq_domain(struct device_node *np,
+static int __init imx51_tzic_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
        irq_domain_add_simple(np, 0);
+       return 0;
 }
 
-static void __init imx51_gpio_add_irq_domain(struct device_node *np,
+static int __init imx51_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 4; /* imx51 gets 4 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx51_irq_match[] __initconst = {
index ccaa0b8..85bfd5f 100644 (file)
@@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
        { /* sentinel */ }
 };
 
-static void __init imx53_tzic_add_irq_domain(struct device_node *np,
+static int __init imx53_tzic_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
        irq_domain_add_simple(np, 0);
+       return 0;
 }
 
-static void __init imx53_gpio_add_irq_domain(struct device_node *np,
+static int __init imx53_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 7; /* imx53 gets 7 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx53_irq_match[] __initconst = {
index 75d8611..30c7990 100644 (file)
 #define MX28_INT_CAN1                  9
 #define MX28_INT_LRADC_TOUCH           10
 #define MX28_INT_HSADC                 13
-#define MX28_INT_IRADC_THRESH0         14
-#define MX28_INT_IRADC_THRESH1         15
+#define MX28_INT_LRADC_THRESH0         14
+#define MX28_INT_LRADC_THRESH1         15
 #define MX28_INT_LRADC_CH0             16
 #define MX28_INT_LRADC_CH1             17
 #define MX28_INT_LRADC_CH2             18
index 0d2d2b4..bde5f66 100644 (file)
@@ -30,6 +30,7 @@
  */
 #define cpu_is_mx23()          (                                       \
                machine_is_mx23evk() ||                                 \
+               machine_is_stmp378x() ||                                \
                0)
 #define cpu_is_mx28()          (                                       \
                machine_is_mx28evk() ||                                 \
index 3b1681e..6b00577 100644 (file)
@@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = {
 MACHINE_START(M28EVK, "DENX M28 EVK")
        .map_io         = mx28_map_io,
        .init_irq       = mx28_init_irq,
-       .init_machine   = m28evk_init,
        .timer          = &m28evk_timer,
+       .init_machine   = m28evk_init,
 MACHINE_END
index 177e531..6834dea 100644 (file)
@@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = {
 MACHINE_START(STMP378X, "STMP378X")
        .map_io         = mx23_map_io,
        .init_irq       = mx23_init_irq,
-       .init_machine   = stmp378x_dvb_init,
        .timer          = &stmp378x_dvb_timer,
+       .init_machine   = stmp378x_dvb_init,
 MACHINE_END
index 0fcff47..9a7b08b 100644 (file)
@@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
        MX28_PAD_ENET0_CRS__ENET1_RX_EN,
 };
 
-static struct fec_platform_data tx28_fec0_data = {
+static const struct fec_platform_data tx28_fec0_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
-static struct fec_platform_data tx28_fec1_data = {
+static const struct fec_platform_data tx28_fec1_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 1297bb5..9ff90a7 100644 (file)
@@ -16,6 +16,8 @@
 
 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
 #include <linux/io.h>
 
 #include <asm/mach-types.h>  /* for machine_is_* */
@@ -927,16 +929,22 @@ int __init omap1_clk_init(void)
 
 void __init omap1_clk_late_init(void)
 {
-       if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE)
+       unsigned long rate = ck_dpll1.rate;
+
+       if (rate >= OMAP1_DPLL1_SANE_VALUE)
                return;
 
+       /* System booting at unusable rate, force reprogramming of DPLL1 */
+       ck_dpll1_p->rate = 0;
+
        /* Find the highest supported frequency and enable it */
        if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
                pr_err("System frequencies not set, using default. Check your config.\n");
                omap_writew(0x2290, DPLL_CTL);
-               omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
+               omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
                ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
        }
        propagate_rate(&ck_dpll1);
        omap1_show_rates();
+       loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
 }
index ba1aa07..c15c5c9 100644 (file)
@@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
 static void __init rx51_charger_init(void)
 {
        WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
-               GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+               GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
 
        platform_device_register(&rx51_charger_device);
 }
index 292eee3..28fcb27 100644 (file)
@@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
                pdata->reg_size = 4;
                pdata->has_ccr = true;
        }
+       pdata->set_clk_src = omap2_mcbsp_set_clk_src;
+       if (id == 1)
+               pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
 
        if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
                if (id == 2)
@@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
                                        name, oh->name);
                return PTR_ERR(pdev);
        }
-       pdata->set_clk_src = omap2_mcbsp_set_clk_src;
-       if (id == 1)
-               pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
        omap_mcbsp_count++;
        return 0;
 }
index cb53160..26ebb57 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/suspend.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
index ef555c0..a12b689 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <asm/sizes.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <linux/of.h>
index 5e6b420..3341fd1 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/export.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/gpio.h>
index 7a3bc32..51c00f2 100644 (file)
@@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void)
        s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
 }
 
-struct sysdev_class s3c6400_sysclass = {
+static struct sysdev_class s3c6400_sysclass = {
        .name   = "s3c6400-core",
 };
 
index 83d2afb..2cf8002 100644 (file)
@@ -20,7 +20,7 @@
 #include <plat/fb.h>
 #include <plat/gpio-cfg.h>
 
-extern void s3c64xx_fb_gpio_setup_24bpp(void)
+void s3c64xx_fb_gpio_setup_24bpp(void)
 {
        s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
        s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
index a9106c3..8662ef6 100644 (file)
@@ -273,6 +273,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
 
 static struct platform_pwm_backlight_data smdkv210_bl_data = {
        .pwm_id = 3,
+       .pwm_period_ns = 1000,
 };
 
 static void __init smdkv210_map_io(void)
index 5a616f6..f7951aa 100644 (file)
@@ -1,5 +1,5 @@
-ifeq ($(CONFIG_ARCH_SA1100),y)
-   zreladdr-$(CONFIG_SA1111)           += 0xc0208000
+ifeq ($(CONFIG_SA1111),y)
+   zreladdr-y  += 0xc0208000
 else
    zreladdr-y  += 0xc0008000
 endif
index b862e9f..7119b87 100644 (file)
@@ -607,6 +607,7 @@ struct sys_timer ag5evm_timer = {
 
 MACHINE_START(AG5EVM, "ag5evm")
        .map_io         = ag5evm_map_io,
+       .nr_irqs        = NR_IRQS_LEGACY,
        .init_irq       = sh73a0_init_irq,
        .handle_irq     = shmobile_handle_irq_gic,
        .init_machine   = ag5evm_init,
index bd9a784..f44150b 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/input/sh_keysc.h>
 #include <linux/gpio_keys.h>
 #include <linux/leds.h>
+#include <linux/platform_data/leds-renesas-tpu.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
 #include <linux/mfd/tmio.h>
@@ -56,7 +57,7 @@ static struct resource smsc9220_resources[] = {
                .flags          = IORESOURCE_MEM,
        },
        [1] = {
-               .start          = gic_spi(33), /* PINTA2 @ PORT144 */
+               .start          = SH73A0_PINT0_IRQ(2), /* PINTA2 */
                .flags          = IORESOURCE_IRQ,
        },
 };
@@ -157,10 +158,6 @@ static struct platform_device gpio_keys_device = {
 #define GPIO_LED(n, g) { .name = n, .gpio = g }
 
 static struct gpio_led gpio_leds[] = {
-       GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */
-       GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */
-       GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */
-       GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */
        GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */
        GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */
        GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */
@@ -179,6 +176,119 @@ static struct platform_device gpio_leds_device = {
        },
 };
 
+/* TPU LED */
+static struct led_renesas_tpu_config led_renesas_tpu12_pdata = {
+       .name           = "V2513",
+       .pin_gpio_fn    = GPIO_FN_TPU1TO2,
+       .pin_gpio       = GPIO_PORT153,
+       .channel_offset = 0x90,
+       .timer_bit = 2,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu12_resources[] = {
+       [0] = {
+               .name   = "TPU12",
+               .start  = 0xe6610090,
+               .end    = 0xe66100b5,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu12_device = {
+       .name = "leds-renesas-tpu",
+       .id = 12,
+       .dev = {
+               .platform_data  = &led_renesas_tpu12_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu12_resources),
+       .resource       = tpu12_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu41_pdata = {
+       .name           = "V2514",
+       .pin_gpio_fn    = GPIO_FN_TPU4TO1,
+       .pin_gpio       = GPIO_PORT199,
+       .channel_offset = 0x50,
+       .timer_bit = 1,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu41_resources[] = {
+       [0] = {
+               .name   = "TPU41",
+               .start  = 0xe6640050,
+               .end    = 0xe6640075,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu41_device = {
+       .name = "leds-renesas-tpu",
+       .id = 41,
+       .dev = {
+               .platform_data  = &led_renesas_tpu41_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu41_resources),
+       .resource       = tpu41_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu21_pdata = {
+       .name           = "V2515",
+       .pin_gpio_fn    = GPIO_FN_TPU2TO1,
+       .pin_gpio       = GPIO_PORT197,
+       .channel_offset = 0x50,
+       .timer_bit = 1,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu21_resources[] = {
+       [0] = {
+               .name   = "TPU21",
+               .start  = 0xe6620050,
+               .end    = 0xe6620075,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu21_device = {
+       .name = "leds-renesas-tpu",
+       .id = 21,
+       .dev = {
+               .platform_data  = &led_renesas_tpu21_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu21_resources),
+       .resource       = tpu21_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu30_pdata = {
+       .name           = "KEYLED",
+       .pin_gpio_fn    = GPIO_FN_TPU3TO0,
+       .pin_gpio       = GPIO_PORT163,
+       .channel_offset = 0x10,
+       .timer_bit = 0,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu30_resources[] = {
+       [0] = {
+               .name   = "TPU30",
+               .start  = 0xe6630010,
+               .end    = 0xe6630035,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu30_device = {
+       .name = "leds-renesas-tpu",
+       .id = 30,
+       .dev = {
+               .platform_data  = &led_renesas_tpu30_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu30_resources),
+       .resource       = tpu30_resources,
+};
+
 /* MMCIF */
 static struct resource mmcif_resources[] = {
        [0] = {
@@ -291,6 +401,10 @@ static struct platform_device *kota2_devices[] __initdata = {
        &keysc_device,
        &gpio_keys_device,
        &gpio_leds_device,
+       &leds_tpu12_device,
+       &leds_tpu41_device,
+       &leds_tpu21_device,
+       &leds_tpu30_device,
        &mmcif_device,
        &sdhi0_device,
        &sdhi1_device,
@@ -317,18 +431,6 @@ static void __init kota2_map_io(void)
        shmobile_setup_console();
 }
 
-#define PINTER0A       0xe69000a0
-#define PINTCR0A       0xe69000b0
-
-void __init kota2_init_irq(void)
-{
-       sh73a0_init_irq();
-
-       /* setup PINT: enable PINTA2 as active low */
-       __raw_writel(1 << 29, PINTER0A);
-       __raw_writew(2 << 10, PINTCR0A);
-}
-
 static void __init kota2_init(void)
 {
        sh73a0_pinmux_init();
@@ -447,7 +549,8 @@ struct sys_timer kota2_timer = {
 
 MACHINE_START(KOTA2, "kota2")
        .map_io         = kota2_map_io,
-       .init_irq       = kota2_init_irq,
+       .nr_irqs        = NR_IRQS_LEGACY,
+       .init_irq       = sh73a0_init_irq,
        .handle_irq     = shmobile_handle_irq_gic,
        .init_machine   = kota2_init,
        .timer          = &kota2_timer,
index 61a846b..1370a89 100644
@@ -113,6 +113,12 @@ static struct clk main_clk = {
        .ops            = &main_clk_ops,
 };
 
+/* Divide Main clock by two */
+static struct clk main_div2_clk = {
+       .ops            = &div2_clk_ops,
+       .parent         = &main_clk,
+};
+
 /* PLL0, PLL1, PLL2, PLL3 */
 static unsigned long pll_recalc(struct clk *clk)
 {
@@ -181,6 +187,7 @@ static struct clk *main_clks[] = {
        &extal1_div2_clk,
        &extal2_div2_clk,
        &main_clk,
+       &main_div2_clk,
        &pll0_clk,
        &pll1_clk,
        &pll2_clk,
@@ -243,7 +250,7 @@ static struct clk div6_clks[DIV6_NR] = {
        [DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
        [DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
        [DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
-       [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
+       [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT),
        [DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
        [DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
        [DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
@@ -268,6 +275,7 @@ enum { MSTP001,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
        MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
        MSTP314, MSTP313, MSTP312, MSTP311,
+       MSTP303, MSTP302, MSTP301, MSTP300,
        MSTP411, MSTP410, MSTP403,
        MSTP_NR };
 
@@ -301,6 +309,10 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
        [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
        [MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
+       [MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
+       [MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
+       [MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
+       [MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
        [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
        [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
        [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@@ -350,6 +362,10 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
        CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
        CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */
        CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
        CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
        CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
index 74aac96..adbff70 100644
@@ -17,6 +17,7 @@
  * the CPU clock speed on the fly.
  */
 
+#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
 #include <linux/err.h>
index 42d74ea..845de59 100644
@@ -32,6 +32,9 @@
 #define MX3_PWMSAR                0x0C    /* PWM Sample Register */
 #define MX3_PWMPR                 0x10    /* PWM Period Register */
 #define MX3_PWMCR_PRESCALER(x)    (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN                (1 << 24)
+#define MX3_PWMCR_WAITEN                (1 << 23)
+#define MX3_PWMCR_DBGEN                        (1 << 22)
 #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
 #define MX3_PWMCR_CLKSRC_IPG      (1 << 16)
 #define MX3_PWMCR_EN              (1 << 0)
@@ -77,7 +80,9 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
                writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
                writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
 
-               cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+               cr = MX3_PWMCR_PRESCALER(prescale) |
+                       MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+                       MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
 
                if (cpu_is_mx25())
                        cr |= MX3_PWMCR_CLKSRC_IPG;
index e657305..a976c02 100644
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/pwm_backlight.h>
-#include <linux/slab.h>
 
 #include <plat/devs.h>
 #include <plat/gpio-cfg.h>
index 43f984e..303192f 100644
 #define __NR_clock_adjtime     342
 #define __NR_syncfs            343
 #define __NR_setns             344
+#define __NR_process_vm_readv  345
+#define __NR_process_vm_writev 346
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            345
+#define NR_syscalls            347
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index c468f2e..ce827b3 100644
@@ -365,4 +365,6 @@ ENTRY(sys_call_table)
        .long sys_clock_adjtime
        .long sys_syncfs
        .long sys_setns
+       .long sys_process_vm_readv      /* 345 */
+       .long sys_process_vm_writev
 
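[Reviewer note, not part of the patch] The two table entries line up with the new __NR_process_vm_readv/__NR_process_vm_writev numbers (345/346) and the NR_syscalls bump to 347. A hedged userspace sketch, assuming the C library does not yet wrap these calls on m68k, so the raw syscall number is used; the helper name is hypothetical:

#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

/* Read len bytes from another process' address space (m68k numbering). */
static ssize_t peek_remote(pid_t pid, void *remote, void *local, size_t len)
{
	struct iovec liov = { .iov_base = local,  .iov_len = len };
	struct iovec riov = { .iov_base = remote, .iov_len = len };

	return syscall(345 /* __NR_process_vm_readv on m68k */,
		       pid, &liov, 1UL, &riov, 1UL, 0UL);
}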
index 6efc18b..bd58b72 100644
@@ -88,7 +88,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        if (oprofile_started)
index ec8c84c..895e337 100644
@@ -50,9 +50,9 @@ static struct platform_device heartbeat_device = {
 #define GBECONT                0xffc10100
 #define GBECONT_RMII1  BIT(17)
 #define GBECONT_RMII0  BIT(16)
-static void sh7757_eth_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_set_mdio_gate(void *addr)
 {
-       if ((addr & 0x00000fff) < 0x0800)
+       if (((unsigned long)addr & 0x00000fff) < 0x0800)
                writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
        else
                writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
@@ -116,9 +116,9 @@ static struct platform_device sh7757_eth1_device = {
        },
 };
 
-static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_giga_set_mdio_gate(void *addr)
 {
-       if ((addr & 0x00000fff) < 0x0800) {
+       if (((unsigned long)addr & 0x00000fff) < 0x0800) {
                gpio_set_value(GPIO_PTT4, 1);
                writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
        } else {
@@ -210,8 +210,12 @@ static struct resource sh_mmcif_resources[] = {
 };
 
 static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
-       .chan_priv_tx   = SHDMA_SLAVE_MMCIF_TX,
-       .chan_priv_rx   = SHDMA_SLAVE_MMCIF_RX,
+       .chan_priv_tx   = {
+               .slave_id = SHDMA_SLAVE_MMCIF_TX,
+       },
+       .chan_priv_rx   = {
+               .slave_id = SHDMA_SLAVE_MMCIF_RX,
+       }
 };
 
 static struct sh_mmcif_plat_data sh_mmcif_plat = {
index 7429b47..381edcd 100644
@@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
 
        dp->rcv_buf_len = 4096;
 
-       dp->ds_states = kzalloc(sizeof(ds_states_template),
-                               GFP_KERNEL);
+       dp->ds_states = kmemdup(ds_states_template,
+                               sizeof(ds_states_template), GFP_KERNEL);
        if (!dp->ds_states)
                goto out_free_rcv_buf;
 
-       memcpy(dp->ds_states, ds_states_template,
-              sizeof(ds_states_template));
        dp->num_ds_states = ARRAY_SIZE(ds_states_template);
 
        for (i = 0; i < dp->num_ds_states; i++)
index b272cda..af5755d 100644
@@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
        if (!irq)
                return -ENOMEM;
 
-       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
-               return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;
+       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+               return -EINVAL;
 
        return irq;
 }
index 4661480..741df91 100644
@@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
        void *new_val;
        int err;
 
-       new_val = kmalloc(len, GFP_KERNEL);
+       new_val = kmemdup(val, len, GFP_KERNEL);
        if (!new_val)
                return -ENOMEM;
 
-       memcpy(new_val, val, len);
-
        err = -ENODEV;
 
        mutex_lock(&of_set_property_mutex);
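[Reviewer note, not part of the patch] Both sparc conversions in this series (ds.c above and of_set_property() here) follow the same pattern; a minimal before/after sketch of the kmemdup() equivalence, with hypothetical helper names:

#include <linux/slab.h>
#include <linux/string.h>

/* Open-coded duplicate: what the old code did by hand. */
static void *dup_old(const void *val, int len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (p)
		memcpy(p, val, len);
	return p;
}

/* kmemdup() performs the allocate-and-copy in one call, same semantics. */
static void *dup_new(const void *val, int len)
{
	return kmemdup(val, len, GFP_KERNEL);
}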
index 5175ac2..8a7f817 100644
@@ -302,8 +302,7 @@ void __init btfixup(void)
                                case 'i':       /* INT */
                                        if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
                                                set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-                                       else if ((insn & 0x80002000) == 0x80002000 &&
-                                                (insn & 0x01800000) != 0x01800000) /* %LO */
+                                       else if ((insn & 0x80002000) == 0x80002000) /* %LO */
                                                set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
                                        else {
                                                prom_printf(insn_i, p, addr, insn);
index 94e9a51..f80f8ce 100644
@@ -74,16 +74,6 @@ enum {
  */
 void tile_irq_activate(unsigned int irq, int tile_irq_type);
 
-/*
- * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
- * how to use enable/disable_percpu_irq() to manage interrupts on each
- * core.  We can't use the generic enable/disable_irq() because they
- * use a single reference count per irq, rather than per cpu per irq.
- */
-void enable_percpu_irq(unsigned int irq);
-void disable_percpu_irq(unsigned int irq);
-
-
 void setup_irq_regs(void);
 
 #endif /* _ASM_TILE_IRQ_H */
index aa0134d..02e6280 100644
@@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
  * Remove an irq from the disabled mask.  If we're in an interrupt
  * context, defer enabling the HW interrupt until we leave.
  */
-void enable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_enable(struct irq_data *d)
 {
-       get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
+       get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
        if (__get_cpu_var(irq_depth) == 0)
-               unmask_irqs(1UL << irq);
+               unmask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
 }
-EXPORT_SYMBOL(enable_percpu_irq);
 
 /*
  * Add an irq to the disabled mask.  We disable the HW interrupt
@@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq);
  * in an interrupt context, the return path is careful to avoid
  * unmasking a newly disabled interrupt.
  */
-void disable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_disable(struct irq_data *d)
 {
-       get_cpu_var(irq_disable_mask) |= (1UL << irq);
-       mask_irqs(1UL << irq);
+       get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
+       mask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
 }
-EXPORT_SYMBOL(disable_percpu_irq);
 
 /* Mask an interrupt. */
 static void tile_irq_chip_mask(struct irq_data *d)
@@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d)
 
 static struct irq_chip tile_irq_chip = {
        .name = "tile_irq_chip",
+       .irq_enable = tile_irq_chip_enable,
+       .irq_disable = tile_irq_chip_disable,
        .irq_ack = tile_irq_chip_ack,
        .irq_eoi = tile_irq_chip_eoi,
        .irq_mask = tile_irq_chip_mask,
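[Reviewer note, assumption, not part of the patch] With the tile-private enable_percpu_irq()/disable_percpu_irq() removed and the logic wired into the irq_chip as .irq_enable/.irq_disable, per-cpu users are expected to go through the generic genirq helpers instead, which keep per-cpu state rather than the single reference count used by enable/disable_irq(). A sketch under that assumption; the function names are hypothetical:

#include <linux/interrupt.h>
#include <linux/irq.h>

static void example_cpu_online(unsigned int irq)
{
	enable_percpu_irq(irq, IRQ_TYPE_NONE);	/* generic genirq helper */
}

static void example_cpu_offline(unsigned int irq)
{
	disable_percpu_irq(irq);
}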
index 658f2ce..b3ed19f 100644
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
+#include <linux/export.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
index 2a8014c..9d610d3 100644
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/export.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
index b671a86..6029082 100644
@@ -18,6 +18,7 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
+#include <linux/stat.h>
 #include <hv/hypervisor.h>
 
 /* Return a string queried from the hypervisor, truncated to page size. */
index a87d2a8..2a81d32 100644
@@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm);
 EXPORT_SYMBOL(current_text_addr);
 EXPORT_SYMBOL(dump_stack);
 
+/* arch/tile/kernel/head.S */
+EXPORT_SYMBOL(empty_zero_page);
+
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__copy_to_user_inatomic);
index cbe6f4f..1cc6ae4 100644
@@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
        VM_BUG_ON(!virt_addr_valid((void *)addr));
        page = virt_to_page((void *)addr);
        if (put_page_testzero(page)) {
-               int pages = (1 << order);
                homecache_change_page_home(page, order, initial_page_home());
-               while (pages--)
-                       __free_page(page++);
+               if (order == 0) {
+                       free_hot_cold_page(page, 0);
+               } else {
+                       init_page_count(page);
+                       __free_pages(page, order);
+               }
        }
 }
index cb9a104..efb4294 100644
@@ -390,7 +390,7 @@ config X86_INTEL_CE
          This option compiles in support for the CE4100 SOC for settop
          boxes and media devices.
 
-config X86_INTEL_MID
+config X86_WANT_INTEL_MID
        bool "Intel MID platform support"
        depends on X86_32
        depends on X86_EXTENDED_PLATFORM
@@ -399,7 +399,10 @@ config X86_INTEL_MID
          systems which do not have the PCI legacy interfaces (Moorestown,
          Medfield). If you are building for a PC class system say N here.
 
-if X86_INTEL_MID
+if X86_WANT_INTEL_MID
+
+config X86_INTEL_MID
+       bool
 
 config X86_MRST
        bool "Moorestown MID platform"
@@ -411,6 +414,7 @@ config X86_MRST
        select SPI
        select INTEL_SCU_IPC
        select X86_PLATFORM_DEVICES
+       select X86_INTEL_MID
        ---help---
          Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
          Internet Device(MID) platform. Moorestown consists of two chips:
index 3b97a80..c99f9ed 100644
@@ -116,16 +116,16 @@ void show_registers(struct pt_regs *regs)
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
-                               printk(" Bad EIP value.");
+                               printk(KERN_CONT " Bad EIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
-                               printk("<%02x> ", c);
+                               printk(KERN_CONT "<%02x> ", c);
                        else
-                               printk("%02x ", c);
+                               printk(KERN_CONT "%02x ", c);
                }
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 int is_valid_bugaddr(unsigned long ip)
index 19853ad..6d728d9 100644
@@ -284,16 +284,16 @@ void show_registers(struct pt_regs *regs)
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
-                               printk(" Bad RIP value.");
+                               printk(KERN_CONT " Bad RIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
-                               printk("<%02x> ", c);
+                               printk(KERN_CONT "<%02x> ", c);
                        else
-                               printk("%02x ", c);
+                               printk(KERN_CONT "%02x ", c);
                }
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 int is_valid_bugaddr(unsigned long ip)
index b946a9e..1bb0bf4 100644
@@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void)
 }
 EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
 
+static void hpet_disable_rtc_channel(void)
+{
+       unsigned long cfg;
+       cfg = hpet_readl(HPET_T1_CFG);
+       cfg &= ~HPET_TN_ENABLE;
+       hpet_writel(cfg, HPET_T1_CFG);
+}
+
 /*
  * The functions below are called from rtc driver.
  * Return 0 if HPET is not being used.
@@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
                return 0;
 
        hpet_rtc_flags &= ~bit_mask;
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
+
        return 1;
 }
 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
 
 static void hpet_rtc_timer_reinit(void)
 {
-       unsigned int cfg, delta;
+       unsigned int delta;
        int lost_ints = -1;
 
-       if (unlikely(!hpet_rtc_flags)) {
-               cfg = hpet_readl(HPET_T1_CFG);
-               cfg &= ~HPET_TN_ENABLE;
-               hpet_writel(cfg, HPET_T1_CFG);
-               return;
-       }
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
 
        if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
                delta = hpet_default_delta;
index ea30585..dd74e46 100644
@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
+               if (PageTail(page))
+                       get_huge_page_tail(page);
                (*nr)++;
                page++;
                refs++;
index bfab3fa..7b65f75 100644
@@ -568,8 +568,8 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf] - addrs[i];
                                        break;
                                }
                                if (filter[i].jt != 0) {
-                                       if (filter[i].jf)
-                                               t_offset += is_near(f_offset) ? 2 : 6;
+                                       if (filter[i].jf && f_offset)
+                                               t_offset += is_near(f_offset) ? 2 : 5;
                                        EMIT_COND_JMP(t_op, t_offset);
                                        if (filter[i].jf)
                                                EMIT_JMP(f_offset);
index e36bf71..40e4469 100644
  */
 
 static unsigned long efi_rt_eflags;
-static pgd_t efi_bak_pg_dir_pointer[2];
 
 void efi_call_phys_prelog(void)
 {
-       unsigned long cr4;
-       unsigned long temp;
        struct desc_ptr gdt_descr;
 
        local_irq_save(efi_rt_eflags);
 
-       /*
-        * If I don't have PAE, I should just duplicate two entries in page
-        * directory. If I have PAE, I just need to duplicate one entry in
-        * page directory.
-        */
-       cr4 = read_cr4_safe();
-
-       if (cr4 & X86_CR4_PAE) {
-               efi_bak_pg_dir_pointer[0].pgd =
-                   swapper_pg_dir[pgd_index(0)].pgd;
-               swapper_pg_dir[0].pgd =
-                   swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-       } else {
-               efi_bak_pg_dir_pointer[0].pgd =
-                   swapper_pg_dir[pgd_index(0)].pgd;
-               efi_bak_pg_dir_pointer[1].pgd =
-                   swapper_pg_dir[pgd_index(0x400000)].pgd;
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-               temp = PAGE_OFFSET + 0x400000;
-               swapper_pg_dir[pgd_index(0x400000)].pgd =
-                   swapper_pg_dir[pgd_index(temp)].pgd;
-       }
-
-       /*
-        * After the lock is released, the original page table is restored.
-        */
+       load_cr3(initial_page_table);
        __flush_tlb_all();
 
        gdt_descr.address = __pa(get_cpu_gdt_table(0));
@@ -85,28 +56,13 @@ void efi_call_phys_prelog(void)
 
 void efi_call_phys_epilog(void)
 {
-       unsigned long cr4;
        struct desc_ptr gdt_descr;
 
        gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
 
-       cr4 = read_cr4_safe();
-
-       if (cr4 & X86_CR4_PAE) {
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   efi_bak_pg_dir_pointer[0].pgd;
-       } else {
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   efi_bak_pg_dir_pointer[0].pgd;
-               swapper_pg_dir[pgd_index(0x400000)].pgd =
-                   efi_bak_pg_dir_pointer[1].pgd;
-       }
-
-       /*
-        * After the lock is released, the original page table is restored.
-        */
+       load_cr3(swapper_pg_dir);
        __flush_tlb_all();
 
        local_irq_restore(efi_rt_eflags);
index 1093f80..b2c7179 100644
@@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
        domid_t domid = DOMID_SELF;
        int ret;
 
-       ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
-       if (ret > 0)
-               max_pages = ret;
+       /*
+        * For the initial domain we use the maximum reservation as
+        * the maximum page.
+        *
+        * For guest domains the current maximum reservation reflects
+        * the current maximum rather than the static maximum. In this
+        * case the e820 map provided to us will cover the static
+        * maximum region.
+        */
+       if (xen_initial_domain()) {
+               ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+               if (ret > 0)
+                       max_pages = ret;
+       }
+
        return min(max_pages, MAX_DOMAIN_PAGES);
 }
 
index ea70e6c..15de223 100644
@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                if (drain_all)
                        blk_throtl_drain(q);
 
-               __blk_run_queue(q);
+               /*
+                * This function might be called on a queue which failed
+                * driver init after queue creation.  Some drivers
+                * (e.g. fd) get unhappy in such cases.  Kick queue iff
+                * dispatch queue has something on it.
+                */
+               if (!list_empty(&q->queue_head))
+                       __blk_run_queue(q);
 
                if (drain_all)
                        nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        q->backing_dev_info.name = "block";
+       q->node = node_id;
 
        err = bdi_init(&q->backing_dev_info);
        if (err) {
@@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        if (!uninit_q)
                return NULL;
 
-       q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+       q = blk_init_allocated_queue(uninit_q, rfn, lock);
        if (!q)
                blk_cleanup_queue(uninit_q);
 
@@ -562,19 +570,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
                         spinlock_t *lock)
-{
-       return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
-                             spinlock_t *lock, int node_id)
 {
        if (!q)
                return NULL;
 
-       q->node = node_id;
        if (blk_init_free_list(q))
                return NULL;
 
@@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
        return NULL;
 }
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {
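[Reviewer note, not part of the patch] Since the NUMA node is now stored in q->node when the queue is allocated, the *_node variant of the init helper can be dropped. A sketch of the driver-facing path, assuming the usual block-layer API; the wrapper name is hypothetical:

#include <linux/blkdev.h>

/* The node is passed once, at allocation time: blk_init_queue_node()
 * allocates via blk_alloc_queue_node() (which records q->node) and then
 * calls the plain blk_init_allocated_queue(). */
static struct request_queue *example_setup_queue(request_fn_proc *rfn,
						 spinlock_t *lock, int node)
{
	return blk_init_queue_node(rfn, lock, node);
}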
index 16ace89..4c12869 100644
@@ -3184,7 +3184,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
                }
        }
 
-       if (ret)
+       if (ret && ret != -EEXIST)
                printk(KERN_ERR "cfq: cic link failed!\n");
 
        return ret;
@@ -3200,6 +3200,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
        struct io_context *ioc = NULL;
        struct cfq_io_context *cic;
+       int ret;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
@@ -3207,6 +3208,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (!ioc)
                return NULL;
 
+retry:
        cic = cfq_cic_lookup(cfqd, ioc);
        if (cic)
                goto out;
@@ -3215,7 +3217,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (cic == NULL)
                goto err;
 
-       if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+       ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+       if (ret == -EEXIST) {
+               /* someone has linked cic to ioc already */
+               cfq_cic_free(cic);
+               goto retry;
+       } else if (ret)
                goto err_free;
 
 out:
@@ -4036,6 +4043,11 @@ static void *cfq_init_queue(struct request_queue *q)
 
        if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
                kfree(cfqg);
+
+               spin_lock(&cic_index_lock);
+               ida_remove(&cic_index_ida, cfqd->cic_index);
+               spin_unlock(&cic_index_lock);
+
                kfree(cfqd);
                return NULL;
        }
index 6bdedd7..cf047c4 100644
@@ -820,7 +820,7 @@ config PATA_PLATFORM
 
 config PATA_OF_PLATFORM
        tristate "OpenFirmware platform device PATA support"
-       depends on PATA_PLATFORM && OF
+       depends on PATA_PLATFORM && OF && OF_IRQ
        help
          This option enables support for generic directly connected ATA
          devices commonly found on embedded systems with OpenFirmware
index d8b3d89..919daa7 100644
@@ -1743,8 +1743,10 @@ void device_shutdown(void)
                 */
                list_del_init(&dev->kobj.entry);
                spin_unlock(&devices_kset->list_lock);
-               /* Disable all device's runtime power management */
-               pm_runtime_disable(dev);
+
+               /* Don't allow any more runtime suspends */
+               pm_runtime_get_noresume(dev);
+               pm_runtime_barrier(dev);
 
                if (dev->bus && dev->bus->shutdown) {
                        dev_dbg(dev, "shutdown\n");
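[Reviewer note, not part of the patch] The shutdown path now pins the runtime-PM usage count instead of disabling runtime PM outright. A minimal sketch of that idiom, assuming the standard runtime-PM API; the helper name is hypothetical:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Block further runtime suspends without forcing a resume: raise the usage
 * count (no resume), then flush pending runtime-PM requests so ->shutdown()
 * cannot race with an in-flight suspend. */
static void example_quiesce_runtime_pm(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_barrier(dev);
}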
index 8004ac3..587cce5 100644
@@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
                        c->Request.Timeout = 0;
                        c->Request.CDB[0] = BMIC_WRITE;
                        c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+                       c->Request.CDB[7] = (size >> 8) & 0xFF;
+                       c->Request.CDB[8] = size & 0xFF;
                        break;
                case TEST_UNIT_READY:
                        c->Request.CDBLen = 6;
@@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 {
        if (h->msix_vector || h->msi_vector) {
                if (!request_irq(h->intr[h->intr_mode], msixhandler,
-                               IRQF_DISABLED, h->devname, h))
+                               0, h->devname, h))
                        return 0;
                dev_err(&h->pdev->dev, "Unable to get msi irq %d"
                        " for %s\n", h->intr[h->intr_mode],
@@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
        }
 
        if (!request_irq(h->intr[h->intr_mode], intxhandler,
-                       IRQF_DISABLED, h->devname, h))
+                       IRQF_SHARED, h->devname, h))
                return 0;
        dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
                h->intr[h->intr_mode], h->devname);
index 68b205a..1e888c9 100644
@@ -422,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 
                /*
                 * We use punch hole to reclaim the free space used by the
-                * image a.k.a. discard. However we do support discard if
+                * image a.k.a. discard. However we do not support discard if
                 * encryption is enabled, because it may give an attacker
                 * useful information.
                 */
@@ -797,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
        }
 
        q->limits.discard_granularity = inode->i_sb->s_blocksize;
-       q->limits.discard_alignment = inode->i_sb->s_blocksize;
+       q->limits.discard_alignment = 0;
        q->limits.max_discard_sectors = UINT_MAX >> 9;
        q->limits.discard_zeroes_data = 1;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
index 65cc424..148ab94 100644
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list);      /* clients */
 
 static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
 static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t size);
 static ssize_t rbd_snap_add(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
        u32 snap_count = le32_to_cpu(ondisk->snap_count);
        int ret = -ENOMEM;
 
+       if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+               return -ENXIO;
+       }
+
        init_rwsem(&header->snap_rwsem);
        header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
        header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1355,32 +1355,6 @@ fail:
        return ret;
 }
 
-/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
-                                    u64 snapid,
-                                    const char *obj)
-{
-       struct ceph_osd_req_op *ops;
-       int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
-       if (ret < 0)
-               return ret;
-
-       ops[0].snap.snapid = snapid;
-
-       ret = rbd_req_sync_op(dev, NULL,
-                              CEPH_NOSNAP,
-                              0,
-                              CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-                              ops,
-                              1, obj, 0, 0, NULL, NULL, NULL);
-
-       rbd_destroy_ops(ops);
-
-       return ret;
-}
-
 /*
  * Request sync osd read
  */
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
                        goto out_dh;
 
                rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
-               if (rc < 0)
+               if (rc < 0) {
+                       if (rc == -ENXIO) {
+                               pr_warning("unrecognized header format"
+                                          " for image %s", rbd_dev->obj);
+                       }
                        goto out_dh;
+               }
 
                if (snap_count != header->total_snaps) {
                        snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
 static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
 
 static struct attribute *rbd_attrs[] = {
        &dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
        &dev_attr_current_snap.attr,
        &dev_attr_refresh.attr,
        &dev_attr_create_snap.attr,
-       &dev_attr_rollback_snap.attr,
        NULL
 };
 
@@ -2424,64 +2401,6 @@ err_unlock:
        return ret;
 }
 
-static ssize_t rbd_snap_rollback(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t count)
-{
-       struct rbd_device *rbd_dev = dev_to_rbd(dev);
-       int ret;
-       u64 snapid;
-       u64 cur_ofs;
-       char *seg_name = NULL;
-       char *snap_name = kmalloc(count + 1, GFP_KERNEL);
-       ret = -ENOMEM;
-       if (!snap_name)
-               return ret;
-
-       /* parse snaps add command */
-       snprintf(snap_name, count, "%s", buf);
-       seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
-       if (!seg_name)
-               goto done;
-
-       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-       ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
-       if (ret < 0)
-               goto done_unlock;
-
-       dout("snapid=%lld\n", snapid);
-
-       cur_ofs = 0;
-       while (cur_ofs < rbd_dev->header.image_size) {
-               cur_ofs += rbd_get_segment(&rbd_dev->header,
-                                          rbd_dev->obj,
-                                          cur_ofs, (u64)-1,
-                                          seg_name, NULL);
-               dout("seg_name=%s\n", seg_name);
-
-               ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
-               if (ret < 0)
-                       pr_warning("could not roll back obj %s err=%d\n",
-                                  seg_name, ret);
-       }
-
-       ret = __rbd_update_snaps(rbd_dev);
-       if (ret < 0)
-               goto done_unlock;
-
-       ret = count;
-
-done_unlock:
-       mutex_unlock(&ctl_mutex);
-done:
-       kfree(seg_name);
-       kfree(snap_name);
-
-       return ret;
-}
-
 static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
        __ATTR(remove, S_IWUSR, NULL, rbd_remove),
index ae3e167..89ddab1 100644
@@ -16,6 +16,8 @@
  * handle GCR disks
  */
 
+#undef DEBUG
+
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
 #define MAX_FLOPPIES   2
 
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
 enum swim_state {
        idle,
        locating,
@@ -177,7 +177,6 @@ struct swim3 {
 
 struct floppy_state {
        enum swim_state state;
-       spinlock_t lock;
        struct swim3 __iomem *swim3;    /* hardware registers */
        struct dbdma_regs __iomem *dma; /* DMA controller registers */
        int     swim3_intr;     /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
        int     wanted;
        struct macio_dev *mdev;
        char    dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+       int     index;
+       struct request *cur_req;
 };
 
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...)        dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...)        dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
 static struct floppy_state floppy_states[MAX_FLOPPIES];
 static int floppy_count = 0;
 static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
        0, 0, 0, 0, 0, 0
 };
 
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
-                       void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
 static void seek_track(struct floppy_state *fs, int n);
 static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
 static void act(struct floppy_state *fs);
 static void scan_timeout(unsigned long data);
 static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
                                        unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
 {
-       if (__blk_end_request(fd_req, err, nr_bytes))
-               return true;
+       struct request *req = fs->cur_req;
+       int rc;
 
-       fd_req = NULL;
-       return false;
-}
+       swim3_dbg("  end request, err=%d nr_bytes=%d, cur_req=%p\n",
+                 err, nr_bytes, req);
 
-static bool swim3_end_request_cur(int err)
-{
-       return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+       if (err)
+               nr_bytes = blk_rq_cur_bytes(req);
+       rc = __blk_end_request(req, err, nr_bytes);
+       if (rc)
+               return true;
+       fs->cur_req = NULL;
+       return false;
 }
 
 static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
        return (stat & DATA) == 0;
 }
 
-static void do_fd_request(struct request_queue * q)
-{
-       int i;
-
-       for(i=0; i<floppy_count; i++) {
-               struct floppy_state *fs = &floppy_states[i];
-               if (fs->mdev->media_bay &&
-                   check_media_bay(fs->mdev->media_bay) != MB_FD)
-                       continue;
-               start_request(fs);
-       }
-}
-
 static void start_request(struct floppy_state *fs)
 {
        struct request *req;
        unsigned long x;
 
+       swim3_dbg("start request, initial state=%d\n", fs->state);
+
        if (fs->state == idle && fs->wanted) {
                fs->state = available;
                wake_up(&fs->wait);
                return;
        }
        while (fs->state == idle) {
-               if (!fd_req) {
-                       fd_req = blk_fetch_request(swim3_queue);
-                       if (!fd_req)
+               swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+               if (!fs->cur_req) {
+                       fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+                       swim3_dbg("  fetched request %p\n", fs->cur_req);
+                       if (!fs->cur_req)
                                break;
                }
-               req = fd_req;
-#if 0
-               printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
-                      req->rq_disk->disk_name, req->cmd,
-                      (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
-               printk("           errors=%d current_nr_sectors=%u\n",
-                      req->errors, blk_rq_cur_sectors(req));
+               req = fs->cur_req;
+
+               if (fs->mdev->media_bay &&
+                   check_media_bay(fs->mdev->media_bay) != MB_FD) {
+                       swim3_dbg("%s", "  media bay absent, dropping req\n");
+                       swim3_end_request(fs, -ENODEV, 0);
+                       continue;
+               }
+
+#if 0 /* This is really too verbose */
+               swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+                         req->rq_disk->disk_name, req->cmd,
+                         (long)blk_rq_pos(req), blk_rq_sectors(req),
+                         req->buffer);
+               swim3_dbg("           errors=%d current_nr_sectors=%u\n",
+                         req->errors, blk_rq_cur_sectors(req));
 #endif
 
                if (blk_rq_pos(req) >= fs->total_secs) {
-                       swim3_end_request_cur(-EIO);
+                       swim3_dbg("  pos out of bounds (%ld, max is %ld)\n",
+                                 (long)blk_rq_pos(req), (long)fs->total_secs);
+                       swim3_end_request(fs, -EIO, 0);
                        continue;
                }
                if (fs->ejected) {
-                       swim3_end_request_cur(-EIO);
+                       swim3_dbg("%s", "  disk ejected\n");
+                       swim3_end_request(fs, -EIO, 0);
                        continue;
                }
 
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
                        if (fs->write_prot < 0)
                                fs->write_prot = swim3_readbit(fs, WRITE_PROT);
                        if (fs->write_prot) {
-                               swim3_end_request_cur(-EIO);
+                               swim3_dbg("%s", "  try to write, disk write protected\n");
+                               swim3_end_request(fs, -EIO, 0);
                                continue;
                        }
                }
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
                x = ((long)blk_rq_pos(req)) % fs->secpercyl;
                fs->head = x / fs->secpertrack;
                fs->req_sector = x % fs->secpertrack + 1;
-               fd_req = req;
                fs->state = do_transfer;
                fs->retries = 0;
 
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
        }
 }
 
+static void do_fd_request(struct request_queue * q)
+{
+       start_request(q->queuedata);
+}
+
 static void set_timeout(struct floppy_state *fs, int nticks,
                        void (*proc)(unsigned long))
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&fs->lock, flags);
        if (fs->timeout_pending)
                del_timer(&fs->timeout);
        fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
        fs->timeout.data = (unsigned long) fs;
        add_timer(&fs->timeout);
        fs->timeout_pending = 1;
-       spin_unlock_irqrestore(&fs->lock, flags);
 }
 
 static inline void scan_track(struct floppy_state *fs)
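[Reviewer note, assumption, the setup side is outside the hunks shown] With the global fd_req/swim3_queue removed, do_fd_request() recovers the drive from q->queuedata, which implies each disk gets its own request queue pointing back at its floppy_state. A sketch under that assumption; the setup function name is hypothetical:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Hypothetical per-drive queue setup matching the new do_fd_request(). */
static int example_setup_drive_queue(struct gendisk *disk,
				     struct floppy_state *fs)
{
	disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
	if (!disk->queue)
		return -ENOMEM;
	disk->queue->queuedata = fs;	/* lets do_fd_request() find fs */
	return 0;
}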
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
        struct swim3 __iomem *sw = fs->swim3;
        struct dbdma_cmd *cp = fs->dma_cmd;
        struct dbdma_regs __iomem *dr = fs->dma;
+       struct request *req = fs->cur_req;
 
-       if (blk_rq_cur_sectors(fd_req) <= 0) {
-               printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+       if (blk_rq_cur_sectors(req) <= 0) {
+               swim3_warn("%s", "Transfer 0 sectors ?\n");
                return;
        }
-       if (rq_data_dir(fd_req) == WRITE)
+       if (rq_data_dir(req) == WRITE)
                n = 1;
        else {
                n = fs->secpertrack - fs->req_sector + 1;
-               if (n > blk_rq_cur_sectors(fd_req))
-                       n = blk_rq_cur_sectors(fd_req);
+               if (n > blk_rq_cur_sectors(req))
+                       n = blk_rq_cur_sectors(req);
        }
+
+       swim3_dbg("  setup xfer at sect %d (of %d) head %d for %d\n",
+                 fs->req_sector, fs->secpertrack, fs->head, n);
+
        fs->scount = n;
        swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
        out_8(&sw->sector, fs->req_sector);
        out_8(&sw->nsect, n);
        out_8(&sw->gap3, 0);
        out_le32(&dr->cmdptr, virt_to_bus(cp));
-       if (rq_data_dir(fd_req) == WRITE) {
+       if (rq_data_dir(req) == WRITE) {
                /* Set up 3 dma commands: write preamble, data, postamble */
                init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
                ++cp;
-               init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+               init_dma(cp, OUTPUT_MORE, req->buffer, 512);
                ++cp;
                init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
        } else {
-               init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+               init_dma(cp, INPUT_LAST, req->buffer, n * 512);
        }
        ++cp;
        out_le16(&cp->command, DBDMA_STOP);
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
        in_8(&sw->error);
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
-       if (rq_data_dir(fd_req) == WRITE)
+       if (rq_data_dir(req) == WRITE)
                out_8(&sw->control_bis, WRITE_SECTORS);
        in_8(&sw->intr);
        out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
 static void act(struct floppy_state *fs)
 {
        for (;;) {
+               swim3_dbg("  act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+                         fs->state, fs->req_cyl, fs->cur_cyl);
+
                switch (fs->state) {
                case idle:
                        return;         /* XXX shouldn't get here */
 
                case locating:
                        if (swim3_readbit(fs, TRACK_ZERO)) {
+                               swim3_dbg("%s", "    locate track 0\n");
                                fs->cur_cyl = 0;
                                if (fs->req_cyl == 0)
                                        fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
                                break;
                        }
                        if (fs->req_cyl == fs->cur_cyl) {
-                               printk("whoops, seeking 0\n");
+                               swim3_warn("%s", "Whoops, seeking 0\n");
                                fs->state = do_transfer;
                                break;
                        }
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
                case do_transfer:
                        if (fs->cur_cyl != fs->req_cyl) {
                                if (fs->retries > 5) {
-                                       swim3_end_request_cur(-EIO);
+                                       swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+                                                 fs->req_cyl, fs->cur_cyl);
+                                       swim3_end_request(fs, -EIO, 0);
                                        fs->state = idle;
                                        return;
                                }
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
                        return;
 
                default:
-                       printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+                       swim3_err("Unknown state %d\n", fs->state);
                        return;
                }
        }
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* scan timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
        fs->cur_cyl = -1;
        if (fs->retries > 5) {
-               swim3_end_request_cur(-EIO);
+               swim3_end_request(fs, -EIO, 0);
                fs->state = idle;
                start_request(fs);
        } else {
                fs->state = jogging;
                act(fs);
        }
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void seek_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* seek timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_8(&sw->control_bic, DO_SEEK);
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
-       printk(KERN_ERR "swim3: seek timeout\n");
-       swim3_end_request_cur(-EIO);
+       swim3_err("%s", "Seek timeout\n");
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void settle_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* settle timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        if (swim3_readbit(fs, SEEK_COMPLETE)) {
                out_8(&sw->select, RELAX);
                fs->state = locating;
                act(fs);
-               return;
+               goto unlock;
        }
        out_8(&sw->select, RELAX);
        if (fs->settle_time < 2*HZ) {
                ++fs->settle_time;
                set_timeout(fs, 1, settle_timeout);
-               return;
+               goto unlock;
        }
-       printk(KERN_ERR "swim3: seek settle timeout\n");
-       swim3_end_request_cur(-EIO);
+       swim3_err("%s", "Seek settle timeout\n");
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+ unlock:
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
        struct dbdma_regs __iomem *dr = fs->dma;
+       unsigned long flags;
        int n;
 
+       swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_le32(&dr->control, RUN << 16);
        /* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
        out_8(&sw->intr_enable, 0);
        out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
        out_8(&sw->select, RELAX);
-       printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-              (rq_data_dir(fd_req)==WRITE? "writ": "read"),
-              (long)blk_rq_pos(fd_req));
-       swim3_end_request_cur(-EIO);
+       swim3_err("Timeout %sing sector %ld\n",
+              (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+              (long)blk_rq_pos(fs->cur_req));
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
        int stat, resid;
        struct dbdma_regs __iomem *dr;
        struct dbdma_cmd *cp;
+       unsigned long flags;
+       struct request *req = fs->cur_req;
+
+       swim3_dbg("* interrupt, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        intr = in_8(&sw->intr);
        err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
        if ((intr & ERROR_INTR) && fs->state != do_transfer)
-               printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
-                      fs->state, rq_data_dir(fd_req), intr, err);
+               swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+                         fs->state, rq_data_dir(req), intr, err);
        switch (fs->state) {
        case locating:
                if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        del_timer(&fs->timeout);
                        fs->timeout_pending = 0;
                        if (sw->ctrack == 0xff) {
-                               printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+                               swim3_err("%s", "Seen sector but cyl=ff?\n");
                                fs->cur_cyl = -1;
                                if (fs->retries > 5) {
-                                       swim3_end_request_cur(-EIO);
+                                       swim3_end_request(fs, -EIO, 0);
                                        fs->state = idle;
                                        start_request(fs);
                                } else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        fs->cur_cyl = sw->ctrack;
                        fs->cur_sector = sw->csect;
                        if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
-                               printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
-                                      fs->expect_cyl, fs->cur_cyl);
+                               swim3_err("Expected cyl %d, got %d\n",
+                                         fs->expect_cyl, fs->cur_cyl);
                        fs->state = do_transfer;
                        act(fs);
                }
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                fs->timeout_pending = 0;
                dr = fs->dma;
                cp = fs->dma_cmd;
-               if (rq_data_dir(fd_req) == WRITE)
+               if (rq_data_dir(req) == WRITE)
                        ++cp;
                /*
                 * Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                if (intr & ERROR_INTR) {
                        n = fs->scount - 1 - resid / 512;
                        if (n > 0) {
-                               blk_update_request(fd_req, 0, n << 9);
+                               blk_update_request(req, 0, n << 9);
                                fs->req_sector += n;
                        }
                        if (fs->retries < 5) {
                                ++fs->retries;
                                act(fs);
                        } else {
-                               printk("swim3: error %sing block %ld (err=%x)\n",
-                                      rq_data_dir(fd_req) == WRITE? "writ": "read",
-                                      (long)blk_rq_pos(fd_req), err);
-                               swim3_end_request_cur(-EIO);
+                               swim3_err("Error %sing block %ld (err=%x)\n",
+                                      rq_data_dir(req) == WRITE? "writ": "read",
+                                      (long)blk_rq_pos(req), err);
+                               swim3_end_request(fs, -EIO, 0);
                                fs->state = idle;
                        }
                } else {
                        if ((stat & ACTIVE) == 0 || resid != 0) {
                                /* musta been an error */
-                               printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
-                               printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
-                                      fs->state, rq_data_dir(fd_req), intr, err);
-                               swim3_end_request_cur(-EIO);
+                               swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+                               swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
+                                         fs->state, rq_data_dir(req), intr, err);
+                               swim3_end_request(fs, -EIO, 0);
                                fs->state = idle;
                                start_request(fs);
                                break;
                        }
-                       if (swim3_end_request(0, fs->scount << 9)) {
+                       fs->retries = 0;
+                       if (swim3_end_request(fs, 0, fs->scount << 9)) {
                                fs->req_sector += fs->scount;
                                if (fs->req_sector > fs->secpertrack) {
                                        fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        start_request(fs);
                break;
        default:
-               printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+               swim3_err("Don't know what to do in state %d\n", fs->state);
        }
+       spin_unlock_irqrestore(&swim3_lock, flags);
        return IRQ_HANDLED;
 }
 
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
 }
 */
 
+/* Called under the mutex to grab exclusive access to a drive */
 static int grab_drive(struct floppy_state *fs, enum swim_state state,
                      int interruptible)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&fs->lock, flags);
-       if (fs->state != idle) {
+       swim3_dbg("%s", "-> grab drive\n");
+
+       spin_lock_irqsave(&swim3_lock, flags);
+       if (fs->state != idle && fs->state != available) {
                ++fs->wanted;
                while (fs->state != available) {
+                       spin_unlock_irqrestore(&swim3_lock, flags);
                        if (interruptible && signal_pending(current)) {
                                --fs->wanted;
-                               spin_unlock_irqrestore(&fs->lock, flags);
                                return -EINTR;
                        }
                        interruptible_sleep_on(&fs->wait);
+                       spin_lock_irqsave(&swim3_lock, flags);
                }
                --fs->wanted;
        }
        fs->state = state;
-       spin_unlock_irqrestore(&fs->lock, flags);
+       spin_unlock_irqrestore(&swim3_lock, flags);
+
        return 0;
 }
 
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&fs->lock, flags);
+       swim3_dbg("%s", "-> release drive\n");
+
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->state = idle;
        start_request(fs);
-       spin_unlock_irqrestore(&fs->lock, flags);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static int fd_eject(struct floppy_state *fs)
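The grab/release pair above serializes access to one drive under the single swim3_lock: a caller that finds the drive busy bumps a waiter count and sleeps with the lock dropped until the state becomes available. A standalone userspace sketch of that claim/hand-back pattern, using a pthread mutex and condition variable in place of the spinlock and wait queue (all names here are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

enum drive_state { AVAILABLE, BUSY };

static pthread_mutex_t drive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drive_wait = PTHREAD_COND_INITIALIZER;
static enum drive_state state = AVAILABLE;
static int wanted;

/* Claim the drive, sleeping (lock dropped) until it is free. */
static void grab_drive(void)
{
        pthread_mutex_lock(&drive_lock);
        ++wanted;
        while (state != AVAILABLE)
                pthread_cond_wait(&drive_wait, &drive_lock);
        --wanted;
        state = BUSY;
        pthread_mutex_unlock(&drive_lock);
}

/* Hand the drive back and wake one waiter. */
static void release_drive(void)
{
        pthread_mutex_lock(&drive_lock);
        state = AVAILABLE;
        pthread_cond_signal(&drive_wait);
        pthread_mutex_unlock(&drive_lock);
}

int main(void)
{
        grab_drive();
        printf("drive claimed (waiters: %d)\n", wanted);
        release_drive();
        return 0;
}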
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
        struct floppy_state *fs = disk->private_data;
        struct swim3 __iomem *sw = fs->swim3;
+
        mutex_lock(&swim3_mutex);
        if (fs->ref_count > 0 && --fs->ref_count == 0) {
                swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
        .revalidate_disk= floppy_revalidate,
 };
 
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+       struct floppy_state *fs = macio_get_drvdata(mdev);
+       struct swim3 __iomem *sw = fs->swim3;
+
+       if (!fs)
+               return;
+       if (mb_state != MB_FD)
+               return;
+
+       /* Clear state */
+       out_8(&sw->intr_enable, 0);
+       in_8(&sw->intr);
+       in_8(&sw->error);
+}
+
 static int swim3_add_device(struct macio_dev *mdev, int index)
 {
        struct device_node *swim = mdev->ofdev.dev.of_node;
        struct floppy_state *fs = &floppy_states[index];
        int rc = -EBUSY;
 
+       /* Do this first for message macros */
+       memset(fs, 0, sizeof(*fs));
+       fs->mdev = mdev;
+       fs->index = index;
+
        /* Check & Request resources */
        if (macio_resource_count(mdev) < 2) {
-               printk(KERN_WARNING "ifd%d: no address for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "No address in device-tree\n");
                return -ENXIO;
        }
-       if (macio_irq_count(mdev) < 2) {
-               printk(KERN_WARNING "fd%d: no intrs for device %s\n",
-                       index, swim->full_name);
+       if (macio_irq_count(mdev) < 1) {
+               swim3_err("%s", "No interrupt in device-tree\n");
+               return -ENXIO;
        }
        if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
-               printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Can't request mmio resource\n");
                return -EBUSY;
        }
        if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
-               printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Can't request dma resource\n");
                macio_release_resource(mdev, 0);
                return -EBUSY;
        }
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
        if (mdev->media_bay == NULL)
                pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
        
-       memset(fs, 0, sizeof(*fs));
-       spin_lock_init(&fs->lock);
        fs->state = idle;
        fs->swim3 = (struct swim3 __iomem *)
                ioremap(macio_resource_start(mdev, 0), 0x200);
        if (fs->swim3 == NULL) {
-               printk("fd%d: couldn't map registers for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Couldn't map mmio registers\n");
                rc = -ENOMEM;
                goto out_release;
        }
        fs->dma = (struct dbdma_regs __iomem *)
                ioremap(macio_resource_start(mdev, 1), 0x200);
        if (fs->dma == NULL) {
-               printk("fd%d: couldn't map DMA for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Couldn't map dma registers\n");
                iounmap(fs->swim3);
                rc = -ENOMEM;
                goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
        fs->secpercyl = 36;
        fs->secpertrack = 18;
        fs->total_secs = 2880;
-       fs->mdev = mdev;
        init_waitqueue_head(&fs->wait);
 
        fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
        memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
        st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
 
+       if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+               swim3_mb_event(mdev, MB_FD);
+
        if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
-               printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
-                      index, fs->swim3_intr, swim->full_name);
+               swim3_err("%s", "Couldn't request interrupt\n");
                pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
                goto out_unmap;
                return -EBUSY;
        }
-/*
-       if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
-               printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
-                      fs->dma_intr);
-               return -EBUSY;
-       }
-*/
 
        init_timer(&fs->timeout);
 
-       printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+       swim3_info("SWIM3 floppy controller %s\n",
                mdev->media_bay ? "in media bay" : "");
 
        return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
 
 static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
 {
-       int i, rc;
        struct gendisk *disk;
+       int index, rc;
+
+       index = floppy_count++;
+       if (index >= MAX_FLOPPIES)
+               return -ENXIO;
 
        /* Add the drive */
-       rc = swim3_add_device(mdev, floppy_count);
+       rc = swim3_add_device(mdev, index);
        if (rc)
                return rc;
+       /* Now register that disk. Same comment about failure handling */
+       disk = disks[index] = alloc_disk(1);
+       if (disk == NULL)
+               return -ENOMEM;
+       disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+       if (disk->queue == NULL) {
+               put_disk(disk);
+               return -ENOMEM;
+       }
+       disk->queue->queuedata = &floppy_states[index];
 
-       /* Now create the queue if not there yet */
-       if (swim3_queue == NULL) {
+       if (index == 0) {
                /* If we failed, there isn't much we can do as the driver is still
                 * too dumb to remove the device, just bail out
                 */
                if (register_blkdev(FLOPPY_MAJOR, "fd"))
                        return 0;
-               swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
-               if (swim3_queue == NULL) {
-                       unregister_blkdev(FLOPPY_MAJOR, "fd");
-                       return 0;
-               }
        }
 
-       /* Now register that disk. Same comment about failure handling */
-       i = floppy_count++;
-       disk = disks[i] = alloc_disk(1);
-       if (disk == NULL)
-               return 0;
-
        disk->major = FLOPPY_MAJOR;
-       disk->first_minor = i;
+       disk->first_minor = index;
        disk->fops = &floppy_fops;
-       disk->private_data = &floppy_states[i];
-       disk->queue = swim3_queue;
+       disk->private_data = &floppy_states[index];
        disk->flags |= GENHD_FL_REMOVABLE;
-       sprintf(disk->disk_name, "fd%d", i);
+       sprintf(disk->disk_name, "fd%d", index);
        set_capacity(disk, 2880);
        add_disk(disk);
 
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
                .of_match_table = swim3_match,
        },
        .probe          = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+       .mediabay_event = swim3_mb_event,
+#endif
 #if 0
        .suspend        = swim3_suspend,
        .resume         = swim3_resume,
index c2917ff..34767a6 100644

 #define IPMI_WDOG_SET_TIMER            0x24
 #define IPMI_WDOG_GET_TIMER            0x25
 
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP  0x80
+
 /* These are here until the real ones get into the watchdog.h interface. */
 #ifndef WDIOC_GETTIMEOUT
 #define        WDIOC_GETTIMEOUT        _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
        struct kernel_ipmi_msg            msg;
        int                               rv;
        struct ipmi_system_interface_addr addr;
+       int                               timeout_retries = 0;
 
        if (ipmi_ignore_heartbeat)
                return 0;
@@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
 
        mutex_lock(&heartbeat_lock);
 
+restart:
        atomic_set(&heartbeat_tofree, 2);
 
        /*
@@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
        /* Wait for the heartbeat to be sent. */
        wait_for_completion(&heartbeat_wait);
 
-       if (heartbeat_recv_msg.msg.data[0] != 0) {
+       if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)  {
+               timeout_retries++;
+               if (timeout_retries > 3) {
+                       printk(KERN_ERR PFX ": Unable to restore the IPMI"
+                              " watchdog's settings, giving up.\n");
+                       rv = -EIO;
+                       goto out_unlock;
+               }
+
+               /*
+                * The timer was not initialized, that means the BMC was
+                * probably reset and lost the watchdog information.  Attempt
+                * to restore the timer's info.  Note that we still hold
+                * the heartbeat lock, to keep a heartbeat from happening
+                * in this process, so must say no heartbeat to avoid a
+                * deadlock on this mutex.
+                */
+               rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+               if (rv) {
+                       printk(KERN_ERR PFX ": Unable to send the command to"
+                              " set the watchdog's settings, giving up.\n");
+                       goto out_unlock;
+               }
+
+               /* We might need a new heartbeat, so do it now */
+               goto restart;
+       } else if (heartbeat_recv_msg.msg.data[0] != 0) {
                /*
                 * Got an error in the heartbeat response.  It was already
                 * reported in ipmi_wdog_msg_handler, but we should return
@@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
                rv = -EINVAL;
        }
 
+out_unlock:
        mutex_unlock(&heartbeat_lock);
 
        return rv;
@@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
 static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
                                  void                 *handler_data)
 {
-       if (msg->msg.data[0] != 0) {
+       if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+                       msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+               printk(KERN_INFO PFX "response: The IPMI controller appears"
+                      " to have been reset, will attempt to reinitialize"
+                      " the watchdog timer\n");
+       else if (msg->msg.data[0] != 0)
                printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
                       msg->msg.data[0],
                       msg->msg.cmd);
-       }
 
        ipmi_free_recv_msg(msg);
 }
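The new branch retries a failed heartbeat when the BMC answers "timer not initialized": it reprograms the watchdog without an implicit heartbeat (to avoid re-entering this path under the held lock) and loops back to resend, giving up after a few attempts. A self-contained sketch of that bounded retry-after-reinit loop; the bmc_* helpers are stand-ins, not the real IPMI calls:

#include <stdio.h>

#define TIMER_NOT_INIT 0x80

static int bmc_resets_seen;

/* Pretend the BMC lost its timer settings once (e.g. after a reset). */
static int bmc_send_heartbeat(void)
{
        if (bmc_resets_seen == 0) {
                bmc_resets_seen = 1;
                return TIMER_NOT_INIT;
        }
        return 0;
}

static int bmc_set_timeout(void)
{
        printf("reprogramming watchdog timer\n");
        return 0;
}

static int heartbeat(void)
{
        int retries = 0;
        int cc;

restart:
        cc = bmc_send_heartbeat();
        if (cc == TIMER_NOT_INIT) {
                if (++retries > 3) {
                        fprintf(stderr, "cannot restore watchdog settings\n");
                        return -1;
                }
                if (bmc_set_timeout())      /* restore settings, no heartbeat */
                        return -1;
                goto restart;               /* reinit may need a fresh heartbeat */
        }
        return cc ? -1 : 0;
}

int main(void)
{
        return heartbeat() ? 1 : 0;
}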
index c811cb1..2cce44a 100644
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
        ibft_cleanup();
 }
 
+#ifdef CONFIG_ACPI
+static const struct {
+       char *sign;
+} ibft_signs[] = {
+       /*
+        * One spec says "IBFT", the other says "iBFT". We have to check
+        * for both.
+        */
+       { ACPI_SIG_IBFT },
+       { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+       int i;
+       struct acpi_table_header *table = NULL;
+
+       if (acpi_disabled)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+               acpi_get_table(ibft_signs[i].sign, 0, &table);
+               ibft_addr = (struct acpi_table_ibft *)table;
+       }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
 /*
  * ibft_init() - creates sysfs tree entries for the iBFT data.
  */
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
 {
        int rc = 0;
 
+       /*
+        * On UEFI systems, setup_arch()/find_ibft_region() runs before the
+        * ACPI tables are parsed and only does the legacy low-memory scan,
+        * so fall back to an ACPI lookup here.
+        */
+       if (!ibft_addr)
+               acpi_find_ibft_region();
+
        if (ibft_addr) {
-               printk(KERN_INFO "iBFT detected at 0x%llx.\n",
-                      (u64)isa_virt_to_bus(ibft_addr));
+               pr_info("iBFT detected.\n");
 
                rc = ibft_check_device();
                if (rc)
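acpi_find_ibft_region() simply walks a short table of candidate signatures ("IBFT" and "iBFT") and keeps the first ACPI table that matches. The same first-match scan can be sketched in plain C with a simulated lookup; get_table() below stands in for the real ACPICA acpi_get_table():

#include <stdio.h>
#include <string.h>

struct table { const char *sig; const char *payload; };

/* Pretend the firmware exported the table under the lower-case spelling. */
static const struct table firmware_tables[] = {
        { "iBFT", "iSCSI boot firmware table" },
};

static const struct table *get_table(const char *sig)
{
        for (size_t i = 0; i < sizeof(firmware_tables) / sizeof(firmware_tables[0]); i++)
                if (strcmp(firmware_tables[i].sig, sig) == 0)
                        return &firmware_tables[i];
        return NULL;
}

int main(void)
{
        /* One spec says "IBFT", the other "iBFT"; try both, first hit wins. */
        static const char *signs[] = { "IBFT", "iBFT" };
        const struct table *found = NULL;

        for (size_t i = 0; i < sizeof(signs) / sizeof(signs[0]) && !found; i++)
                found = get_table(signs[i]);

        if (found)
                printf("iBFT detected (%s): %s\n", found->sig, found->payload);
        return 0;
}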
index bfe7232..4da4eb9 100644
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
 static const struct {
        char *sign;
 } ibft_signs[] = {
-#ifdef CONFIG_ACPI
-       /*
-        * One spec says "IBFT", the other says "iBFT". We have to check
-        * for both.
-        */
-       { ACPI_SIG_IBFT },
-#endif
        { "iBFT" },
        { "BIFT" },     /* Broadcom iSCSI Offload */
 };
@@ -62,14 +55,6 @@ static const struct {
 #define VGA_MEM 0xA0000 /* VGA buffer */
 #define VGA_SIZE 0x20000 /* 128kB */
 
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
-       ibft_addr = (struct acpi_table_ibft *)header;
-       return 0;
-}
-#endif /* CONFIG_ACPI */
-
 static int __init find_ibft_in_mem(void)
 {
        unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
                                 * the table cannot be valid. */
                                if (pos + len <= (IBFT_END-1)) {
                                        ibft_addr = (struct acpi_table_ibft *)virt;
+                                       pr_info("iBFT found at 0x%lx.\n", pos);
                                        goto done;
                                }
                        }
@@ -108,20 +94,12 @@ done:
  */
 unsigned long __init find_ibft_region(unsigned long *sizep)
 {
-#ifdef CONFIG_ACPI
-       int i;
-#endif
        ibft_addr = NULL;
 
-#ifdef CONFIG_ACPI
-       for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
-               acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
        /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
         * only use ACPI for this */
 
-       if (!ibft_addr && !efi_enabled)
+       if (!efi_enabled)
                find_ibft_in_mem();
 
        if (ibft_addr) {
index 038f5eb..f8ce29e 100644
@@ -22,7 +22,6 @@
 #include <linux/mfd/da9052/da9052.h>
 #include <linux/mfd/da9052/reg.h>
 #include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
 
 #define DA9052_INPUT                           1
 #define DA9052_OUTPUT_OPENDRAIN                2
@@ -43,6 +42,9 @@
 #define DA9052_GPIO_MASK_UPPER_NIBBLE          0xF0
 #define DA9052_GPIO_MASK_LOWER_NIBBLE          0x0F
 #define DA9052_GPIO_NIBBLE_SHIFT               4
+#define DA9052_IRQ_GPI0                        16
+#define DA9052_GPIO_ODD_SHIFT                  7
+#define DA9052_GPIO_EVEN_SHIFT                 3
 
 struct da9052_gpio {
        struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
 {
        struct da9052_gpio *gpio = to_da9052_gpio(gc);
-       unsigned char register_value = 0;
        int ret;
 
        if (da9052_gpio_port_odd(offset)) {
-               if (value) {
-                       register_value = DA9052_GPIO_ODD_PORT_MODE;
                        ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
                                                DA9052_GPIO_0_1_REG,
                                                DA9052_GPIO_ODD_PORT_MODE,
-                                               register_value);
+                                               value << DA9052_GPIO_ODD_SHIFT);
                        if (ret != 0)
                                dev_err(gpio->da9052->dev,
                                        "Failed to updated gpio odd reg,%d",
                                        ret);
-               }
        } else {
-               if (value) {
-                       register_value = DA9052_GPIO_EVEN_PORT_MODE;
                        ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
                                                DA9052_GPIO_0_1_REG,
                                                DA9052_GPIO_EVEN_PORT_MODE,
-                                               register_value);
+                                               value << DA9052_GPIO_EVEN_SHIFT);
                        if (ret != 0)
                                dev_err(gpio->da9052->dev,
                                        "Failed to updated gpio even reg,%d",
                                        ret);
-               }
        }
 }
 
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
        .direction_input = da9052_gpio_direction_input,
        .direction_output = da9052_gpio_direction_output,
        .to_irq = da9052_gpio_to_irq,
-       .can_sleep = 1;
-       .ngpio = 16;
-       .base = -1;
+       .can_sleep = 1,
+       .ngpio = 16,
+       .base = -1,
 };
 
 static int __devinit da9052_gpio_probe(struct platform_device *pdev)
index ea8e738..461958f 100644
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
                  &chip->reg->regs[chip->ch].imask);
 }
 
+static void ioh_irq_disable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct ioh_gpio *chip = gc->private;
+       unsigned long flags;
+       u32 ien;
+
+       spin_lock_irqsave(&chip->spinlock, flags);
+       ien = ioread32(&chip->reg->regs[chip->ch].ien);
+       ien &= ~(1 << (d->irq - chip->irq_base));
+       iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct ioh_gpio *chip = gc->private;
+       unsigned long flags;
+       u32 ien;
+
+       spin_lock_irqsave(&chip->spinlock, flags);
+       ien = ioread32(&chip->reg->regs[chip->ch].ien);
+       ien |= 1 << (d->irq - chip->irq_base);
+       iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
 static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
 {
        struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
        int i, j;
        int ret = IRQ_NONE;
 
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < 8; i++, chip++) {
                reg_val = ioread32(&chip->reg->regs[i].istatus);
                for (j = 0; j < num_ports[i]; j++) {
                        if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
        ct->chip.irq_mask = ioh_irq_mask;
        ct->chip.irq_unmask = ioh_irq_unmask;
        ct->chip.irq_set_type = ioh_irq_type;
+       ct->chip.irq_disable = ioh_irq_disable;
+       ct->chip.irq_enable = ioh_irq_enable;
 
        irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
                               IRQ_NOREQUEST | IRQ_NOPROBE, 0);
index ec3fcf0..5cd04b6 100644
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
        return 0;
 }
 
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       /* GPIO 28..31 are input only on MPC5121 */
+       if (gpio >= 28)
+               return -EINVAL;
+
+       return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
 static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 {
        struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
        mm_gc->save_regs = mpc8xxx_gpio_save_regs;
        gc->ngpio = MPC8XXX_GPIO_PINS;
        gc->direction_input = mpc8xxx_gpio_dir_in;
-       gc->direction_output = mpc8xxx_gpio_dir_out;
-       if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
-               gc->get = mpc8572_gpio_get;
-       else
-               gc->get = mpc8xxx_gpio_get;
+       gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+               mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+       gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+               mpc8572_gpio_get : mpc8xxx_gpio_get;
        gc->set = mpc8xxx_gpio_set;
        gc->to_irq = mpc8xxx_gpio_to_irq;
 
index 093c90b..4102f63 100644
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
        int ret, irq, i;
        static DECLARE_BITMAP(init_irq, NR_IRQS);
 
-       pdata = dev->dev.platform_data;
-       if (pdata == NULL)
-               return -ENODEV;
-
        chip = kzalloc(sizeof(*chip), GFP_KERNEL);
        if (chip == NULL)
                return -ENOMEM;
index d09a6e0..004b048 100644
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
        const struct intel_device_info *info = INTEL_INFO(dev);
 
        seq_printf(m, "gen: %d\n", info->gen);
+       seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(is_mobile);
        B(is_i85x);
index a9533c5..a9ae374 100644
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 
        diff1 = now - dev_priv->last_time1;
 
+       /* Prevent division-by-zero if we are asking too fast.
+        * Also, we don't get interesting results if we are polling
+        * faster than once in 10ms, so just return the saved value
+        * in such cases.
+        */
+       if (diff1 <= 10)
+               return dev_priv->chipset_power;
+
        count1 = I915_READ(DMIEC);
        count2 = I915_READ(DDREC);
        count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
        dev_priv->last_count1 = total_count;
        dev_priv->last_time1 = now;
 
+       dev_priv->chipset_power = ret;
+
        return ret;
 }
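The guard added to i915_chipset_val() caches the last computed power figure and hands it back when the function is polled again within 10 ms, which also avoids dividing by a zero (or near-zero) time delta. A userspace sketch of that cache-and-rate-limit pattern using clock_gettime(); expensive_read() is a placeholder for the real counter sampling:

#include <stdio.h>
#include <time.h>

static long last_ms;
static double cached_value;

static double expensive_read(void) { return 42.0; }  /* stands in for HW counters */

static long now_ms(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static double sample(void)
{
        long now = now_ms();
        long diff = now - last_ms;

        /* Polled faster than once per 10 ms: reuse the cached result. */
        if (last_ms != 0 && diff <= 10)
                return cached_value;

        cached_value = expensive_read() / (diff ? diff : 1);
        last_ms = now;
        return cached_value;
}

int main(void)
{
        printf("%f\n", sample());
        printf("%f\n", sample());   /* immediate re-poll returns the cached value */
        return 0;
}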
 
index 15bfa91..a1103fc 100644
@@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
 MODULE_PARM_DESC(powersave,
                "Enable powersavings, fbc, downclocking, etc. (default: true)");
 
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 MODULE_PARM_DESC(semaphores,
-               "Use semaphores for inter-ring sync (default: false)");
+               "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
-               "Enable power-saving render C-state 6 (default: true)");
+               "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
 
 int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
        }
 }
 
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
        int count;
 
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
                udelay(10);
 }
 
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+       int count;
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+               udelay(10);
+
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+       POSTING_READ(FORCEWAKE_MT);
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+               udelay(10);
+}
+
 /*
  * Generally this is called implicitly by the register read function. However,
  * if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 
        /* Forcewake is atomic in case we get in here without the lock */
        if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
-               __gen6_gt_force_wake_get(dev_priv);
+               dev_priv->display.force_wake_get(dev_priv);
 }
 
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE, 0);
        POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+       POSTING_READ(FORCEWAKE_MT);
+}
+
 /*
  * see gen6_gt_force_wake_get()
  */
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
        if (atomic_dec_and_test(&dev_priv->forcewake_count))
-               __gen6_gt_force_wake_put(dev_priv);
+               dev_priv->display.force_wake_put(dev_priv);
 }
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
        (((dev_priv)->info->gen >= 6) && \
-       ((reg) < 0x40000) && \
-       ((reg) != FORCEWAKE))
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE) &&         \
+        ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
index 4a9c1b9..554bef7 100644
@@ -107,6 +107,7 @@ struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
 struct opregion_asle;
+struct drm_i915_private;
 
 struct intel_opregion {
        struct opregion_header *header;
@@ -221,6 +222,8 @@ struct drm_i915_display_funcs {
                          struct drm_i915_gem_object *obj);
        int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                            int x, int y);
+       void (*force_wake_get)(struct drm_i915_private *dev_priv);
+       void (*force_wake_put)(struct drm_i915_private *dev_priv);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
@@ -710,6 +713,7 @@ typedef struct drm_i915_private {
 
        u64 last_count1;
        unsigned long last_time1;
+       unsigned long chipset_power;
        u64 last_count2;
        struct timespec last_time2;
        unsigned long gfx_power;
@@ -998,11 +1002,11 @@ extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
 extern int i915_panel_ignore_lid __read_mostly;
 extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
 extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 
@@ -1308,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
        (((dev_priv)->info->gen >= 6) && \
-       ((reg) < 0x40000) && \
-       ((reg) != FORCEWAKE))
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE) &&         \
+        ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
index 3693e83..c681dc1 100644
@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/dma_remapping.h>
 
 struct change_domains {
        uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
        return 0;
 }
 
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+       if (INTEL_INFO(dev)->gen < 6)
+               return 0;
+
+       if (i915_semaphores >= 0)
+               return i915_semaphores;
+
+       /* Enable semaphores on SNB when IO remapping is off */
+       if (INTEL_INFO(dev)->gen == 6)
+               return !intel_iommu_enabled;
+
+       return 1;
+}
+
 static int
 i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                return 0;
 
        /* XXX gpu semaphores are implicated in various hard hangs on SNB */
-       if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+       if (!intel_enable_semaphores(obj->base.dev))
                return i915_gem_object_wait_rendering(obj);
 
        idx = intel_ring_sync_index(from, to);
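i915_semaphores now defaults to -1, meaning "pick a per-chip default": an explicit 0 or 1 from the module parameter always wins, otherwise gen-6 parts only enable semaphores when IOMMU remapping is off. The decision is just a tri-state override resolver; a minimal sketch, where gen, iommu_enabled and the parameter are plain variables rather than the driver's globals:

#include <stdio.h>
#include <stdbool.h>

/* -1 = use per-chip default, 0 = force off, 1 = force on */
static int semaphores_param = -1;

static bool enable_semaphores(int gen, bool iommu_enabled)
{
        if (gen < 6)
                return false;               /* hardware has no semaphores */

        if (semaphores_param >= 0)
                return semaphores_param;    /* user forced a value */

        if (gen == 6)
                return !iommu_enabled;      /* SNB: only safe without IO remapping */

        return true;                        /* newer parts: on by default */
}

int main(void)
{
        printf("gen6, iommu on : %d\n", enable_semaphores(6, true));
        printf("gen6, iommu off: %d\n", enable_semaphores(6, false));
        printf("gen7           : %d\n", enable_semaphores(7, false));
        return 0;
}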
index b080cc8..a26d5b0 100644
 /* or SDVOB */
 #define HDMIB   0xe1140
 #define  PORT_ENABLE    (1 << 31)
-#define  TRANSCODER_A   (0)
-#define  TRANSCODER_B   (1 << 30)
-#define  TRANSCODER(pipe)      ((pipe) << 30)
-#define  TRANSCODER_MASK   (1 << 30)
+#define  TRANSCODER(pipe)       ((pipe) << 30)
+#define  TRANSCODER_CPT(pipe)   ((pipe) << 29)
+#define  TRANSCODER_MASK        (1 << 30)
+#define  TRANSCODER_MASK_CPT    (3 << 29)
 #define  COLOR_FORMAT_8bpc      (0)
 #define  COLOR_FORMAT_12bpc     (3 << 26)
 #define  SDVOB_HOTPLUG_ENABLE   (1 << 23)
 #define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B   (0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB       (0x3f<<22)
 
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB           (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB         (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB           (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB           (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB         (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB           (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB         (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB           (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB          (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB         (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB                (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB          (0x23 <<22)
+
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB       (0x3f<<22)
+
 #define  FORCEWAKE                             0xA18C
 #define  FORCEWAKE_ACK                         0x130090
+#define  FORCEWAKE_MT                          0xa188 /* multi-threaded */
+#define  FORCEWAKE_MT_ACK                      0x130040
+#define  ECOBUS                                        0xa180
+#define    FORCEWAKE_MT_ENABLE                 (1<<5)
 
 #define  GT_FIFO_FREE_ENTRIES                  0x120008
 #define    GT_FIFO_NUM_RESERVED_ENTRIES                20
index e77a863..d809b03 100644
@@ -38,8 +38,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "drm_dp_helper.h"
-
 #include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
 
 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
@@ -4670,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 /**
  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
  * @crtc: CRTC structure
+ * @mode: requested mode
  *
  * A pipe may be connected to one or more outputs.  Based on the depth of the
  * attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
  *    Displays may support a restricted set as well, check EDID and clamp as
  *      appropriate.
+ *    DP may want to dither down to 6bpc to fit larger modes
  *
  * RETURNS:
  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
  * true if they don't match).
  */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
-                                        unsigned int *pipe_bpp)
+                                        unsigned int *pipe_bpp,
+                                        struct drm_display_mode *mode)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4761,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                }
        }
 
+       if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+               DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+               display_bpc = 6;
+       }
+
        /*
         * We could just drive the pipe at the highest bpc all the time and
         * enable dithering as needed, but that costs bandwidth.  So choose
@@ -5019,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                        pipeconf &= ~PIPECONF_DOUBLE_WIDE;
        }
 
+       /* default to 8bpc */
+       pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+       if (is_dp) {
+               if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+                       pipeconf |= PIPECONF_BPP_6 |
+                                   PIPECONF_DITHER_EN |
+                                   PIPECONF_DITHER_TYPE_SP;
+               }
+       }
+
        dpll |= DPLL_VCO_ENABLE;
 
        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5480,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        /* determine panel color depth */
        temp = I915_READ(PIPECONF(pipe));
        temp &= ~PIPE_BPC_MASK;
-       dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+       dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
        switch (pipe_bpp) {
        case 18:
                temp |= PIPE_6BPC;
@@ -7189,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        work->old_fb_obj = intel_fb->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);
 
+       ret = drm_vblank_get(dev, intel_crtc->pipe);
+       if (ret)
+               goto free_work;
+
        /* We borrow the event spin lock for protecting unpin_work */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                kfree(work);
+               drm_vblank_put(dev, intel_crtc->pipe);
 
                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                return -EBUSY;
@@ -7212,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        crtc->fb = fb;
 
-       ret = drm_vblank_get(dev, intel_crtc->pipe);
-       if (ret)
-               goto cleanup_objs;
-
        work->pending_flip_obj = obj;
 
        work->enable_stall_check = true;
@@ -7238,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
        atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7265,8 @@ cleanup_objs:
        intel_crtc->unpin_work = NULL;
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
+       drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
        kfree(work);
 
        return ret;
@@ -7887,6 +7907,33 @@ void intel_init_emon(struct drm_device *dev)
        dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+       /*
+        * Respect the kernel parameter if it is set
+        */
+       if (i915_enable_rc6 >= 0)
+               return i915_enable_rc6;
+
+       /*
+        * Disable RC6 on Ironlake
+        */
+       if (INTEL_INFO(dev)->gen == 5)
+               return 0;
+
+       /*
+        * Enable rc6 on Sandybridge if DMA remapping is disabled
+        */
+       if (INTEL_INFO(dev)->gen == 6) {
+               DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
+                                intel_iommu_enabled ? "true" : "false",
+                                !intel_iommu_enabled ? "en" : "dis");
+               return !intel_iommu_enabled;
+       }
+       DRM_DEBUG_DRIVER("RC6 enabled\n");
+       return 1;
+}
+
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
        u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +7970,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
        I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-       if (i915_enable_rc6)
+       if (intel_enable_rc6(dev_priv->dev))
                rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
                        GEN6_RC_CTL_RC6_ENABLE;
 
@@ -8372,7 +8419,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
        /* rc6 disabled by default due to repeated reports of hanging during
         * boot and resume.
         */
-       if (!i915_enable_rc6)
+       if (!intel_enable_rc6(dev))
                return;
 
        mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8538,28 @@ static void intel_init_display(struct drm_device *dev)
 
        /* For FIFO watermark updates */
        if (HAS_PCH_SPLIT(dev)) {
+               dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+               dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+               /* IVB configs may use multi-threaded forcewake */
+               if (IS_IVYBRIDGE(dev)) {
+                       u32     ecobus;
+
+                       mutex_lock(&dev->struct_mutex);
+                       __gen6_gt_force_wake_mt_get(dev_priv);
+                       ecobus = I915_READ(ECOBUS);
+                       __gen6_gt_force_wake_mt_put(dev_priv);
+                       mutex_unlock(&dev->struct_mutex);
+
+                       if (ecobus & FORCEWAKE_MT_ENABLE) {
+                               DRM_DEBUG_KMS("Using MT version of forcewake\n");
+                               dev_priv->display.force_wake_get =
+                                       __gen6_gt_force_wake_mt_get;
+                               dev_priv->display.force_wake_put =
+                                       __gen6_gt_force_wake_mt_put;
+                       }
+               }
+
                if (HAS_PCH_IBX(dev))
                        dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
                else if (HAS_PCH_CPT(dev))
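On Ivybridge the init path probes ECOBUS once: it grabs forcewake through the multi-threaded register pair, reads the enable bit back, and only then swaps in the MT get/put callbacks; otherwise the single-threaded pair stays installed. Selecting one of two implementations through function pointers after a one-time capability probe looks roughly like this sketch, where probe_mt_capable() stands in for the ECOBUS read:

#include <stdio.h>
#include <stdbool.h>

struct display_funcs {
        void (*force_wake_get)(void);
        void (*force_wake_put)(void);
};

static void legacy_get(void) { puts("forcewake get (legacy)"); }
static void legacy_put(void) { puts("forcewake put (legacy)"); }
static void mt_get(void)     { puts("forcewake get (multi-threaded)"); }
static void mt_put(void)     { puts("forcewake put (multi-threaded)"); }

/* Stand-in for reading ECOBUS and testing FORCEWAKE_MT_ENABLE. */
static bool probe_mt_capable(void) { return true; }

int main(void)
{
        struct display_funcs funcs = { legacy_get, legacy_put };

        if (probe_mt_capable()) {           /* one-time probe at init */
                funcs.force_wake_get = mt_get;
                funcs.force_wake_put = mt_put;
        }

        funcs.force_wake_get();             /* callers never recheck the capability */
        funcs.force_wake_put();
        return 0;
}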
index 4d0358f..92b041b 100644
@@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
  */
 
 static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
 {
        struct drm_crtc *crtc = intel_dp->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int bpp = 24;
 
-       if (intel_crtc)
+       if (check_bpp)
+               bpp = check_bpp;
+       else if (intel_crtc)
                bpp = intel_crtc->bpp;
 
        return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
        int max_lanes = intel_dp_max_lane_count(intel_dp);
+       int max_rate, mode_rate;
 
        if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
                if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
                        return MODE_PANEL;
        }
 
-       if (intel_dp_link_required(intel_dp, mode->clock)
-           > intel_dp_max_data_rate(max_link_clock, max_lanes))
-               return MODE_CLOCK_HIGH;
+       mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+       max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+       if (mode_rate > max_rate) {
+                       mode_rate = intel_dp_link_required(intel_dp,
+                                                          mode->clock, 18);
+                       if (mode_rate > max_rate)
+                               return MODE_CLOCK_HIGH;
+                       else
+                               mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+       }
 
        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;
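intel_dp_mode_valid() now gives a mode a second chance: if it does not fit the link at the pipe's bpp, the required rate is recomputed at 18 bpp (6 bits per channel) and the mode is accepted with the forced-dither flag instead of being rejected. The check itself is simple arithmetic: the mode needs pixel_clock * bpp / 10 (the +9 rounds up), the link carries link_clock * lanes * 8 / 10. A small sketch of the fallback with made-up link numbers, not the driver's units-checked code:

#include <stdio.h>
#include <stdbool.h>

/* Rate a mode needs, clocks in kHz; +9 rounds the division up. */
static int link_required(int pixel_clock_khz, int bpp)
{
        return (pixel_clock_khz * bpp + 9) / 10;
}

/* What the link can carry: 8b/10b coding eats 20% of the raw bit rate. */
static int max_data_rate(int link_clock_khz, int lanes)
{
        return (link_clock_khz * lanes * 8) / 10;
}

int main(void)
{
        int pixel_clock = 268500;            /* roughly 2560x1600@60, in kHz */
        int link_clock = 162000, lanes = 4;  /* 1.62 GHz link, 4 lanes */
        int max_rate = max_data_rate(link_clock, lanes);
        bool force_6bpc = false;

        int mode_rate = link_required(pixel_clock, 24);     /* try 8bpc first */
        if (mode_rate > max_rate) {
                mode_rate = link_required(pixel_clock, 18);  /* retry at 6bpc */
                if (mode_rate > max_rate) {
                        puts("mode rejected: too fast even at 6bpc");
                        return 0;
                }
                force_6bpc = true;
        }
        printf("mode ok, dither to 6bpc: %s\n", force_6bpc ? "yes" : "no");
        return 0;
}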
@@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
         * clock divider.
         */
        if (is_cpu_edp(intel_dp)) {
-               if (IS_GEN6(dev))
-                       aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+               if (IS_GEN6(dev) || IS_GEN7(dev))
+                       aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
                        aux_clock_divider = 225; /* eDP input clock at 450Mhz */
        } else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
        int lane_count, clock;
        int max_lane_count = intel_dp_max_lane_count(intel_dp);
        int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+       int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
        static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
        if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                for (clock = 0; clock <= max_clock; clock++) {
                        int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
-                       if (intel_dp_link_required(intel_dp, mode->clock)
+                       if (intel_dp_link_required(intel_dp, mode->clock, bpp)
                                        <= link_avail) {
                                intel_dp->link_bw = bws[clock];
                                intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        }
 
        /*
-        * There are three kinds of DP registers:
+        * There are four kinds of DP registers:
         *
         *      IBX PCH
-        *      CPU
+        *      SNB CPU
+        *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
        /* Split out the IBX/CPU vs CPT settings */
 
-       if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+       if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+                       intel_dp->DP |= DP_SYNC_HS_HIGH;
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+                       intel_dp->DP |= DP_SYNC_VS_HIGH;
+               intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+               if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+                       intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+               intel_dp->DP |= intel_crtc->pipe << 29;
+
+               /* don't miss out required setting for eDP */
+               intel_dp->DP |= DP_PLL_ENABLE;
+               if (adjusted_mode->clock < 200000)
+                       intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+               else
+                       intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+       } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
                intel_dp->DP |= intel_dp->color_range;
 
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@ static char     *link_train_names[] = {
  * These are source-specific values; current Intel hardware supports
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
  */
-#define I830_DP_VOLTAGE_MAX        DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT            DP_TRAIN_VOLTAGE_SWING_1200
 
 static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-       switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-       case DP_TRAIN_VOLTAGE_SWING_400:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_600:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_800:
-               return DP_TRAIN_PRE_EMPHASIS_3_5;
-       case DP_TRAIN_VOLTAGE_SWING_1200:
-       default:
-               return DP_TRAIN_PRE_EMPHASIS_0;
+       struct drm_device *dev = intel_dp->base.base.dev;
+
+       if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+               return DP_TRAIN_VOLTAGE_SWING_800;
+       else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               return DP_TRAIN_VOLTAGE_SWING_1200;
+       else
+               return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+       struct drm_device *dev = intel_dp->base.base.dev;
+
+       if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_400:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_600:
+               case DP_TRAIN_VOLTAGE_SWING_800:
+                       return DP_TRAIN_PRE_EMPHASIS_3_5;
+               default:
+                       return DP_TRAIN_PRE_EMPHASIS_0;
+               }
+       } else {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_400:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_600:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_800:
+                       return DP_TRAIN_PRE_EMPHASIS_3_5;
+               case DP_TRAIN_VOLTAGE_SWING_1200:
+               default:
+                       return DP_TRAIN_PRE_EMPHASIS_0;
+               }
        }
 }
 
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
        uint8_t v = 0;
        uint8_t p = 0;
        int lane;
        uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
-       int voltage_max;
+       uint8_t voltage_max;
+       uint8_t preemph_max;
 
        for (lane = 0; lane < intel_dp->lane_count; lane++) {
                uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
                        p = this_p;
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
-               voltage_max = I830_DP_VOLTAGE_MAX_CPT;
-       else
-               voltage_max = I830_DP_VOLTAGE_MAX;
+       voltage_max = intel_dp_voltage_max(intel_dp);
        if (v >= voltage_max)
                v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
-       if (p >= intel_dp_pre_emphasis_max(v))
-               p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+       preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+       if (p >= preemph_max)
+               p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
        for (lane = 0; lane < 4; lane++)
                intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
        }
 }
 
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+                                        DP_TRAIN_PRE_EMPHASIS_MASK);
+       switch (signal_levels) {
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_400MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+               return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_600MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_800MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+       default:
+               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+                             "0x%x\n", signal_levels);
+               return EDP_LINK_TRAIN_500MV_0DB_IVB;
+       }
+}
+
 static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
                      int lane)
@@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                                  DP_LINK_CONFIGURATION_SIZE);
 
        DP |= DP_PORT_EN;
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
        else
                DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                uint8_t     link_status[DP_LINK_STATUS_SIZE];
                uint32_t    signal_levels;
 
-               if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+               if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+                       signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+               } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
@@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
                else
                        reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        break;
                }
 
-               if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+               if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+                       signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+               } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
@@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
                else
                        reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                ++tries;
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                reg = DP | DP_LINK_TRAIN_OFF_CPT;
        else
                reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                udelay(100);
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
        } else {
@@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
        msleep(17);
 
        if (is_edp(intel_dp)) {
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        DP |= DP_LINK_TRAIN_OFF_CPT;
                else
                        DP |= DP_LINK_TRAIN_OFF;
index bd9a604..a1b4343 100644 (file)
 /* drm_display_mode->private_flags */
 #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
 #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
 
 static inline void
 intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
index 42f165a..e441911 100644 (file)
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Asus AT5NM10T-I",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+               },
+       },
 
        { }     /* terminating entry */
 };
index 21f60b7..04d79fd 100644 (file)
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                max >>= 16;
        } else {
-               if (IS_PINEVIEW(dev)) {
+               if (INTEL_INFO(dev)->gen < 4)
                        max >>= 17;
-               } else {
+               else
                        max >>= 16;
-                       if (INTEL_INFO(dev)->gen < 4)
-                               max &= ~1;
-               }
 
                if (is_backlight_combination_mode(dev))
                        max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
                val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
        } else {
                val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-               if (IS_PINEVIEW(dev))
+               if (INTEL_INFO(dev)->gen < 4)
                        val >>= 1;
 
                if (is_backlight_combination_mode(dev)) {
                        u8 lbpc;
 
-                       val &= ~1;
                        pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
                        val *= lbpc;
                }
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
        }
 
        tmp = I915_READ(BLC_PWM_CTL);
-       if (IS_PINEVIEW(dev)) {
-               tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+       if (INTEL_INFO(dev)->gen < 4)
                level <<= 1;
-       } else
-               tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+       tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
        I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
 
index 3003fb2..f7b9268 100644 (file)
@@ -50,6 +50,7 @@
 #define IS_TMDS(c)     (c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)     (c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
 
 
 static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                }
                sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
        }
-       if (intel_crtc->pipe == 1)
-               sdvox |= SDVO_PIPE_B_SELECT;
+
+       if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+               sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+       else
+               sdvox |= TRANSCODER(intel_crtc->pipe);
+
        if (intel_sdvo->has_hdmi_audio)
                sdvox |= SDVO_AUDIO_ENABLE;
 
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
        return status;
 }
 
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+                                 struct edid *edid)
+{
+       bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+       bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+       DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+                     connector_is_digital, monitor_is_digital);
+       return connector_is_digital == monitor_is_digital;
+}
+
 static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
                if (edid == NULL)
                        edid = intel_sdvo_get_analog_edid(connector);
                if (edid != NULL) {
-                       if (edid->input & DRM_EDID_INPUT_DIGITAL)
-                               ret = connector_status_disconnected;
-                       else
+                       if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+                                                             edid))
                                ret = connector_status_connected;
+                       else
+                               ret = connector_status_disconnected;
+
                        connector->display_info.raw_edid = NULL;
                        kfree(edid);
                } else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
                edid = intel_sdvo_get_analog_edid(connector);
 
        if (edid != NULL) {
-               struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-               bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
-               bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
-               if (connector_is_digital == monitor_is_digital) {
+               if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+                                                     edid)) {
                        drm_mode_connector_update_edid_property(connector, edid);
                        drm_add_edid_modes(connector, edid);
                }
index 06e413e..4b27efa 100644 (file)
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_TRAVIS:
                case ENCODER_OBJECT_ID_NUTMEG:
-                       return true;
+                       return radeon_encoder->encoder_id;
                default:
-                       return false;
+                       return ENCODER_OBJECT_ID_NONE;
                }
        }
-
-       return false;
+       return ENCODER_OBJECT_ID_NONE;
 }
 
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
index 8cca91a..dc27970 100644 (file)
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             int id,
                             struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+                                 struct ttm_object_file *tfile,
+                                 uint32_t handle,
+                                 struct vmw_surface **out_surf,
+                                 struct vmw_dma_buffer **out_buf);
 extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_init(struct vmw_private *dev_priv,
                            struct vmw_surface *srf,
index 03bbc2a..a0c2f12 100644 (file)
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;
+       const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;
 
-       hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+       hwversion = ioread32(fifo_mem +
+                            ((fifo->capabilities &
+                              SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+                             SVGA_FIFO_3D_HWVERSION_REVISED :
+                             SVGA_FIFO_3D_HWVERSION));
+
        if (hwversion == 0)
                return false;
 
index 3f63435..66917c6 100644 (file)
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
        case DRM_VMW_PARAM_FIFO_HW_VERSION:
        {
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
-               param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+               const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+               param->value =
+                       ioread32(fifo_mem +
+                                ((fifo->capabilities &
+                                  SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+                                 SVGA_FIFO_3D_HWVERSION_REVISED :
+                                 SVGA_FIFO_3D_HWVERSION));
                break;
        }
        default:
@@ -140,7 +146,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
                goto out_clips;
        }
 
-       clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+       clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
                ret = -EINVAL;
                goto out_no_fb;
        }
-
        vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
-       if (!vfb->dmabuf) {
-               DRM_ERROR("Framebuffer not dmabuf backed.\n");
-               ret = -EINVAL;
-               goto out_no_fb;
-       }
 
        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                goto out_clips;
        }
 
-       clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+       clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
index 37d4054..8aa1dbb 100644 (file)
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
+
+struct vmw_clip_rect {
+       int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects rects in @rects against @clip, storing the
+ * clipped rects in @out_rects and their count in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+                       int num_rects,
+                       struct vmw_clip_rect clip,
+                       SVGASignedRect *out_rects,
+                       int *out_num)
+{
+       int i, k;
+
+       for (i = 0, k = 0; i < num_rects; i++) {
+               int x1 = max_t(int, clip.x1, rects[i].x1);
+               int y1 = max_t(int, clip.y1, rects[i].y1);
+               int x2 = min_t(int, clip.x2, rects[i].x2);
+               int y2 = min_t(int, clip.y2, rects[i].y2);
+
+               if (x1 >= x2)
+                       continue;
+               if (y1 >= y2)
+                       continue;
+
+               out_rects[k].left   = x1;
+               out_rects[k].top    = y1;
+               out_rects[k].right  = x2;
+               out_rects[k].bottom = y2;
+               k++;
+       }
+
+       *out_num = k;
+}
+
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
        if (du->cursor_surface)
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
        return 0;
 }
 
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+                            struct vmw_dma_buffer *dmabuf,
+                            u32 width, u32 height,
+                            u32 hotspotX, u32 hotspotY)
+{
+       struct ttm_bo_kmap_obj map;
+       unsigned long kmap_offset;
+       unsigned long kmap_num;
+       void *virtual;
+       bool dummy;
+       int ret;
+
+       kmap_offset = 0;
+       kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("reserve failed\n");
+               return -EINVAL;
+       }
+
+       ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+       if (unlikely(ret != 0))
+               goto err_unreserve;
+
+       virtual = ttm_kmap_obj_virtual(&map, &dummy);
+       ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+                                     hotspotX, hotspotY);
+
+       ttm_bo_kunmap(&map);
+err_unreserve:
+       ttm_bo_unreserve(&dmabuf->base);
+
+       return ret;
+}
+
+
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y)
 {
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                return -EINVAL;
 
        if (handle) {
-               ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-                                                    handle, &surface);
-               if (!ret) {
-                       if (!surface->snooper.image) {
-                               DRM_ERROR("surface not suitable for cursor\n");
-                               vmw_surface_unreference(&surface);
-                               return -EINVAL;
-                       }
-               } else {
-                       ret = vmw_user_dmabuf_lookup(tfile,
-                                                    handle, &dmabuf);
-                       if (ret) {
-                               DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
-                               return -EINVAL;
-                       }
+               ret = vmw_user_lookup_handle(dev_priv, tfile,
+                                            handle, &surface, &dmabuf);
+               if (ret) {
+                       DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+                       return -EINVAL;
                }
        }
 
+       /* need to do this before taking down old image */
+       if (surface && !surface->snooper.image) {
+               DRM_ERROR("surface not suitable for cursor\n");
+               vmw_surface_unreference(&surface);
+               return -EINVAL;
+       }
+
        /* takedown old cursor */
        if (du->cursor_surface) {
                du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                vmw_cursor_update_image(dev_priv, surface->snooper.image,
                                        64, 64, du->hotspot_x, du->hotspot_y);
        } else if (dmabuf) {
-               struct ttm_bo_kmap_obj map;
-               unsigned long kmap_offset;
-               unsigned long kmap_num;
-               void *virtual;
-               bool dummy;
-
                /* vmw_user_surface_lookup takes one reference */
                du->cursor_dmabuf = dmabuf;
 
-               kmap_offset = 0;
-               kmap_num = (64*64*4) >> PAGE_SHIFT;
-
-               ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("reserve failed\n");
-                       return -EINVAL;
-               }
-
-               ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
-               if (unlikely(ret != 0))
-                       goto err_unreserve;
-
-               virtual = ttm_kmap_obj_virtual(&map, &dummy);
-               vmw_cursor_update_image(dev_priv, virtual, 64, 64,
-                                       du->hotspot_x, du->hotspot_y);
-
-               ttm_bo_kunmap(&map);
-err_unreserve:
-               ttm_bo_unreserve(&dmabuf->base);
-
+               ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+                                              du->hotspot_x, du->hotspot_y);
        } else {
                vmw_cursor_update_position(dev_priv, false, 0, 0);
                return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
                                struct drm_clip_rect *clips,
                                unsigned num_clips, int inc)
 {
-       struct drm_clip_rect *clips_ptr;
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+       struct drm_clip_rect *clips_ptr;
+       struct drm_clip_rect *tmp;
        struct drm_crtc *crtc;
        size_t fifo_size;
        int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
        } *cmd;
        SVGASignedRect *blits;
 
-
        num_units = 0;
        list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
                            head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 
        BUG_ON(!clips || !num_clips);
 
+       tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+       if (unlikely(tmp == NULL)) {
+               DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+               return -ENOMEM;
+       }
+
        fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
        cmd = kzalloc(fifo_size, GFP_KERNEL);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Temporary fifo memory alloc failed.\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free_tmp;
        }
 
+       /* setup blits pointer */
+       blits = (SVGASignedRect *)&cmd[1];
+
+       /* initial clip region */
        left = clips->x1;
        right = clips->x2;
        top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
        cmd->body.srcRect.bottom = bottom;
 
        clips_ptr = clips;
-       blits = (SVGASignedRect *)&cmd[1];
        for (i = 0; i < num_clips; i++, clips_ptr += inc) {
-               blits[i].left   = clips_ptr->x1 - left;
-               blits[i].right  = clips_ptr->x2 - left;
-               blits[i].top    = clips_ptr->y1 - top;
-               blits[i].bottom = clips_ptr->y2 - top;
+               tmp[i].x1 = clips_ptr->x1 - left;
+               tmp[i].x2 = clips_ptr->x2 - left;
+               tmp[i].y1 = clips_ptr->y1 - top;
+               tmp[i].y2 = clips_ptr->y2 - top;
        }
 
        /* do per unit writing, reuse fifo for each */
        for (i = 0; i < num_units; i++) {
                struct vmw_display_unit *unit = units[i];
-               int clip_x1 = left - unit->crtc.x;
-               int clip_y1 = top - unit->crtc.y;
-               int clip_x2 = right - unit->crtc.x;
-               int clip_y2 = bottom - unit->crtc.y;
+               struct vmw_clip_rect clip;
+               int num;
+
+               clip.x1 = left - unit->crtc.x;
+               clip.y1 = top - unit->crtc.y;
+               clip.x2 = right - unit->crtc.x;
+               clip.y2 = bottom - unit->crtc.y;
 
                /* skip any crtcs that misses the clip region */
-               if (clip_x1 >= unit->crtc.mode.hdisplay ||
-                   clip_y1 >= unit->crtc.mode.vdisplay ||
-                   clip_x2 <= 0 || clip_y2 <= 0)
+               if (clip.x1 >= unit->crtc.mode.hdisplay ||
+                   clip.y1 >= unit->crtc.mode.vdisplay ||
+                   clip.x2 <= 0 || clip.y2 <= 0)
                        continue;
 
+               /*
+                * In order for the clip rects to be correctly scaled
+                * the src and dest rects need to be the same size.
+                */
+               cmd->body.destRect.left = clip.x1;
+               cmd->body.destRect.right = clip.x2;
+               cmd->body.destRect.top = clip.y1;
+               cmd->body.destRect.bottom = clip.y2;
+
+               /* create a clip rect of the crtc in dest coords */
+               clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+               clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+               clip.x1 = 0 - clip.x1;
+               clip.y1 = 0 - clip.y1;
+
                /* need to reset sid as it is changed by execbuf */
                cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
                cmd->body.destScreenId = unit->unit;
 
-               /*
-                * The blit command is a lot more resilient then the
-                * readback command when it comes to clip rects. So its
-                * okay to go out of bounds.
-                */
+               /* clip and write blits to cmd stream */
+               vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-               cmd->body.destRect.left = clip_x1;
-               cmd->body.destRect.right = clip_x2;
-               cmd->body.destRect.top = clip_y1;
-               cmd->body.destRect.bottom = clip_y2;
+               /* if no cliprects hit skip this */
+               if (num == 0)
+                       continue;
 
 
+               /* recalculate package length */
+               fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+               cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
                ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
                                          fifo_size, 0, NULL);
 
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
                        break;
        }
 
+
        kfree(cmd);
+out_free_tmp:
+       kfree(tmp);
 
        return ret;
 }
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
         * Sanity checks.
         */
 
+       /* Surface must be marked as a scanout. */
+       if (unlikely(!surface->scanout))
+               return -EINVAL;
+
        if (unlikely(surface->mip_levels[0] != 1 ||
                     surface->num_sizes != 1 ||
                     surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
                        int clip_y1 = clips_ptr->y1 - unit->crtc.y;
                        int clip_x2 = clips_ptr->x2 - unit->crtc.x;
                        int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+                       int move_x, move_y;
 
                        /* skip any crtcs that misses the clip region */
                        if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
                            clip_x2 <= 0 || clip_y2 <= 0)
                                continue;
 
+                       /* clip size to crtc size */
+                       clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+                       clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+                       /* translate both src and dest to bring clip into screen */
+                       move_x = min_t(int, clip_x1, 0);
+                       move_y = min_t(int, clip_y1, 0);
+
+                       /* actual translate done here */
                        blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
                        blits[hit_num].body.destScreenId = unit->unit;
-                       blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
-                       blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
-                       blits[hit_num].body.destRect.left = clip_x1;
-                       blits[hit_num].body.destRect.top = clip_y1;
+                       blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+                       blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+                       blits[hit_num].body.destRect.left = clip_x1 - move_x;
+                       blits[hit_num].body.destRect.top = clip_y1 - move_y;
                        blits[hit_num].body.destRect.right = clip_x2;
                        blits[hit_num].body.destRect.bottom = clip_y2;
                        hit_num++;
@@ -1033,46 +1123,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
                return ERR_PTR(-ENOENT);
        }
 
-       /**
-        * End conditioned code.
-        */
-
-       ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-                                            mode_cmd->handle, &surface);
+       /* returns either a dmabuf or surface */
+       ret = vmw_user_lookup_handle(dev_priv, tfile,
+                                    mode_cmd->handle,
+                                    &surface, &bo);
        if (ret)
-               goto try_dmabuf;
-
-       if (!surface->scanout)
-               goto err_not_scanout;
-
-       ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
-                                             &vfb, mode_cmd);
-
-       /* vmw_user_surface_lookup takes one ref so does new_fb */
-       vmw_surface_unreference(&surface);
-
-       if (ret) {
-               DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
-               ttm_base_object_unref(&user_obj);
-               return ERR_PTR(ret);
-       } else
-               vfb->user_obj = user_obj;
-       return &vfb->base;
-
-try_dmabuf:
-       DRM_INFO("%s: trying buffer\n", __func__);
-
-       ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
-       if (ret) {
-               DRM_ERROR("failed to find buffer: %i\n", ret);
-               return ERR_PTR(-ENOENT);
-       }
-
-       ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-                                            mode_cmd);
+               goto err_out;
+
+       /* Create the new framebuffer depending on what we got back */
+       if (bo)
+               ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+                                                    mode_cmd);
+       else if (surface)
+               ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+                                                     surface, &vfb, mode_cmd);
+       else
+               BUG();
 
-       /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
-       vmw_dmabuf_unreference(&bo);
+err_out:
+       /* vmw_user_lookup_handle takes one ref so does new_fb */
+       if (bo)
+               vmw_dmabuf_unreference(&bo);
+       if (surface)
+               vmw_surface_unreference(&surface);
 
        if (ret) {
                DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1155,6 @@ try_dmabuf:
                vfb->user_obj = user_obj;
 
        return &vfb->base;
-
-err_not_scanout:
-       DRM_ERROR("surface not marked as scanout\n");
-       /* vmw_user_surface_lookup takes one ref */
-       vmw_surface_unreference(&surface);
-       ttm_base_object_unref(&user_obj);
-
-       return ERR_PTR(-EINVAL);
 }
 
 static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1171,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
                    uint32_t num_clips)
 {
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+       struct drm_clip_rect *tmp;
        struct drm_crtc *crtc;
        size_t fifo_size;
        int i, k, num_units;
        int ret = 0; /* silence warning */
+       int left, right, top, bottom;
 
        struct {
                SVGA3dCmdHeader header;
@@ -1127,60 +1194,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
        BUG_ON(surface == NULL);
        BUG_ON(!clips || !num_clips);
 
+       tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+       if (unlikely(tmp == NULL)) {
+               DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+               return -ENOMEM;
+       }
+
        fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
        cmd = kmalloc(fifo_size, GFP_KERNEL);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free_tmp;
+       }
+
+       left = clips->x;
+       right = clips->x + clips->w;
+       top = clips->y;
+       bottom = clips->y + clips->h;
+
+       for (i = 1; i < num_clips; i++) {
+               left = min_t(int, left, (int)clips[i].x);
+               right = max_t(int, right, (int)clips[i].x + clips[i].w);
+               top = min_t(int, top, (int)clips[i].y);
+               bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
        }
 
        /* only need to do this once */
        memset(cmd, 0, fifo_size);
        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-       cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
-       cmd->body.srcRect.left = 0;
-       cmd->body.srcRect.right = surface->sizes[0].width;
-       cmd->body.srcRect.top = 0;
-       cmd->body.srcRect.bottom = surface->sizes[0].height;
 
        blits = (SVGASignedRect *)&cmd[1];
+
+       cmd->body.srcRect.left = left;
+       cmd->body.srcRect.right = right;
+       cmd->body.srcRect.top = top;
+       cmd->body.srcRect.bottom = bottom;
+
        for (i = 0; i < num_clips; i++) {
-               blits[i].left   = clips[i].x;
-               blits[i].right  = clips[i].x + clips[i].w;
-               blits[i].top    = clips[i].y;
-               blits[i].bottom = clips[i].y + clips[i].h;
+               tmp[i].x1 = clips[i].x - left;
+               tmp[i].x2 = clips[i].x + clips[i].w - left;
+               tmp[i].y1 = clips[i].y - top;
+               tmp[i].y2 = clips[i].y + clips[i].h - top;
        }
 
        for (k = 0; k < num_units; k++) {
                struct vmw_display_unit *unit = units[k];
-               int clip_x1 = destX - unit->crtc.x;
-               int clip_y1 = destY - unit->crtc.y;
-               int clip_x2 = clip_x1 + surface->sizes[0].width;
-               int clip_y2 = clip_y1 + surface->sizes[0].height;
+               struct vmw_clip_rect clip;
+               int num;
+
+               clip.x1 = left + destX - unit->crtc.x;
+               clip.y1 = top + destY - unit->crtc.y;
+               clip.x2 = right + destX - unit->crtc.x;
+               clip.y2 = bottom + destY - unit->crtc.y;
 
                /* skip any crtcs that misses the clip region */
-               if (clip_x1 >= unit->crtc.mode.hdisplay ||
-                   clip_y1 >= unit->crtc.mode.vdisplay ||
-                   clip_x2 <= 0 || clip_y2 <= 0)
+               if (clip.x1 >= unit->crtc.mode.hdisplay ||
+                   clip.y1 >= unit->crtc.mode.vdisplay ||
+                   clip.x2 <= 0 || clip.y2 <= 0)
                        continue;
 
+               /*
+                * In order for the clip rects to be correctly scaled
+                * the src and dest rects need to be the same size.
+                */
+               cmd->body.destRect.left = clip.x1;
+               cmd->body.destRect.right = clip.x2;
+               cmd->body.destRect.top = clip.y1;
+               cmd->body.destRect.bottom = clip.y2;
+
+               /* create a clip rect of the crtc in dest coords */
+               clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+               clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+               clip.x1 = 0 - clip.x1;
+               clip.y1 = 0 - clip.y1;
+
                /* need to reset sid as it is changed by execbuf */
                cmd->body.srcImage.sid = sid;
-
                cmd->body.destScreenId = unit->unit;
 
-               /*
-                * The blit command is a lot more resilient then the
-                * readback command when it comes to clip rects. So its
-                * okay to go out of bounds.
-                */
+               /* clip and write blits to cmd stream */
+               vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-               cmd->body.destRect.left = clip_x1;
-               cmd->body.destRect.right = clip_x2;
-               cmd->body.destRect.top = clip_y1;
-               cmd->body.destRect.bottom = clip_y2;
+               /* if no cliprects hit skip this */
+               if (num == 0)
+                       continue;
 
+               /* recalculate package length */
+               fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+               cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
                ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
                                          fifo_size, 0, NULL);
 
@@ -1189,6 +1291,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
        }
 
        kfree(cmd);
+out_free_tmp:
+       kfree(tmp);
 
        return ret;
 }
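
The vmw_clip_cliprects() helper added above intersects each dirty rect with a per-unit clip rect expressed in destination coordinates and drops rects that end up empty, so the callers only emit blits for units that are actually hit. A minimal standalone sketch of that clipping step (illustrative only; the rect type and the main() driver are hypothetical stand-ins, not part of the driver):

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Intersect each input rect with clip; keep only the non-empty results. */
static int clip_rects(const struct rect *in, int num,
                      struct rect clip, struct rect *out)
{
        int i, k = 0;

        for (i = 0; i < num; i++) {
                struct rect r;

                r.x1 = in[i].x1 > clip.x1 ? in[i].x1 : clip.x1;
                r.y1 = in[i].y1 > clip.y1 ? in[i].y1 : clip.y1;
                r.x2 = in[i].x2 < clip.x2 ? in[i].x2 : clip.x2;
                r.y2 = in[i].y2 < clip.y2 ? in[i].y2 : clip.y2;

                if (r.x1 >= r.x2 || r.y1 >= r.y2)
                        continue;       /* fully clipped away, skip it */
                out[k++] = r;
        }
        return k;                       /* number of surviving rects */
}

int main(void)
{
        /* One rect hanging off the left edge of a 100x100 unit, one fully outside it. */
        struct rect in[2] = { { -10, 10, 30, 40 }, { 120, 0, 150, 20 } };
        struct rect out[2];
        struct rect unit = { 0, 0, 100, 100 };
        int n = clip_rects(in, 2, unit, out);

        printf("%d rect(s) survive; first: %d,%d-%d,%d\n",
               n, out[0].x1, out[0].y1, out[0].x2, out[0].y2);
        return 0;
}

This prints "1 rect(s) survive; first: 0,10-30,40", mirroring how the driver skips units whose clip region is never hit.
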
index af8e6e5..e1cb855 100644 (file)
@@ -62,9 +62,14 @@ struct vmw_framebuffer {
 int vmw_cursor_update_image(struct vmw_private *dev_priv,
                            u32 *image, u32 width, u32 height,
                            u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+                            struct vmw_dma_buffer *dmabuf,
+                            u32 width, u32 height,
+                            u32 hotspotX, u32 hotspotY);
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y);
 
+
 /**
  * Base class display unit.
  *
index 90c5e39..8f8dbd4 100644 (file)
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 {
        struct vmw_legacy_display *lds = dev_priv->ldu_priv;
        struct vmw_legacy_display_unit *entry;
+       struct vmw_display_unit *du = NULL;
        struct drm_framebuffer *fb = NULL;
        struct drm_crtc *crtc = NULL;
-       int i = 0;
+       int i = 0, ret;
 
        /* If there is no display topology the host just assumes
         * that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 
        lds->last_num_active = lds->num_active;
 
+
+       /* Find the first du with a cursor. */
+       list_for_each_entry(entry, &lds->active, active) {
+               du = &entry->base;
+
+               if (!du->cursor_dmabuf)
+                       continue;
+
+               ret = vmw_cursor_update_dmabuf(dev_priv,
+                                              du->cursor_dmabuf,
+                                              64, 64,
+                                              du->hotspot_x,
+                                              du->hotspot_y);
+               if (ret == 0)
+                       break;
+
+               DRM_ERROR("Could not update cursor image\n");
+       }
+
        return 0;
 }
 
index 86c5e4c..1c7f09e 100644 (file)
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
                write_unlock(lock);
 }
 
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed to by out_surf and out_buf need to be NULL on entry.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+                          struct ttm_object_file *tfile,
+                          uint32_t handle,
+                          struct vmw_surface **out_surf,
+                          struct vmw_dma_buffer **out_buf)
+{
+       int ret;
+
+       BUG_ON(*out_surf || *out_buf);
+
+       ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+       if (!ret)
+               return 0;
+
+       ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+       return ret;
+}
+
 
 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                   struct ttm_object_file *tfile,
index 7a48b1e..5253d23 100644 (file)
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
 {
        struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
        struct completion *completion = &hwmon->read_completion;
-       unsigned long t;
+       long t;
        unsigned long val;
        int ret;
 
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
        .probe  = jz4740_hwmon_probe,
        .remove = __devexit_p(jz4740_hwmon_remove),
        .driver = {
index 8cebef4..18936ac 100644 (file)
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
        /* Set the number of I2C channel instance */
        adap_info->ch_num = id->driver_data;
 
+       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+                 KBUILD_MODNAME, adap_info);
+       if (ret) {
+               pch_pci_err(pdev, "request_irq FAILED\n");
+               goto err_request_irq;
+       }
+
        for (i = 0; i < adap_info->ch_num; i++) {
                pch_adap = &adap_info->pch_data[i].pch_adapter;
                adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
 
                pch_adap->dev.parent = &pdev->dev;
 
+               pch_i2c_init(&adap_info->pch_data[i]);
                ret = i2c_add_adapter(pch_adap);
                if (ret) {
                        pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
-                       goto err_i2c_add_adapter;
+                       goto err_add_adapter;
                }
-
-               pch_i2c_init(&adap_info->pch_data[i]);
-       }
-       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-                 KBUILD_MODNAME, adap_info);
-       if (ret) {
-               pch_pci_err(pdev, "request_irq FAILED\n");
-               goto err_i2c_add_adapter;
        }
 
        pci_set_drvdata(pdev, adap_info);
        pch_pci_dbg(pdev, "returns %d.\n", ret);
        return 0;
 
-err_i2c_add_adapter:
+err_add_adapter:
        for (j = 0; j < i; j++)
                i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+       free_irq(pdev->irq, adap_info);
+err_request_irq:
        pci_iounmap(pdev, base_addr);
 err_pci_iomap:
        pci_release_regions(pdev);
index a43d002..fa23faa 100644 (file)
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
                 * size. This is to ensure that we can handle the status on int
                 * call back latencies.
                 */
-               if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
-                       dev->fifo_size = 0;
+
+               dev->fifo_size = (dev->fifo_size / 2);
+
+               if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
                        dev->b_hw = 0; /* Disable hardware fixes */
-               } else {
-                       dev->fifo_size = (dev->fifo_size / 2);
+               else
                        dev->b_hw = 1; /* Enable hardware fixes */
-               }
+
                /* calculate wakeup latency constraint for MPU */
                if (dev->set_mpu_wkup_lat != NULL)
                        dev->latency = (1000000 * dev->fifo_size) /
index 2754cef..4c17180 100644 (file)
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
 
        /* first, try busy waiting briefly */
        do {
+               cpu_relax();
                iicstat = readl(i2c->regs + S3C2410_IICSTAT);
        } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
 
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
 #else
 static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
 {
-       return -EINVAL;
+       return 0;
 }
 
 static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
index 75ff821..d0d4aa9 100644 (file)
@@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 
        req.private_data_len = sizeof(struct cma_hdr) +
                               conn_param->private_data_len;
+       if (req.private_data_len < conn_param->private_data_len)
+               return -EINVAL;
+
        req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
        if (!req.private_data)
                return -ENOMEM;
@@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
        memset(&req, 0, sizeof req);
        offset = cma_user_data_offset(id_priv->id.ps);
        req.private_data_len = offset + conn_param->private_data_len;
+       if (req.private_data_len < conn_param->private_data_len)
+               return -EINVAL;
+
        private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
        if (!private_data)
                return -ENOMEM;
index 77f3dbc..18836cd 100644 (file)
@@ -1244,7 +1244,8 @@ err_reg:
 
 err_counter:
        for (; i; --i)
-               mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+               if (ibdev->counters[i - 1] != -1)
+                       mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
 
 err_map:
        iounmap(ibdev->uar_map);
@@ -1275,7 +1276,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        }
        iounmap(ibdev->uar_map);
        for (p = 0; p < ibdev->num_ports; ++p)
-               mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+               if (ibdev->counters[p] != -1)
+                       mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
        mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
                mlx4_CLOSE_PORT(dev, p);
 
index 574600e..a740324 100644 (file)
@@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
        strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
        ctxt_fp(fp) = rcd;
        qib_stats.sps_ctxts++;
-       dd->freectxts++;
+       dd->freectxts--;
        ret = 0;
        goto bail;
 
@@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
                if (dd->pageshadow)
                        unlock_expected_tids(rcd);
                qib_stats.sps_ctxts--;
-               dd->freectxts--;
+               dd->freectxts++;
        }
 
        mutex_unlock(&qib_mutex);
index 80793f1..06517e6 100644 (file)
@@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
 static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
 {
        struct cma3000_accl_data *data = dev_id;
-       int datax, datay, dataz;
-       u8 ctrl, mode, range, intr_status;
+       int datax, datay, dataz, intr_status;
+       u8 ctrl, mode, range;
 
        intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
        if (intr_status < 0)
index c080b82..a6dcd18 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/input/mt.h>
 #include <linux/serio.h>
@@ -1220,6 +1221,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
 
        do {
                psmouse_reset(psmouse);
+               if (retry) {
+                       /*
+                        * On some boxes, right after resuming, the touchpad
+                        * needs some time to finish initializing (I assume
+                        * it needs time to calibrate) and start responding
+                        * to Synaptics-specific queries, so let's wait a
+                        * bit.
+                        */
+                       ssleep(1);
+               }
                error = synaptics_detect(psmouse, 0);
        } while (error && ++retry < 3);
 
index da0d876..2ee47d0 100644 (file)
@@ -1470,6 +1470,9 @@ static const struct wacom_features wacom_features_0xE3 =
 static const struct wacom_features wacom_features_0xE6 =
        { "Wacom ISDv4 E6",       WACOM_PKGLEN_TPC2FG,    27760, 15694,  255,
          0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+       { "Wacom ISDv4 EC",       WACOM_PKGLEN_GRAPHIRE,  25710, 14500,  255,
+         0, TABLETPC,    WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x47 =
        { "Wacom Intuos2 6x8",    WACOM_PKGLEN_INTUOS,    20320, 16240, 1023,
          31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1611,6 +1614,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0xE2) },
        { USB_DEVICE_WACOM(0xE3) },
        { USB_DEVICE_WACOM(0xE6) },
+       { USB_DEVICE_WACOM(0xEC) },
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_LENOVO(0x6004) },
        { }
index a004c39..bdc447f 100644 (file)
@@ -405,6 +405,9 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
 
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
 static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -3647,6 +3650,8 @@ int __init intel_iommu_init(void)
 
        bus_register_notifier(&pci_bus_type, &device_nb);
 
+       intel_iommu_enabled = 1;
+
        return 0;
 }
 
index 7878712..6d03774 100644 (file)
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
         */
        int i;
 
+       spin_lock_irq(&bitmap->lock);
        for (i = 0; i < bitmap->file_pages; i++)
                set_page_attr(bitmap, bitmap->filemap[i],
                              BITMAP_PAGE_NEEDWRITE);
        bitmap->allclean = 0;
+       spin_unlock_irq(&bitmap->lock);
 }
 
 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1391,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                         atomic_read(&bitmap->behind_writes),
                         bitmap->mddev->bitmap_info.max_write_behind);
        }
-       if (bitmap->mddev->degraded)
-               /* Never clear bits or update events_cleared when degraded */
-               success = 0;
 
        while (sectors) {
                sector_t blocks;
@@ -1407,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                        return;
                }
 
-               if (success &&
+               if (success && !bitmap->mddev->degraded &&
                    bitmap->events_cleared < bitmap->mddev->events) {
                        bitmap->events_cleared = bitmap->mddev->events;
                        bitmap->need_sync = 1;
@@ -1605,7 +1604,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
        for (chunk = s; chunk <= e; chunk++) {
                sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
                bitmap_set_memory_bits(bitmap, sec, 1);
+               spin_lock_irq(&bitmap->lock);
                bitmap_file_set_bit(bitmap, sec);
+               spin_unlock_irq(&bitmap->lock);
                if (sec < bitmap->mddev->recovery_cp)
                        /* We are asserting that the array is dirty,
                         * so move the recovery_cp address back so
index c3273ef..6274565 100644 (file)
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
                return -EINVAL;
 
        rdev->raid_disk = rdev->saved_raid_disk;
+       rdev->saved_raid_disk = -1;
 
        newconf = linear_conf(mddev,mddev->raid_disks+1);
 
index 84acfe7..f47f1f8 100644 (file)
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
-               list_del(&mddev->all_mddevs);
+               list_del_init(&mddev->all_mddevs);
                bs = mddev->bio_set;
                mddev->bio_set = NULL;
                if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
                sep = ",";
        }
        if (test_bit(Blocked, &rdev->flags) ||
-           rdev->badblocks.unacked_exist) {
+           (rdev->badblocks.unacked_exist
+            && !test_bit(Faulty, &rdev->flags))) {
                len += sprintf(page+len, "%sblocked", sep);
                sep = ",";
        }
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        if (err)
                return err;
        else {
+               if (mddev->hold_active == UNTIL_IOCTL)
+                       mddev->hold_active = 0;
                sysfs_notify_dirent_safe(mddev->sysfs_state);
                return len;
        }
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 
        if (!entry->show)
                return -EIO;
+       spin_lock(&all_mddevs_lock);
+       if (list_empty(&mddev->all_mddevs)) {
+               spin_unlock(&all_mddevs_lock);
+               return -EBUSY;
+       }
+       mddev_get(mddev);
+       spin_unlock(&all_mddevs_lock);
+
        rv = mddev_lock(mddev);
        if (!rv) {
                rv = entry->show(mddev, page);
                mddev_unlock(mddev);
        }
+       mddev_put(mddev);
        return rv;
 }
 
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
                return -EIO;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
+       spin_lock(&all_mddevs_lock);
+       if (list_empty(&mddev->all_mddevs)) {
+               spin_unlock(&all_mddevs_lock);
+               return -EBUSY;
+       }
+       mddev_get(mddev);
+       spin_unlock(&all_mddevs_lock);
        rv = mddev_lock(mddev);
-       if (mddev->hold_active == UNTIL_IOCTL)
-               mddev->hold_active = 0;
        if (!rv) {
                rv = entry->store(mddev, page, length);
                mddev_unlock(mddev);
        }
+       mddev_put(mddev);
        return rv;
 }
 
@@ -7342,8 +7360,7 @@ static int remove_and_add_spares(struct mddev *mddev)
                                        spares++;
                                        md_new_event(mddev);
                                        set_bit(MD_CHANGE_DEVS, &mddev->flags);
-                               } else
-                                       break;
+                               }
                        }
                }
        }
@@ -7840,6 +7857,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                                  s + rdev->data_offset, sectors, acknowledged);
        if (rv) {
                /* Make sure they get written out promptly */
+               sysfs_notify_dirent_safe(rdev->sysfs_state);
                set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
                md_wakeup_thread(rdev->mddev->thread);
        }
index 297e260..858fdbb 100644 (file)
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                if (dev->written)
                        s->written++;
                rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && test_bit(Faulty, &rdev->flags))
+                       rdev = NULL;
                if (rdev) {
                        is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
                                             &first_bad, &bad_sectors);
@@ -3063,12 +3065,18 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        }
                } else if (test_bit(In_sync, &rdev->flags))
                        set_bit(R5_Insync, &dev->flags);
-               else if (!test_bit(Faulty, &rdev->flags)) {
+               else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
                        /* in sync if before recovery_offset */
-                       if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
-                               set_bit(R5_Insync, &dev->flags);
-               }
-               if (test_bit(R5_WriteError, &dev->flags)) {
+                       set_bit(R5_Insync, &dev->flags);
+               else if (test_bit(R5_UPTODATE, &dev->flags) &&
+                        test_bit(R5_Expanded, &dev->flags))
+                       /* If we've reshaped into here, we assume it is Insync.
+                        * We will shortly update recovery_offset to make
+                        * it official.
+                        */
+                       set_bit(R5_Insync, &dev->flags);
+
+               if (rdev && test_bit(R5_WriteError, &dev->flags)) {
                        clear_bit(R5_Insync, &dev->flags);
                        if (!test_bit(Faulty, &rdev->flags)) {
                                s->handle_bad_blocks = 1;
@@ -3076,7 +3084,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        } else
                                clear_bit(R5_WriteError, &dev->flags);
                }
-               if (test_bit(R5_MadeGood, &dev->flags)) {
+               if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
                        if (!test_bit(Faulty, &rdev->flags)) {
                                s->handle_bad_blocks = 1;
                                atomic_inc(&rdev->nr_pending);
index 7eb1bf7..5d02221 100644 (file)
@@ -488,9 +488,10 @@ static int mxl5007t_write_regs(struct mxl5007t_state *state,
 
 static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
 {
+       u8 buf[2] = { 0xfb, reg };
        struct i2c_msg msg[] = {
                { .addr = state->i2c_props.addr, .flags = 0,
-                 .buf = &reg, .len = 1 },
+                 .buf = buf, .len = 2 },
                { .addr = state->i2c_props.addr, .flags = I2C_M_RD,
                  .buf = val, .len = 1 },
        };
index aacfe23..4fc2973 100644 (file)
@@ -141,7 +141,7 @@ static int tda18218_set_params(struct dvb_frontend *fe,
        switch (params->u.ofdm.bandwidth) {
        case BANDWIDTH_6_MHZ:
                LP_Fc = 0;
-               LO_Frac = params->frequency + 4000000;
+               LO_Frac = params->frequency + 3000000;
                break;
        case BANDWIDTH_7_MHZ:
                LP_Fc = 1;
index 303f22e..01bb8da 100644 (file)
@@ -189,7 +189,7 @@ struct ati_remote {
        dma_addr_t inbuf_dma;
        dma_addr_t outbuf_dma;
 
-       unsigned char old_data[2];  /* Detect duplicate events */
+       unsigned char old_data;     /* Detect duplicate events */
        unsigned long old_jiffies;
        unsigned long acc_jiffies;  /* handle acceleration */
        unsigned long first_jiffies;
@@ -221,35 +221,35 @@ struct ati_remote {
 /* Translation table from hardware messages to input events. */
 static const struct {
        short kind;
-       unsigned char data1, data2;
+       unsigned char data;
        int type;
        unsigned int code;
        int value;
 }  ati_remote_tbl[] = {
        /* Directional control pad axes */
-       {KIND_ACCEL,   0x35, 0x70, EV_REL, REL_X, -1},   /* left */
-       {KIND_ACCEL,   0x36, 0x71, EV_REL, REL_X, 1},    /* right */
-       {KIND_ACCEL,   0x37, 0x72, EV_REL, REL_Y, -1},   /* up */
-       {KIND_ACCEL,   0x38, 0x73, EV_REL, REL_Y, 1},    /* down */
+       {KIND_ACCEL,   0x70, EV_REL, REL_X, -1},   /* left */
+       {KIND_ACCEL,   0x71, EV_REL, REL_X, 1},    /* right */
+       {KIND_ACCEL,   0x72, EV_REL, REL_Y, -1},   /* up */
+       {KIND_ACCEL,   0x73, EV_REL, REL_Y, 1},    /* down */
        /* Directional control pad diagonals */
-       {KIND_LU,      0x39, 0x74, EV_REL, 0, 0},        /* left up */
-       {KIND_RU,      0x3a, 0x75, EV_REL, 0, 0},        /* right up */
-       {KIND_LD,      0x3c, 0x77, EV_REL, 0, 0},        /* left down */
-       {KIND_RD,      0x3b, 0x76, EV_REL, 0, 0},        /* right down */
+       {KIND_LU,      0x74, EV_REL, 0, 0},        /* left up */
+       {KIND_RU,      0x75, EV_REL, 0, 0},        /* right up */
+       {KIND_LD,      0x77, EV_REL, 0, 0},        /* left down */
+       {KIND_RD,      0x76, EV_REL, 0, 0},        /* right down */
 
        /* "Mouse button" buttons */
-       {KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
-       {KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
-       {KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
-       {KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
+       {KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
+       {KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
+       {KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
+       {KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
 
        /* Artificial "doubleclick" events are generated by the hardware.
         * They are mapped to the "side" and "extra" mouse buttons here. */
-       {KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
-       {KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
+       {KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
+       {KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
 
        /* Non-mouse events are handled by rc-core */
-       {KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
+       {KIND_END, 0x00, EV_MAX + 1, 0, 0}
 };
 
 /* Local function prototypes */
@@ -396,25 +396,6 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
        return retval;
 }
 
-/*
- *     ati_remote_event_lookup
- */
-static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
-{
-       int i;
-
-       for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
-               /*
-                * Decide if the table entry matches the remote input.
-                */
-               if (ati_remote_tbl[i].data1 == d1 &&
-                   ati_remote_tbl[i].data2 == d2)
-                       return i;
-
-       }
-       return -1;
-}
-
 /*
  *     ati_remote_compute_accel
  *
@@ -463,7 +444,15 @@ static void ati_remote_input_report(struct urb *urb)
        int index = -1;
        int acc;
        int remote_num;
-       unsigned char scancode[2];
+       unsigned char scancode;
+       int i;
+
+       /*
+        * data[0] = 0x14
+        * data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
+        * data[2] = the key code (with toggle bit in MSB with some models)
+        * data[3] = channel << 4 (the low 4 bits must be zero)
+        */
 
        /* Deal with strange looking inputs */
        if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -472,6 +461,13 @@ static void ati_remote_input_report(struct urb *urb)
                return;
        }
 
+       if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
+               dbginfo(&ati_remote->interface->dev,
+                       "wrong checksum in input: %02x %02x %02x %02x\n",
+                       data[0], data[1], data[2], data[3]);
+               return;
+       }
+
        /* Mask unwanted remote channels.  */
        /* note: remote_num is 0-based, channel 1 on remote == 0 here */
        remote_num = (data[3] >> 4) & 0x0f;
@@ -482,31 +478,30 @@ static void ati_remote_input_report(struct urb *urb)
                return;
        }
 
-       scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
-
        /*
-        * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
-        * so we have to clear them. The first bit is a bit tricky as the
-        * "non-toggled" state depends on remote_num, so we xor it with the
-        * second bit which is only used for toggle.
+        * MSB is a toggle code, though only used by some devices
+        * (e.g. SnapStream Firefly)
         */
-       scancode[0] ^= (data[2] & 0x80);
-
-       scancode[1] = data[2] & ~0x80;
+       scancode = data[2] & 0x7f;
 
-       /* Look up event code index in mouse translation table. */
-       index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+       /* Look up event code index in the mouse translation table. */
+       for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+               if (scancode == ati_remote_tbl[i].data) {
+                       index = i;
+                       break;
+               }
+       }
 
        if (index >= 0) {
                dbginfo(&ati_remote->interface->dev,
-                       "channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
-                       remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+                       "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
+                       remote_num, data[2], index, ati_remote_tbl[index].code);
                if (!dev)
                        return; /* no mouse device */
        } else
                dbginfo(&ati_remote->interface->dev,
-                       "channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
-                       remote_num, data[1], data[2], scancode[0], scancode[1]);
+                       "channel 0x%02x; key data %02x, scancode %02x\n",
+                       remote_num, data[2], scancode);
 
 
        if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@@ -523,8 +518,7 @@ static void ati_remote_input_report(struct urb *urb)
                unsigned long now = jiffies;
 
                /* Filter duplicate events which happen "too close" together. */
-               if (ati_remote->old_data[0] == data[1] &&
-                   ati_remote->old_data[1] == data[2] &&
+               if (ati_remote->old_data == data[2] &&
                    time_before(now, ati_remote->old_jiffies +
                                     msecs_to_jiffies(repeat_filter))) {
                        ati_remote->repeat_count++;
@@ -533,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
                        ati_remote->first_jiffies = now;
                }
 
-               ati_remote->old_data[0] = data[1];
-               ati_remote->old_data[1] = data[2];
+               ati_remote->old_data = data[2];
                ati_remote->old_jiffies = now;
 
                /* Ensure we skip at least the 4 first duplicate events (generated
@@ -549,14 +542,13 @@ static void ati_remote_input_report(struct urb *urb)
 
                if (index < 0) {
                        /* Not a mouse event, hand it to rc-core. */
-                       u32 rc_code = (scancode[0] << 8) | scancode[1];
 
                        /*
                         * We don't use the rc-core repeat handling yet as
                         * it would cause ghost repeats which would be a
                         * regression for this driver.
                         */
-                       rc_keydown_notimeout(ati_remote->rdev, rc_code,
+                       rc_keydown_notimeout(ati_remote->rdev, scancode,
                                             data[2]);
                        rc_keyup(ati_remote->rdev);
                        return;
@@ -607,8 +599,7 @@ static void ati_remote_input_report(struct urb *urb)
                input_sync(dev);
 
                ati_remote->old_jiffies = jiffies;
-               ati_remote->old_data[0] = data[1];
-               ati_remote->old_data[1] = data[2];
+               ati_remote->old_data = data[2];
        }
 }
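The reworked ati_remote report handler above documents the 4-byte packet layout and rejects packets whose checksum byte does not satisfy data[1] == (data[2] + data[3] + 0xd5) & 0xff; the scancode is then just data[2] with the toggle bit masked off. A small standalone check of that relation, using a made-up report for key code 0x2e on channel 1:

    /* Illustrative only: verify the checksum relation on one fabricated report. */
    #include <assert.h>

    int main(void)
    {
            unsigned char data[4] = { 0x14, 0x13, 0x2e, 0x10 }; /* 0x13 == (0x2e + 0x10 + 0xd5) & 0xff */

            assert(data[1] == ((data[2] + data[3] + 0xd5) & 0xff));
            assert((data[2] & 0x7f) == 0x2e);   /* scancode handed to the keymap / rc-core */
            return 0;
    }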
 
index e1b8b26..8150644 100644
 #include <media/rc-map.h>
 
 static struct rc_map_table ati_x10[] = {
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xc500, KEY_A },
-       { 0xc601, KEY_B },
-       { 0xde19, KEY_C },
-       { 0xe01b, KEY_D },
-       { 0xe621, KEY_E },
-       { 0xe823, KEY_F },
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x00, KEY_A },
+       { 0x01, KEY_B },
+       { 0x19, KEY_C },
+       { 0x1b, KEY_D },
+       { 0x21, KEY_E },
+       { 0x23, KEY_F },
 
-       { 0xdd18, KEY_KPENTER },    /* "check" */
-       { 0xdb16, KEY_MENU },       /* "menu" */
-       { 0xc702, KEY_POWER },      /* Power */
-       { 0xc803, KEY_TV },         /* TV */
-       { 0xc904, KEY_DVD },        /* DVD */
-       { 0xca05, KEY_WWW },        /* WEB */
-       { 0xcb06, KEY_BOOKMARKS },  /* "book" */
-       { 0xcc07, KEY_EDIT },       /* "hand" */
-       { 0xe11c, KEY_COFFEE },     /* "timer" */
-       { 0xe520, KEY_FRONT },      /* "max" */
-       { 0xe21d, KEY_LEFT },       /* left */
-       { 0xe41f, KEY_RIGHT },      /* right */
-       { 0xe722, KEY_DOWN },       /* down */
-       { 0xdf1a, KEY_UP },         /* up */
-       { 0xe31e, KEY_OK },         /* "OK" */
-       { 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
-       { 0xcd08, KEY_VOLUMEUP },   /* VOL - */
-       { 0xcf0a, KEY_MUTE },       /* MUTE  */
-       { 0xd00b, KEY_CHANNELUP },  /* CH + */
-       { 0xd10c, KEY_CHANNELDOWN },/* CH - */
-       { 0xec27, KEY_RECORD },     /* ( o) red */
-       { 0xea25, KEY_PLAY },       /* ( >) */
-       { 0xe924, KEY_REWIND },     /* (<<) */
-       { 0xeb26, KEY_FORWARD },    /* (>>) */
-       { 0xed28, KEY_STOP },       /* ([]) */
-       { 0xee29, KEY_PAUSE },      /* ('') */
-       { 0xf02b, KEY_PREVIOUS },   /* (<-) */
-       { 0xef2a, KEY_NEXT },       /* (>+) */
-       { 0xf22d, KEY_INFO },       /* PLAYING */
-       { 0xf32e, KEY_HOME },       /* TOP */
-       { 0xf42f, KEY_END },        /* END */
-       { 0xf530, KEY_SELECT },     /* SELECT */
+       { 0x18, KEY_KPENTER },    /* "check" */
+       { 0x16, KEY_MENU },       /* "menu" */
+       { 0x02, KEY_POWER },      /* Power */
+       { 0x03, KEY_TV },         /* TV */
+       { 0x04, KEY_DVD },        /* DVD */
+       { 0x05, KEY_WWW },        /* WEB */
+       { 0x06, KEY_BOOKMARKS },  /* "book" */
+       { 0x07, KEY_EDIT },       /* "hand" */
+       { 0x1c, KEY_COFFEE },     /* "timer" */
+       { 0x20, KEY_FRONT },      /* "max" */
+       { 0x1d, KEY_LEFT },       /* left */
+       { 0x1f, KEY_RIGHT },      /* right */
+       { 0x22, KEY_DOWN },       /* down */
+       { 0x1a, KEY_UP },         /* up */
+       { 0x1e, KEY_OK },         /* "OK" */
+       { 0x09, KEY_VOLUMEDOWN }, /* VOL + */
+       { 0x08, KEY_VOLUMEUP },   /* VOL - */
+       { 0x0a, KEY_MUTE },       /* MUTE  */
+       { 0x0b, KEY_CHANNELUP },  /* CH + */
+       { 0x0c, KEY_CHANNELDOWN },/* CH - */
+       { 0x27, KEY_RECORD },     /* ( o) red */
+       { 0x25, KEY_PLAY },       /* ( >) */
+       { 0x24, KEY_REWIND },     /* (<<) */
+       { 0x26, KEY_FORWARD },    /* (>>) */
+       { 0x28, KEY_STOP },       /* ([]) */
+       { 0x29, KEY_PAUSE },      /* ('') */
+       { 0x2b, KEY_PREVIOUS },   /* (<-) */
+       { 0x2a, KEY_NEXT },       /* (>+) */
+       { 0x2d, KEY_INFO },       /* PLAYING */
+       { 0x2e, KEY_HOME },       /* TOP */
+       { 0x2f, KEY_END },        /* END */
+       { 0x30, KEY_SELECT },     /* SELECT */
 };
 
 static struct rc_map_list ati_x10_map = {
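The keymap tables in this and the next two files drop the old 16-bit scancodes for single-byte codes matching the masked data[2] value; each new code is simply the old entry's low byte. A quick check of that correspondence for a few entries from the table above:

    /* Values taken from the ati_x10 table; the mapping old & 0xff == new holds throughout. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const uint16_t old_codes[] = { 0xd20d, 0xc500, 0xe31e, 0xce09 };
            const uint8_t  new_codes[] = { 0x0d,   0x00,   0x1e,   0x09   };

            for (unsigned i = 0; i < sizeof(old_codes) / sizeof(old_codes[0]); i++)
                    assert((old_codes[i] & 0xff) == new_codes[i]);
            return 0;
    }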
index 09e2cc0..479cdb8 100644
 #include <media/rc-map.h>
 
 static struct rc_map_table medion_x10[] = {
-       { 0xf12c, KEY_TV },    /* TV */
-       { 0xf22d, KEY_VCR },   /* VCR */
-       { 0xc904, KEY_DVD },   /* DVD */
-       { 0xcb06, KEY_AUDIO }, /* MUSIC */
-
-       { 0xf32e, KEY_RADIO },     /* RADIO */
-       { 0xca05, KEY_DIRECTORY }, /* PHOTO */
-       { 0xf42f, KEY_INFO },      /* TV-PREVIEW */
-       { 0xf530, KEY_LIST },      /* CHANNEL-LST */
-
-       { 0xe01b, KEY_SETUP }, /* SETUP */
-       { 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
-
-       { 0xcd08, KEY_VOLUMEDOWN },  /* VOL - */
-       { 0xce09, KEY_VOLUMEUP },    /* VOL + */
-       { 0xd00b, KEY_CHANNELUP },   /* CHAN + */
-       { 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
-       { 0xc500, KEY_MUTE },        /* MUTE */
-
-       { 0xf732, KEY_RED }, /* red */
-       { 0xf833, KEY_GREEN }, /* green */
-       { 0xf934, KEY_YELLOW }, /* yellow */
-       { 0xfa35, KEY_BLUE }, /* blue */
-       { 0xdb16, KEY_TEXT }, /* TXT */
-
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
-       { 0xe520, KEY_DELETE }, /* DELETE */
-
-       { 0xfb36, KEY_KEYBOARD }, /* RENAME */
-       { 0xdd18, KEY_SCREEN },   /* SNAPSHOT */
-
-       { 0xdf1a, KEY_UP },    /* up */
-       { 0xe722, KEY_DOWN },  /* down */
-       { 0xe21d, KEY_LEFT },  /* left */
-       { 0xe41f, KEY_RIGHT }, /* right */
-       { 0xe31e, KEY_OK },    /* OK */
-
-       { 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
-       { 0xfd38, KEY_EDIT },   /* EDIT IMAGE */
-
-       { 0xe924, KEY_REWIND },   /* rewind  (<<) */
-       { 0xea25, KEY_PLAY },     /* play    ( >) */
-       { 0xeb26, KEY_FORWARD },  /* forward (>>) */
-       { 0xec27, KEY_RECORD },   /* record  ( o) */
-       { 0xed28, KEY_STOP },     /* stop    ([]) */
-       { 0xee29, KEY_PAUSE },    /* pause   ('') */
-
-       { 0xe621, KEY_PREVIOUS },        /* prev */
-       { 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
-       { 0xe823, KEY_NEXT },            /* next */
-       { 0xde19, KEY_MENU },            /* MENU */
-       { 0xff3a, KEY_LANGUAGE },        /* AUDIO */
-
-       { 0xc702, KEY_POWER }, /* POWER */
+       { 0x2c, KEY_TV },    /* TV */
+       { 0x2d, KEY_VCR },   /* VCR */
+       { 0x04, KEY_DVD },   /* DVD */
+       { 0x06, KEY_AUDIO }, /* MUSIC */
+
+       { 0x2e, KEY_RADIO },     /* RADIO */
+       { 0x05, KEY_DIRECTORY }, /* PHOTO */
+       { 0x2f, KEY_INFO },      /* TV-PREVIEW */
+       { 0x30, KEY_LIST },      /* CHANNEL-LST */
+
+       { 0x1b, KEY_SETUP }, /* SETUP */
+       { 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
+
+       { 0x08, KEY_VOLUMEDOWN },  /* VOL - */
+       { 0x09, KEY_VOLUMEUP },    /* VOL + */
+       { 0x0b, KEY_CHANNELUP },   /* CHAN + */
+       { 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
+       { 0x00, KEY_MUTE },        /* MUTE */
+
+       { 0x32, KEY_RED }, /* red */
+       { 0x33, KEY_GREEN }, /* green */
+       { 0x34, KEY_YELLOW }, /* yellow */
+       { 0x35, KEY_BLUE }, /* blue */
+       { 0x16, KEY_TEXT }, /* TXT */
+
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+       { 0x20, KEY_DELETE }, /* DELETE */
+
+       { 0x36, KEY_KEYBOARD }, /* RENAME */
+       { 0x18, KEY_SCREEN },   /* SNAPSHOT */
+
+       { 0x1a, KEY_UP },    /* up */
+       { 0x22, KEY_DOWN },  /* down */
+       { 0x1d, KEY_LEFT },  /* left */
+       { 0x1f, KEY_RIGHT }, /* right */
+       { 0x1e, KEY_OK },    /* OK */
+
+       { 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
+       { 0x38, KEY_EDIT },   /* EDIT IMAGE */
+
+       { 0x24, KEY_REWIND },   /* rewind  (<<) */
+       { 0x25, KEY_PLAY },     /* play    ( >) */
+       { 0x26, KEY_FORWARD },  /* forward (>>) */
+       { 0x27, KEY_RECORD },   /* record  ( o) */
+       { 0x28, KEY_STOP },     /* stop    ([]) */
+       { 0x29, KEY_PAUSE },    /* pause   ('') */
+
+       { 0x21, KEY_PREVIOUS },        /* prev */
+       { 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+       { 0x23, KEY_NEXT },            /* next */
+       { 0x19, KEY_MENU },            /* MENU */
+       { 0x3a, KEY_LANGUAGE },        /* AUDIO */
+
+       { 0x02, KEY_POWER }, /* POWER */
 };
 
 static struct rc_map_list medion_x10_map = {
index ef14652..c7f33ec 100644
 #include <media/rc-map.h>
 
 static struct rc_map_table snapstream_firefly[] = {
-       { 0xf12c, KEY_ZOOM },       /* Maximize */
-       { 0xc702, KEY_CLOSE },
-
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xdb16, KEY_BACK },
-       { 0xdd18, KEY_KPENTER },    /* ent */
-
-       { 0xce09, KEY_VOLUMEUP },
-       { 0xcd08, KEY_VOLUMEDOWN },
-       { 0xcf0a, KEY_MUTE },
-       { 0xd00b, KEY_CHANNELUP },
-       { 0xd10c, KEY_CHANNELDOWN },
-       { 0xc500, KEY_VENDOR },     /* firefly */
-
-       { 0xf32e, KEY_INFO },
-       { 0xf42f, KEY_OPTION },
-
-       { 0xe21d, KEY_LEFT },
-       { 0xe41f, KEY_RIGHT },
-       { 0xe722, KEY_DOWN },
-       { 0xdf1a, KEY_UP },
-       { 0xe31e, KEY_OK },
-
-       { 0xe11c, KEY_MENU },
-       { 0xe520, KEY_EXIT },
-
-       { 0xec27, KEY_RECORD },
-       { 0xea25, KEY_PLAY },
-       { 0xed28, KEY_STOP },
-       { 0xe924, KEY_REWIND },
-       { 0xeb26, KEY_FORWARD },
-       { 0xee29, KEY_PAUSE },
-       { 0xf02b, KEY_PREVIOUS },
-       { 0xef2a, KEY_NEXT },
-
-       { 0xcb06, KEY_AUDIO },      /* Music */
-       { 0xca05, KEY_IMAGES },     /* Photos */
-       { 0xc904, KEY_DVD },
-       { 0xc803, KEY_TV },
-       { 0xcc07, KEY_VIDEO },
-
-       { 0xc601, KEY_HELP },
-       { 0xf22d, KEY_MODE },       /* Mouse */
-
-       { 0xde19, KEY_A },
-       { 0xe01b, KEY_B },
-       { 0xe621, KEY_C },
-       { 0xe823, KEY_D },
+       { 0x2c, KEY_ZOOM },       /* Maximize */
+       { 0x02, KEY_CLOSE },
+
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x16, KEY_BACK },
+       { 0x18, KEY_KPENTER },    /* ent */
+
+       { 0x09, KEY_VOLUMEUP },
+       { 0x08, KEY_VOLUMEDOWN },
+       { 0x0a, KEY_MUTE },
+       { 0x0b, KEY_CHANNELUP },
+       { 0x0c, KEY_CHANNELDOWN },
+       { 0x00, KEY_VENDOR },     /* firefly */
+
+       { 0x2e, KEY_INFO },
+       { 0x2f, KEY_OPTION },
+
+       { 0x1d, KEY_LEFT },
+       { 0x1f, KEY_RIGHT },
+       { 0x22, KEY_DOWN },
+       { 0x1a, KEY_UP },
+       { 0x1e, KEY_OK },
+
+       { 0x1c, KEY_MENU },
+       { 0x20, KEY_EXIT },
+
+       { 0x27, KEY_RECORD },
+       { 0x25, KEY_PLAY },
+       { 0x28, KEY_STOP },
+       { 0x24, KEY_REWIND },
+       { 0x26, KEY_FORWARD },
+       { 0x29, KEY_PAUSE },
+       { 0x2b, KEY_PREVIOUS },
+       { 0x2a, KEY_NEXT },
+
+       { 0x06, KEY_AUDIO },      /* Music */
+       { 0x05, KEY_IMAGES },     /* Photos */
+       { 0x04, KEY_DVD },
+       { 0x03, KEY_TV },
+       { 0x07, KEY_VIDEO },
+
+       { 0x01, KEY_HELP },
+       { 0x2d, KEY_MODE },       /* Mouse */
+
+       { 0x19, KEY_A },
+       { 0x1b, KEY_B },
+       { 0x21, KEY_C },
+       { 0x23, KEY_D },
 };
 
 static struct rc_map_list snapstream_firefly_map = {
index 39fc923..1c6015a 100644
@@ -162,11 +162,14 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
        switch (tv.model) {
        case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
        case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
        case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
        case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
                break;
@@ -324,6 +327,10 @@ struct usb_device_id au0828_usb_id_table[] = {
                .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
        { USB_DEVICE(0x2040, 0x8200),
                .driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
+       { USB_DEVICE(0x2040, 0x7260),
+               .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+       { USB_DEVICE(0x2040, 0x7213),
+               .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
        { },
 };
 
index 89d09a8..82c8817 100644
@@ -162,7 +162,6 @@ struct m5mols_version {
  * @pad: media pad
  * @ffmt: current fmt according to resolution type
  * @res_type: current resolution type
- * @code: current code
  * @irq_waitq: waitqueue for the capture
  * @work_irq: workqueue for the IRQ
  * @flags: state variable for the interrupt handler
@@ -192,7 +191,6 @@ struct m5mols_info {
        struct media_pad pad;
        struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
        int res_type;
-       enum v4l2_mbus_pixelcode code;
        wait_queue_head_t irq_waitq;
        struct work_struct work_irq;
        unsigned long flags;
index 05ab370..e0f09e5 100644
@@ -334,7 +334,7 @@ int m5mols_mode(struct m5mols_info *info, u8 mode)
        int ret = -EINVAL;
        u8 reg;
 
-       if (mode < REG_PARAMETER && mode > REG_CAPTURE)
+       if (mode < REG_PARAMETER || mode > REG_CAPTURE)
                return ret;
 
        ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
@@ -511,9 +511,6 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        struct m5mols_info *info = to_m5mols(sd);
        struct v4l2_mbus_framefmt *format;
 
-       if (fmt->pad != 0)
-               return -EINVAL;
-
        format = __find_format(info, fh, fmt->which, info->res_type);
        if (!format)
                return -EINVAL;
@@ -532,9 +529,6 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        u32 resolution = 0;
        int ret;
 
-       if (fmt->pad != 0)
-               return -EINVAL;
-
        ret = __find_resolution(sd, format, &type, &resolution);
        if (ret < 0)
                return ret;
@@ -543,13 +537,14 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        if (!sfmt)
                return 0;
 
-       *sfmt           = m5mols_default_ffmt[type];
-       sfmt->width     = format->width;
-       sfmt->height    = format->height;
+
+       format->code = m5mols_default_ffmt[type].code;
+       format->colorspace = V4L2_COLORSPACE_JPEG;
+       format->field = V4L2_FIELD_NONE;
 
        if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+               *sfmt = *format;
                info->resolution = resolution;
-               info->code = format->code;
                info->res_type = type;
        }
 
@@ -626,13 +621,14 @@ static int m5mols_start_monitor(struct m5mols_info *info)
 static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
 {
        struct m5mols_info *info = to_m5mols(sd);
+       u32 code = info->ffmt[info->res_type].code;
 
        if (enable) {
                int ret = -EINVAL;
 
-               if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+               if (is_code(code, M5MOLS_RESTYPE_MONITOR))
                        ret = m5mols_start_monitor(info);
-               if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+               if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
                        ret = m5mols_start_capture(info);
 
                return ret;
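One m5mols fix above replaces && with || in the mode range check: with REG_PARAMETER below REG_CAPTURE, no value can be below the lower bound and above the upper bound at the same time, so the old test never rejected anything. A tiny demonstration (the register values are assumed; only their ordering matters):

    #include <assert.h>

    enum { REG_PARAMETER = 1, REG_MONITOR = 2, REG_CAPTURE = 3 };  /* assumed values */

    int main(void)
    {
            for (int mode = -4; mode <= 8; mode++) {
                    assert(!(mode < REG_PARAMETER && mode > REG_CAPTURE));  /* old test: always false */
                    if (mode < REG_PARAMETER || mode > REG_CAPTURE)         /* fixed test */
                            assert(mode != REG_PARAMETER && mode != REG_MONITOR &&
                                   mode != REG_CAPTURE);
            }
            return 0;
    }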
index cf2c0fb..398f96f 100644
@@ -955,6 +955,7 @@ static int mt9m111_probe(struct i2c_client *client,
        mt9m111->rect.height    = MT9M111_MAX_HEIGHT;
        mt9m111->fmt            = &mt9m111_colour_fmts[0];
        mt9m111->lastpage       = -1;
+       mutex_init(&mt9m111->power_lock);
 
        ret = mt9m111_video_probe(client);
        if (ret) {
index 32114a3..7b34b11 100644
@@ -1083,8 +1083,10 @@ static int mt9t112_probe(struct i2c_client *client,
        v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
 
        ret = mt9t112_camera_probe(client);
-       if (ret)
+       if (ret) {
                kfree(priv);
+               return ret;
+       }
 
        /* Cannot fail: using the default supported pixel code */
        mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
index 9c5c19f..ee0d0b3 100644
@@ -38,6 +38,7 @@
 #include <linux/irq.h>
 #include <linux/videodev2.h>
 #include <linux/dma-mapping.h>
+#include <linux/slab.h>
 
 #include <media/videobuf-dma-contig.h>
 #include <media/v4l2-device.h>
@@ -2169,6 +2170,14 @@ static int __init omap_vout_probe(struct platform_device *pdev)
        vid_dev->num_displays = 0;
        for_each_dss_dev(dssdev) {
                omap_dss_get_device(dssdev);
+
+               if (!dssdev->driver) {
+                       dev_warn(&pdev->dev, "no driver for display: %s\n",
+                                       dssdev->name);
+                       omap_dss_put_device(dssdev);
+                       continue;
+               }
+
                vid_dev->displays[vid_dev->num_displays++] = dssdev;
        }
 
index e87ae2f..6a6cf38 100644
@@ -24,6 +24,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
index 1d54b86..3ea38a8 100644
@@ -506,7 +506,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
        unsigned long flags;
        struct sgdma_state *sg_state;
 
-       if ((sglen < 0) || ((sglen > 0) & !sglist))
+       if ((sglen < 0) || ((sglen > 0) && !sglist))
                return -EINVAL;
 
        spin_lock_irqsave(&sgdma->lock, flags);
index b0b0fa5..54a4a3f 100644
@@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
 {
        struct isp_pipeline *pipe =
                to_isp_pipeline(&ccdc->video_out.video.entity);
-       struct video_device *vdev = &ccdc->subdev.devnode;
+       struct video_device *vdev = ccdc->subdev.devnode;
        struct v4l2_event event;
 
        memset(&event, 0, sizeof(event));
index 68d5394..bc0b2c7 100644
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
 
 static void isp_stat_queue_event(struct ispstat *stat, int err)
 {
-       struct video_device *vdev = &stat->subdev.devnode;
+       struct video_device *vdev = stat->subdev.devnode;
        struct v4l2_event event;
        struct omap3isp_stat_event_status *status = (void *)event.u.data;
 
index d100072..f229057 100644
@@ -26,6 +26,7 @@
 #include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
index 9f2d26b..6806345 100644
@@ -540,7 +540,7 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
 static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        struct soc_camera_sense *sense = icd->sense;
        struct ov6650 *priv = to_ov6650(client);
        bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
index c8d91b0..2cc3b91 100644
@@ -98,6 +98,10 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
                        vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
        }
        set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+       fimc_hw_reset(fimc);
+       cap->buf_index = 0;
+
        spin_unlock_irqrestore(&fimc->slock, flags);
 
        if (streaming)
@@ -137,7 +141,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
        struct fimc_dev *fimc = ctx->fimc_dev;
        int ret;
 
-       if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+       if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
                return 0;
 
        spin_lock(&ctx->slock);
@@ -150,7 +154,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
                fimc_hw_set_rotation(ctx);
                fimc_prepare_dma_offset(ctx, &ctx->d_frame);
                fimc_hw_set_out_dma(ctx);
-               set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+               clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
        }
        spin_unlock(&ctx->slock);
        return ret;
@@ -164,7 +168,6 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
        int min_bufs;
        int ret;
 
-       fimc_hw_reset(fimc);
        vid_cap->frame_count = 0;
 
        ret = fimc_init_capture(fimc);
@@ -523,7 +526,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
        max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
        min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
        min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
-       if (fimc->id == 1 && var->pix_hoff)
+       if (var->min_vsize_align == 1 && !rotation)
                align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
 
        depth = fimc_get_format_depth(ffmt);
@@ -1239,6 +1242,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
 
        mutex_lock(&fimc->lock);
        set_frame_bounds(ff, mf->width, mf->height);
+       fimc->vid_cap.mf = *mf;
        ff->fmt = ffmt;
 
        /* Reset the crop rectangle if required. */
@@ -1375,7 +1379,7 @@ static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
        media_entity_cleanup(&sd->entity);
        v4l2_device_unregister_subdev(sd);
        kfree(sd);
-       sd = NULL;
+       fimc->vid_cap.subdev = NULL;
 }
 
 /* Set default format at the sensor and host interface */
index 19ca6db..07c6254 100644
@@ -37,7 +37,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
 static struct fimc_fmt fimc_formats[] = {
        {
                .name           = "RGB565",
-               .fourcc         = V4L2_PIX_FMT_RGB565X,
+               .fourcc         = V4L2_PIX_FMT_RGB565,
                .depth          = { 16 },
                .color          = S5P_FIMC_RGB565,
                .memplanes      = 1,
@@ -1038,12 +1038,11 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
                mod_x = 6; /* 64 x 32 pixels tile */
                mod_y = 5;
        } else {
-               if (fimc->id == 1 && variant->pix_hoff)
+               if (variant->min_vsize_align == 1)
                        mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
                else
-                       mod_y = mod_x;
+                       mod_y = ffs(variant->min_vsize_align) - 1;
        }
-       dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
 
        v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
                &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
@@ -1226,10 +1225,10 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
                fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
 
        /* Get pixel alignment constraints. */
-       if (fimc->id == 1 && fimc->variant->pix_hoff)
+       if (fimc->variant->min_vsize_align == 1)
                halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
        else
-               halign = ffs(min_size) - 1;
+               halign = ffs(fimc->variant->min_vsize_align) - 1;
 
        for (i = 0; i < f->fmt->colplanes; i++)
                depth += f->fmt->depth[i];
@@ -1615,7 +1614,6 @@ static int fimc_probe(struct platform_device *pdev)
        pdata = pdev->dev.platform_data;
        fimc->pdata = pdata;
 
-       set_bit(ST_LPM, &fimc->state);
 
        init_waitqueue_head(&fimc->irq_queue);
        spin_lock_init(&fimc->slock);
@@ -1707,8 +1705,6 @@ static int fimc_runtime_resume(struct device *dev)
        /* Enable clocks and perform basic initalization */
        clk_enable(fimc->clock[CLK_GATE]);
        fimc_hw_reset(fimc);
-       if (fimc->variant->out_buf_count > 4)
-               fimc_hw_set_dma_seq(fimc, 0xF);
 
        /* Resume the capture or mem-to-mem device */
        if (fimc_capture_busy(fimc))
@@ -1750,8 +1746,6 @@ static int fimc_resume(struct device *dev)
                return 0;
        }
        fimc_hw_reset(fimc);
-       if (fimc->variant->out_buf_count > 4)
-               fimc_hw_set_dma_seq(fimc, 0xF);
        spin_unlock_irqrestore(&fimc->slock, flags);
 
        if (fimc_capture_busy(fimc))
@@ -1780,7 +1774,6 @@ static int __devexit fimc_remove(struct platform_device *pdev)
        struct fimc_dev *fimc = platform_get_drvdata(pdev);
 
        pm_runtime_disable(&pdev->dev);
-       fimc_runtime_suspend(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
 
        vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
@@ -1840,6 +1833,7 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[0],
 };
@@ -1849,6 +1843,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit = &s5p_pix_limit[1],
 };
@@ -1861,6 +1856,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[1],
 };
@@ -1874,6 +1870,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 1,
+       .min_vsize_align = 1,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[2],
 };
@@ -1884,6 +1881,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[2],
 };
@@ -1898,6 +1896,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 2,
+       .min_vsize_align = 1,
        .out_buf_count   = 32,
        .pix_limit       = &s5p_pix_limit[1],
 };
@@ -1910,6 +1909,7 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 2,
+       .min_vsize_align = 1,
        .out_buf_count   = 32,
        .pix_limit       = &s5p_pix_limit[3],
 };
index a6936da..c7f01c4 100644
@@ -377,6 +377,7 @@ struct fimc_pix_limit {
  * @min_inp_pixsize: minimum input pixel size
  * @min_out_pixsize: minimum output pixel size
  * @hor_offs_align: horizontal pixel offset aligment
+ * @min_vsize_align: minimum vertical pixel size alignment
  * @out_buf_count: the number of buffers in output DMA sequence
  */
 struct samsung_fimc_variant {
@@ -390,6 +391,7 @@ struct samsung_fimc_variant {
        u16             min_inp_pixsize;
        u16             min_out_pixsize;
        u16             hor_offs_align;
+       u16             min_vsize_align;
        u16             out_buf_count;
 };
 
index cc337b1..615c862 100644
@@ -220,6 +220,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
        sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
                                       s_info->pdata->board_info, NULL);
        if (IS_ERR_OR_NULL(sd)) {
+               i2c_put_adapter(adapter);
                v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
                return NULL;
        }
@@ -234,12 +235,15 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
 static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
+       struct i2c_adapter *adapter;
 
        if (!client)
                return;
        v4l2_device_unregister_subdev(sd);
+       adapter = client->adapter;
        i2c_unregister_device(client);
-       i2c_put_adapter(client->adapter);
+       if (adapter)
+               i2c_put_adapter(adapter);
 }
 
 static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
@@ -381,20 +385,28 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
 
 static int fimc_md_register_video_nodes(struct fimc_md *fmd)
 {
+       struct video_device *vdev;
        int i, ret = 0;
 
        for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
                if (!fmd->fimc[i])
                        continue;
 
-               if (fmd->fimc[i]->m2m.vfd)
-                       ret = video_register_device(fmd->fimc[i]->m2m.vfd,
-                                                   VFL_TYPE_GRABBER, -1);
-               if (ret)
-                       break;
-               if (fmd->fimc[i]->vid_cap.vfd)
-                       ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
-                                                   VFL_TYPE_GRABBER, -1);
+               vdev = fmd->fimc[i]->m2m.vfd;
+               if (vdev) {
+                       ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+                       if (ret)
+                               break;
+                       v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+                                 vdev->name, video_device_node_name(vdev));
+               }
+
+               vdev = fmd->fimc[i]->vid_cap.vfd;
+               if (vdev == NULL)
+                       continue;
+               ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+               v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+                         vdev->name, video_device_node_name(vdev));
        }
 
        return ret;
@@ -502,7 +514,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
                        if (WARN(csis == NULL,
                                 "MIPI-CSI interface specified "
                                 "but s5p-csis module is not loaded!\n"))
-                               continue;
+                               return -EINVAL;
 
                        ret = media_entity_create_link(&sensor->entity, 0,
                                              &csis->entity, CSIS_PAD_SINK,
@@ -742,9 +754,6 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
        struct fimc_md *fmd;
        int ret;
 
-       if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
-               return -EINVAL;
-
        fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
        if (!fmd)
                return -ENOMEM;
@@ -782,9 +791,11 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
        if (ret)
                goto err3;
 
-       ret = fimc_md_register_sensor_entities(fmd);
-       if (ret)
-               goto err3;
+       if (pdev->dev.platform_data) {
+               ret = fimc_md_register_sensor_entities(fmd);
+               if (ret)
+                       goto err3;
+       }
        ret = fimc_md_create_links(fmd);
        if (ret)
                goto err3;
index 20e664e..44f5c2d 100644
@@ -35,6 +35,9 @@ void fimc_hw_reset(struct fimc_dev *dev)
        cfg = readl(dev->regs + S5P_CIGCTRL);
        cfg &= ~S5P_CIGCTRL_SWRST;
        writel(cfg, dev->regs + S5P_CIGCTRL);
+
+       if (dev->variant->out_buf_count > 4)
+               fimc_hw_set_dma_seq(dev, 0xF);
 }
 
 static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
@@ -251,7 +254,14 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
        struct fimc_scaler *sc = &ctx->scaler;
        struct fimc_frame *src_frame = &ctx->s_frame;
        struct fimc_frame *dst_frame = &ctx->d_frame;
-       u32 cfg = 0;
+
+       u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+
+       cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
+                S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
+                S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
+                S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
+                S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
 
        if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
                cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -308,9 +318,9 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
        fimc_hw_set_scaler(ctx);
 
        cfg = readl(dev->regs + S5P_CISCCTRL);
+       cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
 
        if (variant->has_mainscaler_ext) {
-               cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
                cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
                cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -323,7 +333,6 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
                cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CIEXTEN);
        } else {
-               cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
                cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
                cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CISCCTRL);
index 1e8cdb7..dff9dc7 100644
@@ -61,7 +61,7 @@ static struct s5p_mfc_fmt formats[] = {
                .num_planes = 1,
        },
        {
-               .name = "H264 Encoded Stream",
+               .name = "H263 Encoded Stream",
                .fourcc = V4L2_PIX_FMT_H263,
                .codec_mode = S5P_FIMV_CODEC_H263_ENC,
                .type = MFC_FMT_ENC,
index e16d3a4..b47d0c0 100644
@@ -16,6 +16,7 @@
 #include <media/v4l2-ioctl.h>
 #include <linux/videodev2.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/version.h>
 #include <linux/timer.h>
 #include <media/videobuf2-dma-contig.h>
index f390682..c51decf 100644
@@ -566,8 +566,10 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
        ret = sh_mobile_ceu_soft_reset(pcdev);
 
        csi2_sd = find_csi2(pcdev);
-       if (csi2_sd)
-               csi2_sd->grp_id = (long)icd;
+       if (csi2_sd) {
+               csi2_sd->grp_id = soc_camera_grp_id(icd);
+               v4l2_set_subdev_hostdata(csi2_sd, icd);
+       }
 
        ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
        if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
@@ -768,7 +770,7 @@ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
 {
        if (pcdev->csi2_pdev) {
                struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
-               if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+               if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
                        return csi2_sd;
        }
 
@@ -1089,8 +1091,9 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
                        /* Try 2560x1920, 1280x960, 640x480, 320x240 */
                        mf.width        = 2560 >> shift;
                        mf.height       = 1920 >> shift;
-                       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                        s_mbus_fmt, &mf);
+                       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, &mf);
                        if (ret < 0)
                                return ret;
                        shift++;
@@ -1389,7 +1392,8 @@ static int client_s_fmt(struct soc_camera_device *icd,
        bool ceu_1to1;
        int ret;
 
-       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
+       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                        soc_camera_grp_id(icd), video,
                                         s_mbus_fmt, mf);
        if (ret < 0)
                return ret;
@@ -1426,8 +1430,9 @@ static int client_s_fmt(struct soc_camera_device *icd,
                tmp_h = min(2 * tmp_h, max_height);
                mf->width = tmp_w;
                mf->height = tmp_h;
-               ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                s_mbus_fmt, mf);
+               ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, mf);
                dev_geo(dev, "Camera scaled to %ux%u\n",
                        mf->width, mf->height);
                if (ret < 0) {
@@ -1580,8 +1585,9 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
        }
 
        if (interm_width < icd->user_width || interm_height < icd->user_height) {
-               ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video,
-                                                s_mbus_fmt, &mf);
+               ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, &mf);
                if (ret < 0)
                        return ret;
 
@@ -1867,7 +1873,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
        mf.code         = xlate->code;
        mf.colorspace   = pix->colorspace;
 
-       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
+       ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+                                        video, try_mbus_fmt, &mf);
        if (ret < 0)
                return ret;
 
@@ -1891,8 +1898,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
                         */
                        mf.width = 2560;
                        mf.height = 1920;
-                       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                        try_mbus_fmt, &mf);
+                       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       try_mbus_fmt, &mf);
                        if (ret < 0) {
                                /* Shouldn't actually happen... */
                                dev_err(icd->parent,
index ea4f047..8a652b5 100644
@@ -143,7 +143,7 @@ static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
                                 const struct v4l2_mbus_config *cfg)
 {
        struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
        struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
                                              .flags = priv->mipi_flags};
@@ -202,7 +202,7 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
 static int sh_csi2_client_connect(struct sh_csi2 *priv)
 {
        struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
-       struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
        struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
        struct device *dev = v4l2_get_subdevdata(&priv->subdev);
        struct v4l2_mbus_config cfg;
index b72580c..62e4312 100644
@@ -1103,7 +1103,8 @@ static int soc_camera_probe(struct soc_camera_device *icd)
        }
 
        sd = soc_camera_to_subdev(icd);
-       sd->grp_id = (long)icd;
+       sd->grp_id = soc_camera_grp_id(icd);
+       v4l2_set_subdev_hostdata(sd, icd);
 
        if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
                goto ectrl;
index 43c0ebb..b7b2d34 100644
@@ -4,7 +4,7 @@
  * Debugfs support for the AB5500 MFD driver
  */
 
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/mfd/ab5500/ab5500.h>
index 1e91738..d3d572b 100644
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
 
 static struct resource __devinitdata ab8500_chargalg_resources[] = {};
 
+#ifdef CONFIG_DEBUG_FS
 static struct resource __devinitdata ab8500_debug_resources[] = {
        {
                .name   = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
                .flags  = IORESOURCE_IRQ,
        },
 };
+#endif
 
 static struct resource __devinitdata ab8500_usb_resources[] = {
        {
index f1d8848..8d816cc 100644
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
 
        ret = __adp5520_read(chip->client, reg, &reg_val);
 
-       if (!ret && ((reg_val & bit_mask) == 0)) {
+       if (!ret && ((reg_val & bit_mask) != bit_mask)) {
                reg_val |= bit_mask;
                ret = __adp5520_write(chip->client, reg, reg_val);
        }
index 1b79c37..1924b85 100644
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __da903x_write(chip->client, reg, reg_val);
        }
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
        struct da903x_chip *chip = i2c_get_clientdata(client);
 
        da903x_remove_subdevs(chip);
+       free_irq(client->irq, chip);
        kfree(chip);
        return 0;
 }
index 1e9ee53..ef39528 100644
@@ -16,6 +16,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
index bba26d9..a5ddf31 100644
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
        }
index 6f5b8cf..c1da84b 100644
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
                goto out;
        }
 
-       data &= mask;
+       data &= ~mask;
        err = tps65910_i2c_write(tps65910, reg, 1, &data);
        if (err)
                dev_err(tps65910->dev, "write to reg %x failed\n", reg);
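The MFD fixes in the surrounding hunks correct two read-modify-write helpers: *_set_bits must test that all requested bits are already set (the old == 0 check skipped the write whenever any one bit happened to be set), and tps65910_clear_bits must AND with the complement of the mask. Stripped of the register I/O, the corrected logic amounts to:

    /* Plain-C sketch of the fixed helpers; register access is left out. */
    #include <assert.h>
    #include <stdint.h>

    static uint8_t set_bits(uint8_t reg_val, uint8_t bit_mask)
    {
            if ((reg_val & bit_mask) != bit_mask)   /* some requested bit still clear */
                    reg_val |= bit_mask;
            return reg_val;
    }

    static uint8_t clear_bits(uint8_t reg_val, uint8_t mask)
    {
            return reg_val & ~mask;                 /* was "reg_val & mask" before the fix */
    }

    int main(void)
    {
            assert(set_bits(0x01, 0x03) == 0x03);   /* the old "== 0" test would have skipped this write */
            assert(clear_bits(0x0f, 0x03) == 0x0c);
            return 0;
    }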
index bfbd660..61e70cf 100644
@@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /*
         * [MSG1]: fill the register address data
@@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /* [MSG1] fill the register address data */
        msg = &twl->xfer_msg[0];
index f062c8c..29f11e0 100644
@@ -432,6 +432,7 @@ struct sih_agent {
        u32                     edge_change;
 
        struct mutex            irq_lock;
+       char                    *irq_name;
 };
 
 /*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
  * Generic handler for SIH interrupts ... we "know" this is called
  * in task context, with IRQs enabled.
  */
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
 {
        struct sih_agent *agent = irq_get_handler_data(irq);
        const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                pr_err("twl4030: %s SIH, read ISR error %d\n",
                        sih->name, isr);
                /* REVISIT:  recover; eventually mask it all, etc */
-               return;
+               return IRQ_HANDLED;
        }
 
        while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                        pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
                                sih->name, irq);
        }
+       return IRQ_HANDLED;
 }
 
 static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@ int twl4030_sih_setup(int module)
                activate_irq(irq);
        }
 
-       status = irq_base;
        twl4030_irq_next += i;
 
        /* replace generic PIH handler (handle_simple_irq) */
        irq = sih_mod + twl4030_irq_base;
        irq_set_handler_data(irq, agent);
-       irq_set_chained_handler(irq, handle_twl4030_sih);
+       agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+       status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+                                     agent->irq_name ?: sih->name, NULL);
 
        pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
                        irq, irq_base, twl4030_irq_next - 1);
 
-       return status;
+       return status < 0 ? status : irq_base;
 }
 
 /* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
        }
 
        /* install an irq handler to demultiplex the TWL4030 interrupt */
-       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
-                                       "TWL4030-PIH", NULL);
+       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+                                     IRQF_ONESHOT,
+                                     "TWL4030-PIH", NULL);
        if (status < 0) {
                pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
                goto fail_rqirq;
index 5d6ba13..61894fc 100644
@@ -239,6 +239,7 @@ static int wm8994_suspend(struct device *dev)
 
        switch (wm8994->type) {
        case WM8958:
+       case WM1811:
                ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
                if (ret < 0) {
                        dev_err(dev, "Failed to read power status: %d\n", ret);
index a1cb21f..1e0e27c 100644
@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
                  MMC_QUIRK_BLK_NO_CMD23),
        MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
+
+       /*
+        * Some Micron MMC cards needs longer data read timeout than
+        * indicated in CSD.
+        */
+       MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+                 MMC_QUIRK_LONG_READ_TIME),
+
        END_FIXUP
 };
 
index 5278ffb..950b97d 100644
@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
                        data->timeout_clks = 0;
                }
        }
+
+       /*
+        * Some cards require longer data read timeout than indicated in CSD.
+        * Address this by setting the read timeout to a "reasonably high"
+        * value. For the cards tested, 300ms has proven enough. If necessary,
+        * this value can be increased if other problematic cards require this.
+        */
+       if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+               data->timeout_ns = 300000000;
+               data->timeout_clks = 0;
+       }
+
        /*
         * Some cards need very high timeouts if driven in SPI mode.
         * The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
        mmc_host_clk_release(host);
 }
 
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+       struct mmc_card *card;
+       unsigned int timeout;
+       unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+       int err = 0;
+
+       card = host->card;
+
+       /*
+        * Send power notify command only if card
+        * is mmc and notify state is powered ON
+        */
+       if (card && mmc_card_mmc(card) &&
+           (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+               if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+                       notify_type = EXT_CSD_POWER_OFF_SHORT;
+                       timeout = card->ext_csd.generic_cmd6_time;
+                       card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+               } else {
+                       notify_type = EXT_CSD_POWER_OFF_LONG;
+                       timeout = card->ext_csd.power_off_longtime;
+                       card->poweroff_notify_state = MMC_POWEROFF_LONG;
+               }
+
+               err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+                                EXT_CSD_POWER_OFF_NOTIFICATION,
+                                notify_type, timeout);
+
+               if (err && err != -EBADMSG)
+                       pr_err("Device failed to respond within %d poweroff "
+                              "time. Forcefully powering down the device\n",
+                              timeout);
+
+               /* Set the card state to no notification after the poweroff */
+               card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+       }
+}
+
 /*
  * Apply power to the MMC stack.  This is a two-stage process.
  * First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
 
 void mmc_power_off(struct mmc_host *host)
 {
-       struct mmc_card *card;
-       unsigned int notify_type;
-       unsigned int timeout;
-       int err;
-
        mmc_host_clk_hold(host);
 
-       card = host->card;
        host->ios.clock = 0;
        host->ios.vdd = 0;
 
-       if (card && mmc_card_mmc(card) &&
-           (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
-               if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
-                       notify_type = EXT_CSD_POWER_OFF_SHORT;
-                       timeout = card->ext_csd.generic_cmd6_time;
-                       card->poweroff_notify_state = MMC_POWEROFF_SHORT;
-               } else {
-                       notify_type = EXT_CSD_POWER_OFF_LONG;
-                       timeout = card->ext_csd.power_off_longtime;
-                       card->poweroff_notify_state = MMC_POWEROFF_LONG;
-               }
-
-               err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                                EXT_CSD_POWER_OFF_NOTIFICATION,
-                                notify_type, timeout);
-
-               if (err && err != -EBADMSG)
-                       pr_err("Device failed to respond within %d poweroff "
-                              "time. Forcefully powering down the device\n",
-                              timeout);
-
-               /* Set the card state to no notification after the poweroff */
-               card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
-       }
+       mmc_poweroff_notify(host);
 
        /*
         * Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
 
        mmc_bus_get(host);
 
-       if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+       if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
                err = host->bus_ops->sleep(host);
 
        mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
                 * pre-claim the host.
                 */
                if (mmc_try_claim_host(host)) {
-                       if (host->bus_ops->suspend)
+                       if (host->bus_ops->suspend) {
+                               /*
+                                * For eMMC 4.5 devices, send the notify command
+                                * before sleep, because in sleep state eMMC 4.5
+                                * devices respond only to RESET and AWAKE commands
+                                */
+                               mmc_poweroff_notify(host);
                                err = host->bus_ops->suspend(host);
+                       }
+                       mmc_do_release_host(host);
+
                        if (err == -ENOSYS || !host->bus_ops->resume) {
                                /*
                                 * We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
                                host->pm_flags = 0;
                                err = 0;
                        }
-                       mmc_do_release_host(host);
                } else {
                        err = -EBUSY;
                }
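
The core.c changes factor the power-off notification into mmc_poweroff_notify() so the same CMD6 switch can be issued both from mmc_power_off() and, for eMMC 4.5 parts, before bus_ops->suspend() while the card can still accept commands. A rough sketch of that suspend ordering, with claiming and error handling trimmed down (the helpers referenced are the static ones added above, so this only illustrates call order):

    static int example_suspend_path(struct mmc_host *host)
    {
            int err = 0;

            if (!mmc_try_claim_host(host))
                    return -EBUSY;

            if (host->bus_ops->suspend) {
                    mmc_poweroff_notify(host);      /* notify before sleeping */
                    err = host->bus_ops->suspend(host);
            }
            mmc_do_release_host(host);

            return err;
    }
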
index e8a5eb3..d31c78b 100644 (file)
@@ -302,17 +302,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
        host->max_blk_size = 512;
        host->max_blk_count = PAGE_CACHE_SIZE / 512;
 
-       /*
-        * Enable runtime power management by default. This flag was added due
-        * to runtime power management causing disruption for some users, but
-        * the power on/off code has been improved since then.
-        *
-        * We'll enable this flag by default as an experiment, and if no
-        * problems are reported, we will follow up later and remove the flag
-        * altogether.
-        */
-       host->caps = MMC_CAP_POWER_OFF_CARD;
-
        return host;
 
 free:
index dbf421a..d240427 100644 (file)
@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
         * set the notification byte in the ext_csd register of device
         */
        if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
-           (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+           (card->ext_csd.rev >= 6)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_POWER_OFF_NOTIFICATION,
                                 EXT_CSD_POWER_ON,
                                 card->ext_csd.generic_cmd6_time);
                if (err && err != -EBADMSG)
                        goto free_card;
-       }
 
-       if (!err)
-               card->poweroff_notify_state = MMC_POWERED_ON;
+               /*
+                * Here err is either -EBADMSG or 0, so check for
+                * success before updating the flag
+                */
+               if (!err)
+                       card->poweroff_notify_state = MMC_POWERED_ON;
+       }
 
        /*
         * Activate high speed (if supported)
index 325ea61..8e0fbe9 100644 (file)
@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                                "failed to config DMA channel. Falling back to PIO\n");
                        dma_release_channel(host->dma);
                        host->do_dma = 0;
+                       host->dma = NULL;
                }
        }
 
index 101cd31..d5fe43d 100644 (file)
@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
                        host->data->sg_len,
                        omap_hsmmc_get_dma_dir(host, host->data));
                omap_free_dma(dma_ch);
+               host->data->host_cookie = 0;
        }
        host->data = NULL;
 }
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
        struct mmc_data *data = mrq->data;
 
        if (host->use_dma) {
-               dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-                            omap_hsmmc_get_dma_dir(host, data));
+               if (data->host_cookie)
+                       dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+                                    data->sg_len,
+                                    omap_hsmmc_get_dma_dir(host, data));
                data->host_cookie = 0;
        }
 }
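
The host_cookie test above follows the MMC async-request convention: the pre_req hook sets a non-zero cookie only after it has DMA-mapped the scatterlist, so post_req must unmap only when that cookie is set and must always clear it afterwards. A minimal sketch of the pairing, with assumed driver names and simplified direction and error handling:

    static void foo_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
                            bool is_first_req)
    {
            struct mmc_data *data = mrq->data;
            enum dma_data_direction dir;

            if (!data)
                    return;
            dir = (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE
                                                 : DMA_FROM_DEVICE;
            if (dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, dir) > 0)
                    data->host_cookie = 1;          /* mapped by pre_req */
    }

    static void foo_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
                             int err)
    {
            struct mmc_data *data = mrq->data;
            enum dma_data_direction dir;

            if (!data)
                    return;
            dir = (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE
                                                 : DMA_FROM_DEVICE;
            if (data->host_cookie)                  /* only if pre_req mapped */
                    dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, dir);
            data->host_cookie = 0;
    }
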
index 4b920b7..b4257e7 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/mmc/host.h>
+#include <linux/module.h>
 #include <mach/cns3xxx.h>
 #include "sdhci-pltfm.h"
 
@@ -108,13 +109,10 @@ static struct platform_driver sdhci_cns3xxx_driver = {
        .driver         = {
                .name   = "sdhci-cns3xxx",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_cns3xxx_probe,
        .remove         = __devexit_p(sdhci_cns3xxx_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_cns3xxx_init(void)
index f2d29dc..a81312c 100644 (file)
@@ -82,13 +82,10 @@ static struct platform_driver sdhci_dove_driver = {
        .driver         = {
                .name   = "sdhci-dove",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_dove_probe,
        .remove         = __devexit_p(sdhci_dove_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_dove_init(void)
index 4b976f0..38ebc4e 100644 (file)
@@ -599,14 +599,11 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
                .name   = "sdhci-esdhc-imx",
                .owner  = THIS_MODULE,
                .of_match_table = imx_esdhc_dt_ids,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .id_table       = imx_esdhc_devtype,
        .probe          = sdhci_esdhc_imx_probe,
        .remove         = __devexit_p(sdhci_esdhc_imx_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_esdhc_imx_init(void)
index 59e9d00..01e5f62 100644 (file)
@@ -125,13 +125,10 @@ static struct platform_driver sdhci_esdhc_driver = {
                .name = "sdhci-esdhc",
                .owner = THIS_MODULE,
                .of_match_table = sdhci_esdhc_of_match,
+               .pm = SDHCI_PLTFM_PMOPS,
        },
        .probe = sdhci_esdhc_probe,
        .remove = __devexit_p(sdhci_esdhc_remove),
-#ifdef CONFIG_PM
-       .suspend = sdhci_pltfm_suspend,
-       .resume = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_esdhc_init(void)
index 9b0d794..3619adc 100644 (file)
@@ -87,13 +87,10 @@ static struct platform_driver sdhci_hlwd_driver = {
                .name = "sdhci-hlwd",
                .owner = THIS_MODULE,
                .of_match_table = sdhci_hlwd_of_match,
+               .pm = SDHCI_PLTFM_PMOPS,
        },
        .probe = sdhci_hlwd_probe,
        .remove = __devexit_p(sdhci_hlwd_remove),
-#ifdef CONFIG_PM
-       .suspend = sdhci_pltfm_suspend,
-       .resume = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_hlwd_init(void)
index d833d9c..6878a94 100644 (file)
@@ -54,8 +54,7 @@ struct sdhci_pci_fixes {
        int                     (*probe_slot) (struct sdhci_pci_slot *);
        void                    (*remove_slot) (struct sdhci_pci_slot *, int);
 
-       int                     (*suspend) (struct sdhci_pci_chip *,
-                                       pm_message_t);
+       int                     (*suspend) (struct sdhci_pci_chip *);
        int                     (*resume) (struct sdhci_pci_chip *);
 };
 
@@ -549,7 +548,7 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
                jmicron_enable_mmc(slot->host, 0);
 }
 
-static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
 {
        int i;
 
@@ -993,8 +992,9 @@ static struct sdhci_ops sdhci_pci_ops = {
 
 #ifdef CONFIG_PM
 
-static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
        mmc_pm_flag_t slot_pm_flags;
@@ -1010,7 +1010,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                if (!slot)
                        continue;
 
-               ret = sdhci_suspend_host(slot->host, state);
+               ret = sdhci_suspend_host(slot->host);
 
                if (ret) {
                        for (i--; i >= 0; i--)
@@ -1026,7 +1026,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        }
 
        if (chip->fixes && chip->fixes->suspend) {
-               ret = chip->fixes->suspend(chip, state);
+               ret = chip->fixes->suspend(chip);
                if (ret) {
                        for (i = chip->num_slots - 1; i >= 0; i--)
                                sdhci_resume_host(chip->slots[i]->host);
@@ -1042,16 +1042,17 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                }
                pci_set_power_state(pdev, PCI_D3hot);
        } else {
-               pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+               pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_disable_device(pdev);
-               pci_set_power_state(pdev, pci_choose_state(pdev, state));
+               pci_set_power_state(pdev, PCI_D3hot);
        }
 
        return 0;
 }
 
-static int sdhci_pci_resume(struct pci_dev *pdev)
+static int sdhci_pci_resume(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
        int i, ret;
@@ -1099,7 +1100,6 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
-       pm_message_t state = { .event = PM_EVENT_SUSPEND };
        int i, ret;
 
        chip = pci_get_drvdata(pdev);
@@ -1121,7 +1121,7 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
        }
 
        if (chip->fixes && chip->fixes->suspend) {
-               ret = chip->fixes->suspend(chip, state);
+               ret = chip->fixes->suspend(chip);
                if (ret) {
                        for (i = chip->num_slots - 1; i >= 0; i--)
                                sdhci_runtime_resume_host(chip->slots[i]->host);
@@ -1176,6 +1176,8 @@ static int sdhci_pci_runtime_idle(struct device *dev)
 #endif
 
 static const struct dev_pm_ops sdhci_pci_pm_ops = {
+       .suspend = sdhci_pci_suspend,
+       .resume = sdhci_pci_resume,
        .runtime_suspend = sdhci_pci_runtime_suspend,
        .runtime_resume = sdhci_pci_runtime_resume,
        .runtime_idle = sdhci_pci_runtime_idle,
@@ -1428,8 +1430,6 @@ static struct pci_driver sdhci_driver = {
        .id_table =     pci_ids,
        .probe =        sdhci_pci_probe,
        .remove =       __devexit_p(sdhci_pci_remove),
-       .suspend =      sdhci_pci_suspend,
-       .resume =       sdhci_pci_resume,
        .driver =       {
                .pm =   &sdhci_pci_pm_ops
        },
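
This driver moves from the legacy pci_driver .suspend/.resume callbacks, which take a pm_message_t, to a struct dev_pm_ops hung off .driver.pm, with to_pci_dev() recovering the pci_dev inside the new device-based callbacks. A stripped-down sketch of the same conversion for a hypothetical PCI driver (probe, remove and the ID table omitted):

    #include <linux/pci.h>
    #include <linux/pm.h>

    static int foo_pci_suspend(struct device *dev)
    {
            struct pci_dev *pdev = to_pci_dev(dev);

            dev_dbg(&pdev->dev, "suspending\n");    /* quiesce hardware here */
            return 0;
    }

    static int foo_pci_resume(struct device *dev)
    {
            struct pci_dev *pdev = to_pci_dev(dev);

            dev_dbg(&pdev->dev, "resuming\n");      /* reinitialise hardware */
            return 0;
    }

    static const struct dev_pm_ops foo_pci_pm_ops = {
            .suspend = foo_pci_suspend,
            .resume  = foo_pci_resume,
    };

    static struct pci_driver foo_pci_driver = {
            .name   = "foo-pci",
            /* no legacy .suspend/.resume; the PM core uses .driver.pm */
            .driver = { .pm = &foo_pci_pm_ops },
    };
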
index a9e12ea..03970bc 100644 (file)
@@ -194,21 +194,25 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
 EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
 
 #ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+static int sdhci_pltfm_suspend(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
-       return sdhci_suspend_host(host, state);
+       return sdhci_suspend_host(host);
 }
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
 
-int sdhci_pltfm_resume(struct platform_device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
        return sdhci_resume_host(host);
 }
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+       .suspend        = sdhci_pltfm_suspend,
+       .resume         = sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
 #endif /* CONFIG_PM */
 
 static int __init sdhci_pltfm_drv_init(void)
index 3a9fc3f..37e0e18 100644 (file)
@@ -99,8 +99,10 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
 extern int sdhci_pltfm_unregister(struct platform_device *pdev);
 
 #ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
-extern int sdhci_pltfm_resume(struct platform_device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
 #endif
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
index d4bf6d3..7a039c3 100644 (file)
@@ -218,13 +218,10 @@ static struct platform_driver sdhci_pxav2_driver = {
        .driver         = {
                .name   = "sdhci-pxav2",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_pxav2_probe,
        .remove         = __devexit_p(sdhci_pxav2_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 static int __init sdhci_pxav2_init(void)
 {
index cff4ad3..15673a7 100644 (file)
@@ -264,13 +264,10 @@ static struct platform_driver sdhci_pxav3_driver = {
        .driver         = {
                .name   = "sdhci-pxav3",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_pxav3_probe,
        .remove         = __devexit_p(sdhci_pxav3_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 static int __init sdhci_pxav3_init(void)
 {
index 3d00e72..0d33ff0 100644 (file)
@@ -622,33 +622,38 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
 
 #ifdef CONFIG_PM
 
-static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+static int sdhci_s3c_suspend(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
-       return sdhci_suspend_host(host, pm);
+       return sdhci_suspend_host(host);
 }
 
-static int sdhci_s3c_resume(struct platform_device *dev)
+static int sdhci_s3c_resume(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
        return sdhci_resume_host(host);
 }
 
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+       .suspend        = sdhci_s3c_suspend,
+       .resume         = sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
 #else
-#define sdhci_s3c_suspend NULL
-#define sdhci_s3c_resume NULL
+#define SDHCI_S3C_PMOPS NULL
 #endif
 
 static struct platform_driver sdhci_s3c_driver = {
        .probe          = sdhci_s3c_probe,
        .remove         = __devexit_p(sdhci_s3c_remove),
-       .suspend        = sdhci_s3c_suspend,
-       .resume         = sdhci_s3c_resume,
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "s3c-sdhci",
+               .pm     = SDHCI_S3C_PMOPS,
        },
 };
 
index 89699e8..e2e18d3 100644 (file)
@@ -318,13 +318,10 @@ static struct platform_driver sdhci_tegra_driver = {
                .name   = "sdhci-tegra",
                .owner  = THIS_MODULE,
                .of_match_table = sdhci_tegra_dt_match,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_tegra_probe,
        .remove         = __devexit_p(sdhci_tegra_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_tegra_init(void)
index 6d8eea3..19ed580 100644 (file)
@@ -2327,7 +2327,7 @@ out:
 
 #ifdef CONFIG_PM
 
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
 {
        int ret;
 
index 0a5b654..a04d4d0 100644 (file)
@@ -374,7 +374,7 @@ extern int sdhci_add_host(struct sdhci_host *host);
 extern void sdhci_remove_host(struct sdhci_host *host, int dead);
 
 #ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
+extern int sdhci_suspend_host(struct sdhci_host *host);
 extern int sdhci_resume_host(struct sdhci_host *host);
 extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
 #endif
index 369366c..d5505f3 100644 (file)
@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                if (host->power) {
                        pm_runtime_put(&host->pd->dev);
                        host->power = false;
-                       if (p->down_pwr)
+                       if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
                                p->down_pwr(host->pd);
                }
                host->state = STATE_IDLE;
index d85a60c..4208b39 100644 (file)
@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                /* start bus clock */
                tmio_mmc_clk_start(host);
        } else if (ios->power_mode != MMC_POWER_UP) {
-               if (host->set_pwr)
+               if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
                        host->set_pwr(host->pdev, 0);
                if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
                    pdata->power) {
index e8f6e65..2ec978b 100644 (file)
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
 static int firmware_rom_wait_states = 0x1C;
 #endif
 
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
 MODULE_PARM_DESC(firmware_rom_wait_states,
                 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
 
index 94f5534..45876d0 100644 (file)
@@ -227,10 +227,14 @@ static int platram_probe(struct platform_device *pdev)
        if (!err)
                dev_info(&pdev->dev, "registered mtd device\n");
 
-       /* add the whole device. */
-       err = mtd_device_register(info->mtd, NULL, 0);
-       if (err)
-               dev_err(&pdev->dev, "failed to register the entire device\n");
+       if (pdata->nr_partitions) {
+               /* add the whole device. */
+               err = mtd_device_register(info->mtd, NULL, 0);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "failed to register the entire device\n");
+               }
+       }
 
        return err;
 
index 411a17d..2a25b67 100644 (file)
@@ -98,7 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
        }
        info->mtd->owner = THIS_MODULE;
 
-       mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+       mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
 
        platform_set_drvdata(pdev, info);
        return 0;
index 071b634..493ec2f 100644 (file)
@@ -21,9 +21,9 @@
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/mtd/partitions.h>
-
 #include "gpmi-nand.h"
 
 /* add our owner bbt descriptor */
index ee17139..f8aacf4 100644 (file)
@@ -188,7 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
        if (!flash_np)
                return -ENODEV;
 
-       ppdata->of_node = flash_np;
+       ppdata.of_node = flash_np;
        ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
                        dev_name(&ndfc->ofdev->dev), flash_np->name);
        if (!ndfc->mtd.name) {
index 67bf078..c8f47f1 100644 (file)
@@ -477,7 +477,6 @@ enum rtl_register_content {
        /* Config1 register p.24 */
        LEDS1           = (1 << 7),
        LEDS0           = (1 << 6),
-       MSIEnable       = (1 << 5),     /* Enable Message Signaled Interrupt */
        Speed_down      = (1 << 4),
        MEMMAP          = (1 << 3),
        IOMAP           = (1 << 2),
@@ -485,6 +484,7 @@ enum rtl_register_content {
        PMEnable        = (1 << 0),     /* Power Management Enable */
 
        /* Config2 register p. 25 */
+       MSIEnable       = (1 << 5),     /* 8169 only. Reserved in the 8168. */
        PCI_Clock_66MHz = 0x01,
        PCI_Clock_33MHz = 0x00,
 
@@ -3426,22 +3426,24 @@ static const struct rtl_cfg_info {
 };
 
 /* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
                            const struct rtl_cfg_info *cfg)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
        unsigned msi = 0;
        u8 cfg2;
 
        cfg2 = RTL_R8(Config2) & ~MSIEnable;
        if (cfg->features & RTL_FEATURE_MSI) {
-               if (pci_enable_msi(pdev)) {
-                       dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+               if (pci_enable_msi(tp->pci_dev)) {
+                       netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
                } else {
                        cfg2 |= MSIEnable;
                        msi = RTL_FEATURE_MSI;
                }
        }
-       RTL_W8(Config2, cfg2);
+       if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+               RTL_W8(Config2, cfg2);
        return msi;
 }
 
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                tp->features |= RTL_FEATURE_WOL;
        if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
                tp->features |= RTL_FEATURE_WOL;
-       tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+       tp->features |= rtl_try_msi(tp, cfg);
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
        if (rtl_tbi_enabled(tp)) {
index dca9d33..c97d2f5 100644 (file)
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
 
        /* handle completed packets */
+       spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+       spin_lock_irqsave(&chan->lock, flags);
 
        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
index 10826d8..1187a11 100644 (file)
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
                goto done;
 
        /* Re-enable the ingress interrupt. */
-       enable_percpu_irq(priv->intr_id);
+       enable_percpu_irq(priv->intr_id, 0);
 
        /* HACK: Avoid the "rotting packet" problem (see above). */
        if (qup->__packet_receive_read !=
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
        info->napi_enabled = true;
 
        /* Enable the ingress interrupt. */
-       enable_percpu_irq(priv->intr_id);
+       enable_percpu_irq(priv->intr_id, 0);
 }
 
 
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
        for (i = 0; i < sh->nr_frags; i++) {
 
                skb_frag_t *f = &sh->frags[i];
-               unsigned long pfn = page_to_pfn(f->page);
+               unsigned long pfn = page_to_pfn(skb_frag_page(f));
 
                /* FIXME: Compute "hash_for_home" properly. */
                /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
                /* FIXME: Hmmm. */
                if (!hash_default) {
                        void *va = pfn_to_kaddr(pfn) + f->page_offset;
-                       BUG_ON(PageHighMem(f->page));
+                       BUG_ON(PageHighMem(skb_frag_page(f)));
                        finv_buffer_remote(va, f->size, 0);
                }
 
index e6fed4d..e95f0e6 100644 (file)
@@ -1655,6 +1655,10 @@ static const struct usb_device_id        products [] = {
        // ASIX 88772a
        USB_DEVICE(0x0db0, 0xa877),
        .driver_info = (unsigned long) &ax88772_info,
+}, {
+       // Asus USB Ethernet Adapter
+       USB_DEVICE (0x0b95, 0x7e2b),
+       .driver_info = (unsigned long) &ax88772_info,
 },
        { },            // END
 };
index 888abc2..528d5f3 100644 (file)
@@ -1271,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
 
        ath_rc_priv->max_valid_rate = k;
        ath_rc_sort_validrates(rate_table, ath_rc_priv);
-       ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+       ath_rc_priv->rate_max_phy = (k > 4) ?
+                                       ath_rc_priv->valid_rate_index[k-4] :
+                                       ath_rc_priv->valid_rate_index[k-1];
        ath_rc_priv->rate_table = rate_table;
 
        ath_dbg(common, ATH_DBG_CONFIG,
index a7a6def..5c7c17c 100644 (file)
@@ -606,8 +606,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
                        if (ctx->ht.enabled) {
                                /* if HT40 is used, it should not change
                                 * after associated except channel switch */
-                               if (iwl_is_associated_ctx(ctx) &&
-                                    !ctx->ht.is_40mhz)
+                               if (!ctx->ht.is_40mhz ||
+                                               !iwl_is_associated_ctx(ctx))
                                        iwlagn_config_ht40(conf, ctx);
                        } else
                                ctx->ht.is_40mhz = false;
index 35a6b71..df1540c 100644 (file)
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               else
+                       tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        }
 
        iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
index bacc06c..e0e9a3d 100644 (file)
@@ -2850,6 +2850,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
        int ret;
        u8 sta_id;
 
+       if (ctx->ctxid != IWL_RXON_CTX_PAN)
+               return 0;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->shrd->mutex);
 
@@ -2898,6 +2901,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
        struct iwl_rxon_context *ctx = vif_priv->ctx;
 
+       if (ctx->ctxid != IWL_RXON_CTX_PAN)
+               return;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->shrd->mutex);
 
index ce91898..5f17ab8 100644 (file)
@@ -1197,9 +1197,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
        /* Set up entry for this TFD in Tx byte-count array */
-       if (is_agg)
-               iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
-                                              le16_to_cpu(tx_cmd->len));
+       iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
        dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
                        DMA_BIDIRECTIONAL);
index ac27815..6e0a3ea 100644 (file)
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 {
        struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
        unsigned long cmd_flags;
-       unsigned long cmd_pending_q_flags;
        unsigned long scan_pending_q_flags;
        uint16_t cancel_scan_cmd = false;
 
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                cmd_node = adapter->curr_cmd;
                cmd_node->wait_q_enabled = false;
                cmd_node->cmd_flag |= CMD_F_CANCELED;
-               spin_lock_irqsave(&adapter->cmd_pending_q_lock,
-                                 cmd_pending_q_flags);
-               list_del(&cmd_node->list);
-               spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
-                                      cmd_pending_q_flags);
                mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+               adapter->curr_cmd = NULL;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
        }
 
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
        }
        adapter->cmd_wait_q.status = -1;
-       mwifiex_complete_cmd(adapter, adapter->curr_cmd);
 }
 
 /*
index 19c0115..0f0cfa3 100644 (file)
 #include <linux/string.h>
 #include <linux/slab.h>
 
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
 /**
  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
  * @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
        struct of_irq oirq;
 
        if (of_irq_map_one(dev, index, &oirq))
-               return NO_IRQ;
+               return 0;
 
        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 
        /* Only dereference the resource if both the
         * resource and the irq are valid. */
-       if (r && irq != NO_IRQ) {
+       if (r && irq) {
                r->start = r->end = irq;
                r->flags = IORESOURCE_IRQ;
                r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
 {
        int nr = 0;
 
-       while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+       while (of_irq_to_resource(dev, nr, NULL))
                nr++;
 
        return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
        int i;
 
        for (i = 0; i < nr_irqs; i++, res++)
-               if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+               if (!of_irq_to_resource(dev, i, res))
                        break;
 
        return i;
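
These hunks drop the local NO_IRQ fallback and make 0 the "no interrupt" value throughout the OF IRQ helpers, so callers test the mapping result directly instead of comparing against NO_IRQ. A small sketch of the caller side under that convention (the probe function and names are illustrative):

    #include <linux/of_irq.h>

    static int foo_of_probe(struct device_node *np)
    {
            unsigned int irq = irq_of_parse_and_map(np, 0);

            if (!irq)           /* 0 now means "no/invalid interrupt" */
                    return -ENODEV;

            /* request_irq(irq, ...) and the rest of probe go here */
            return 0;
    }
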
index 89f6345..84a208d 100644 (file)
@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
+       retval = 0;
        if (val)
                retval = oprofile_start();
        else
index d0de6cc..2f0aa0f 100644 (file)
@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
 }
 
 
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
 int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 {
        char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
        raw_spin_lock_irqsave(&oprofilefs_lock, flags);
        *val = simple_strtoul(tmpbuf, NULL, 0);
        raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
-       return 0;
+       return count;
 }
 
 
@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&value, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_ulong(file->private_data, value);
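
After this change oprofilefs_ulong_from_user() returns the consumed byte count on success, 0 when count is zero (leaving *val untouched), and a negative errno on failure, which is why the write handlers above now bail out on retval <= 0. A sketch of the resulting caller pattern (an illustrative file operation, not part of the patch):

    static ssize_t foo_write(struct file *file, char const __user *buf,
                             size_t count, loff_t *offset)
    {
            unsigned long val;
            int retval;

            retval = oprofilefs_ulong_from_user(&val, buf, count);
            if (retval <= 0)    /* 0: nothing parsed, val unset; <0: error */
                    return retval;

            /* ... consume val ... */
            return count;
    }
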
index 7ec56fb..b0dd08e 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/pci-ats.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 
 #include "pci.h"
 
index fce1c54..9ddf69e 100644 (file)
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
        if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
                return AE_OK;
 
+       pdev = pbus->self;
+       if (pdev && pci_is_pcie(pdev)) {
+               tmp = acpi_find_root_bridge_handle(pdev);
+               if (tmp) {
+                       struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+                       if (root && (root->osc_control_set &
+                                       OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+                               return AE_OK;
+               }
+       }
+
        acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
        device = (adr >> 16) & 0xffff;
        function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 
        pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
        if (pdev) {
-               pdev->current_state = PCI_D0;
                slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
                pci_dev_put(pdev);
        }
@@ -459,17 +470,8 @@ static int add_bridge(acpi_handle handle)
 {
        acpi_status status;
        unsigned long long tmp;
-       struct acpi_pci_root *root;
        acpi_handle dummy_handle;
 
-       /*
-        * We shouldn't use this bridge if PCIe native hotplug control has been
-        * granted by the BIOS for it.
-        */
-       root = acpi_pci_find_root(handle);
-       if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
-               return -ENODEV;
-
        /* if the bridge doesn't have _STA, we assume it is always there */
        status = acpi_get_handle(handle, "_STA", &dummy_handle);
        if (ACPI_SUCCESS(status)) {
@@ -1385,19 +1387,11 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
 static acpi_status
 find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
-       struct acpi_pci_root *root;
        int *count = (int *)context;
 
        if (!acpi_is_root_bridge(handle))
                return AE_OK;
 
-       root = acpi_pci_find_root(handle);
-       if (!root)
-               return AE_OK;
-
-       if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
-               return AE_OK;
-
        (*count)++;
        acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
                                    handle_hotplug_event_bridge, NULL);
index b82c155..1969a3e 100644 (file)
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
        struct resource *res;
        struct pci_dev *pdev;
        struct pci_sriov *iov = dev->sriov;
+       int bars = 0;
 
        if (!nr_virtfn)
                return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 
        nres = 0;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+               bars |= (1 << (i + PCI_IOV_RESOURCES));
                res = dev->resource + PCI_IOV_RESOURCES + i;
                if (res->parent)
                        nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
                return -ENOMEM;
        }
 
+       if (pci_enable_resources(dev, bars)) {
+               dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+               return -ENOMEM;
+       }
+
        if (iov->link != dev->devfn) {
                pdev = pci_get_slot(dev->bus, iov->link);
                if (!pdev)
index 6f45a73..6d4a531 100644 (file)
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
                error = platform_pci_set_power_state(dev, state);
                if (!error)
                        pci_update_current_state(dev, state);
+               /* Fall back to PCI_D0 if native PM is not supported */
+               if (!dev->pm_cap)
+                       dev->current_state = PCI_D0;
        } else {
                error = -ENODEV;
                /* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
        if (atomic_add_return(1, &dev->enable_cnt) > 1)
                return 0;               /* already enabled */
 
-       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       /* only skip sriov related */
+       for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+               if (dev->resource[i].flags & flags)
+                       bars |= (1 << i);
+       for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
 
index cf3f999..10451a1 100644 (file)
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
 
 static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
 {
-       return 1; /* always round timer functions to one nanosecond */
+       tp->tv_sec = 0;
+       tp->tv_nsec = 1;
+       return 0;
 }
 
 static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
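
The .getres fix follows the posix clock convention: the callback reports the resolution through the timespec argument and returns 0 on success rather than a bare 1. From user space the value then comes back through clock_getres(); a small sketch, assuming clkid was already derived from an opened /dev/ptpN descriptor:

    #include <stdio.h>
    #include <time.h>

    static void print_ptp_resolution(clockid_t clkid)
    {
            struct timespec res;

            if (clock_getres(clkid, &res) == 0)
                    printf("resolution: %ld.%09ld s\n",
                           (long)res.tv_sec, res.tv_nsec);
    }
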
index 5225930..691b1ab 100644 (file)
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
        INIT_WORK(&priv->idb_work, tsi721_db_dpc);
 
        /* Allocate buffer for inbound doorbells queue */
-       priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+       priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
                                IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
                                &priv->idb_dma, GFP_KERNEL);
        if (!priv->idb_base)
                return -ENOMEM;
 
-       memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
        dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
                priv->idb_base, (unsigned long long)priv->idb_dma);
 
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
         */
 
        /* Allocate space for DMA descriptors */
-       bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+       bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                        bd_num * sizeof(struct tsi721_dma_desc),
                                        &bd_phys, GFP_KERNEL);
        if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        priv->bdma[chnum].bd_phys = bd_phys;
        priv->bdma[chnum].bd_base = bd_ptr;
 
-       memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
        dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
                bd_ptr, (unsigned long long)bd_phys);
 
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
                                        bd_num : TSI721_DMA_MINSTSSZ;
        sts_size = roundup_pow_of_two(sts_size);
-       sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+       sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                     sts_size * sizeof(struct tsi721_dma_sts),
                                     &sts_phys, GFP_KERNEL);
        if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        priv->bdma[chnum].sts_base = sts_ptr;
        priv->bdma[chnum].sts_size = sts_size;
 
-       memset(sts_ptr, 0, sts_size);
-
        dev_dbg(&priv->pdev->dev,
                "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
                sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
 
        /* Outbound message descriptor status FIFO allocation */
        priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
-       priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+       priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
                        priv->omsg_ring[mbox].sts_size *
                                                sizeof(struct tsi721_dma_sts),
                        &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
                goto out_desc;
        }
 
-       memset(priv->omsg_ring[mbox].sts_base, 0,
-               entries * sizeof(struct tsi721_dma_sts));
-
        /*
         * Configure Outbound Messaging Engine
         */
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
        INIT_LIST_HEAD(&mport->dbells);
 
        rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
-       rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
-       rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+       rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+       rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
        strcpy(mport->name, "Tsi721 mport");
 
        /* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
        struct tsi721_device *priv;
-       int i;
+       int i, cap;
        int err;
        u32 regval;
 
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                        dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
        }
 
-       /* Clear "no snoop" and "relaxed ordering" bits. */
-       pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
-       regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
-       pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+       cap = pci_pcie_cap(pdev);
+       BUG_ON(cap == 0);
+
+       /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+       pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+       regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+                   PCI_EXP_DEVCTL_NOSNOOP_EN);
+       regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+       pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+       /* Adjust PCIe completion timeout. */
+       pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+       regval &= ~(0x0f);
+       pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
 
        /*
         * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
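
Several allocations in this driver switch from dma_alloc_coherent() followed by memset() to dma_zalloc_coherent(), which hands back already-zeroed coherent memory. A minimal sketch of the replacement pattern (the wrapper name is illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    static void *alloc_zeroed_ring(struct device *dev, size_t size,
                                   dma_addr_t *phys)
    {
            /* Zeroed on success, so no explicit memset() is needed. */
            return dma_zalloc_coherent(dev, size, phys, GFP_KERNEL);
    }
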
index 58be4de..822e54c 100644 (file)
@@ -72,6 +72,8 @@
 #define TSI721_MSIXPBA_OFFSET  0x2a000
 #define TSI721_PCIECFG_EPCTL   0x400
 
+#define MAX_READ_REQUEST_SZ_SHIFT      12
+
 /*
  * Event Management Registers
  */
index fa4d9f3..3bcc7cf 100644 (file)
@@ -73,6 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
                err = -EINVAL;
 
        mutex_unlock(&rtc->ops_lock);
+       /* A timer might have just expired */
+       schedule_work(&rtc->irqwork);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_time);
@@ -112,6 +114,8 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
                err = -EINVAL;
 
        mutex_unlock(&rtc->ops_lock);
+       /* A timer might have just expired */
+       schedule_work(&rtc->irqwork);
 
        return err;
 }
@@ -403,6 +407,8 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
        }
        mutex_unlock(&rtc->ops_lock);
+       /* The alarm might already be in the past. */
+       schedule_work(&rtc->irqwork);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
index eda128f..64aedd8 100644 (file)
@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
 static struct rtc_class_ops m41t80_rtc_ops = {
        .read_time = m41t80_rtc_read_time,
        .set_time = m41t80_rtc_set_time,
+       /*
+        * XXX - m41t80 alarm functionality is reported broken.
+        * until it is fixed, don't register alarm functions.
+        *
        .read_alarm = m41t80_rtc_read_alarm,
        .set_alarm = m41t80_rtc_set_alarm,
+       */
        .proc = m41t80_rtc_proc,
+       /*
+        * See above comment on broken alarm
+        *
        .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+       */
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
index 7639ab9..5b979d9 100644 (file)
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
        void __iomem *base = s3c_rtc_base;
        int year = tm->tm_year - 100;
 
-       clk_enable(rtc_clk);
        pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
                 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                 tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
                return -EINVAL;
        }
 
+       clk_enable(rtc_clk);
        writeb(bin2bcd(tm->tm_sec),  base + S3C2410_RTCSEC);
        writeb(bin2bcd(tm->tm_min),  base + S3C2410_RTCMIN);
        writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
index 11f07f8..b79576b 100644 (file)
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
 {
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
+       /* if previous slave_alloc returned early, there is nothing to do */
+       if (!zfcp_sdev->port)
+               return;
+
        zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
        put_device(&zfcp_sdev->port->dev);
 }
index 5f94d22..5426682 100644 (file)
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
        int ret = 0;
 
        while (len > 0) {
-               int err = bbc_i2c_writeb(client, *buf, off);
-
-               if (err < 0) {
-                       ret = err;
+               ret = bbc_i2c_writeb(client, *buf, off);
+               if (ret < 0)
                        break;
-               }
-
                len--;
                buf++;
                off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
        int ret = 0;
 
        while (len > 0) {
-               int err = bbc_i2c_readb(client, buf, off);
-               if (err < 0) {
-                       ret = err;
+               ret = bbc_i2c_readb(client, buf, off);
+               if (ret < 0)
                        break;
-               }
                len--;
                buf++;
                off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
        .remove         = __devexit_p(bbc_i2c_remove),
 };
 
-static int __init bbc_i2c_init(void)
-{
-       return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
-       platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
 
 MODULE_LICENSE("GPL");
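
The bbc_i2c hunks above and the remaining sparc driver hunks below make the same substitution: module_platform_driver() generates the registration boilerplate that each driver previously spelled out by hand. Roughly, the macro expands to the following (sketched here for a hypothetical foo_driver):

    static int __init foo_driver_init(void)
    {
            return platform_driver_register(&foo_driver);
    }
    module_init(foo_driver_init);

    static void __exit foo_driver_exit(void)
    {
            platform_driver_unregister(&foo_driver);
    }
    module_exit(foo_driver_exit);
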
index 965a1fc..4b99397 100644 (file)
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
        .remove         = __devexit_p(d7s_remove),
 };
 
-static int __init d7s_init(void)
-{
-       return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
-       platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
index be7b4e5..339fd6f 100644 (file)
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
        .remove         = __devexit_p(envctrl_remove),
 };
 
-static int __init envctrl_init(void)
-{
-       return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
-       platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
 
-module_init(envctrl_init);
-module_exit(envctrl_exit);
 MODULE_LICENSE("GPL");
index 73dd4e7..826157f 100644 (file)
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
        .remove         = __devexit_p(flash_remove),
 };
 
-static int __init flash_init(void)
-{
-       return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
-       platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
 
-module_init(flash_init);
-module_exit(flash_cleanup);
 MODULE_LICENSE("GPL");
index ebce963..0b31658 100644 (file)
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
 };
 
 
-static int __init uctrl_init(void)
-{
-       return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
-       platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
 
-module_init(uctrl_init);
-module_exit(uctrl_exit);
 MODULE_LICENSE("GPL");
index dba72a4..1ad0b82 100644 (file)
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
        spin_lock(&session->lock);
        task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
                                 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
-       if (!task) {
+       if (!task || !task->sc) {
                spin_unlock(&session->lock);
                return -EINVAL;
        }
        sc = task->sc;
-       spin_unlock(&session->lock);
 
        if (!blk_rq_cpu_valid(sc->request))
                cpu = smp_processor_id();
        else
                cpu = sc->request->cpu;
 
+       spin_unlock(&session->lock);
+
        p = &per_cpu(bnx2i_percpu, cpu);
        spin_lock(&p->p_work_lock);
        if (unlikely(!p->iothread)) {
index cefbe44..8d67467 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/sysfs.h>
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
                           unsigned int);
 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+                                    ulong event, void *ptr);
 
 static bool fcoe_match(struct net_device *netdev);
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
        .notifier_call = fcoe_cpu_callback,
 };
 
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+       .notifier_call = fcoe_dcb_app_notification,
+};
+
 static struct scsi_transport_template *fcoe_nport_scsi_transport;
 static struct scsi_transport_template *fcoe_vport_scsi_transport;
 
@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        skb_reset_network_header(skb);
        skb->mac_len = elen;
        skb->protocol = htons(ETH_P_FCOE);
+       skb->priority = port->priority;
+
        if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
            fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
                skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
        stats->InvalidCRCCount++;
        if (stats->InvalidCRCCount < 5)
                printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+       put_cpu();
        return -EINVAL;
 }
 
@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
  */
 static void fcoe_dev_setup(void)
 {
+       register_dcbevent_notifier(&dcb_notifier);
        register_netdevice_notifier(&fcoe_notifier);
 }
 
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
  */
 static void fcoe_dev_cleanup(void)
 {
+       unregister_dcbevent_notifier(&dcb_notifier);
        unregister_netdevice_notifier(&fcoe_notifier);
 }
 
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+       struct fcoe_interface *fcoe;
+       struct net_device *real_dev;
+
+       list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+               if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+                       real_dev = vlan_dev_real_dev(fcoe->netdev);
+               else
+                       real_dev = fcoe->netdev;
+
+               if (netdev == real_dev)
+                       return fcoe;
+       }
+       return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+                                    ulong event, void *ptr)
+{
+       struct dcb_app_type *entry = ptr;
+       struct fcoe_interface *fcoe;
+       struct net_device *netdev;
+       struct fcoe_port *port;
+       int prio;
+
+       if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+               return NOTIFY_OK;
+
+       netdev = dev_get_by_index(&init_net, entry->ifindex);
+       if (!netdev)
+               return NOTIFY_OK;
+
+       fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+       dev_put(netdev);
+       if (!fcoe)
+               return NOTIFY_OK;
+
+       if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+               prio = ffs(entry->app.priority) - 1;
+       else
+               prio = entry->app.priority;
+
+       if (prio < 0)
+               return NOTIFY_OK;
+
+       if (entry->app.protocol == ETH_P_FIP ||
+           entry->app.protocol == ETH_P_FCOE)
+               fcoe->ctlr.priority = prio;
+
+       if (entry->app.protocol == ETH_P_FCOE) {
+               port = lport_priv(fcoe->ctlr.lp);
+               port->priority = prio;
+       }
+
+       return NOTIFY_OK;
+}
+
 /**
  * fcoe_device_notification() - Handler for net device events
  * @notifier: The context of the notification
@@ -1964,6 +2037,46 @@ static bool fcoe_match(struct net_device *netdev)
        return true;
 }
 
+/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @fcoe: The fcoe_interface to set the FCoE and FIP priorities on
+ *
+ * Queries the netdev's DCB APP table and caches the priorities.
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+       int dcbx;
+       u8 fup, up;
+       struct net_device *netdev = fcoe->realdev;
+       struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+       struct dcb_app app = {
+                               .priority = 0,
+                               .protocol = ETH_P_FCOE
+                            };
+
+       /* setup DCB priority attributes. */
+       if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+               dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+               if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+                       app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+                       up = dcb_ieee_getapp_mask(netdev, &app);
+                       app.protocol = ETH_P_FIP;
+                       fup = dcb_ieee_getapp_mask(netdev, &app);
+               } else {
+                       app.selector = DCB_APP_IDTYPE_ETHTYPE;
+                       up = dcb_getapp(netdev, &app);
+                       app.protocol = ETH_P_FIP;
+                       fup = dcb_getapp(netdev, &app);
+               }
+
+               port->priority = ffs(up) ? ffs(up) - 1 : 0;
+               fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+       }
+#endif
+}
+
 /**
  * fcoe_create() - Create a fcoe interface
  * @netdev  : The net_device object the Ethernet interface to create on
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        /* Make this the "master" N_Port */
        fcoe->ctlr.lp = lport;
 
+       /* setup DCB priority attributes. */
+       fcoe_dcb_create(fcoe);
+
        /* add to lports list */
        fcoe_hostlist_add(lport);
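
In the fcoe.c changes above, dcb_getapp() and dcb_ieee_getapp_mask() return the APP priority as a bitmask of allowed 802.1p priorities, so the driver converts it to a numeric priority with ffs() before storing it in port->priority and fcoe->ctlr.priority. A standalone sketch of that conversion; prio_from_mask() is an illustrative helper for user space, not a function from this merge:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Convert an 802.1p priority bitmask (bit N set => priority N allowed)
 * to a single priority value, defaulting to 0 when the mask is empty. */
static int prio_from_mask(unsigned int mask)
{
	return mask ? ffs(mask) - 1 : 0;
}

int main(void)
{
	printf("%d\n", prio_from_mask(0x08));	/* bit 3 set -> priority 3 */
	printf("%d\n", prio_from_mask(0));	/* empty mask -> priority 0 */
	return 0;
}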
 
index c74c4b8..e7522dc 100644 (file)
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
 
        skb_put(skb, sizeof(*sol));
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
        }
        skb_put(skb, len);
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
        cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
 
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
 
        skb_put(skb, len);
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
 
index 4e041f6..d570573 100644 (file)
@@ -4335,7 +4335,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
        /* insert into event log */
        sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
             sizeof(Mpi2EventDataSasDeviceStatusChange_t);
-       event_reply = kzalloc(sz, GFP_KERNEL);
+       event_reply = kzalloc(sz, GFP_ATOMIC);
        if (!event_reply) {
                printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
index ac326c4..6465dae 100644 (file)
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
        scsi_qla_host_t *vha = shost_priv(shost);
        struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
 
-       if (!base_vha->flags.online)
+       if (!base_vha->flags.online) {
                fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
-       else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
-               fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
-       else
+               return;
+       }
+
+       switch (atomic_read(&base_vha->loop_state)) {
+       case LOOP_UPDATE:
+               fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+               break;
+       case LOOP_DOWN:
+               if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+                       fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+               else
+                       fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+               break;
+       case LOOP_DEAD:
+               fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+               break;
+       case LOOP_READY:
                fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+               break;
+       default:
+               fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+               break;
+       }
 }
 
 static int
index 9df4787..f3cddd5 100644 (file)
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
  * | Module Init and Probe        |       0x0116       |               |
- * | Mailbox commands             |       0x1129       |               |
+ * | Mailbox commands             |       0x112b       |               |
  * | Device Discovery             |       0x2083       |               |
  * | Queue Command and IO tracing |       0x302e       |     0x3008     |
  * | DPC Thread                   |       0x401c       |               |
  * | Async Events                 |       0x5059       |               |
- * | Timer Routines               |       0x600d       |               |
+ * | Timer Routines               |       0x6010       | 0x600e,0x600f  |
  * | User Space Interactions      |       0x709d       |               |
- * | Task Management              |       0x8041       |               |
+ * | Task Management              |       0x8041       | 0x800b         |
  * | AER/EEH                      |       0x900f       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb051       |               |
+ * | ISP82XX Specific             |       0xb052       |               |
  * | MultiQ                       |       0xc00b       |               |
  * | Misc                         |       0xd00b       |               |
  * ----------------------------------------------------------------------
index ce32d81..c0c11af 100644 (file)
@@ -578,6 +578,7 @@ extern int qla82xx_check_md_needed(scsi_qla_host_t *);
 extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
 extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
 extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
 
 /* BSG related functions */
 extern int qla24xx_bsg_request(struct fc_bsg_job *);
index f03e915..54ea68c 100644 (file)
@@ -1509,7 +1509,8 @@ enable_82xx_npiv:
                                    &ha->fw_xcb_count, NULL, NULL,
                                    &ha->max_npiv_vports, NULL);
 
-                               if (!fw_major_version && ql2xallocfwdump)
+                               if (!fw_major_version && ql2xallocfwdump
+                                   && !IS_QLA82XX(ha))
                                        qla2x00_alloc_fw_dump(vha);
                        }
                } else {
index dbec896..a4b267e 100644 (file)
@@ -120,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
  * Returns a pointer to the continuation type 1 IOCB packet.
  */
 static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
 {
        cont_a64_entry_t *cont_pkt;
 
-       struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -684,7 +683,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -2070,7 +2069,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
                        * Five DSDs are available in the Cont.
                        * Type 1 IOCB.
                               */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+                           vha->hw->req_q_map[0]);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
        int index;
        uint16_t tot_dsds;
         scsi_qla_host_t *vha = sp->fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
        struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
        int loop_iterartion = 0;
        int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
                        * Five DSDs are available in the Cont.
                        * Type 1 IOCB.
                               */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+                           ha->req_q_map[0]);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
index 2516adf..7b91b29 100644 (file)
@@ -1741,7 +1741,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                    resid, scsi_bufflen(cp));
 
                                cp->result = DID_ERROR << 16 | lscsi_status;
-                               break;
+                               goto check_scsi_status;
                        }
 
                        if (!lscsi_status &&
index 3b3cec9..82a3353 100644 (file)
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                mcp->mb[0] = MBS_LINK_DOWN_ERROR;
                ql_log(ql_log_warn, base_vha, 0x1004,
                    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
-               rval = QLA_FUNCTION_FAILED;
-               goto premature_exit;
+               return QLA_FUNCTION_TIMEOUT;
        }
 
        /*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                HINT_MBX_INT_PENDING) {
                                spin_unlock_irqrestore(&ha->hardware_lock,
                                        flags);
+                               ha->flags.mbox_busy = 0;
                                ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
                                    "Pending mailbox timeout, exiting.\n");
                                rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                HINT_MBX_INT_PENDING) {
                                spin_unlock_irqrestore(&ha->hardware_lock,
                                        flags);
+                               ha->flags.mbox_busy = 0;
                                ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
                                    "Pending mailbox timeout, exiting.\n");
                                rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                            !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                            !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+                               if (IS_QLA82XX(ha)) {
+                                       ql_dbg(ql_dbg_mbx, vha, 0x112a,
+                                           "disabling pause transmit on port "
+                                           "0 & 1.\n");
+                                       qla82xx_wr_32(ha,
+                                           QLA82XX_CRB_NIU + 0x98,
+                                           CRB_NIU_XG_PAUSE_CTL_P0|
+                                           CRB_NIU_XG_PAUSE_CTL_P1);
+                               }
                                ql_log(ql_log_info, base_vha, 0x101c,
                                    "Mailbox cmd timeout occured. "
                                    "Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                            !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                            !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+                               if (IS_QLA82XX(ha)) {
+                                       ql_dbg(ql_dbg_mbx, vha, 0x112b,
+                                           "disabling pause transmit on port "
+                                           "0 & 1.\n");
+                                       qla82xx_wr_32(ha,
+                                           QLA82XX_CRB_NIU + 0x98,
+                                           CRB_NIU_XG_PAUSE_CTL_P0|
+                                           CRB_NIU_XG_PAUSE_CTL_P1);
+                               }
                                ql_log(ql_log_info, base_vha, 0x101e,
                                    "Mailbox cmd timeout occured. "
                                    "Scheduling ISP abort.\n");
index 94bded5..0355493 100644 (file)
@@ -3817,6 +3817,20 @@ exit:
        return rval;
 }
 
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (ha->flags.mbox_busy) {
+               ha->flags.mbox_int = 1;
+               ha->flags.mbox_busy = 0;
+               ql_log(ql_log_warn, vha, 0x6010,
+                   "Doing premature completion of mbx command.\n");
+               if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+                       complete(&ha->mbx_intr_comp);
+       }
+}
+
 void qla82xx_watchdog(scsi_qla_host_t *vha)
 {
        uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                        qla2xxx_wake_dpc(vha);
                } else {
                        if (qla82xx_check_fw_alive(vha)) {
+                               ql_dbg(ql_dbg_timer, vha, 0x6011,
+                                   "disabling pause transmit on port 0 & 1.\n");
+                               qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                   CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
                                halt_status = qla82xx_rd_32(ha,
                                    QLA82XX_PEG_HALT_STATUS1);
-                               ql_dbg(ql_dbg_timer, vha, 0x6005,
+                               ql_log(ql_log_info, vha, 0x6005,
                                    "dumping hw/fw registers:.\n "
                                    " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
                                    " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                            QLA82XX_CRB_PEG_NET_3 + 0x3c),
                                    qla82xx_rd_32(ha,
                                            QLA82XX_CRB_PEG_NET_4 + 0x3c));
+                               if (LSW(MSB(halt_status)) == 0x67)
+                                       ql_log(ql_log_warn, vha, 0xb052,
+                                           "Firmware aborted with "
+                                           "error code 0x00006700. Device is "
+                                           "being reset.\n");
                                if (halt_status & HALT_STATUS_UNRECOVERABLE) {
                                        set_bit(ISP_UNRECOVERABLE,
                                            &vha->dpc_flags);
@@ -3869,16 +3892,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                }
                                qla2xxx_wake_dpc(vha);
                                ha->flags.isp82xx_fw_hung = 1;
-                               if (ha->flags.mbox_busy) {
-                                       ha->flags.mbox_int = 1;
-                                       ql_log(ql_log_warn, vha, 0x6007,
-                                           "Due to FW hung, doing "
-                                           "premature completion of mbx "
-                                           "command.\n");
-                                       if (test_bit(MBX_INTR_WAIT,
-                                           &ha->mbx_cmd_flags))
-                                               complete(&ha->mbx_intr_comp);
-                               }
+                               ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+                               qla82xx_clear_pending_mbx(vha);
                        }
                }
        }
@@ -4073,10 +4088,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
                        msleep(1000);
                        if (qla82xx_check_fw_alive(vha)) {
                                ha->flags.isp82xx_fw_hung = 1;
-                               if (ha->flags.mbox_busy) {
-                                       ha->flags.mbox_int = 1;
-                                       complete(&ha->mbx_intr_comp);
-                               }
+                               qla82xx_clear_pending_mbx(vha);
                                break;
                        }
                }
index 57820c1..57a226b 100644 (file)
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
 
 static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
        0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0        0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1        0x8
+
 #endif
index fd14c7b..f9e5b85 100644 (file)
@@ -201,12 +201,12 @@ MODULE_PARM_DESC(ql2xmdcapmask,
                "Set the Minidump driver capture mask level. "
                "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
 
-int ql2xmdenable;
+int ql2xmdenable = 1;
 module_param(ql2xmdenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmdenable,
                "Enable/disable MiniDump. "
-               "0 (Default) - MiniDump disabled. "
-               "1 - MiniDump enabled.");
+               "0 - MiniDump disabled. "
+               "1 (Default) - MiniDump enabled.");
 
 /*
  * SCSI host template entry points
@@ -423,6 +423,7 @@ fail2:
        qla25xx_delete_queues(vha);
        destroy_workqueue(ha->wq);
        ha->wq = NULL;
+       vha->req = ha->req_q_map[0];
 fail:
        ha->mqenable = 0;
        kfree(ha->req_q_map);
@@ -814,49 +815,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
        return return_status;
 }
 
-/*
- * qla2x00_wait_for_loop_ready
- *    Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- *    to be in LOOP_READY state.
- * Input:
- *     ha - pointer to host adapter structure
- *
- * Note:
- *    Does context switching-Release SPIN_LOCK
- *    (if any) before calling this routine.
- *
- *
- * Return:
- *    Success (LOOP_READY) : 0
- *    Failed  (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
-       int      return_status = QLA_SUCCESS;
-       unsigned long loop_timeout ;
-       struct qla_hw_data *ha = vha->hw;
-       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
-       /* wait for 5 min at the max for loop to be ready */
-       loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
-       while ((!atomic_read(&base_vha->loop_down_timer) &&
-           atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
-           atomic_read(&base_vha->loop_state) != LOOP_READY) {
-               if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
-                       return_status = QLA_FUNCTION_FAILED;
-                       break;
-               }
-               msleep(1000);
-               if (time_after_eq(jiffies, loop_timeout)) {
-                       return_status = QLA_FUNCTION_FAILED;
-                       break;
-               }
-       }
-       return (return_status);
-}
-
 static void
 sp_get(struct srb *sp)
 {
@@ -1035,12 +993,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
                    "Wait for hba online failed for cmd=%p.\n", cmd);
                goto eh_reset_failed;
        }
-       err = 1;
-       if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
-               ql_log(ql_log_warn, vha, 0x800b,
-                   "Wait for loop ready failed for cmd=%p.\n", cmd);
-               goto eh_reset_failed;
-       }
        err = 2;
        if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
                != QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
                goto eh_bus_reset_done;
        }
 
-       if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
-               if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
-                       ret = SUCCESS;
-       }
+       if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+               ret = SUCCESS;
+
        if (ret == FAILED)
                goto eh_bus_reset_done;
 
@@ -1206,15 +1157,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
        if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
                goto eh_host_reset_lock;
 
-       /*
-        * Fixme-may be dpc thread is active and processing
-        * loop_resync,so wait a while for it to
-        * be completed and then issue big hammer.Otherwise
-        * it may cause I/O failure as big hammer marks the
-        * devices as lost kicking of the port_down_timer
-        * while dpc is stuck for the mailbox to complete.
-        */
-       qla2x00_wait_for_loop_ready(vha);
        if (vha != base_vha) {
                if (qla2x00_vp_abort_isp(vha))
                        goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
                qla2x00_mark_all_devices_lost(vha, 0);
-               qla2x00_wait_for_loop_ready(vha);
        }
 
        if (ha->flags.enable_lip_reset) {
                ret = qla2x00_lip_reset(vha);
-               if (ret != QLA_SUCCESS) {
+               if (ret != QLA_SUCCESS)
                        ql_dbg(ql_dbg_taskm, vha, 0x802e,
                            "lip_reset failed (%d).\n", ret);
-               } else
-                       qla2x00_wait_for_loop_ready(vha);
        }
 
        /* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                /* For ISP82XX complete any pending mailbox cmd */
                if (IS_QLA82XX(ha)) {
                        ha->flags.isp82xx_fw_hung = 1;
-                       if (ha->flags.mbox_busy) {
-                               ha->flags.mbox_int = 1;
-                               ql_dbg(ql_dbg_aer, vha, 0x9001,
-                                   "Due to pci channel io frozen, doing premature "
-                                   "completion of mbx command.\n");
-                               complete(&ha->mbx_intr_comp);
-                       }
+                       ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+                       qla82xx_clear_pending_mbx(vha);
                }
                qla2x00_free_irqs(vha);
                pci_disable_device(pdev);
index 13b6357..23f33a6 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.07.07-k"
+#define QLA2XXX_VERSION      "8.03.07.12-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   3
index ace637b..fd5edc6 100644 (file)
 #define ISCSI_ALIAS_SIZE               32      /* ISCSI Alias name size */
 #define ISCSI_NAME_SIZE                        0xE0    /* ISCSI Name size */
 
-#define QL4_SESS_RECOVERY_TMO          30      /* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO          120     /* iSCSI session */
                                                /* recovery timeout */
 
 #define LSDW(x) ((u32)((u64)(x)))
 #define ISNS_DEREG_TOV                 5
 #define HBA_ONLINE_TOV                 30
 #define DISABLE_ACB_TOV                        30
+#define IP_CONFIG_TOV                  30
+#define LOGIN_TOV                      12
 
 #define MAX_RESET_HA_RETRIES           2
 
@@ -240,6 +242,45 @@ struct ddb_entry {
 
        uint16_t fw_ddb_index;  /* DDB firmware index */
        uint32_t fw_ddb_device_state; /* F/W Device State  -- see ql4_fw.h */
+       uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+       struct dev_db_entry fw_ddb_entry;
+       int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+       int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                         struct ddb_entry *ddb_entry, uint32_t state);
+
+       /* Driver Re-login  */
+       unsigned long flags;              /* DDB Flags */
+       uint16_t default_relogin_timeout; /*  Max time to wait for
+                                          *  relogin to complete */
+       atomic_t retry_relogin_timer;     /* Min Time between relogins
+                                          * (4000 only) */
+       atomic_t relogin_timer;           /* Max Time to wait for
+                                          * relogin to complete */
+       atomic_t relogin_retry_count;     /* Num of times relogin has been
+                                          * retried */
+       uint32_t default_time2wait;       /* Default Min time between
+                                          * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+       struct list_head list;
+       uint16_t fw_ddb_idx;
+       struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+       int port;
+       int tpgt;
+       char ip_addr[DDB_IPADDR_LEN];
+       char iscsi_name[ISCSI_NAME_SIZE];
+       uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
 };
 
 /*
@@ -411,7 +452,7 @@ struct scsi_qla_host {
 #define AF_FW_RECOVERY                 19 /* 0x00080000 */
 #define AF_EEH_BUSY                    20 /* 0x00100000 */
 #define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST              22 /* 0x00400000 */
        unsigned long dpc_flags;
 
 #define DPC_RESET_HA                   1 /* 0x00000002 */
@@ -604,6 +645,7 @@ struct scsi_qla_host {
        uint16_t bootload_minor;
        uint16_t bootload_patch;
        uint16_t bootload_build;
+       uint16_t def_timeout; /* Default login timeout */
 
        uint32_t flash_state;
 #define        QLFLASH_WAITING         0
@@ -623,6 +665,11 @@ struct scsi_qla_host {
        uint16_t iscsi_pci_func_cnt;
        uint8_t model_name[16];
        struct completion disable_acb_comp;
+       struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+       uint16_t pri_ddb_idx;
+       uint16_t sec_ddb_idx;
+       int is_reset;
 };
 
 struct ql4_task_data {
@@ -835,6 +882,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
 /*---------------------------------------------------------------------------*/
 
 /* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER    0
+#define RESET_ADAPTER   1
+
 #define PRESERVE_DDB_LIST      0
 #define REBUILD_DDB_LIST       1
 
index cbd5a20..4ac07f8 100644 (file)
@@ -12,6 +12,7 @@
 #define MAX_PRST_DEV_DB_ENTRIES                64
 #define MIN_DISC_DEV_DB_ENTRY          MAX_PRST_DEV_DB_ENTRIES
 #define MAX_DEV_DB_ENTRIES             512
+#define MAX_DEV_DB_ENTRIES_40XX                256
 
 /*************************************************************************
  *
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
        uint8_t res14[140];     /* 274-2FF */
 };
 
+#define IP_ADDR_COUNT  4 /* Total 4 IP address supported in one interface
+                          * One IPv4, one IPv6 link local and 2 IPv6
+                          */
+
+#define IP_STATE_MASK  0x0F000000
+#define IP_STATE_SHIFT 24
+
 struct init_fw_ctrl_blk {
        struct addr_ctrl_blk pri;
 /*     struct addr_ctrl_blk sec;*/
index 160db9d..d0dd4b3 100644 (file)
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
 int qla4xxx_hw_reset(struct scsi_qla_host *ha);
 int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
 int qla4xxx_soft_reset(struct scsi_qla_host *ha);
 irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
 
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                          uint32_t *mbx_sts);
 int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
 int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
 int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
                          uint16_t stats_size, dma_addr_t stats_dma);
 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                       struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+                                            struct ddb_entry *ddb_entry);
 int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
                            struct dev_db_entry *fw_ddb_entry,
                            dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
 int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
                                     uint32_t region, uint32_t field0,
                                     uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                            struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                      struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
 
 /* BSG Functions */
 int qla4xxx_bsg_request(struct bsg_job *bsg_job);
 int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
 
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
 extern int ql4xenablemsix;
index 3075fba..1bdfa81 100644 (file)
@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
  * be freed so that when login happens from user space there are free DDB
  * indices available.
  **/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
 {
        int max_ddbs;
        int ret;
        uint32_t idx = 0, next_idx = 0;
        uint32_t state = 0, conn_err = 0;
 
-       max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
                                     MAX_DEV_DB_ENTRIES;
 
        for (idx = 0; idx < max_ddbs; idx = next_idx) {
                ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
                                              &next_idx, &state, &conn_err,
                                                NULL, NULL);
-               if (ret == QLA_ERROR)
+               if (ret == QLA_ERROR) {
+                       next_idx++;
                        continue;
+               }
                if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
                    state == DDB_DS_SESSION_FAILED) {
                        DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
        }
 }
 
-
 /**
  * qla4xxx_initialize_adapter - initiailizes hba
  * @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
  * This routine parforms all of the steps necessary to initialize the adapter.
  *
  **/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
 {
        int status = QLA_ERROR;
 
@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
        if (status == QLA_ERROR)
                goto exit_init_hba;
 
-       qla4xxx_free_ddb_index(ha);
+       if (is_reset == RESET_ADAPTER)
+               qla4xxx_build_ddb_list(ha, is_reset);
 
        set_bit(AF_ONLINE, &ha->flags);
 exit_init_hba:
@@ -855,38 +857,12 @@ exit_init_hba:
        return status;
 }
 
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
-               uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                      struct ddb_entry *ddb_entry, uint32_t state)
 {
-       struct ddb_entry * ddb_entry;
        uint32_t old_fw_ddb_device_state;
        int status = QLA_ERROR;
 
-       /* check for out of range index */
-       if (fw_ddb_index >= MAX_DDB_ENTRIES)
-               goto exit_ddb_event;
-
-       /* Get the corresponging ddb entry */
-       ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
-       /* Device does not currently exist in our database. */
-       if (ddb_entry == NULL) {
-               ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
-                          __func__, fw_ddb_index);
-
-               if (state == DDB_DS_NO_CONNECTION_ACTIVE)
-                       clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
-               goto exit_ddb_event;
-       }
-
        old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                switch (state) {
                case DDB_DS_SESSION_ACTIVE:
                case DDB_DS_DISCOVERY:
-                       iscsi_conn_start(ddb_entry->conn);
-                       iscsi_conn_login_event(ddb_entry->conn,
-                                              ISCSI_CONN_STATE_LOGGED_IN);
+                       ddb_entry->unblock_sess(ddb_entry->sess);
                        qla4xxx_update_session_conn_param(ha, ddb_entry);
                        status = QLA_SUCCESS;
                        break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                switch (state) {
                case DDB_DS_SESSION_ACTIVE:
                case DDB_DS_DISCOVERY:
-                       iscsi_conn_start(ddb_entry->conn);
-                       iscsi_conn_login_event(ddb_entry->conn,
-                                              ISCSI_CONN_STATE_LOGGED_IN);
+                       ddb_entry->unblock_sess(ddb_entry->sess);
                        qla4xxx_update_session_conn_param(ha, ddb_entry);
                        status = QLA_SUCCESS;
                        break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                                __func__));
                break;
        }
+       return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+       /*
+        * This triggers a relogin.  After the relogin_timer
+        * expires, the relogin gets scheduled.  We must wait a
+        * minimum amount of time since receiving an 0x8014 AEN
+        * with failed device_state or a logout response before
+        * we can issue another relogin.
+        *
+        * Firmware pads this timeout: (time2wait + 1).
+        * The driver's relogin retry interval must be longer than
+        * the firmware's; otherwise the firmware fails the
+        * set_ddb() mbx cmd with status 0x4005 while it is still
+        * counting down its time2wait.
+        */
+       atomic_set(&ddb_entry->relogin_timer, 0);
+       atomic_set(&ddb_entry->retry_relogin_timer,
+                  ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                            struct ddb_entry *ddb_entry, uint32_t state)
+{
+       uint32_t old_fw_ddb_device_state;
+       int status = QLA_ERROR;
+
+       old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "%s: DDB - old state = 0x%x, new state = 0x%x for "
+                         "index [%d]\n", __func__,
+                         ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+       ddb_entry->fw_ddb_device_state = state;
+
+       switch (old_fw_ddb_device_state) {
+       case DDB_DS_LOGIN_IN_PROCESS:
+       case DDB_DS_NO_CONNECTION_ACTIVE:
+               switch (state) {
+               case DDB_DS_SESSION_ACTIVE:
+                       ddb_entry->unblock_sess(ddb_entry->sess);
+                       qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               case DDB_DS_SESSION_FAILED:
+                       iscsi_block_session(ddb_entry->sess);
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       case DDB_DS_SESSION_ACTIVE:
+               switch (state) {
+               case DDB_DS_SESSION_FAILED:
+                       iscsi_block_session(ddb_entry->sess);
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       case DDB_DS_SESSION_FAILED:
+               switch (state) {
+               case DDB_DS_SESSION_ACTIVE:
+                       ddb_entry->unblock_sess(ddb_entry->sess);
+                       qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               case DDB_DS_SESSION_FAILED:
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       default:
+               DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+                                 __func__));
+               break;
+       }
+       return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Device Database Changed AEN event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+                               uint32_t fw_ddb_index,
+                               uint32_t state, uint32_t conn_err)
+{
+       struct ddb_entry *ddb_entry;
+       int status = QLA_ERROR;
+
+       /* check for out of range index */
+       if (fw_ddb_index >= MAX_DDB_ENTRIES)
+               goto exit_ddb_event;
+
+       /* Get the corresponding ddb entry */
+       ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+       /* Device does not currently exist in our database. */
+       if (ddb_entry == NULL) {
+               ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+                          __func__, fw_ddb_index);
+
+               if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+                       clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+               goto exit_ddb_event;
+       }
+
+       ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
 
 exit_ddb_event:
        return status;
 }
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logs in to the target.
+ * Issues the set ddb and conn open mailbox commands.
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+       struct dev_db_entry *fw_ddb_entry = NULL;
+       dma_addr_t fw_ddb_dma;
+       uint32_t mbx_sts = 0;
+       int ret;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha =  ddb_entry->ha;
+
+       if (!test_bit(AF_LINK_UP, &ha->flags))
+               return;
+
+       if (ddb_entry->ddb_type != FLASH_DDB) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Skipping login to non FLASH DB"));
+               goto exit_login;
+       }
+
+       fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                     &fw_ddb_dma);
+       if (fw_ddb_entry == NULL) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+               goto exit_login;
+       }
+
+       if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+               ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+               if (ret == QLA_ERROR)
+                       goto exit_login;
+
+               ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+               ha->tot_ddbs++;
+       }
+
+       memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+              sizeof(struct dev_db_entry));
+       ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+       ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+                                   fw_ddb_dma, &mbx_sts);
+       if (ret == QLA_ERROR) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+               goto exit_login;
+       }
+
+       ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+       ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+       if (ret == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+                          sess->targetname);
+               goto exit_login;
+       }
+
+exit_login:
+       if (fw_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
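
The ql4_init.c changes above split DDB state handling into per-entry callbacks (ddb_change, unblock_sess), so flash-configured and user-created sessions can react differently to the same AEN. A reduced sketch of that dispatch pattern; the struct and handlers here are simplified stand-ins, not the driver's actual definitions:

#include <stdio.h>

struct ddb_entry {
	int state;
	int (*ddb_change)(struct ddb_entry *ddb, int new_state);
};

static int normal_ddb_change(struct ddb_entry *ddb, int new_state)
{
	printf("user-created DDB: %d -> %d\n", ddb->state, new_state);
	ddb->state = new_state;
	return 0;
}

static int flash_ddb_change(struct ddb_entry *ddb, int new_state)
{
	printf("flash DDB: %d -> %d\n", ddb->state, new_state);
	ddb->state = new_state;
	return 0;
}

int main(void)
{
	struct ddb_entry user = { 0, normal_ddb_change };
	struct ddb_entry flash = { 0, flash_ddb_change };

	/* The AEN handler only invokes the entry's own handler. */
	user.ddb_change(&user, 4);
	flash.ddb_change(&flash, 4);
	return 0;
}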
index 4c2b848..c259378 100644 (file)
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                return status;
        }
 
+       if (is_qla40XX(ha)) {
+               if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+                       DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+                                         "prematurely completing mbx cmd as "
+                                         "adapter removal detected\n",
+                                         ha->host_no, __func__));
+                       return status;
+               }
+       }
+
        if (is_qla8022(ha)) {
                if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
                        DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
        memcpy(ha->name_string, init_fw_cb->iscsi_name,
                min(sizeof(ha->name_string),
                sizeof(init_fw_cb->iscsi_name)));
+       ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
        /*memcpy(ha->alias, init_fw_cb->Alias,
               min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
 
index 30f31b1..4169c8b 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
 
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
@@ -31,6 +32,13 @@ static struct kmem_cache *srb_cachep;
 /*
  * Module parameter information and variables
  */
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+               "Set to disable exporting boot targets to sysfs\n"
+               " 0 - Export boot targets\n"
+               " 1 - Do not export boot targets (Default)");
+
 int ql4xdontresethba = 0;
 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@ static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
                "Target Session Recovery Timeout.\n"
-               " Default: 30 sec.");
+               " Default: 120 sec.");
 
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
@@ -415,7 +423,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
        qla_ep = ep->dd_data;
        ha = to_qla_host(qla_ep->host);
 
-       if (adapter_up(ha))
+       if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
                ret = 1;
 
        return ret;
@@ -975,6 +983,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
 
 }
 
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+       uint32_t mbx_sts = 0;
+       uint16_t tmp_ddb_index;
+       int ret;
+
+get_ddb_index:
+       tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+       if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Free DDB index not available\n"));
+               ret = QLA_ERROR;
+               goto exit_get_ddb_index;
+       }
+
+       if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+               goto get_ddb_index;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Found a free DDB index at %d\n", tmp_ddb_index));
+       ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+       if (ret == QLA_ERROR) {
+               if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "DDB index = %d not available trying next\n",
+                                  tmp_ddb_index);
+                       goto get_ddb_index;
+               }
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Free FW DDB not available\n"));
+       }
+
+       *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+       return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+                                  struct ddb_entry *ddb_entry,
+                                  char *existing_ipaddr,
+                                  char *user_ipaddr)
+{
+       uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+       char formatted_ipaddr[DDB_IPADDR_LEN];
+       int status = QLA_SUCCESS, ret = 0;
+
+       if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+               ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+                              '\0', NULL);
+               if (ret == 0) {
+                       status = QLA_ERROR;
+                       goto out_match;
+               }
+               ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+       } else {
+               ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+                              '\0', NULL);
+               if (ret == 0) {
+                       status = QLA_ERROR;
+                       goto out_match;
+               }
+               ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+       }
+
+       if (strcmp(existing_ipaddr, formatted_ipaddr))
+               status = QLA_ERROR;
+
+out_match:
+       return status;
+}
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+                                     struct iscsi_cls_conn *cls_conn)
+{
+       int idx = 0, max_ddbs, rval;
+       struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+       struct iscsi_session *sess, *existing_sess;
+       struct iscsi_conn *conn, *existing_conn;
+       struct ddb_entry *ddb_entry;
+
+       sess = cls_sess->dd_data;
+       conn = cls_conn->dd_data;
+
+       if (sess->targetname == NULL ||
+           conn->persistent_address == NULL ||
+           conn->persistent_port == 0)
+               return QLA_ERROR;
+
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       for (idx = 0; idx < max_ddbs; idx++) {
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if (ddb_entry == NULL)
+                       continue;
+
+               if (ddb_entry->ddb_type != FLASH_DDB)
+                       continue;
+
+               existing_sess = ddb_entry->sess->dd_data;
+               existing_conn = ddb_entry->conn->dd_data;
+
+               if (existing_sess->targetname == NULL ||
+                   existing_conn->persistent_address == NULL ||
+                   existing_conn->persistent_port == 0)
+                       continue;
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "IQN = %s User IQN = %s\n",
+                                 existing_sess->targetname,
+                                 sess->targetname));
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "IP = %s User IP = %s\n",
+                                 existing_conn->persistent_address,
+                                 conn->persistent_address));
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Port = %d User Port = %d\n",
+                                 existing_conn->persistent_port,
+                                 conn->persistent_port));
+
+               if (strcmp(existing_sess->targetname, sess->targetname))
+                       continue;
+               rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+                                       existing_conn->persistent_address,
+                                       conn->persistent_address);
+               if (rval == QLA_ERROR)
+                       continue;
+               if (existing_conn->persistent_port != conn->persistent_port)
+                       continue;
+               break;
+       }
+
+       if (idx == max_ddbs)
+               return QLA_ERROR;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Match found in fwdb sessions\n"));
+       return QLA_SUCCESS;
+}
+
 static struct iscsi_cls_session *
 qla4xxx_session_create(struct iscsi_endpoint *ep,
                        uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1136,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
        struct scsi_qla_host *ha;
        struct qla_endpoint *qla_ep;
        struct ddb_entry *ddb_entry;
-       uint32_t ddb_index;
-       uint32_t mbx_sts = 0;
+       uint16_t ddb_index;
        struct iscsi_session *sess;
        struct sockaddr *dst_addr;
        int ret;
@@ -1000,32 +1151,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
        dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
        ha = to_qla_host(qla_ep->host);
 
-get_ddb_index:
-       ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
-       if (ddb_index >= MAX_DDB_ENTRIES) {
-               DEBUG2(ql4_printk(KERN_INFO, ha,
-                                 "Free DDB index not available\n"));
-               return NULL;
-       }
-
-       if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
-               goto get_ddb_index;
-
-       DEBUG2(ql4_printk(KERN_INFO, ha,
-                         "Found a free DDB index at %d\n", ddb_index));
-       ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
-       if (ret == QLA_ERROR) {
-               if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
-                       ql4_printk(KERN_INFO, ha,
-                                  "DDB index = %d not available trying next\n",
-                                  ddb_index);
-                       goto get_ddb_index;
-               }
-               DEBUG2(ql4_printk(KERN_INFO, ha,
-                                 "Free FW DDB not available\n"));
+       ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+       if (ret == QLA_ERROR)
                return NULL;
-       }
 
        cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
                                       cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1168,8 @@ get_ddb_index:
        ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
        ddb_entry->ha = ha;
        ddb_entry->sess = cls_sess;
+       ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+       ddb_entry->ddb_change = qla4xxx_ddb_change;
        cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
        ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
        ha->tot_ddbs++;
@@ -1077,6 +1207,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
                                    conn_idx);
+       if (!cls_conn)
+               return NULL;
+
        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ddb_entry->conn = cls_conn;
@@ -1109,7 +1242,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
        struct scsi_qla_host *ha;
-       struct dev_db_entry *fw_ddb_entry;
+       struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        uint32_t mbx_sts = 0;
        int ret = 0;
@@ -1120,12 +1253,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
        ddb_entry = sess->dd_data;
        ha = ddb_entry->ha;
 
+       /* Check if we have a matching FW DDB; if so, do not login to this
+        * target, as doing so could cause the target to logout the previous
+        * connection.
+        */
+       ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+       if (ret == QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha,
+                          "Session already exists in FW.\n");
+               ret = -EEXIST;
+               goto exit_conn_start;
+       }
+
        fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                          &fw_ddb_entry_dma, GFP_KERNEL);
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto exit_conn_start;
        }
 
        ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1284,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
                if (mbx_sts)
                        if (ddb_entry->fw_ddb_device_state ==
                                                DDB_DS_SESSION_ACTIVE) {
-                               iscsi_conn_start(ddb_entry->conn);
-                               iscsi_conn_login_event(ddb_entry->conn,
-                                               ISCSI_CONN_STATE_LOGGED_IN);
+                               ddb_entry->unblock_sess(ddb_entry->sess);
                                goto exit_set_param;
                        }
 
@@ -1167,8 +1311,9 @@ exit_set_param:
        ret = 0;
 
 exit_conn_start:
-       dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
-                         fw_ddb_entry, fw_ddb_entry_dma);
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
        return ret;
 }
 
@@ -1344,6 +1489,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
        return -ENOSYS;
 }
 
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+                                    struct dev_db_entry *fw_ddb_entry,
+                                    struct iscsi_cls_session *cls_sess,
+                                    struct iscsi_cls_conn *cls_conn)
+{
+       int buflen = 0;
+       struct iscsi_session *sess;
+       struct iscsi_conn *conn;
+       char ip_addr[DDB_IPADDR_LEN];
+       uint16_t options = 0;
+
+       sess = cls_sess->dd_data;
+       conn = cls_conn->dd_data;
+
+       conn->max_recv_dlength = BYTE_UNITS *
+                         le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+       conn->max_xmit_dlength = BYTE_UNITS *
+                         le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+       sess->initial_r2t_en =
+                           (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+       sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+       sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+       sess->first_burst = BYTE_UNITS *
+                              le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+       sess->max_burst = BYTE_UNITS *
+                                le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+       sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+       sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+       conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+       sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+       options = le16_to_cpu(fw_ddb_entry->options);
+       if (options & DDB_OPT_IPV6_DEVICE)
+               sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+       else
+               sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+       iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+                       (char *)fw_ddb_entry->iscsi_name, buflen);
+       iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+                       (char *)ha->name_string, buflen);
+       iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+                       (char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+                                            struct ddb_entry *ddb_entry)
+{
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_cls_conn *cls_conn;
+       uint32_t ddb_state;
+       dma_addr_t fw_ddb_entry_dma;
+       struct dev_db_entry *fw_ddb_entry;
+
+       fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                         &fw_ddb_entry_dma, GFP_KERNEL);
+       if (!fw_ddb_entry) {
+               ql4_printk(KERN_ERR, ha,
+                          "%s: Unable to allocate dma buffer\n", __func__);
+               goto exit_session_conn_fwddb_param;
+       }
+
+       if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+                                   fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+                                   NULL, NULL, NULL) == QLA_ERROR) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+                                 "get_ddb_entry for fw_ddb_index %d\n",
+                                 ha->host_no, __func__,
+                                 ddb_entry->fw_ddb_index));
+               goto exit_session_conn_fwddb_param;
+       }
+
+       cls_sess = ddb_entry->sess;
+
+       cls_conn = ddb_entry->conn;
+
+       /* Update params */
+       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
+}
+
 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                       struct ddb_entry *ddb_entry)
 {
@@ -1360,7 +1600,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
-               return;
+               goto exit_session_conn_param;
        }
 
        if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1610,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                  "get_ddb_entry for fw_ddb_index %d\n",
                                  ha->host_no, __func__,
                                  ddb_entry->fw_ddb_index));
-               return;
+               goto exit_session_conn_param;
        }
 
        cls_sess = ddb_entry->sess;
@@ -1379,6 +1619,12 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
        cls_conn = ddb_entry->conn;
        conn = cls_conn->dd_data;
 
+       /* Update timers after login */
+       ddb_entry->default_relogin_timeout =
+                               le16_to_cpu(fw_ddb_entry->def_timeout);
+       ddb_entry->default_time2wait =
+                               le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
        /* Update params */
        conn->max_recv_dlength = BYTE_UNITS *
                          le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1653,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
 
        memcpy(sess->initiatorname, ha->name_string,
               min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
 }
 
 /*
@@ -1607,6 +1858,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
                vfree(ha->chap_list);
        ha->chap_list = NULL;
 
+       if (ha->fw_ddb_dma_pool)
+               dma_pool_destroy(ha->fw_ddb_dma_pool);
+
        /* release io space registers  */
        if (is_qla8022(ha)) {
                if (ha->nx_pcibase)
@@ -1689,6 +1943,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
                goto mem_alloc_error_exit;
        }
 
+       ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+                                             DDB_DMA_BLOCK_SIZE, 8, 0);
+
+       if (ha->fw_ddb_dma_pool == NULL) {
+               ql4_printk(KERN_WARNING, ha,
+                          "%s: fw_ddb_dma_pool allocation failed..\n",
+                          __func__);
+               goto mem_alloc_error_exit;
+       }
+
        return QLA_SUCCESS;
 
 mem_alloc_error_exit:
@@ -1800,6 +2064,60 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
        }
 }
 
+void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       if (!(ddb_entry->ddb_type == FLASH_DDB))
+               return;
+
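+       /* While the session is offline and no relogin is pending, count the
+        * retry-relogin timer down and schedule a relogin via the DPC when
+        * it reaches zero.
+        */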
+       if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+           !iscsi_is_session_online(cls_sess)) {
+               if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+                   INVALID_ENTRY) {
+                       if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+                                       0) {
+                               atomic_set(&ddb_entry->retry_relogin_timer,
+                                          INVALID_ENTRY);
+                               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                               set_bit(DF_RELOGIN, &ddb_entry->flags);
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                      "%s: index [%d] login device\n",
+                                       __func__, ddb_entry->fw_ddb_index));
+                       } else
+                               atomic_dec(&ddb_entry->retry_relogin_timer);
+               }
+       }
+
+       /* Wait for the relogin to time out */
+       if (atomic_read(&ddb_entry->relogin_timer) &&
+           (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+               /*
+                * If the relogin times out and the device is
+                * still NOT ONLINE then try to relogin again.
+                */
+               if (!iscsi_is_session_online(cls_sess)) {
+                       /* Reset retry relogin timer */
+                       atomic_inc(&ddb_entry->relogin_retry_count);
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                               "%s: index[%d] relogin timed out-retrying"
+                               " relogin (%d), retry (%d)\n", __func__,
+                               ddb_entry->fw_ddb_index,
+                               atomic_read(&ddb_entry->relogin_retry_count),
+                               ddb_entry->default_time2wait + 4));
+                       set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                       atomic_set(&ddb_entry->retry_relogin_timer,
+                                  ddb_entry->default_time2wait + 4);
+               }
+       }
+}
+
 /**
  * qla4xxx_timer - checks every second for work to do.
  * @ha: Pointer to host adapter structure.
@@ -1809,6 +2127,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
        int start_dpc = 0;
        uint16_t w;
 
+       iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
        /* If we are in the middle of AER/EEH processing
         * skip any processing and reschedule the timer
         */
@@ -2078,7 +2398,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
        sess = cls_session->dd_data;
        ddb_entry = sess->dd_data;
        ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
-       iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+       if (ddb_entry->ddb_type == FLASH_DDB)
+               iscsi_block_session(ddb_entry->sess);
+       else
+               iscsi_session_failure(cls_session->dd_data,
+                                     ISCSI_ERR_CONN_FAILED);
 }
 
 /**
@@ -2163,7 +2488,7 @@ recover_ha_init_adapter:
 
                /* NOTE: AF_ONLINE flag set upon successful completion of
                 *       qla4xxx_initialize_adapter */
-               status = qla4xxx_initialize_adapter(ha);
+               status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
        }
 
        /* Retry failed adapter initialization, if necessary
@@ -2245,17 +2570,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
                        iscsi_unblock_session(ddb_entry->sess);
                } else {
                        /* Trigger relogin */
-                       iscsi_session_failure(cls_session->dd_data,
-                                             ISCSI_ERR_CONN_FAILED);
+                       if (ddb_entry->ddb_type == FLASH_DDB) {
+                               if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                                       qla4xxx_arm_relogin_timer(ddb_entry);
+                       } else
+                               iscsi_session_failure(cls_session->dd_data,
+                                                     ISCSI_ERR_CONN_FAILED);
                }
        }
 }
 
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+       ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                  " unblock session\n", ha->host_no, __func__,
+                  ddb_entry->fw_ddb_index);
+
+       iscsi_unblock_session(ddb_entry->sess);
+
+       /* Start scan target */
+       if (test_bit(AF_ONLINE, &ha->flags)) {
+               ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                          " start scan\n", ha->host_no, __func__,
+                          ddb_entry->fw_ddb_index);
+               scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+       }
+       return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+       ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                  " unblock user space session\n", ha->host_no, __func__,
+                  ddb_entry->fw_ddb_index);
+       iscsi_conn_start(ddb_entry->conn);
+       iscsi_conn_login_event(ddb_entry->conn,
+                              ISCSI_CONN_STATE_LOGGED_IN);
+
+       return QLA_SUCCESS;
+}
+
 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
 {
        iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
 }
 
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+       uint16_t relogin_timer;
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       relogin_timer = max(ddb_entry->default_relogin_timeout,
+                           (uint16_t)RELOGIN_TOV);
+       atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+                         ddb_entry->fw_ddb_index, relogin_timer));
+
+       qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       if (!(ddb_entry->ddb_type == FLASH_DDB))
+               return;
+
+       if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+           !iscsi_is_session_online(cls_sess)) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "relogin issued\n"));
+               qla4xxx_relogin_flash_ddb(cls_sess);
+       }
+}
+
 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
 {
        if (ha->dpc_thread)
@@ -2356,6 +2772,12 @@ dpc_post_reset_ha:
        if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
                qla4xxx_get_dhcp_ip_address(ha);
 
+       /* ---- relogin device? --- */
+       if (adapter_up(ha) &&
+           test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+               iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+       }
+
        /* ---- link change? --- */
        if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
                if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2790,12 @@ dpc_post_reset_ha:
                         * fatal error recovery.  Therefore, the driver must
                         * manually relogin to devices when recovering from
                         * connection failures, logouts, expired KATO, etc. */
-
-                       qla4xxx_relogin_all_devices(ha);
+                       if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+                               qla4xxx_build_ddb_list(ha, ha->is_reset);
+                               iscsi_host_for_each_session(ha->host,
+                                               qla4xxx_login_flash_ddb);
+                       } else
+                               qla4xxx_relogin_all_devices(ha);
                }
        }
 }
@@ -2867,6 +3293,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
                          " target ID %d\n", __func__, ddb_index[0],
                          ddb_index[1]));
 
+       ha->pri_ddb_idx = ddb_index[0];
+       ha->sec_ddb_idx = ddb_index[1];
+
 exit_boot_info_free:
        dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
 exit_boot_info:
@@ -3034,6 +3463,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
                return ret;
        }
 
+       if (ql4xdisablesysfsboot)
+               return QLA_SUCCESS;
+
        if (ddb_index[0] == 0xffff)
                goto sec_target;
 
@@ -3066,7 +3498,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
        struct iscsi_boot_kobj *boot_kobj;
 
        if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
-               return 0;
+               return QLA_ERROR;
+
+       if (ql4xdisablesysfsboot) {
+               ql4_printk(KERN_INFO, ha,
+                          "%s: sysfsboot disabled - driver will trigger login "
+                          "and publish session for discovery.\n", __func__);
+               return QLA_SUCCESS;
+       }
+
 
        ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
        if (!ha->boot_kset)
@@ -3108,7 +3548,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
        if (!boot_kobj)
                goto put_host;
 
-       return 0;
+       return QLA_SUCCESS;
 
 put_host:
        scsi_host_put(ha->host);
@@ -3174,9 +3614,507 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
 exit_chap_list:
        dma_free_coherent(&ha->pdev->dev, chap_size,
                        chap_flash_data, chap_dma);
-       return;
 }
 
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+                                 struct ql4_tuple_ddb *tddb)
+{
+       struct scsi_qla_host *ha;
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_session *sess;
+       struct iscsi_conn *conn;
+
+       DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+       ha = ddb_entry->ha;
+       cls_sess = ddb_entry->sess;
+       sess = cls_sess->dd_data;
+       cls_conn = ddb_entry->conn;
+       conn = cls_conn->dd_data;
+
+       tddb->tpgt = sess->tpgt;
+       tddb->port = conn->persistent_port;
+       strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+       strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+                                     struct ql4_tuple_ddb *tddb)
+{
+       uint16_t options = 0;
+
+       tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+       memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+              min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+       options = le16_to_cpu(fw_ddb_entry->options);
+       if (options & DDB_OPT_IPV6_DEVICE)
+               sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+       else
+               sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+       tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+                                    struct ql4_tuple_ddb *old_tddb,
+                                    struct ql4_tuple_ddb *new_tddb)
+{
+       if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+               return QLA_ERROR;
+
+       if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+               return QLA_ERROR;
+
+       if (old_tddb->port != new_tddb->port)
+               return QLA_ERROR;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+                         old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+                         old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+                         new_tddb->ip_addr, new_tddb->iscsi_name));
+
+       return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+                                    struct dev_db_entry *fw_ddb_entry)
+{
+       struct ddb_entry *ddb_entry;
+       struct ql4_tuple_ddb *fw_tddb = NULL;
+       struct ql4_tuple_ddb *tmp_tddb = NULL;
+       int idx;
+       int ret = QLA_ERROR;
+
+       fw_tddb = vzalloc(sizeof(*fw_tddb));
+       if (!fw_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+       if (!tmp_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+       for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if (ddb_entry == NULL)
+                       continue;
+
+               qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+               if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+                       ret = QLA_SUCCESS; /* found */
+                       goto exit_check;
+               }
+       }
+
+exit_check:
+       if (fw_tddb)
+               vfree(fw_tddb);
+       if (tmp_tddb)
+               vfree(tmp_tddb);
+       return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+                                      struct list_head *list_nt,
+                                      struct dev_db_entry *fw_ddb_entry)
+{
+       struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+       struct ql4_tuple_ddb *fw_tddb = NULL;
+       struct ql4_tuple_ddb *tmp_tddb = NULL;
+       int ret = QLA_ERROR;
+
+       fw_tddb = vzalloc(sizeof(*fw_tddb));
+       if (!fw_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+       if (!tmp_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+       list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+               qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+               if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+                       ret = QLA_SUCCESS; /* found */
+                       goto exit_check;
+               }
+       }
+
+exit_check:
+       if (fw_tddb)
+               vfree(fw_tddb);
+       if (tmp_tddb)
+               vfree(tmp_tddb);
+       return ret;
+}
+
+static void qla4xxx_free_nt_list(struct list_head *list_nt)
+{
+       struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+
+       /* Free up the normaltargets list */
+       list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+               list_del_init(&nt_ddb_idx->list);
+               vfree(nt_ddb_idx);
+       }
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+                                       struct dev_db_entry *fw_ddb_entry)
+{
+       struct iscsi_endpoint *ep;
+       struct sockaddr_in *addr;
+       struct sockaddr_in6 *addr6;
+       struct sockaddr *dst_addr;
+       char *ip;
+
+       /* TODO: the iscsi_endpoint needs to be destroyed on unload */
+       dst_addr = vmalloc(sizeof(*dst_addr));
+       if (!dst_addr)
+               return NULL;
+
+       if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+               dst_addr->sa_family = AF_INET6;
+               addr6 = (struct sockaddr_in6 *)dst_addr;
+               ip = (char *)&addr6->sin6_addr;
+               memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+               addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+       } else {
+               dst_addr->sa_family = AF_INET;
+               addr = (struct sockaddr_in *)dst_addr;
+               ip = (char *)&addr->sin_addr;
+               memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+               addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+       }
+
+       ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+       vfree(dst_addr);
+       return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
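+       /* Boot targets are exported through the sysfs boot info, so skip
+        * them here unless sysfs boot is disabled.
+        */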
+       if (ql4xdisablesysfsboot)
+               return QLA_SUCCESS;
+       if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+               return QLA_ERROR;
+       return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+                                         struct ddb_entry *ddb_entry)
+{
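+       /* Initialize a flash DDB entry with its default relogin timers and
+        * the flash-specific unblock/change callbacks.
+        */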
+       ddb_entry->ddb_type = FLASH_DDB;
+       ddb_entry->fw_ddb_index = INVALID_ENTRY;
+       ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+       ddb_entry->ha = ha;
+       ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+       ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+       atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+       atomic_set(&ddb_entry->relogin_timer, 0);
+       atomic_set(&ddb_entry->relogin_retry_count, 0);
+
+       ddb_entry->default_relogin_timeout =
+               le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+       ddb_entry->default_time2wait =
+               le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+       uint32_t idx = 0;
+       uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+       uint32_t sts[MBOX_REG_COUNT];
+       uint32_t ip_state;
+       unsigned long wtime;
+       int ret;
+
+       wtime = jiffies + (HZ * IP_CONFIG_TOV);
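+       /* Poll the state of each IP interface until it settles or the
+        * IP_CONFIG_TOV timeout expires.
+        */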
+       do {
+               for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+                       if (ip_idx[idx] == -1)
+                               continue;
+
+                       ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+                       if (ret == QLA_ERROR) {
+                               ip_idx[idx] = -1;
+                               continue;
+                       }
+
+                       ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Waiting for IP state for idx = %d, state = 0x%x\n",
+                                         ip_idx[idx], ip_state));
+                       if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+                           ip_state == IP_ADDRSTATE_INVALID ||
+                           ip_state == IP_ADDRSTATE_PREFERRED ||
+                           ip_state == IP_ADDRSTATE_DEPRICATED ||
+                           ip_state == IP_ADDRSTATE_DISABLING)
+                               ip_idx[idx] = -1;
+
+               }
+
+               /* Break if all IP states checked */
+               if ((ip_idx[0] == -1) &&
+                   (ip_idx[1] == -1) &&
+                   (ip_idx[2] == -1) &&
+                   (ip_idx[3] == -1))
+                       break;
+               schedule_timeout_uninterruptible(HZ);
+       } while (time_after(wtime, jiffies));
+}
+
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+       int max_ddbs;
+       int ret;
+       uint32_t idx = 0, next_idx = 0;
+       uint32_t state = 0, conn_err = 0;
+       uint16_t conn_id;
+       struct dev_db_entry *fw_ddb_entry;
+       struct ddb_entry *ddb_entry = NULL;
+       dma_addr_t fw_ddb_dma;
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_session *sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_endpoint *ep;
+       uint16_t cmds_max = 32, tmo = 0;
+       uint32_t initial_cmdsn = 0;
+       struct list_head list_st, list_nt; /* List of sendtargets */
+       struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
+       int fw_idx_size;
+       unsigned long wtime;
+       struct qla_ddb_index  *nt_ddb_idx;
+
+       if (!test_bit(AF_LINK_UP, &ha->flags)) {
+               set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+               ha->is_reset = is_reset;
+               return;
+       }
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                     &fw_ddb_dma);
+       if (fw_ddb_entry == NULL) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+               goto exit_ddb_list;
+       }
+
+       INIT_LIST_HEAD(&list_st);
+       INIT_LIST_HEAD(&list_nt);
+       fw_idx_size = sizeof(struct qla_ddb_index);
+
+       for (idx = 0; idx < max_ddbs; idx = next_idx) {
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+                                             fw_ddb_dma, NULL,
+                                             &next_idx, &state, &conn_err,
+                                             NULL, &conn_id);
+               if (ret == QLA_ERROR)
+                       break;
+
+               if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+                       goto continue_next_st;
+
+               /* Check if ST, add to the list_st */
+               if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+                       goto continue_next_st;
+
+               st_ddb_idx = vzalloc(fw_idx_size);
+               if (!st_ddb_idx)
+                       break;
+
+               st_ddb_idx->fw_ddb_idx = idx;
+
+               list_add_tail(&st_ddb_idx->list, &list_st);
+continue_next_st:
+               if (next_idx == 0)
+                       break;
+       }
+
+       /* Before issuing the conn open mbox, ensure all IP states are
+        * configured; note that conn open fails if the IPs are not configured.
+        */
+       qla4xxx_wait_for_ip_configuration(ha);
+
+       /* Go through the STs and fire sendtargets by issuing the conn open mbx */
+       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+               qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+       }
+
+       /* Wait for all sendtargets to complete; wait a minimum of 12 sec */
+       tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Default time to wait for build ddb %d\n", tmo));
+
+       wtime = jiffies + (HZ * tmo);
+       do {
+               list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
+                                        list) {
+                       ret = qla4xxx_get_fwddb_entry(ha,
+                                                     st_ddb_idx->fw_ddb_idx,
+                                                     NULL, 0, NULL, &next_idx,
+                                                     &state, &conn_err, NULL,
+                                                     NULL);
+                       if (ret == QLA_ERROR)
+                               continue;
+
+                       if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                           state == DDB_DS_SESSION_FAILED) {
+                               list_del_init(&st_ddb_idx->list);
+                               vfree(st_ddb_idx);
+                       }
+               }
+               schedule_timeout_uninterruptible(HZ / 10);
+       } while (time_after(wtime, jiffies));
+
+       /* Free up the sendtargets list */
+       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+               list_del_init(&st_ddb_idx->list);
+               vfree(st_ddb_idx);
+       }
+
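+       /* Second pass: walk the firmware DDBs again, this time picking up
+        * the normal-target (NT) entries and creating sessions for them.
+        */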
+       for (idx = 0; idx < max_ddbs; idx = next_idx) {
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+                                             fw_ddb_dma, NULL,
+                                             &next_idx, &state, &conn_err,
+                                             NULL, &conn_id);
+               if (ret == QLA_ERROR)
+                       break;
+
+               if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+                       goto continue_next_nt;
+
+               /* Check if NT, then add it to the list */
+               if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+                       goto continue_next_nt;
+
+               if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                   state == DDB_DS_SESSION_FAILED) {
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Adding  DDB to session = 0x%x\n",
+                                         idx));
+                       if (is_reset == INIT_ADAPTER) {
+                               nt_ddb_idx = vmalloc(fw_idx_size);
+                               if (!nt_ddb_idx)
+                                       break;
+
+                               nt_ddb_idx->fw_ddb_idx = idx;
+
+                               memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+                                      sizeof(struct dev_db_entry));
+
+                               if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
+                                               fw_ddb_entry) == QLA_SUCCESS) {
+                                       vfree(nt_ddb_idx);
+                                       goto continue_next_nt;
+                               }
+                               list_add_tail(&nt_ddb_idx->list, &list_nt);
+                       } else if (is_reset == RESET_ADAPTER) {
+                               if (qla4xxx_is_session_exists(ha,
+                                                  fw_ddb_entry) == QLA_SUCCESS)
+                                       goto continue_next_nt;
+                       }
+
+                       /* Create the session object with INVALID_ENTRY;
+                        * the target_id gets set when we issue the login.
+                        */
+                       cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
+                                               ha->host, cmds_max,
+                                               sizeof(struct ddb_entry),
+                                               sizeof(struct ql4_task_data),
+                                               initial_cmdsn, INVALID_ENTRY);
+                       if (!cls_sess)
+                               goto exit_ddb_list;
+
+                       /*
+                        * iscsi_session_setup increments the driver reference
+                        * count, which would prevent the driver from being
+                        * unloaded, so call module_put() to drop the reference.
+                        */
+                       module_put(qla4xxx_iscsi_transport.owner);
+                       sess = cls_sess->dd_data;
+                       ddb_entry = sess->dd_data;
+                       ddb_entry->sess = cls_sess;
+
+                       cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+                       memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+                              sizeof(struct dev_db_entry));
+
+                       qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+                       cls_conn = iscsi_conn_setup(cls_sess,
+                                                   sizeof(struct qla_conn),
+                                                   conn_id);
+                       if (!cls_conn)
+                               goto exit_ddb_list;
+
+                       ddb_entry->conn = cls_conn;
+
+                       /* Set up ep for displaying attributes in sysfs */
+                       ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+                       if (ep) {
+                               ep->conn = cls_conn;
+                               cls_conn->ep = ep;
+                       } else {
+                               DEBUG2(ql4_printk(KERN_ERR, ha,
+                                                 "Unable to get ep\n"));
+                       }
+
+                       /* Update sess/conn params */
+                       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
+                                                cls_conn);
+
+                       if (is_reset == RESET_ADAPTER) {
+                               iscsi_block_session(cls_sess);
+                               /* Use the relogin path to discover new devices
+                                * by short-circuiting the relogin timer logic;
+                                * instead, set the flags to initiate the login
+                                * right away.
+                                */
+                               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                               set_bit(DF_RELOGIN, &ddb_entry->flags);
+                       }
+               }
+continue_next_nt:
+               if (next_idx == 0)
+                       break;
+       }
+exit_ddb_list:
+       qla4xxx_free_nt_list(&list_nt);
+       if (fw_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+       qla4xxx_free_ddb_index(ha);
+}
+
 /**
  * qla4xxx_probe_adapter - callback function to probe HBA
  * @pdev: pointer to pci_dev structure
@@ -3298,7 +4236,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
         * firmware
         * NOTE: interrupts enabled upon successful completion
         */
-       status = qla4xxx_initialize_adapter(ha);
+       status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
        while ((!test_bit(AF_ONLINE, &ha->flags)) &&
            init_retry_count++ < MAX_INIT_RETRIES) {
 
@@ -3319,7 +4257,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
                if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
                        continue;
 
-               status = qla4xxx_initialize_adapter(ha);
+               status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
        }
 
        if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4324,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
               ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
               ha->patch_number, ha->build_number);
 
-       qla4xxx_create_chap_list(ha);
-
        if (qla4xxx_setup_boot_info(ha))
                ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
                           __func__);
 
+       /* Build the ddb list and login to each target */
+       qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+       iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+       qla4xxx_create_chap_list(ha);
+
        qla4xxx_create_ifaces(ha);
        return 0;
 
@@ -3449,6 +4391,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
        }
 }
 
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+       struct ddb_entry *ddb_entry;
+       int options;
+       int idx;
+
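+       /* Log out and free every flash DDB session created by the driver */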
+       for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if ((ddb_entry != NULL) &&
+                   (ddb_entry->ddb_type == FLASH_DDB)) {
+
+                       options = LOGOUT_OPTION_CLOSE_SESSION;
+                       if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+                           == QLA_ERROR)
+                               ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+                                          __func__);
+
+                       qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+                       /*
+                        * We dropped the driver reference count when the
+                        * session was set up, so that the driver can unload
+                        * seamlessly without actually destroying the session;
+                        * take the reference back before teardown.
+                        */
+                       try_module_get(qla4xxx_iscsi_transport.owner);
+                       iscsi_destroy_endpoint(ddb_entry->conn->ep);
+                       qla4xxx_free_ddb(ha, ddb_entry);
+                       iscsi_session_teardown(ddb_entry->sess);
+               }
+       }
+}
 /**
  * qla4xxx_remove_adapter - callback function to remove adapter.
  * @pci_dev: PCI device pointer
@@ -3465,9 +4439,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
        /* destroy iface from sysfs */
        qla4xxx_destroy_ifaces(ha);
 
-       if (ha->boot_kset)
+       if ((!ql4xdisablesysfsboot) && ha->boot_kset)
                iscsi_boot_destroy_kset(ha->boot_kset);
 
+       qla4xxx_destroy_fw_ddb_session(ha);
+
        scsi_remove_host(ha->host);
 
        qla4xxx_free_adapter(ha);
@@ -4115,7 +5091,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
 
                qla4_8xxx_idc_unlock(ha);
                clear_bit(AF_FW_RECOVERY, &ha->flags);
-               rval = qla4xxx_initialize_adapter(ha);
+               rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
                qla4_8xxx_idc_lock(ha);
 
                if (rval != QLA_SUCCESS) {
@@ -4151,7 +5127,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
                    QLA82XX_DEV_READY)) {
                        clear_bit(AF_FW_RECOVERY, &ha->flags);
-                       rval = qla4xxx_initialize_adapter(ha);
+                       rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
                        if (rval == QLA_SUCCESS) {
                                ret = qla4xxx_request_irqs(ha);
                                if (ret) {
index c15347d..5254e57 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
index a1fd73d..8ba4510 100644 (file)
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
        depends on FSL_SOC
 
 config SPI_FSL_SPI
-       tristate "Freescale SPI controller"
+       bool "Freescale SPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
          MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
 
 config SPI_FSL_ESPI
-       tristate "Freescale eSPI controller"
+       bool "Freescale eSPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
index 024b48a..acc88b4 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
index e093d3e..0094c64 100644 (file)
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
        spi_bitbang_cleanup(spi);
 }
 
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
 {
        int value;
 
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
        return value;
 }
 
-static int __init
+static int __devinit
 spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
        u16 *res_flags)
 {
index 21c70b2..182e9c8 100644 (file)
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
index fb2e89c..5385da2 100644 (file)
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        {USB_DEVICE(0x0DF6, 0x0045)},
        {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
        {USB_DEVICE(0x0DF6, 0x004B)},
+       {USB_DEVICE(0x0DF6, 0x005D)},
        {USB_DEVICE(0x0DF6, 0x0063)},
        /* Sweex */
        {USB_DEVICE(0x177F, 0x0154)},
index 3d1279c..7eb5617 100644 (file)
@@ -54,6 +54,7 @@
 
 /* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
 #define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS                4
 
 /* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
 #define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
  */
 void dsp_clk_exit(void)
 {
+       int i;
+
        dsp_clock_disable_all(dsp_clocks);
 
+       for (i = 0; i < DM_TIMER_CLOCKS; i++)
+               omap_dm_timer_free(timer[i]);
+
        clk_put(iva2_clk);
        clk_put(ssi.sst_fck);
        clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
 void dsp_clk_init(void)
 {
        static struct platform_device dspbridge_device;
+       int i, id;
 
        dspbridge_device.dev.bus = &platform_bus_type;
 
+       for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+               timer[i] = omap_dm_timer_request_specific(id);
+
        iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
        if (IS_ERR(iva2_clk))
                dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
                clk_enable(iva2_clk);
                break;
        case GPT_CLK:
-               timer[clk_id - 1] =
-                               omap_dm_timer_request_specific(DMT_ID(clk_id));
+               status = omap_dm_timer_start(timer[clk_id - 1]);
                break;
 #ifdef CONFIG_OMAP_MCBSP
        case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
                clk_disable(iva2_clk);
                break;
        case GPT_CLK:
-               omap_dm_timer_free(timer[clk_id - 1]);
+               status = omap_dm_timer_stop(timer[clk_id - 1]);
                break;
 #ifdef CONFIG_OMAP_MCBSP
        case MCBSP_CLK:
index c43c7e3..76cfc6e 100644 (file)
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
-
-#ifdef MODULE
 #include <linux/module.h>
-#endif
-
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>
index 0fd96c1..8599545 100644 (file)
@@ -614,13 +614,12 @@ int iscsit_add_reject(
        hdr     = (struct iscsi_reject *) cmd->pdu;
        hdr->reason = reason;
 
-       cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
                iscsit_release_cmd(cmd);
                return -1;
        }
-       memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
        hdr     = (struct iscsi_reject *) cmd->pdu;
        hdr->reason = reason;
 
-       cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
                iscsit_release_cmd(cmd);
                return -1;
        }
-       memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
        if (add_to_conn) {
                spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
                                " non-existent or non-exported iSCSI LUN:"
                                " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
                }
-               if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
-                       return iscsit_add_reject_from_cmd(
-                                       ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-                                       1, 1, buf, cmd);
-
                send_check_condition = 1;
                goto attach_cmd;
        }
@@ -1044,6 +1037,8 @@ done:
                 */
                send_check_condition = 1;
        } else {
+               cmd->data_length = cmd->se_cmd.data_length;
+
                if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
                        return iscsit_add_reject_from_cmd(
                                ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
         * the backend memory allocation.
         */
        ret = transport_generic_new_cmd(&cmd->se_cmd);
-       if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+       if (ret < 0) {
                immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
                dump_immediate_data = 1;
                goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 
                spin_lock_irqsave(&se_cmd->t_state_lock, flags);
                if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-                    (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+                    (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
                        dump_unsolicited_data = 1;
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
        if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
                if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
                        hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
-                       hdr->residual_count = cpu_to_be32(cmd->residual_count);
+                       hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
                } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
                        hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
-                       hdr->residual_count = cpu_to_be32(cmd->residual_count);
+                       hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
                }
        }
        hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
        hdr->flags              |= ISCSI_FLAG_CMD_FINAL;
        if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
                hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
-               hdr->residual_count = cpu_to_be32(cmd->residual_count);
+               hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
        } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
                hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
-               hdr->residual_count = cpu_to_be32(cmd->residual_count);
+               hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
        }
        hdr->response           = cmd->iscsi_response;
        hdr->cmd_status         = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
        hdr                     = (struct iscsi_tm_rsp *) cmd->pdu;
        memset(hdr, 0, ISCSI_HDR_LEN);
        hdr->opcode             = ISCSI_OP_SCSI_TMFUNC_RSP;
+       hdr->flags              = ISCSI_FLAG_CMD_FINAL;
        hdr->response           = iscsit_convert_tcm_tmr_rsp(se_tmr);
        hdr->itt                = cpu_to_be32(cmd->init_task_tag);
        cmd->stat_sn            = conn->stat_sn++;
index beb3946..1cd6ce3 100644 (file)
 
 static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
 {
-       int j = DIV_ROUND_UP(len, 2);
+       int j = DIV_ROUND_UP(len, 2), rc;
 
-       hex2bin(dst, src, j);
+       rc = hex2bin(dst, src, j);
+       if (rc < 0)
+               pr_debug("CHAP string contains non hex digit symbols\n");
 
        dst[j] = '\0';
        return j;
index 3723d90..f1a02da 100644 (file)
@@ -398,7 +398,6 @@ struct iscsi_cmd {
        u32                     pdu_send_order;
        /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
        u32                     pdu_start;
-       u32                     residual_count;
        /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
        u32                     seq_send_order;
        /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
        atomic_t                connection_exit;
        atomic_t                connection_recovery;
        atomic_t                connection_reinstatement;
-       atomic_t                connection_wait;
        atomic_t                connection_wait_rcfr;
        atomic_t                sleep_on_conn_wait_comp;
        atomic_t                transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
        atomic_t                session_reinstatement;
        atomic_t                session_stop_active;
        atomic_t                sleep_on_sess_wait_comp;
-       atomic_t                transport_wait_cmds;
        /* connection list */
        struct list_head        sess_conn_list;
        struct list_head        cr_active_list;
index c4c68da..101b1be 100644
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
                 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
                 */
                if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-                       if (se_cmd->se_cmd_flags &
-                                       SCF_SCSI_RESERVATION_CONFLICT) {
+                       if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
                                cmd->i_state = ISTATE_SEND_STATUS;
                                spin_unlock_bh(&cmd->istate_lock);
                                iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
index daad362..d734bde 100644
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Could not allocate memory for session\n");
-               return -1;
+               return -ENOMEM;
        }
 
        iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
                pr_err("idr_pre_get() for sess_idr failed\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
        spin_lock(&sess_idr_lock);
        idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Unable to allocate memory for"
                                " struct iscsi_sess_ops.\n");
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
 
        sess->se_sess = transport_init_session();
-       if (!sess->se_sess) {
+       if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
 
        return 0;
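
The iscsi_login_zero_tsih_s1() hunks above make every failure path after the session allocation free the partially set-up session and return a real errno, and switch the transport_init_session() check to IS_ERR() since that call hands back ERR_PTR() values. A minimal user-space sketch of that unwind pattern, assuming nothing beyond libc (demo_* names are hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch only: demo_* names are not kernel API. */
struct demo_sess {
        void *ops;      /* stands in for struct iscsi_sess_ops */
};

/* Allocate a session; undo the first allocation if a later step fails. */
static int demo_setup_session(struct demo_sess **out)
{
        struct demo_sess *sess = calloc(1, sizeof(*sess));

        if (!sess)
                return -ENOMEM;

        sess->ops = calloc(1, 64);
        if (!sess->ops) {
                free(sess);             /* unwind before returning */
                return -ENOMEM;
        }

        *out = sess;
        return 0;
}

int main(void)
{
        struct demo_sess *sess;
        int rc = demo_setup_session(&sess);

        printf("setup: %d\n", rc);
        if (!rc) {
                free(sess->ops);
                free(sess);
        }
        return 0;
}
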
index 426cd4b..98936cb 100644
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
                return NULL;
        }
 
-       login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!login->req) {
                pr_err("Unable to allocate memory for Login Request.\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                goto out;
        }
-       memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
 
        login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
        if (!login->req_buf) {
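
The iscsi_target_init_negotiation() hunk replaces a kzalloc()+memcpy() pair with kmemdup(), which allocates and copies in one step. A user-space analogue of that conversion (demo_memdup() is a made-up helper, not a kernel symbol):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative sketch only: demo_memdup() is not kernel API. */
/* Duplicate len bytes of src into a fresh allocation, or return NULL. */
static void *demo_memdup(const void *src, size_t len)
{
        void *dst = malloc(len);

        if (dst)
                memcpy(dst, src, len);
        return dst;
}

int main(void)
{
        const unsigned char pdu[4] = { 0x03, 0x00, 0x00, 0x00 };
        unsigned char *copy = demo_memdup(pdu, sizeof(pdu));

        if (!copy)
                return 1;
        printf("opcode byte: 0x%02x\n", copy[0]);
        free(copy);
        return 0;
}
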
index 3df1c9b..81d5832 100644
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
                        scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
                        &tl_cmd->tl_sense_buf[0]);
 
-       /*
-        * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
-        */
        if (scsi_bidi_cmnd(sc))
-               se_cmd->t_tasks_bidi = 1;
+               se_cmd->se_cmd_flags |= SCF_BIDI;
+
        /*
         * Locate the struct se_lun pointer and attach it to struct se_cmd
         */
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
         * Allocate the necessary tasks to complete the received CDB+data
         */
        ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
-       if (ret == -ENOMEM) {
-               /* Out of Resources */
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-       } else if (ret == -EINVAL) {
-               /*
-                * Handle case for SAM_STAT_RESERVATION_CONFLICT
-                */
-               if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
-               /*
-                * Otherwise, return SAM_STAT_CHECK_CONDITION and return
-                * sense data.
-                */
-               return PYX_TRANSPORT_USE_SENSE_REASON;
-       }
-
+       if (ret != 0)
+               return ret;
        /*
         * For BIDI commands, pass in the extra READ buffer
         * to transport_generic_map_mem_to_cmd() below..
         */
-       if (se_cmd->t_tasks_bidi) {
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
                struct scsi_data_buffer *sdb = scsi_in(sc);
 
                sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
        }
 
        /* Tell the core about our preallocated memory */
-       ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+       return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
                        scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
-       if (ret < 0)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-
-       return 0;
 }
 
 /*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
 {
        struct tcm_loop_hba *tl_hba = container_of(wwn,
                                struct tcm_loop_hba, tl_hba_wwn);
-       int host_no = tl_hba->sh->host_no;
+
+       pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+               " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+               tl_hba->tl_wwn_address, tl_hba->sh->host_no);
        /*
         * Call device_unregister() on the original tl_hba->dev.
         * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
         * release *tl_hba;
         */
        device_unregister(&tl_hba->dev);
-
-       pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
-               " SAS Address: %s at Linux/SCSI Host ID: %d\n",
-               config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
 
 /* Start items for tcm_loop_cit */
index 88f2ad4..1dcbef4 100644
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        int alua_access_state, primary = 0, rc;
        u16 tg_pt_id, rtpi;
 
-       if (!l_port)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+       if (!l_port) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
        buf = transport_kmap_first_data_page(cmd);
 
        /*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
        if (!l_tg_pt_gp_mem) {
                pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
        spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        if (!l_tg_pt_gp) {
                spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
        rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        if (!rc) {
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICT_ALUA is disabled\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
 
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
-                       rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       rc = -EINVAL;
                        goto out;
                }
                rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * throw an exception with ASCQ: INVALID_PARAMETER_LIST
                         */
                        if (rc != 0) {
-                               rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               rc = -EINVAL;
                                goto out;
                        }
                } else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * INVALID_PARAMETER_LIST
                         */
                        if (rc != 0) {
-                               rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               rc = -EINVAL;
                                goto out;
                        }
                }
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
         * struct t10_alua_lu_gp.
         */
        spin_lock(&lu_gps_lock);
-       atomic_set(&lu_gp->lu_gp_shutdown, 1);
        list_del(&lu_gp->lu_gp_node);
        alua_lu_gps_count--;
        spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 
        tg_pt_gp_mem->tg_pt = port;
        port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
-       atomic_set(&port->sep_tg_pt_gp_active, 1);
 
        return tg_pt_gp_mem;
 }
index 683ba02..831468b 100644
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
        if (cmd->data_length < 60)
                return 0;
 
-       buf[2] = 0x3c;
+       buf[3] = 0x3c;
        /* Set HEADSUP, ORDSUP, SIMPSUP */
        buf[5] = 0x07;
 
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
        if (cmd->data_length < 4) {
                pr_err("SCSI Inquiry payload length: %u"
                        " too small for EVPD=1\n", cmd->data_length);
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
 
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
        }
 
        pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+       cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
        ret = -EINVAL;
 
 out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
        default:
                pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
                       cdb[2] & 0x3f, cdb[3]);
-               return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+               return -EINVAL;
        }
        offset += length;
 
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
        if (cdb[1] & 0x01) {
                pr_err("REQUEST_SENSE description emulation not"
                        " supported\n");
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -ENOSYS;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
        if (!dev->transport->do_discard) {
                pr_err("UNMAP emulation not supported for: %s\n",
                                dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        /* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
        if (!dev->transport->do_discard) {
                pr_err("WRITE_SAME emulation not supported"
                                " for: %s\n", dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
 int target_emulate_synchronize_cache(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
+       struct se_cmd *cmd = task->task_se_cmd;
 
        if (!dev->transport->do_sync_cache) {
                pr_err("SYNCHRONIZE_CACHE emulation not supported"
                        " for: %s\n", dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        dev->transport->do_sync_cache(task);
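
The INQUIRY, MODE SENSE, REQUEST SENSE, UNMAP, WRITE SAME and SYNCHRONIZE CACHE emulation hunks above converge on one convention: a handler that cannot service the CDB records a sense reason on the command (TCM_UNSUPPORTED_SCSI_OPCODE, TCM_INVALID_CDB_FIELD, and so on) and returns a plain negative errno, rather than encoding the failure in a PYX_TRANSPORT_* return value. A compact sketch of that shape, with types that only mimic the kernel's (the demo_* names are not real API):

#include <errno.h>
#include <stdio.h>

/* Illustrative sketch only: demo_* names are not kernel API. */
enum demo_sense_reason {
        DEMO_TCM_NO_SENSE = 0,
        DEMO_TCM_UNSUPPORTED_SCSI_OPCODE,
        DEMO_TCM_INVALID_CDB_FIELD,
};

struct demo_cmd {
        enum demo_sense_reason scsi_sense_reason;
};

/* Reject descriptor-format REQUEST SENSE: set a sense reason, return errno. */
static int demo_emulate_request_sense(struct demo_cmd *cmd, int desc_bit)
{
        if (desc_bit) {
                cmd->scsi_sense_reason = DEMO_TCM_INVALID_CDB_FIELD;
                return -ENOSYS;
        }
        return 0;       /* fixed-format sense data would be built here */
}

int main(void)
{
        struct demo_cmd cmd = { DEMO_TCM_NO_SENSE };
        int rc = demo_emulate_request_sense(&cmd, 1);

        printf("rc=%d sense_reason=%d\n", rc, cmd.scsi_sense_reason);
        return 0;
}
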
index e0c1e8a..93d4f6a 100644
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
 static struct config_group alua_group;
 static struct config_group alua_lu_gps_group;
 
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
 static inline struct se_hba *
 item_to_hba(struct config_item *item)
 {
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
                                " struct se_subsystem_dev\n");
                goto unlock;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
                        " from allocate_virtdevice()\n");
                goto out;
        }
-       spin_lock(&se_device_lock);
-       list_add_tail(&se_dev->se_dev_node, &se_dev_list);
-       spin_unlock(&se_device_lock);
 
        config_group_init_type_name(&se_dev->se_dev_group, name,
                        &target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
        mutex_lock(&hba->hba_access_mutex);
        t = hba->transport;
 
-       spin_lock(&se_device_lock);
-       list_del(&se_dev->se_dev_node);
-       spin_unlock(&se_device_lock);
-
        dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
        for (i = 0; dev_stat_grp->default_groups[i]; i++) {
                df_item = &dev_stat_grp->default_groups[i]->cg_item;
index ba5edec..9b86394 100644
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        /*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
@@ -708,7 +705,7 @@ done:
 
        se_task->task_scsi_status = GOOD;
        transport_complete_task(se_task, 1);
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("dpo_emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("dpo_emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       if (dev->transport->fua_write_emulated == 0) {
+       if (flag && dev->transport->fua_write_emulated == 0) {
                pr_err("fua_write_emulated not supported\n");
                return -EINVAL;
        }
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("ua read emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("ua read emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (dev->transport->write_cache_emulated == 0) {
+       if (flag && dev->transport->write_cache_emulated == 0) {
                pr_err("write_cache_emulated not supported\n");
                return -EINVAL;
        }
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
                ret = -ENOMEM;
                goto out;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
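
The se_dev_set_emulate_*() hunks above only reject the request when the caller tries to enable an emulation the backend does not provide; writing 0 to the attribute now succeeds. A minimal illustration of that check as a free-standing function (hypothetical names, not the configfs store path itself):

#include <errno.h>
#include <stdio.h>

/* Illustrative sketch only: demo_* names are not kernel API. */
/* Enable/disable an emulated feature; disabling is always allowed. */
static int demo_set_emulate_fua_write(int backend_supports_fua, int flag)
{
        if (flag != 0 && flag != 1)
                return -EINVAL;                 /* illegal value */
        if (flag && !backend_supports_fua)
                return -EINVAL;                 /* cannot enable */
        return 0;
}

int main(void)
{
        printf("enable on unsupported backend: %d\n",
               demo_set_emulate_fua_write(0, 1));       /* -EINVAL */
        printf("disable on unsupported backend: %d\n",
               demo_set_emulate_fua_write(0, 0));       /* 0 */
        return 0;
}
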
index 67cd6fe..b4864fb 100644
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
                return -ENOMEM;
        }
 
-       for (i = 0; i < task->task_sg_nents; i++) {
-               iov[i].iov_len = sg[i].length;
-               iov[i].iov_base = sg_virt(&sg[i]);
+       for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+               iov[i].iov_len = sg->length;
+               iov[i].iov_base = sg_virt(sg);
        }
 
        old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
                return -ENOMEM;
        }
 
-       for (i = 0; i < task->task_sg_nents; i++) {
-               iov[i].iov_len = sg[i].length;
-               iov[i].iov_base = sg_virt(&sg[i]);
+       for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+               iov[i].iov_len = sg->length;
+               iov[i].iov_base = sg_virt(sg);
        }
 
        old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
                if (ret > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-                   cmd->t_tasks_fua) {
+                   (cmd->se_cmd_flags & SCF_FUA)) {
                        /*
                         * We might need to be a bit smarter here
                         * and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
 
        }
 
-       if (ret < 0)
+       if (ret < 0) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return ret;
+       }
        if (ret) {
                task->task_scsi_status = GOOD;
                transport_complete_task(task, 1);
        }
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     fd_free_task(): (Part of se_subsystem_api_t template)
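
fd_do_readv() and fd_do_writev() above stop indexing the scatterlist as a flat array and walk it with for_each_sg(), which follows chained scatterlist entries correctly. A toy user-space analogue of iterating linked segments instead of assuming a contiguous array (demo_* names are invented for illustration):

#include <stdio.h>

/* Illustrative sketch only: demo_* names are not kernel API. */
struct demo_seg {
        const char *base;
        unsigned int len;
        struct demo_seg *next;  /* stands in for scatterlist chaining */
};

#define demo_for_each_seg(head, s) \
        for ((s) = (head); (s) != NULL; (s) = (s)->next)

int main(void)
{
        struct demo_seg c = { "!\n", 2, NULL };
        struct demo_seg b = { "world", 5, &c };
        struct demo_seg a = { "hello ", 6, &b };
        struct demo_seg *s;
        unsigned int total = 0;

        demo_for_each_seg(&a, s) {
                fwrite(s->base, 1, s->len, stdout);
                total += s->len;        /* works even across chained segments */
        }
        printf("total %u bytes\n", total);
        return 0;
}
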
index 7698efe..4aa9922 100644
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
                 */
                if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
                    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-                    task->task_se_cmd->t_tasks_fua))
+                    (cmd->se_cmd_flags & SCF_FUA)))
                        rw = WRITE_FUA;
                else
                        rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                                " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOSYS;
        }
 
        bio = iblock_get_bio(task, block_lba, sg_num);
-       if (!bio)
-               return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       if (!bio) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOMEM;
+       }
 
        bio_list_init(&list);
        bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
                submit_bio(rw, bio);
        blk_finish_plug(&plug);
 
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 
 fail:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
-       return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 static u32 iblock_get_device_rev(struct se_device *dev)
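
In the iblock_do_task() hunk the write hint now keys off the per-command SCF_FUA flag rather than the removed t_tasks_fua field: forced unit access is used either when no volatile write cache is emulated or when the initiator asked for FUA and FUA emulation is enabled. A small decision helper sketching that logic (the demo_* names are illustrative only):

#include <stdio.h>

/* Illustrative sketch only: demo_* names are not kernel API. */
enum demo_rw { DEMO_WRITE, DEMO_WRITE_FUA };

/* Pick the write variant from device attributes and the per-command flag. */
static enum demo_rw demo_pick_write(int emulate_write_cache,
                                    int emulate_fua_write, int cmd_has_fua)
{
        if (emulate_write_cache == 0 ||
            (emulate_fua_write > 0 && cmd_has_fua))
                return DEMO_WRITE_FUA;
        return DEMO_WRITE;
}

int main(void)
{
        printf("%d %d\n",
               demo_pick_write(1, 1, 1),        /* DEMO_WRITE_FUA */
               demo_pick_write(1, 1, 0));       /* DEMO_WRITE */
        return 0;
}
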
index 5a4ebfc..95dee70 100644
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
                pr_err("Received legacy SPC-2 RESERVE/RELEASE"
                        " while active SPC-3 registrations exist,"
                        " returning RESERVATION_CONFLICT\n");
-               *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
                return true;
        }
 
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
            (cmd->t_task_cdb[1] & 0x02)) {
                pr_err("LongIO and Obselete Bits set, returning"
                                " ILLEGAL_REQUEST\n");
-               ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
                        " from %s \n", cmd->se_lun->unpacked_lun,
                        cmd->se_deve->mapped_lun,
                        sess->se_node_acl->initiatorname);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out_unlock;
        }
 
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
        tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
        if (!tidh_new) {
                pr_err("Unable to allocate tidh_new\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        INIT_LIST_HEAD(&tidh_new->dest_list);
        tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
                                sa_res_key, all_tg_pt, aptpl);
        if (!local_pr_reg) {
                kfree(tidh_new);
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOMEM;
        }
        tidh_new->dest_pr_reg = local_pr_reg;
        /*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
                pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
                        " does not equal CDB data_length: %u\n", tpdl,
                        cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
                                        " for tmp_tpg\n");
                                atomic_dec(&tmp_tpg->tpg_pr_ref_count);
                                smp_mb__after_atomic_dec();
-                               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               ret = -EINVAL;
                                goto out;
                        }
                        /*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
                                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                                smp_mb__after_atomic_dec();
                                core_scsi3_tpg_undepend_item(tmp_tpg);
-                               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               ret = -EINVAL;
                                goto out;
                        }
 
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
                if (!dest_tpg) {
                        pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
                                        " dest_tpg\n");
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
 #if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
                                " %u for Transport ID: %s\n", tid_len, ptr);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                /*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
 
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
 
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
                        smp_mb__after_atomic_dec();
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = -EINVAL;
                        goto out;
                }
 #if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_lunacl_undepend_item(dest_se_deve);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = -ENOMEM;
                        goto out;
                }
                INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
                        kfree(tidh_new);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
                if (res_key) {
                        pr_warn("SPC-3 PR: Reservation Key non-zero"
                                " for SA REGISTER, returning CONFLICT\n");
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * Do nothing but return GOOD status.
                 */
                if (!sa_res_key)
-                       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+                       return 0;
 
                if (!spec_i_pt) {
                        /*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
                        if (ret != 0) {
                                pr_err("Unable to allocate"
                                        " struct t10_pr_registration\n");
-                               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               return -EINVAL;
                        }
                } else {
                        /*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
                                        " 0x%016Lx\n", res_key,
                                        pr_reg->pr_res_key);
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                               return -EINVAL;
                        }
                }
                if (spec_i_pt) {
                        pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
                                " set while sa_res_key=0\n");
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       return -EINVAL;
                }
                /*
                 * An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
                                " registration exists, but ALL_TG_PT=1 bit not"
                                " present in received PROUT\n");
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_INVALID_CDB_FIELD;
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
                }
                /*
                 * Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
                                pr_err("Unable to allocate"
                                        " pr_aptpl_buf\n");
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               return -EINVAL;
                        }
                }
                /*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
                        if (pr_holder < 0) {
                                kfree(pr_aptpl_buf);
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                               return -EINVAL;
                        }
 
                        spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RESERVE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
                        " does not match existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        /*
         * See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
 
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
 
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        /*
         * Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
        default:
                pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
                        " 0x%02x\n", type);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RELEASE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
            (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
 
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
        if (!pr_reg_n) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for CLEAR\n");
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
                        " existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
        int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
        int prh_type = 0, prh_scope = 0, ret;
 
-       if (!se_sess)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+       if (!se_sess) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
 
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
        pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for PREEMPT%s\n",
                        (abort) ? "_AND_ABORT" : "");
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        if (pr_reg_n->pr_res_key != res_key) {
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        INIT_LIST_HEAD(&preempt_and_abort_list);
 
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
        if (!all_reg && !sa_res_key) {
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        /*
         * From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
                if (!released_regs) {
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg_n);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
        default:
                pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
                        " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        memset(dest_iport, 0, 64);
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
                        " *pr_reg for REGISTER_AND_MOVE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * The provided reservation key must match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " res_key: 0x%016Lx does not match existing SA REGISTER"
                        " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * The service active reservation key needs to be non zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
                        " sa_res_key\n");
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        /*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " does not equal CDB data_length: %u\n", tid_len,
                        cmd->data_length);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
                        smp_mb__after_atomic_dec();
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return -EINVAL;
                }
 
                spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " fabric ops from Relative Target Port Identifier:"
                        " %hu\n", rtpi);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " from fabric: %s\n", proto_ident,
                        dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
                        dest_tf_ops->get_fabric_name());
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
                        " containg a valid tpg_parse_pr_out_transport_id"
                        " function pointer\n");
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = -EINVAL;
                goto out;
        }
        initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!initiator_str) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
                        " initiator_str from Transport ID\n");
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
                        " matches: %s on received I_T Nexus\n", initiator_str,
                        pr_reg_nacl->initiatorname);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " matches: %s %s on received I_T Nexus\n",
                        initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
                        pr_reg->pr_reg_isid);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
                pr_err("Unable to locate %s dest_node_acl for"
                        " TransportID%s\n", dest_tf_ops->get_fabric_name(),
                        initiator_str);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_node_acl = NULL;
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 #if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
        if (!dest_se_deve) {
                pr_err("Unable to locate %s dest_se_deve from RTPI:"
                        " %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3553,7 +3615,8 @@ after_iport_check:
                atomic_dec(&dest_se_deve->pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_se_deve = NULL;
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = -EINVAL;
                goto out;
        }
 #if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
                        " currently held\n");
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3585,7 +3649,8 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
                        " Nexus is not reservation holder\n");
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3603,7 +3668,8 @@ after_iport_check:
                        " reservation for type: %s\n",
                        core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
        pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
                                sa_res_key, 0, aptpl, 2, 1);
                if (ret != 0) {
                        spin_unlock(&dev->dev_reservation_lock);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         * FIXME: A NULL struct se_session pointer means this is not coming from
         * a $FABRIC_MOD's nexus, but from internal passthrough ops.
         */
-       if (!cmd->se_sess)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+       if (!cmd->se_sess) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
 
        if (cmd->data_length < 24) {
                pr_warn("SPC-PR: Received PR OUT parameter list"
                        " length too small: %u\n", cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         * SPEC_I_PT=1 is only valid for Service action: REGISTER
         */
        if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
            (cmd->data_length != 24)) {
                pr_warn("SPC-PR: Received PR OUT illegal parameter"
                        " list length: %u\n", cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_OUT service"
                        " action: 0x%02x\n", cdb[1] & 0x1f);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                break;
        }
 
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
        if (cmd->data_length < 6) {
                pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
                        " %u too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
 
        switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_IN service"
                        " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                break;
        }
 
index ed32e1e..8b15e56 100644 (file)
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
 static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
                struct bio **hbio)
 {
+       struct se_cmd *cmd = task->task_se_cmd;
        struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
        u32 task_sg_num = task->task_sg_nents;
        struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
        u32 data_len = task->task_size, i, len, bytes, off;
        int nr_pages = (task->task_size + task_sg[0].offset +
                        PAGE_SIZE - 1) >> PAGE_SHIFT;
-       int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       int nr_vecs = 0, rc;
        int rw = (task->task_data_direction == DMA_TO_DEVICE);
 
        *hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       return ret;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 static int pscsi_do_task(struct se_task *task)
 {
+       struct se_cmd *cmd = task->task_se_cmd;
        struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
        struct pscsi_plugin_task *pt = PSCSI_TASK(task);
        struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
                if (!req || IS_ERR(req)) {
                        pr_err("PSCSI: blk_get_request() failed: %ld\n",
                                        req ? IS_ERR(req) : -ENOMEM);
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return -ENODEV;
                }
        } else {
                BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
                 * Setup the main struct request for the task->task_sg[] payload
                 */
                ret = pscsi_map_sg(task, task->task_sg, &hbio);
-               if (ret < 0)
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+               if (ret < 0) {
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return ret;
+               }
 
                req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
                                       GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
                        (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
                        pscsi_req_done);
 
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 
 fail:
        while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 /*     pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
                        " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
                        pt->pscsi_result);
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-               task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-               task->task_se_cmd->transport_error_status =
-                                       PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               task->task_se_cmd->scsi_sense_reason =
+                                       TCM_UNSUPPORTED_SCSI_OPCODE;
                transport_complete_task(task, 0);
                break;
        }
index 5158d38..02e51fa 100644 (file)
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
        return NULL;
 }
 
-/*     rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
 {
        struct se_task *task = &req->rd_task;
        struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
        struct rd_dev_sg_table *table;
-       struct scatterlist *sg_d, *sg_s;
-       void *dst, *src;
-       u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
-       u32 length, page_end = 0, table_sg_end;
+       struct scatterlist *rd_sg;
+       struct sg_mapping_iter m;
        u32 rd_offset = req->rd_offset;
+       u32 src_len;
 
        table = rd_get_sg_table(dev, req->rd_page);
        if (!table)
                return -EINVAL;
 
-       table_sg_end = (table->page_end_offset - req->rd_page);
-       sg_d = task->task_sg;
-       sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+       rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
 
-       pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
-               " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
-               req->rd_page, req->rd_offset);
-
-       src_offset = rd_offset;
+       pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+                       dev->rd_dev_id, read_rd ? "Read" : "Write",
+                       task->task_lba, req->rd_size, req->rd_page,
+                       rd_offset);
 
+       src_len = PAGE_SIZE - rd_offset;
+       sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+                       read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (req->rd_size) {
-               if ((sg_d[i].length - dst_offset) <
-                   (sg_s[j].length - src_offset)) {
-                       length = (sg_d[i].length - dst_offset);
-
-                       pr_debug("Step 1 - sg_d[%d]: %p length: %d"
-                               " offset: %u sg_s[%d].length: %u\n", i,
-                               &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
-                               sg_s[j].length);
-                       pr_debug("Step 1 - length: %u dst_offset: %u"
-                               " src_offset: %u\n", length, dst_offset,
-                               src_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       dst = sg_virt(&sg_d[i++]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       src = sg_virt(&sg_s[j]) + src_offset;
-                       BUG_ON(!src);
-
-                       dst_offset = 0;
-                       src_offset = length;
-                       page_end = 0;
-               } else {
-                       length = (sg_s[j].length - src_offset);
-
-                       pr_debug("Step 2 - sg_d[%d]: %p length: %d"
-                               " offset: %u sg_s[%d].length: %u\n", i,
-                               &sg_d[i], sg_d[i].length, sg_d[i].offset,
-                               j, sg_s[j].length);
-                       pr_debug("Step 2 - length: %u dst_offset: %u"
-                               " src_offset: %u\n", length, dst_offset,
-                               src_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       dst = sg_virt(&sg_d[i]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       if (sg_d[i].length == length) {
-                               i++;
-                               dst_offset = 0;
-                       } else
-                               dst_offset = length;
-
-                       src = sg_virt(&sg_s[j++]) + src_offset;
-                       BUG_ON(!src);
-
-                       src_offset = 0;
-                       page_end = 1;
-               }
+               u32 len;
+               void *rd_addr;
 
-               memcpy(dst, src, length);
+               sg_miter_next(&m);
+               len = min((u32)m.length, src_len);
+               m.consumed = len;
 
-               pr_debug("page: %u, remaining size: %u, length: %u,"
-                       " i: %u, j: %u\n", req->rd_page,
-                       (req->rd_size - length), length, i, j);
+               rd_addr = sg_virt(rd_sg) + rd_offset;
 
-               req->rd_size -= length;
-               if (!req->rd_size)
-                       return 0;
+               if (read_rd)
+                       memcpy(m.addr, rd_addr, len);
+               else
+                       memcpy(rd_addr, m.addr, len);
 
-               if (!page_end)
+               req->rd_size -= len;
+               if (!req->rd_size)
                        continue;
 
-               if (++req->rd_page <= table->page_end_offset) {
-                       pr_debug("page: %u in same page table\n",
-                               req->rd_page);
+               src_len -= len;
+               if (src_len) {
+                       rd_offset += len;
                        continue;
                }
 
-               pr_debug("getting new page table for page: %u\n",
-                               req->rd_page);
-
-               table = rd_get_sg_table(dev, req->rd_page);
-               if (!table)
-                       return -EINVAL;
-
-               sg_s = &table->sg_table[j = 0];
-       }
-
-       return 0;
-}
-
-/*     rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
-       struct se_task *task = &req->rd_task;
-       struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
-       struct rd_dev_sg_table *table;
-       struct scatterlist *sg_d, *sg_s;
-       void *dst, *src;
-       u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
-       u32 length, page_end = 0, table_sg_end;
-       u32 rd_offset = req->rd_offset;
-
-       table = rd_get_sg_table(dev, req->rd_page);
-       if (!table)
-               return -EINVAL;
-
-       table_sg_end = (table->page_end_offset - req->rd_page);
-       sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
-       sg_s = task->task_sg;
-
-       pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
-               " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
-               req->rd_page, req->rd_offset);
-
-       dst_offset = rd_offset;
-
-       while (req->rd_size) {
-               if ((sg_s[i].length - src_offset) <
-                   (sg_d[j].length - dst_offset)) {
-                       length = (sg_s[i].length - src_offset);
-
-                       pr_debug("Step 1 - sg_s[%d]: %p length: %d"
-                               " offset: %d sg_d[%d].length: %u\n", i,
-                               &sg_s[i], sg_s[i].length, sg_s[i].offset,
-                               j, sg_d[j].length);
-                       pr_debug("Step 1 - length: %u src_offset: %u"
-                               " dst_offset: %u\n", length, src_offset,
-                               dst_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       src = sg_virt(&sg_s[i++]) + src_offset;
-                       BUG_ON(!src);
-
-                       dst = sg_virt(&sg_d[j]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       src_offset = 0;
-                       dst_offset = length;
-                       page_end = 0;
-               } else {
-                       length = (sg_d[j].length - dst_offset);
-
-                       pr_debug("Step 2 - sg_s[%d]: %p length: %d"
-                               " offset: %d sg_d[%d].length: %u\n", i,
-                               &sg_s[i], sg_s[i].length, sg_s[i].offset,
-                               j, sg_d[j].length);
-                       pr_debug("Step 2 - length: %u src_offset: %u"
-                               " dst_offset: %u\n", length, src_offset,
-                               dst_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       src = sg_virt(&sg_s[i]) + src_offset;
-                       BUG_ON(!src);
-
-                       if (sg_s[i].length == length) {
-                               i++;
-                               src_offset = 0;
-                       } else
-                               src_offset = length;
-
-                       dst = sg_virt(&sg_d[j++]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       dst_offset = 0;
-                       page_end = 1;
-               }
-
-               memcpy(dst, src, length);
-
-               pr_debug("page: %u, remaining size: %u, length: %u,"
-                       " i: %u, j: %u\n", req->rd_page,
-                       (req->rd_size - length), length, i, j);
-
-               req->rd_size -= length;
-               if (!req->rd_size)
-                       return 0;
-
-               if (!page_end)
-                       continue;
-
-               if (++req->rd_page <= table->page_end_offset) {
-                       pr_debug("page: %u in same page table\n",
-                               req->rd_page);
+               /* rd page completed, next one please */
+               req->rd_page++;
+               rd_offset = 0;
+               src_len = PAGE_SIZE;
+               if (req->rd_page <= table->page_end_offset) {
+                       rd_sg++;
                        continue;
                }
 
-               pr_debug("getting new page table for page: %u\n",
-                               req->rd_page);
-
                table = rd_get_sg_table(dev, req->rd_page);
-               if (!table)
+               if (!table) {
+                       sg_miter_stop(&m);
                        return -EINVAL;
+               }
 
-               sg_d = &table->sg_table[j = 0];
+               /* since we increment, the first sg entry is correct */
+               rd_sg = table->sg_table;
        }
-
+       sg_miter_stop(&m);
        return 0;
 }
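
The rewritten rd_MEMCPY() collapses the separate read and write loops into one pass that walks the task scatterlist with sg_miter while keeping only a page/offset cursor into the ramdisk side. A rough user-space analogue of that copy loop, with a plain array of fixed-size pages standing in for the rd_dev sg table and hard-coded chunk lengths standing in for task->task_sg:

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 8                       /* tiny pages keep the trace readable */

int main(void)
{
        char pages[3][PAGE_SZ];         /* the "ramdisk" backing store */
        char out[32];
        size_t chunk_len[] = { 5, 7, 8 };  /* destination "scatterlist" */
        size_t size = 20;               /* bytes to transfer (req->rd_size) */
        size_t rd_page = 0, rd_off = 3; /* starting cursor into the ramdisk */
        size_t src_len = PAGE_SZ - rd_off;
        size_t out_pos = 0;

        memset(pages[0], 'A', PAGE_SZ);
        memset(pages[1], 'B', PAGE_SZ);
        memset(pages[2], 'C', PAGE_SZ);

        for (size_t i = 0; size && i < 3; i++) {   /* one "sg_miter_next" per chunk */
                size_t left = chunk_len[i];

                while (size && left) {
                        size_t len = left < src_len ? left : src_len;

                        if (len > size)
                                len = size;
                        memcpy(out + out_pos, &pages[rd_page][rd_off], len);
                        out_pos += len;
                        left -= len;
                        size -= len;
                        src_len -= len;
                        rd_off += len;
                        if (!src_len) {         /* rd page exhausted, move on */
                                rd_page++;
                                rd_off = 0;
                                src_len = PAGE_SZ;
                        }
                }
        }
        fwrite(out, 1, out_pos, stdout);        /* prints AAAAABBBBBBBBCCCCCCC */
        putchar('\n');
        return 0;
}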
 
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
        struct rd_request *req = RD_REQ(task);
-       unsigned long long lba;
+       u64 tmp;
        int ret;
 
-       req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
-       lba = task->task_lba;
-       req->rd_offset = (do_div(lba,
-                         (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
-                          dev->se_sub_dev->se_dev_attrib.block_size;
+       tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+       req->rd_offset = do_div(tmp, PAGE_SIZE);
+       req->rd_page = tmp;
        req->rd_size = task->task_size;
 
-       if (task->task_data_direction == DMA_FROM_DEVICE)
-               ret = rd_MEMCPY_read(req);
-       else
-               ret = rd_MEMCPY_write(req);
-
+       ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
        if (ret != 0)
                return ret;
 
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
-
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
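
The simplified rd_MEMCPY_do_task() derives the starting page and intra-page offset straight from the byte offset of the LBA; do_div() divides tmp by PAGE_SIZE in place and returns the remainder. The same arithmetic in ordinary C, with illustrative values in place of the kernel helpers:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

int main(void)
{
        uint64_t lba = 9;
        uint32_t block_size = 512;

        uint64_t tmp = lba * block_size;        /* byte offset: 4608 */
        uint32_t rd_offset = tmp % PAGE_SIZE;   /* do_div() remainder: 512 */
        uint64_t rd_page = tmp / PAGE_SIZE;     /* do_div() quotient:  1  */

        printf("page=%llu offset=%u\n",
               (unsigned long long)rd_page, (unsigned int)rd_offset);
        return 0;
}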
 
 /*     rd_free_task(): (Part of se_subsystem_api_t template)
index 217e29d..6845228 100644 (file)
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
                        " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
                        "Preempt" : "", cmd, cmd->t_state,
                        atomic_read(&cmd->t_fe_count));
-               /*
-                * Signal that the command has failed via cmd->se_cmd_flags,
-                */
-               transport_new_cmd_failure(cmd);
 
                core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
                                atomic_read(&cmd->t_fe_count));
index 3400ae6..0257658 100644 (file)
@@ -61,7 +61,6 @@
 static int sub_api_initialized;
 
 static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_tmr_req_cache;
 struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
 {
-       se_cmd_cache = kmem_cache_create("se_cmd_cache",
-                       sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
-       if (!se_cmd_cache) {
-               pr_err("kmem_cache_create for struct se_cmd failed\n");
-               goto out;
-       }
        se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
                        sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
                        0, NULL);
        if (!se_tmr_req_cache) {
                pr_err("kmem_cache_create() for struct se_tmr_req"
                                " failed\n");
-               goto out_free_cmd_cache;
+               goto out;
        }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
        kmem_cache_destroy(se_sess_cache);
 out_free_tmr_req_cache:
        kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
-       kmem_cache_destroy(se_cmd_cache);
 out:
        return -ENOMEM;
 }
@@ -191,7 +182,6 @@ out:
 void release_se_kmem_caches(void)
 {
        destroy_workqueue(target_completion_wq);
-       kmem_cache_destroy(se_cmd_cache);
        kmem_cache_destroy(se_tmr_req_cache);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
                task->task_scsi_status = GOOD;
        } else {
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-               task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
-               task->task_se_cmd->transport_error_status =
-                                       PYX_TRANSPORT_ILLEGAL_REQUEST;
+               task->task_se_cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
        }
 
        transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 
-       transport_generic_request_failure(cmd, 1, 1);
+       transport_generic_request_failure(cmd);
 }
 
 /*     transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
        if (cmd->t_tasks_failed) {
                if (!task->task_error_status) {
                        task->task_error_status =
-                               PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-                       cmd->transport_error_status =
-                               PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
+
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
                atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
        dev->se_hba             = hba;
        dev->se_sub_dev         = se_dev;
        dev->transport          = transport;
-       atomic_set(&dev->active_cmds, 0);
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->execute_task_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
-       INIT_LIST_HEAD(&dev->ordered_cmd_list);
        INIT_LIST_HEAD(&dev->state_task_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
-       spin_lock_init(&dev->ordered_cmd_lock);
-       spin_lock_init(&dev->state_task_lock);
-       spin_lock_init(&dev->dev_alua_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->dev_status_lock);
-       spin_lock_init(&dev->dev_status_thr_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
 {
        INIT_LIST_HEAD(&cmd->se_lun_node);
        INIT_LIST_HEAD(&cmd->se_delayed_node);
-       INIT_LIST_HEAD(&cmd->se_ordered_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
        INIT_LIST_HEAD(&cmd->se_queue_node);
        INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
                pr_err("Received SCSI CDB with command_size: %d that"
                        " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                        scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
        /*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                scsi_command_size(cdb),
                                (unsigned long)sizeof(cmd->__t_task_cdb));
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        return -ENOMEM;
                }
        } else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
         * and call transport_generic_request_failure() if necessary..
         */
        ret = transport_generic_new_cmd(cmd);
-       if (ret < 0) {
-               cmd->transport_error_status = ret;
-               transport_generic_request_failure(cmd, 0,
-                               (cmd->data_direction != DMA_TO_DEVICE));
-       }
+       if (ret < 0)
+               transport_generic_request_failure(cmd);
+
        return 0;
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
-static void transport_generic_request_failure(
-       struct se_cmd *cmd,
-       int complete,
-       int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
 {
        int ret = 0;
 
        pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
                " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->t_task_cdb[0]);
-       pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+       pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
                cmd->se_tfo->get_cmd_state(cmd),
-               cmd->t_state,
-               cmd->transport_error_status);
+               cmd->t_state, cmd->scsi_sense_reason);
        pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
                " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
                " t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
        if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
                transport_complete_task_attr(cmd);
 
-       if (complete) {
-               cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-       }
-
-       switch (cmd->transport_error_status) {
-       case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               break;
-       case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
-               cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
-               break;
-       case PYX_TRANSPORT_INVALID_CDB_FIELD:
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               break;
-       case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               break;
-       case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
-               if (!sc)
-                       transport_new_cmd_failure(cmd);
-               /*
-                * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
-                * we force this session to fall back to session
-                * recovery.
-                */
-               cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
-               cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
-               goto check_stop;
-       case PYX_TRANSPORT_LU_COMM_FAILURE:
-       case PYX_TRANSPORT_ILLEGAL_REQUEST:
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               break;
-       case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
-               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
-               break;
-       case PYX_TRANSPORT_WRITE_PROTECTED:
-               cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+       switch (cmd->scsi_sense_reason) {
+       case TCM_NON_EXISTENT_LUN:
+       case TCM_UNSUPPORTED_SCSI_OPCODE:
+       case TCM_INVALID_CDB_FIELD:
+       case TCM_INVALID_PARAMETER_LIST:
+       case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+       case TCM_UNKNOWN_MODE_PAGE:
+       case TCM_WRITE_PROTECTED:
+       case TCM_CHECK_CONDITION_ABORT_CMD:
+       case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+       case TCM_CHECK_CONDITION_NOT_READY:
                break;
-       case PYX_TRANSPORT_RESERVATION_CONFLICT:
+       case TCM_RESERVATION_CONFLICT:
                /*
                 * No SENSE Data payload for this case, set SCSI Status
                 * and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
                if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                goto check_stop;
-       case PYX_TRANSPORT_USE_SENSE_REASON:
-               /*
-                * struct se_cmd->scsi_sense_reason already set
-                */
-               break;
        default:
                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
-                       cmd->t_task_cdb[0],
-                       cmd->transport_error_status);
+                       cmd->t_task_cdb[0], cmd->scsi_sense_reason);
                cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                break;
        }
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
         * transport_send_check_condition_and_sense() after handling
         * possible unsolicited write data payloads.
         */
-       if (!sc && !cmd->se_tfo->new_cmd_map)
-               transport_new_cmd_failure(cmd);
-       else {
-               ret = transport_send_check_condition_and_sense(cmd,
-                               cmd->scsi_sense_reason, 0);
-               if (ret == -EAGAIN || ret == -ENOMEM)
-                       goto queue_full;
-       }
+       ret = transport_send_check_condition_and_sense(cmd,
+                       cmd->scsi_sense_reason, 0);
+       if (ret == -EAGAIN || ret == -ENOMEM)
+               goto queue_full;
 
 check_stop:
        transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
         * to allow the passed struct se_cmd list of tasks to the front of the list.
         */
         if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               atomic_inc(&cmd->se_dev->dev_hoq_count);
-               smp_mb__after_atomic_inc();
                pr_debug("Added HEAD_OF_QUEUE for CDB:"
                        " 0x%02x, se_ordered_id: %u\n",
                        cmd->t_task_cdb[0],
                        cmd->se_ordered_id);
                return 1;
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               spin_lock(&cmd->se_dev->ordered_cmd_lock);
-               list_add_tail(&cmd->se_ordered_node,
-                               &cmd->se_dev->ordered_cmd_list);
-               spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
                atomic_inc(&cmd->se_dev->dev_ordered_sync);
                smp_mb__after_atomic_inc();
 
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 {
        int add_tasks;
 
-       if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
-               cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-               transport_generic_request_failure(cmd, 0, 1);
+       if (se_dev_check_online(cmd->se_dev) != 0) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               transport_generic_request_failure(cmd);
                return 0;
        }
 
@@ -2163,14 +2102,13 @@ check_depth:
        else
                error = dev->transport->do_task(task);
        if (error != 0) {
-               cmd->transport_error_status = error;
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                task->task_flags &= ~TF_ACTIVE;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                atomic_set(&cmd->t_transport_sent, 0);
                transport_stop_tasks_for_cmd(cmd);
                atomic_inc(&dev->depth_left);
-               transport_generic_request_failure(cmd, 0, 1);
+               transport_generic_request_failure(cmd);
        }
 
        goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
        return 0;
 }
 
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
-       unsigned long flags;
-       /*
-        * Any unsolicited data will get dumped for failed command inside of
-        * the fabric plugin
-        */
-       spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-       se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
-       se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
 static inline u32 transport_get_sectors_6(
        unsigned char *cdb,
        struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
 
        /*
         * Everything else assume TYPE_DISK Sector CDB location.
-        * Use 8-bit sector value.
+        * Use 8-bit sector value.  SBC-3 says:
+        *
+        *   A TRANSFER LENGTH field set to zero specifies that 256
+        *   logical blocks shall be written.  Any other value
+        *   specifies the number of logical blocks that shall be
+        *   written.
         */
 type_disk:
-       return (u32)cdb[4];
+       return cdb[4] ? : 256;
 }
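
The gcc shorthand cdb[4] ? : 256 evaluates to cdb[4] when it is non-zero and to 256 when it is zero, which is exactly the SBC-3 rule quoted in the comment. Spelled out as a small stand-alone function:

#include <stdio.h>

static unsigned int sectors_from_cdb6(unsigned char xfer_len_byte)
{
        /* SBC-3: a TRANSFER LENGTH of 0 in a 6-byte CDB means 256 blocks */
        return xfer_len_byte ? xfer_len_byte : 256;
}

int main(void)
{
        printf("%u %u %u\n", sectors_from_cdb6(0),
               sectors_from_cdb6(1), sectors_from_cdb6(255));   /* 256 1 255 */
        return 0;
}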
 
 static inline u32 transport_get_sectors_10(
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
        return -1;
 }
 
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
-       cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
-       /*
-        * For UA Interlock Code 11b, a RESERVATION CONFLICT will
-        * establish a UNIT ATTENTION with PREVIOUS RESERVATION
-        * CONFLICT STATUS.
-        *
-        * See spc4r17, section 7.4.6 Control Mode Page, Table 349
-        */
-       if (cmd->se_sess &&
-           cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
-               core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
-                       cmd->orig_fe_lun, 0x2C,
-                       ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-       return -EINVAL;
-}
-
 static inline long long transport_dev_end_lba(struct se_device *dev)
 {
        return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
         */
        if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
                if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
-                                       cmd, cdb, pr_reg_type) != 0)
-                       return transport_handle_reservation_conflict(cmd);
+                                       cmd, cdb, pr_reg_type) != 0) {
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EBUSY;
+               }
                /*
                 * This means the CDB is allowed for the SCSI Initiator port
                 * when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_32(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_32(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_64(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case XDWRITEREAD_10:
                if ((cmd->data_direction != DMA_TO_DEVICE) ||
-                   !(cmd->t_tasks_bidi))
+                   !(cmd->se_cmd_flags & SCF_BIDI))
                        goto out_invalid_cdb_field;
                sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
                if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
                 * Setup BIDI XOR callback to be run after I/O completion.
                 */
                cmd->transport_complete_callback = &transport_xor_callback;
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                break;
        case VARIABLE_LENGTH_CMD:
                service_action = get_unaligned_be16(&cdb[8]);
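
Across the WRITE_10/12/16 and XDWRITEREAD cases above, the dedicated t_tasks_fua field is folded into the se_cmd_flags bitmask: when the FUA bit (bit 3, mask 0x8) of the relevant CDB byte is set, the command now simply gets an SCF_FUA flag. A small illustration of the bit handling; the flag value here is made up for the example:

#include <stdio.h>

#define SCF_FUA (1 << 0)        /* illustrative value, not the kernel's */

int main(void)
{
        unsigned char cdb1 = 0x08;      /* WRITE(10) byte 1 with FUA set */
        unsigned int se_cmd_flags = 0;

        if (cdb1 & 0x8)
                se_cmd_flags |= SCF_FUA;

        printf("FUA %s\n", (se_cmd_flags & SCF_FUA) ? "set" : "clear");
        return 0;
}
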
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
                         * completion.
                         */
                        cmd->transport_complete_callback = &transport_xor_callback;
-                       cmd->t_tasks_fua = (cdb[10] & 0x8);
+                       if (cdb[10] & 0x8)
+                               cmd->se_cmd_flags |= SCF_FUA;
                        break;
                case WRITE_SAME_32:
                        sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                        " SIMPLE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               atomic_dec(&dev->dev_hoq_count);
-               smp_mb__after_atomic_dec();
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for"
                        " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               spin_lock(&dev->ordered_cmd_lock);
-               list_del(&cmd->se_ordered_node);
                atomic_dec(&dev->dev_ordered_sync);
                smp_mb__after_atomic_dec();
-               spin_unlock(&dev->ordered_cmd_lock);
 
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
 
        if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
            (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+               /*
+                * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+                * scatterlists already have been set to follow what the fabric
+                * passes for the original expected data transfer length.
+                */
+               if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+                       pr_warn("Rejecting SCSI DATA overflow for fabric using"
+                               " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
+               }
 
                cmd->t_data_sg = sgl;
                cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
            cmd->data_length) {
                ret = transport_generic_get_mem(cmd);
                if (ret < 0)
-                       return ret;
+                       goto out_fail;
        }
 
        /*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
                task_cdbs = transport_allocate_control_task(cmd);
        }
 
-       if (task_cdbs <= 0)
+       if (task_cdbs < 0)
                goto out_fail;
+       else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+               cmd->t_state = TRANSPORT_COMPLETE;
+               atomic_set(&cmd->t_transport_active, 1);
+               INIT_WORK(&cmd->work, target_complete_ok_work);
+               queue_work(target_completion_wq, &cmd->work);
+               return 0;
+       }
 
        if (set_counts) {
                atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
        else if (ret < 0)
                return ret;
 
-       return PYX_TRANSPORT_WRITE_PENDING;
+       return 1;
 
 queue_full:
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        atomic_inc(&cmd->t_transport_aborted);
                        smp_mb__after_atomic_inc();
-                       cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-                       transport_new_cmd_failure(cmd);
-                       return;
                }
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
        struct se_cmd *cmd;
        struct se_device *dev = (struct se_device *) param;
 
-       set_user_nice(current, -20);
-
        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
                                atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
                        }
                        ret = cmd->se_tfo->new_cmd_map(cmd);
                        if (ret < 0) {
-                               cmd->transport_error_status = ret;
-                               transport_generic_request_failure(cmd,
-                                               0, (cmd->data_direction !=
-                                                   DMA_TO_DEVICE));
+                               transport_generic_request_failure(cmd);
                                break;
                        }
                        ret = transport_generic_new_cmd(cmd);
                        if (ret < 0) {
-                               cmd->transport_error_status = ret;
-                               transport_generic_request_failure(cmd,
-                                       0, (cmd->data_direction !=
-                                        DMA_TO_DEVICE));
+                               transport_generic_request_failure(cmd);
+                               break;
                        }
                        break;
                case TRANSPORT_PROCESS_WRITE:
index 4fac37c..71fc9ce 100644 (file)
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
        lport = ep->lp;
        fp = fc_frame_alloc(lport, sizeof(*txrdy));
        if (!fp)
-               return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+               return -ENOMEM; /* Signal QUEUE_FULL */
 
        txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
        memset(txrdy, 0, sizeof(*txrdy));
index 5f77041..9402b73 100644 (file)
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
        struct ft_lport_acl *lacl = container_of(wwn,
                                struct ft_lport_acl, fc_lport_wwn);
 
-       pr_debug("del lport %s\n",
-                       config_item_name(&wwn->wwn_group.cg_item));
+       pr_debug("del lport %s\n", lacl->name);
        mutex_lock(&ft_lport_lock);
        list_del(&lacl->list);
        mutex_unlock(&ft_lport_lock);
index e8c564a..a8078d0 100644 (file)
@@ -1458,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
        },
        { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
        },
+       /* Motorola H24 HSPA module: */
+       { USB_DEVICE(0x22b8, 0x2d91) }, /* modem                                */
+       { USB_DEVICE(0x22b8, 0x2d92) }, /* modem           + diagnostics        */
+       { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port                      */
+       { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics        */
+       { USB_DEVICE(0x22b8, 0x2d96) }, /* modem                         + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d97) }, /* modem           + diagnostics + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port               + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
        { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
        .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
                                           data interface instead of
index 717ebc9..600d823 100644 (file)
@@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
                ret = -ENODEV;
                goto err0;
        }
-       dwc->revision = reg & DWC3_GSNPSREV_MASK;
+       dwc->revision = reg;
 
        dwc3_core_soft_reset(dwc);
 
index 596a0b4..4dff83d 100644 (file)
@@ -130,9 +130,6 @@ ep_matches (
                        num_req_streams = ep_comp->bmAttributes & 0x1f;
                        if (num_req_streams > ep->max_streams)
                                return 0;
-                       /* Update the ep_comp descriptor if needed */
-                       if (num_req_streams != ep->max_streams)
-                               ep_comp->bmAttributes = ep->max_streams;
                }
 
        }
index c39d588..1a6f415 100644 (file)
@@ -2975,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
        fsg_common_put(common);
        usb_free_descriptors(fsg->function.descriptors);
        usb_free_descriptors(fsg->function.hs_descriptors);
+       usb_free_descriptors(fsg->function.ss_descriptors);
        kfree(fsg);
 }
 
index a7dc1e1..2ac4ac2 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "isp1760-hcd.h"
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -31,7 +31,7 @@
 #include <linux/pci.h>
 #endif
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 struct isp1760 {
        struct usb_hcd *hcd;
        int rst_gpio;
@@ -437,7 +437,7 @@ static int __init isp1760_init(void)
        ret = platform_driver_register(&isp1760_plat_driver);
        if (!ret)
                any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        ret = platform_driver_register(&isp1760_of_driver);
        if (!ret)
                any_ret = 0;
@@ -457,7 +457,7 @@ module_init(isp1760_init);
 static void __exit isp1760_exit(void)
 {
        platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        platform_driver_unregister(&isp1760_of_driver);
 #endif
 #ifdef CONFIG_PCI
index 60ddba8..79cb0af 100644 (file)
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
                        if (musb->double_buffer_not_ok)
                                musb_writew(epio, MUSB_TXMAXP,
                                                hw_ep->max_packet_sz_tx);
+                       else if (can_bulk_split(musb, qh->type))
+                               musb_writew(epio, MUSB_TXMAXP, packet_sz
+                                       | ((hw_ep->max_packet_sz_tx /
+                                               packet_sz) - 1) << 11);
                        else
                                musb_writew(epio, MUSB_TXMAXP,
                                                qh->maxpacket |
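
For the can_bulk_split() case the value written to TXMAXP packs the endpoint packet size into the low bits and, in the field starting at bit 11, the number of packets each FIFO load is split into minus one, as the driver computes it here. A worked example of the encoding, assuming a 512-byte endpoint on a 4096-byte TX FIFO:

#include <stdio.h>

int main(void)
{
        unsigned int packet_sz = 512;           /* endpoint wMaxPacketSize */
        unsigned int max_packet_sz_tx = 4096;   /* FIFO size for the hw_ep */

        unsigned int txmaxp = packet_sz |
                        ((max_packet_sz_tx / packet_sz) - 1) << 11;

        printf("TXMAXP = 0x%04x\n", txmaxp);    /* 0x3a00: 512 | (8 - 1) << 11 */
        return 0;
}
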
index 053f86d..ad96a38 100644 (file)
@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
                if (mod->irq_attch)
                        intenb1 |= ATTCHE;
 
-               if (mod->irq_attch)
+               if (mod->irq_dtch)
                        intenb1 |= DTCHE;
 
                if (mod->irq_sign)
index bade761..7955de5 100644 (file)
@@ -1267,6 +1267,7 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
                dev_err(dev, "Failed to create hcd\n");
                return -ENOMEM;
        }
+       hcd->has_tt = 1; /* for low/full speed */
 
        pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
        if (!pipe_info) {
index e342660..6dd6453 100644 (file)
@@ -663,7 +663,12 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x08) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) },  /* E398 3G Modem */
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) },  /* E398 3G PC UI Interface */
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) },  /* E398 3G Application Interface */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
index 8e964b9..284798a 100644 (file)
@@ -166,7 +166,7 @@ retry:
        /*
         * Get IO TLB memory from any location.
         */
-       xen_io_tlb_start = alloc_bootmem(bytes);
+       xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        if (!xen_io_tlb_start) {
                m = "Cannot allocate Xen-SWIOTLB buffer!\n";
                goto error;
@@ -179,7 +179,7 @@ retry:
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
-               free_bootmem(__pa(xen_io_tlb_start), bytes);
+               free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                m = "Failed to get contiguous memory for DMA from Xen!\n"\
                    "You either: don't have the permissions, do not have"\
                    " enough free memory under 4GB, or the hypervisor memory"\
index b3b8f2f..ede860f 100644 (file)
@@ -621,15 +621,6 @@ static struct xenbus_watch *find_watch(const char *token)
        return NULL;
 }
 
-static void xs_reset_watches(void)
-{
-       int err;
-
-       err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
-       if (err && err != -EEXIST)
-               printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
-}
-
 /* Register callback to watch this node. */
 int register_xenbus_watch(struct xenbus_watch *watch)
 {
@@ -906,9 +897,5 @@ int xs_init(void)
        if (IS_ERR(task))
                return PTR_ERR(task);
 
-       /* shutdown watches for kexec boot */
-       if (xen_hvm_domain())
-               xs_reset_watches();
-
        return 0;
 }
index e24cd89..ea78c3a 100644 (file)
@@ -12,7 +12,7 @@ here.
 This directory is _NOT_ for adding arbitrary new firmware images. The
 place to add those is the separate linux-firmware repository:
 
-    git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
+    git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
 
 That repository contains all these firmware images which have been
 extracted from older drivers, as well as various new firmware images which
@@ -22,6 +22,7 @@ been permitted to redistribute under separate cover.
 To submit firmware to that repository, please send either a git binary
 diff or preferably a git pull request to:
       David Woodhouse <dwmw2@infradead.org>
+      Ben Hutchings <ben@decadent.org.uk>
 
 Your commit should include an update to the WHENCE file clearly
 identifying the licence under which the firmware is available, and
index 7ec1409..0b39458 100644 (file)
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
        int idle;
 };
 
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
 /*
  * btrfs_start_workers uses kthread_run, which can block waiting for memory
  * for a very long time.  It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
 {
        struct worker_start *start;
        start = container_of(work, struct worker_start, work);
-       btrfs_start_workers(start->queue, 1);
+       __btrfs_start_workers(start->queue);
        kfree(start);
 }
 
-static int start_new_worker(struct btrfs_workers *queue)
-{
-       struct worker_start *start;
-       int ret;
-
-       start = kzalloc(sizeof(*start), GFP_NOFS);
-       if (!start)
-               return -ENOMEM;
-
-       start->work.func = start_new_worker_func;
-       start->queue = queue;
-       ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
-       if (ret)
-               kfree(start);
-       return ret;
-}
-
 /*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
        struct btrfs_workers *workers = worker->workers;
+       struct worker_start *start;
        unsigned long flags;
 
        rmb();
        if (!workers->atomic_start_pending)
                return;
 
+       start = kzalloc(sizeof(*start), GFP_NOFS);
+       if (!start)
+               return;
+
+       start->work.func = start_new_worker_func;
+       start->queue = workers;
+
        spin_lock_irqsave(&workers->lock, flags);
        if (!workers->atomic_start_pending)
                goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 
        workers->num_workers_starting += 1;
        spin_unlock_irqrestore(&workers->lock, flags);
-       start_new_worker(workers);
+       btrfs_queue_worker(workers->atomic_worker_start, &start->work);
        return;
 
 out:
+       kfree(start);
        spin_unlock_irqrestore(&workers->lock, flags);
 }
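
check_pending_worker_creates() now performs the GFP_NOFS allocation for the deferred worker-start item before taking workers->lock and frees it on the bail-out path, because the allocation may block while the spinlock must not be held across anything that sleeps. A user-space sketch of that ordering with a pthread mutex; the names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int start_pending = 1;

struct start_item { int dummy; };

static void maybe_queue_start(void)
{
        /* Allocate outside the lock: allocation may block, the lock may not. */
        struct start_item *start = malloc(sizeof(*start));
        if (!start)
                return;

        pthread_mutex_lock(&lock);
        if (!start_pending) {           /* re-check the condition under the lock */
                pthread_mutex_unlock(&lock);
                free(start);            /* nothing to do after all */
                return;
        }
        start_pending = 0;
        pthread_mutex_unlock(&lock);

        printf("queued deferred start %p\n", (void *)start);
        free(start);                    /* the real code hands it to a work queue */
}

int main(void)
{
        maybe_queue_start();
        maybe_queue_start();            /* second call finds nothing pending */
        return 0;
}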
 
@@ -331,7 +325,7 @@ again:
                        run_ordered_completions(worker->workers, work);
 
                        check_pending_worker_creates(worker);
-
+                       cond_resched();
                }
 
                spin_lock_irq(&worker->lock);
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
  * starts new worker threads.  This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-static int __btrfs_start_workers(struct btrfs_workers *workers,
-                                int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
 {
        struct btrfs_worker_thread *worker;
        int ret = 0;
-       int i;
 
-       for (i = 0; i < num_workers; i++) {
-               worker = kzalloc(sizeof(*worker), GFP_NOFS);
-               if (!worker) {
-                       ret = -ENOMEM;
-                       goto fail;
-               }
+       worker = kzalloc(sizeof(*worker), GFP_NOFS);
+       if (!worker) {
+               ret = -ENOMEM;
+               goto fail;
+       }
 
-               INIT_LIST_HEAD(&worker->pending);
-               INIT_LIST_HEAD(&worker->prio_pending);
-               INIT_LIST_HEAD(&worker->worker_list);
-               spin_lock_init(&worker->lock);
-
-               atomic_set(&worker->num_pending, 0);
-               atomic_set(&worker->refs, 1);
-               worker->workers = workers;
-               worker->task = kthread_run(worker_loop, worker,
-                                          "btrfs-%s-%d", workers->name,
-                                          workers->num_workers + i);
-               if (IS_ERR(worker->task)) {
-                       ret = PTR_ERR(worker->task);
-                       kfree(worker);
-                       goto fail;
-               }
-               spin_lock_irq(&workers->lock);
-               list_add_tail(&worker->worker_list, &workers->idle_list);
-               worker->idle = 1;
-               workers->num_workers++;
-               workers->num_workers_starting--;
-               WARN_ON(workers->num_workers_starting < 0);
-               spin_unlock_irq(&workers->lock);
+       INIT_LIST_HEAD(&worker->pending);
+       INIT_LIST_HEAD(&worker->prio_pending);
+       INIT_LIST_HEAD(&worker->worker_list);
+       spin_lock_init(&worker->lock);
+
+       atomic_set(&worker->num_pending, 0);
+       atomic_set(&worker->refs, 1);
+       worker->workers = workers;
+       worker->task = kthread_run(worker_loop, worker,
+                                  "btrfs-%s-%d", workers->name,
+                                  workers->num_workers + 1);
+       if (IS_ERR(worker->task)) {
+               ret = PTR_ERR(worker->task);
+               kfree(worker);
+               goto fail;
        }
+       spin_lock_irq(&workers->lock);
+       list_add_tail(&worker->worker_list, &workers->idle_list);
+       worker->idle = 1;
+       workers->num_workers++;
+       workers->num_workers_starting--;
+       WARN_ON(workers->num_workers_starting < 0);
+       spin_unlock_irq(&workers->lock);
+
        return 0;
 fail:
-       btrfs_stop_workers(workers);
+       spin_lock_irq(&workers->lock);
+       workers->num_workers_starting--;
+       spin_unlock_irq(&workers->lock);
        return ret;
 }
 
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
 {
        spin_lock_irq(&workers->lock);
-       workers->num_workers_starting += num_workers;
+       workers->num_workers_starting++;
        spin_unlock_irq(&workers->lock);
-       return __btrfs_start_workers(workers, num_workers);
+       return __btrfs_start_workers(workers);
 }
 
 /*
@@ -568,9 +561,10 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        struct list_head *fallback;
+       int ret;
 
-again:
        spin_lock_irqsave(&workers->lock, flags);
+again:
        worker = next_worker(workers);
 
        if (!worker) {
@@ -584,7 +578,10 @@ again:
                        workers->num_workers_starting++;
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
-                       __btrfs_start_workers(workers, 1);
+                       ret = __btrfs_start_workers(workers);
+                       spin_lock_irqsave(&workers->lock, flags);
+                       if (ret)
+                               goto fallback;
                        goto again;
                }
        }
@@ -665,7 +662,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 {
        struct btrfs_worker_thread *worker;
        unsigned long flags;
@@ -673,7 +670,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
        /* don't requeue something already on a list */
        if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-               goto out;
+               return;
 
        worker = find_worker(workers);
        if (workers->ordered) {
@@ -712,7 +709,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
        if (wake)
                wake_up_process(worker->task);
        spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
-       return 0;
 }
index 5077746..f34cc31 100644
@@ -109,8 +109,8 @@ struct btrfs_workers {
        char *name;
 };
 
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
 int btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
                        struct btrfs_workers *async_starter);
index 50634ab..6738503 100644
@@ -2692,7 +2692,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode, int flags);
+int btrfs_dirty_inode(struct inode *inode);
+int btrfs_update_time(struct file *file);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
index 5b16357..9c1eccc 100644
@@ -640,8 +640,8 @@ static int btrfs_delayed_inode_reserve_metadata(
         * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
         * we're accounted for.
         */
-       if (!trans->bytes_reserved &&
-           src_rsv != &root->fs_info->delalloc_block_rsv) {
+       if (!src_rsv || (!trans->bytes_reserved &&
+           src_rsv != &root->fs_info->delalloc_block_rsv)) {
                ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
                /*
                 * Since we're under a transaction reserve_metadata_bytes could
index 632f8f3..f44b392 100644
@@ -2194,19 +2194,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        fs_info->endio_meta_write_workers.idle_thresh = 2;
        fs_info->readahead_workers.idle_thresh = 2;
 
-       btrfs_start_workers(&fs_info->workers, 1);
-       btrfs_start_workers(&fs_info->generic_worker, 1);
-       btrfs_start_workers(&fs_info->submit_workers, 1);
-       btrfs_start_workers(&fs_info->delalloc_workers, 1);
-       btrfs_start_workers(&fs_info->fixup_workers, 1);
-       btrfs_start_workers(&fs_info->endio_workers, 1);
-       btrfs_start_workers(&fs_info->endio_meta_workers, 1);
-       btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
-       btrfs_start_workers(&fs_info->endio_write_workers, 1);
-       btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
-       btrfs_start_workers(&fs_info->delayed_workers, 1);
-       btrfs_start_workers(&fs_info->caching_workers, 1);
-       btrfs_start_workers(&fs_info->readahead_workers, 1);
+       /*
+        * btrfs_start_workers can really only fail because of ENOMEM so just
+        * return -ENOMEM if any of these fail.
+        */
+       ret = btrfs_start_workers(&fs_info->workers);
+       ret |= btrfs_start_workers(&fs_info->generic_worker);
+       ret |= btrfs_start_workers(&fs_info->submit_workers);
+       ret |= btrfs_start_workers(&fs_info->delalloc_workers);
+       ret |= btrfs_start_workers(&fs_info->fixup_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_write_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
+       ret |= btrfs_start_workers(&fs_info->delayed_workers);
+       ret |= btrfs_start_workers(&fs_info->caching_workers);
+       ret |= btrfs_start_workers(&fs_info->readahead_workers);
+       if (ret) {
+               ret = -ENOMEM;
+               goto fail_sb_buffer;
+       }
 
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
index f0d5718..f5fbe57 100644
@@ -2822,7 +2822,7 @@ out_free:
        btrfs_release_path(path);
 out:
        spin_lock(&block_group->lock);
-       if (!ret)
+       if (!ret && dcs == BTRFS_DC_SETUP)
                block_group->cache_generation = trans->transid;
        block_group->disk_cache_state = dcs;
        spin_unlock(&block_group->lock);
@@ -4204,12 +4204,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
        u64 to_reserve = 0;
+       u64 csum_bytes;
        unsigned nr_extents = 0;
+       int extra_reserve = 0;
        int flush = 1;
        int ret;
 
+       /* Need to be holding the i_mutex here if we aren't free space cache */
        if (btrfs_is_free_space_inode(root, inode))
                flush = 0;
+       else
+               WARN_ON(!mutex_is_locked(&inode->i_mutex));
 
        if (flush && btrfs_transaction_in_commit(root->fs_info))
                schedule_timeout(1);
@@ -4220,11 +4225,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        BTRFS_I(inode)->outstanding_extents++;
 
        if (BTRFS_I(inode)->outstanding_extents >
-           BTRFS_I(inode)->reserved_extents) {
+           BTRFS_I(inode)->reserved_extents)
                nr_extents = BTRFS_I(inode)->outstanding_extents -
                        BTRFS_I(inode)->reserved_extents;
-               BTRFS_I(inode)->reserved_extents += nr_extents;
-       }
 
        /*
         * Add an item to reserve for updating the inode when we complete the
@@ -4232,11 +4235,12 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
         */
        if (!BTRFS_I(inode)->delalloc_meta_reserved) {
                nr_extents++;
-               BTRFS_I(inode)->delalloc_meta_reserved = 1;
+               extra_reserve = 1;
        }
 
        to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
        to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
+       csum_bytes = BTRFS_I(inode)->csum_bytes;
        spin_unlock(&BTRFS_I(inode)->lock);
 
        ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -4246,22 +4250,35 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
                spin_lock(&BTRFS_I(inode)->lock);
                dropped = drop_outstanding_extent(inode);
-               to_free = calc_csum_metadata_size(inode, num_bytes, 0);
-               spin_unlock(&BTRFS_I(inode)->lock);
-               to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
                /*
-                * Somebody could have come in and twiddled with the
-                * reservation, so if we have to free more than we would have
-                * reserved from this reservation go ahead and release those
-                * bytes.
+                * If the inode's csum_bytes is the same as the original
+                * csum_bytes then we know we haven't raced with any free()ers
+                * so we can just reduce our inode's csum bytes and carry on.
+                * Otherwise we have to do the normal free thing to account for
+                * the case that the free side didn't free up its reserve
+                * because of this outstanding reservation.
                 */
-               to_free -= to_reserve;
+               if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+                       calc_csum_metadata_size(inode, num_bytes, 0);
+               else
+                       to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+               spin_unlock(&BTRFS_I(inode)->lock);
+               if (dropped)
+                       to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
                if (to_free)
                        btrfs_block_rsv_release(root, block_rsv, to_free);
                return ret;
        }
 
+       spin_lock(&BTRFS_I(inode)->lock);
+       if (extra_reserve) {
+               BTRFS_I(inode)->delalloc_meta_reserved = 1;
+               nr_extents--;
+       }
+       BTRFS_I(inode)->reserved_extents += nr_extents;
+       spin_unlock(&BTRFS_I(inode)->lock);
+
        block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
        return 0;
@@ -5107,11 +5124,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        struct btrfs_root *root = orig_root->fs_info->extent_root;
        struct btrfs_free_cluster *last_ptr = NULL;
        struct btrfs_block_group_cache *block_group = NULL;
+       struct btrfs_block_group_cache *used_block_group;
        int empty_cluster = 2 * 1024 * 1024;
        int allowed_chunk_alloc = 0;
        int done_chunk_alloc = 0;
        struct btrfs_space_info *space_info;
-       int last_ptr_loop = 0;
        int loop = 0;
        int index = 0;
        int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5173,6 +5190,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 ideal_cache:
                block_group = btrfs_lookup_block_group(root->fs_info,
                                                       search_start);
+               used_block_group = block_group;
                /*
                 * we don't want to use the block group if it doesn't match our
                 * allocation bits, or if its not cached.
@@ -5210,6 +5228,7 @@ search:
                u64 offset;
                int cached;
 
+               used_block_group = block_group;
                btrfs_get_block_group(block_group);
                search_start = block_group->key.objectid;
 
@@ -5286,71 +5305,62 @@ alloc:
                spin_unlock(&block_group->free_space_ctl->tree_lock);
 
                /*
-                * Ok we want to try and use the cluster allocator, so lets look
-                * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
-                * have tried the cluster allocator plenty of times at this
-                * point and not have found anything, so we are likely way too
-                * fragmented for the clustering stuff to find anything, so lets
-                * just skip it and let the allocator find whatever block it can
-                * find
+                * Ok we want to try and use the cluster allocator, so
+                * let's look there
                 */
-               if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+               if (last_ptr) {
                        /*
                         * the refill lock keeps out other
                         * people trying to start a new cluster
                         */
                        spin_lock(&last_ptr->refill_lock);
-                       if (!last_ptr->block_group ||
-                           last_ptr->block_group->ro ||
-                           !block_group_bits(last_ptr->block_group, data))
+                       used_block_group = last_ptr->block_group;
+                       if (used_block_group != block_group &&
+                           (!used_block_group ||
+                            used_block_group->ro ||
+                            !block_group_bits(used_block_group, data))) {
+                               used_block_group = block_group;
                                goto refill_cluster;
+                       }
+
+                       if (used_block_group != block_group)
+                               btrfs_get_block_group(used_block_group);
 
-                       offset = btrfs_alloc_from_cluster(block_group, last_ptr,
-                                                num_bytes, search_start);
+                       offset = btrfs_alloc_from_cluster(used_block_group,
+                         last_ptr, num_bytes, used_block_group->key.objectid);
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
                                goto checks;
                        }
 
-                       spin_lock(&last_ptr->lock);
-                       /*
-                        * whoops, this cluster doesn't actually point to
-                        * this block group.  Get a ref on the block
-                        * group is does point to and try again
-                        */
-                       if (!last_ptr_loop && last_ptr->block_group &&
-                           last_ptr->block_group != block_group &&
-                           index <=
-                                get_block_group_index(last_ptr->block_group)) {
-
-                               btrfs_put_block_group(block_group);
-                               block_group = last_ptr->block_group;
-                               btrfs_get_block_group(block_group);
-                               spin_unlock(&last_ptr->lock);
-                               spin_unlock(&last_ptr->refill_lock);
-
-                               last_ptr_loop = 1;
-                               search_start = block_group->key.objectid;
-                               /*
-                                * we know this block group is properly
-                                * in the list because
-                                * btrfs_remove_block_group, drops the
-                                * cluster before it removes the block
-                                * group from the list
-                                */
-                               goto have_block_group;
+                       WARN_ON(last_ptr->block_group != used_block_group);
+                       if (used_block_group != block_group) {
+                               btrfs_put_block_group(used_block_group);
+                               used_block_group = block_group;
                        }
-                       spin_unlock(&last_ptr->lock);
 refill_cluster:
+                       BUG_ON(used_block_group != block_group);
+                       /* If we are on LOOP_NO_EMPTY_SIZE, we can't
+                        * set up a new cluster, so let's just skip it
+                        * and let the allocator find whatever block
+                        * it can find.  If we reach this point, we
+                        * will have tried the cluster allocator
+                        * plenty of times and not have found
+                        * anything, so we are likely way too
+                        * fragmented for the clustering stuff to find
+                        * anything.  */
+                       if (loop >= LOOP_NO_EMPTY_SIZE) {
+                               spin_unlock(&last_ptr->refill_lock);
+                               goto unclustered_alloc;
+                       }
+
                        /*
                         * this cluster didn't work out, free it and
                         * start over
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
-                       last_ptr_loop = 0;
-
                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(trans, root,
                                               block_group, last_ptr,
@@ -5390,6 +5400,7 @@ refill_cluster:
                        goto loop;
                }
 
+unclustered_alloc:
                offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                    num_bytes, empty_size);
                /*
@@ -5416,14 +5427,14 @@ checks:
                search_start = stripe_align(root, offset);
                /* move on to the next group */
                if (search_start + num_bytes >= search_end) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
                /* move on to the next group */
                if (search_start + num_bytes >
-                   block_group->key.objectid + block_group->key.offset) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                   used_block_group->key.objectid + used_block_group->key.offset) {
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
@@ -5431,14 +5442,14 @@ checks:
                ins->offset = num_bytes;
 
                if (offset < search_start)
-                       btrfs_add_free_space(block_group, offset,
+                       btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
 
-               ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+               ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
                                                  alloc_type);
                if (ret == -EAGAIN) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
@@ -5447,15 +5458,19 @@ checks:
                ins->offset = num_bytes;
 
                if (offset < search_start)
-                       btrfs_add_free_space(block_group, offset,
+                       btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
+               if (used_block_group != block_group)
+                       btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
                break;
 loop:
                failed_cluster_refill = false;
                failed_alloc = false;
                BUG_ON(index != get_block_group_index(block_group));
+               if (used_block_group != block_group)
+                       btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
        }
        up_read(&space_info->groups_sem);
index be1bf62..49f3c9d 100644
@@ -935,8 +935,10 @@ again:
        node = tree_search(tree, start);
        if (!node) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = insert_state(tree, prealloc, start, end, &bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
         */
        if (state->start < start) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
                        this_end = last_start - 1;
 
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
 
                /*
                 * Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
         */
        if (state->start <= end && state->end > end) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
 
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
index dafdfa0..97fbe93 100644
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
+       nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+       nrptrs = max(nrptrs, 8);
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
@@ -1387,7 +1389,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                goto out;
        }
 
-       file_update_time(file);
+       err = btrfs_update_time(file);
+       if (err) {
+               mutex_unlock(&inode->i_mutex);
+               goto out;
+       }
        BTRFS_I(inode)->sequence++;
 
        start_pos = round_down(pos, root->sectorsize);
index 2c984f7..fd1a06d 100644
@@ -38,6 +38,7 @@
 #include <linux/falloc.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <linux/mount.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -2031,7 +2032,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
        /* insert an orphan item to track this unlinked/truncated file */
        if (insert >= 1) {
                ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
-               BUG_ON(ret);
+               BUG_ON(ret && ret != -EEXIST);
        }
 
        /* insert an orphan item to track subvolume contains orphan files */
@@ -2158,6 +2159,38 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                if (ret && ret != -ESTALE)
                        goto out;
 
+               if (ret == -ESTALE && root == root->fs_info->tree_root) {
+                       struct btrfs_root *dead_root;
+                       struct btrfs_fs_info *fs_info = root->fs_info;
+                       int is_dead_root = 0;
+
+                       /*
+                        * this is an orphan in the tree root. Currently these
+                        * could come from 2 sources:
+                        *  a) a snapshot deletion in progress
+                        *  b) a free space cache inode
+                        * We need to distinguish those two, as the snapshot
+                        * orphan must not get deleted.
+                        * find_dead_roots already ran before us, so if this
+                        * is a snapshot deletion, we should find the root
+                        * in the dead_roots list
+                        */
+                       spin_lock(&fs_info->trans_lock);
+                       list_for_each_entry(dead_root, &fs_info->dead_roots,
+                                           root_list) {
+                               if (dead_root->root_key.objectid ==
+                                   found_key.objectid) {
+                                       is_dead_root = 1;
+                                       break;
+                               }
+                       }
+                       spin_unlock(&fs_info->trans_lock);
+                       if (is_dead_root) {
+                               /* prevent this orphan from being found again */
+                               key.offset = found_key.objectid - 1;
+                               continue;
+                       }
+               }
                /*
                 * Inode is already gone but the orphan item is still there,
                 * kill the orphan item.
@@ -2191,7 +2224,14 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                continue;
                        }
                        nr_truncate++;
+                       /*
+                        * Need to hold the i_mutex for reservation purposes, not
+                        * a huge deal here but I have a WARN_ON in
+                        * btrfs_delalloc_reserve_space to catch offenders.
+                        */
+                       mutex_lock(&inode->i_mutex);
                        ret = btrfs_truncate(inode);
+                       mutex_unlock(&inode->i_mutex);
                } else {
                        nr_unlink++;
                }
@@ -3327,7 +3367,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        u64 hint_byte = 0;
                        hole_size = last_byte - cur_offset;
 
-                       trans = btrfs_start_transaction(root, 2);
+                       trans = btrfs_start_transaction(root, 3);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                break;
@@ -3337,6 +3377,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                                 cur_offset + hole_size,
                                                 &hint_byte, 1);
                        if (err) {
+                               btrfs_update_inode(trans, root, inode);
                                btrfs_end_transaction(trans, root);
                                break;
                        }
@@ -3346,6 +3387,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                        0, hole_size, 0, hole_size,
                                        0, 0, 0);
                        if (err) {
+                               btrfs_update_inode(trans, root, inode);
                                btrfs_end_transaction(trans, root);
                                break;
                        }
@@ -3353,6 +3395,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        btrfs_drop_extent_cache(inode, hole_start,
                                        last_byte - 1, 0);
 
+                       btrfs_update_inode(trans, root, inode);
                        btrfs_end_transaction(trans, root);
                }
                free_extent_map(em);
@@ -3370,6 +3413,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 
 static int btrfs_setsize(struct inode *inode, loff_t newsize)
 {
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_trans_handle *trans;
        loff_t oldsize = i_size_read(inode);
        int ret;
 
@@ -3377,16 +3422,19 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
                return 0;
 
        if (newsize > oldsize) {
-               i_size_write(inode, newsize);
-               btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
                truncate_pagecache(inode, oldsize, newsize);
                ret = btrfs_cont_expand(inode, oldsize, newsize);
-               if (ret) {
-                       btrfs_setsize(inode, oldsize);
+               if (ret)
                        return ret;
-               }
 
-               mark_inode_dirty(inode);
+               trans = btrfs_start_transaction(root, 1);
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
+
+               i_size_write(inode, newsize);
+               btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+               ret = btrfs_update_inode(trans, root, inode);
+               btrfs_end_transaction_throttle(trans, root);
        } else {
 
                /*
@@ -3426,9 +3474,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
 
        if (attr->ia_valid) {
                setattr_copy(inode, attr);
-               mark_inode_dirty(inode);
+               err = btrfs_dirty_inode(inode);
 
-               if (attr->ia_valid & ATTR_MODE)
+               if (!err && attr->ia_valid & ATTR_MODE)
                        err = btrfs_acl_chmod(inode);
        }
 
@@ -4204,42 +4252,80 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
  * FIXME, needs more benchmarking...there are no reasons other than performance
  * to keep or drop this code.
  */
-void btrfs_dirty_inode(struct inode *inode, int flags)
+int btrfs_dirty_inode(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        int ret;
 
        if (BTRFS_I(inode)->dummy_inode)
-               return;
+               return 0;
 
        trans = btrfs_join_transaction(root);
-       BUG_ON(IS_ERR(trans));
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
 
        ret = btrfs_update_inode(trans, root, inode);
        if (ret && ret == -ENOSPC) {
                /* whoops, lets try again with the full transaction */
                btrfs_end_transaction(trans, root);
                trans = btrfs_start_transaction(root, 1);
-               if (IS_ERR(trans)) {
-                       printk_ratelimited(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %llu error %ld\n",
-                                      (unsigned long long)btrfs_ino(inode),
-                                      PTR_ERR(trans));
-                       return;
-               }
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
 
                ret = btrfs_update_inode(trans, root, inode);
-               if (ret) {
-                       printk_ratelimited(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %llu error %d\n",
-                                      (unsigned long long)btrfs_ino(inode),
-                                      ret);
-               }
        }
        btrfs_end_transaction(trans, root);
        if (BTRFS_I(inode)->delayed_node)
                btrfs_balance_delayed_items(root);
+
+       return ret;
+}
+
+/*
+ * This is a copy of file_update_time.  We need this so we can return error on
+ * ENOSPC for updating the inode in the case of file write and mmap writes.
+ */
+int btrfs_update_time(struct file *file)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct timespec now;
+       int ret;
+       enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+
+       /* First try to exhaust all avenues to not sync */
+       if (IS_NOCMTIME(inode))
+               return 0;
+
+       now = current_fs_time(inode->i_sb);
+       if (!timespec_equal(&inode->i_mtime, &now))
+               sync_it = S_MTIME;
+
+       if (!timespec_equal(&inode->i_ctime, &now))
+               sync_it |= S_CTIME;
+
+       if (IS_I_VERSION(inode))
+               sync_it |= S_VERSION;
+
+       if (!sync_it)
+               return 0;
+
+       /* Finally allowed to write? Takes lock. */
+       if (mnt_want_write_file(file))
+               return 0;
+
+       /* Only change inode inside the lock region */
+       if (sync_it & S_VERSION)
+               inode_inc_iversion(inode);
+       if (sync_it & S_CTIME)
+               inode->i_ctime = now;
+       if (sync_it & S_MTIME)
+               inode->i_mtime = now;
+       ret = btrfs_dirty_inode(inode);
+       if (!ret)
+               mark_inode_dirty_sync(inode);
+       mnt_drop_write(file->f_path.mnt);
+       return ret;
 }
 
 /*
@@ -4504,10 +4590,6 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
        int err = btrfs_add_link(trans, dir, inode,
                                 dentry->d_name.name, dentry->d_name.len,
                                 backref, index);
-       if (!err) {
-               d_instantiate(dentry, inode);
-               return 0;
-       }
        if (err > 0)
                err = -EEXIST;
        return err;
@@ -4555,13 +4637,21 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+        * If the active LSM wants to access the inode during
+        * d_instantiate it needs these. Smack checks to see
+        * if the filesystem supports xattrs by looking at the
+        * ops vector.
+        */
+
+       inode->i_op = &btrfs_special_inode_operations;
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
-               inode->i_op = &btrfs_special_inode_operations;
                init_special_inode(inode, inode->i_mode, rdev);
                btrfs_update_inode(trans, root, inode);
+               d_instantiate(dentry, inode);
        }
 out_unlock:
        nr = trans->blocks_used;
@@ -4613,15 +4703,23 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+        * If the active LSM wants to access the inode during
+        * d_instantiate it needs these. Smack checks to see
+        * if the filesystem supports xattrs by looking at the
+        * ops vector.
+        */
+       inode->i_fop = &btrfs_file_operations;
+       inode->i_op = &btrfs_file_inode_operations;
+
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               inode->i_fop = &btrfs_file_operations;
-               inode->i_op = &btrfs_file_inode_operations;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+               d_instantiate(dentry, inode);
        }
 out_unlock:
        nr = trans->blocks_used;
@@ -4679,6 +4777,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                struct dentry *parent = dentry->d_parent;
                err = btrfs_update_inode(trans, root, inode);
                BUG_ON(err);
+               d_instantiate(dentry, inode);
                btrfs_log_new_name(trans, inode, NULL, parent);
        }
 
@@ -6303,7 +6402,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        u64 page_start;
        u64 page_end;
 
+       /* Need this to keep space reservations serialized */
+       mutex_lock(&inode->i_mutex);
        ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
+       mutex_unlock(&inode->i_mutex);
+       if (!ret)
+               ret = btrfs_update_time(vma->vm_file);
        if (ret) {
                if (ret == -ENOMEM)
                        ret = VM_FAULT_OOM;
@@ -6515,8 +6619,9 @@ static int btrfs_truncate(struct inode *inode)
                        /* Just need the 1 for updating the inode */
                        trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
-                               err = PTR_ERR(trans);
-                               goto out;
+                               ret = err = PTR_ERR(trans);
+                               trans = NULL;
+                               break;
                        }
                }
 
@@ -7076,14 +7181,21 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+        * If the active LSM wants to access the inode during
+        * d_instantiate it needs these. Smack checks to see
+        * if the filesystem supports xattrs by looking at the
+        * ops vector.
+        */
+       inode->i_fop = &btrfs_file_operations;
+       inode->i_op = &btrfs_file_inode_operations;
+
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               inode->i_fop = &btrfs_file_operations;
-               inode->i_op = &btrfs_file_inode_operations;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
        }
        if (drop_inode)
@@ -7132,6 +7244,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                drop_inode = 1;
 
 out_unlock:
+       if (!err)
+               d_instantiate(dentry, inode);
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
        if (drop_inode) {
@@ -7353,6 +7467,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
        .getattr        = btrfs_getattr,
+       .setattr        = btrfs_setattr,
        .permission     = btrfs_permission,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
index 72d4616..c04f02c 100644
@@ -252,11 +252,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
        trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
 
+       btrfs_update_iflags(inode);
+       inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
 
-       btrfs_update_iflags(inode);
-       inode->i_ctime = CURRENT_TIME;
        btrfs_end_transaction(trans, root);
 
        mnt_drop_write(file->f_path.mnt);
@@ -858,8 +858,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
                return 0;
        file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
 
+       mutex_lock(&inode->i_mutex);
        ret = btrfs_delalloc_reserve_space(inode,
                                           num_pages << PAGE_CACHE_SHIFT);
+       mutex_unlock(&inode->i_mutex);
        if (ret)
                return ret;
 again:
index dff29d5..cfb5543 100644
@@ -2947,7 +2947,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
        index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
        last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
        while (index <= last_index) {
+               mutex_lock(&inode->i_mutex);
                ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+               mutex_unlock(&inode->i_mutex);
                if (ret)
                        goto out;
 
index c27bcb6..ddf2c90 100644
@@ -1535,18 +1535,22 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
 static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
+       int ret = 0;
 
        mutex_lock(&fs_info->scrub_lock);
        if (fs_info->scrub_workers_refcnt == 0) {
                btrfs_init_workers(&fs_info->scrub_workers, "scrub",
                           fs_info->thread_pool_size, &fs_info->generic_worker);
                fs_info->scrub_workers.idle_thresh = 4;
-               btrfs_start_workers(&fs_info->scrub_workers, 1);
+               ret = btrfs_start_workers(&fs_info->scrub_workers);
+               if (ret)
+                       goto out;
        }
        ++fs_info->scrub_workers_refcnt;
+out:
        mutex_unlock(&fs_info->scrub_lock);
 
-       return 0;
+       return ret;
 }
 
 static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
index e28ad4b..200f63b 100644
@@ -41,6 +41,7 @@
 #include <linux/slab.h>
 #include <linux/cleancache.h>
 #include <linux/mnt_namespace.h>
+#include <linux/ratelimit.h>
 #include "compat.h"
 #include "delayed-inode.h"
 #include "ctree.h"
@@ -1053,7 +1054,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
        u64 avail_space;
        u64 used_space;
        u64 min_stripe_size;
-       int min_stripes = 1;
+       int min_stripes = 1, num_stripes = 1;
        int i = 0, nr_devices;
        int ret;
 
@@ -1067,12 +1068,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 
        /* calc min stripe number for data space allocation */
        type = btrfs_get_alloc_profile(root, 1);
-       if (type & BTRFS_BLOCK_GROUP_RAID0)
+       if (type & BTRFS_BLOCK_GROUP_RAID0) {
                min_stripes = 2;
-       else if (type & BTRFS_BLOCK_GROUP_RAID1)
+               num_stripes = nr_devices;
+       } else if (type & BTRFS_BLOCK_GROUP_RAID1) {
                min_stripes = 2;
-       else if (type & BTRFS_BLOCK_GROUP_RAID10)
+               num_stripes = 2;
+       } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
                min_stripes = 4;
+               num_stripes = 4;
+       }
 
        if (type & BTRFS_BLOCK_GROUP_DUP)
                min_stripe_size = 2 * BTRFS_STRIPE_LEN;
@@ -1141,13 +1146,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
        i = nr_devices - 1;
        avail_space = 0;
        while (nr_devices >= min_stripes) {
+               if (num_stripes > nr_devices)
+                       num_stripes = nr_devices;
+
                if (devices_info[i].max_avail >= min_stripe_size) {
                        int j;
                        u64 alloc_size;
 
-                       avail_space += devices_info[i].max_avail * min_stripes;
+                       avail_space += devices_info[i].max_avail * num_stripes;
                        alloc_size = devices_info[i].max_avail;
-                       for (j = i + 1 - min_stripes; j <= i; j++)
+                       for (j = i + 1 - num_stripes; j <= i; j++)
                                devices_info[j].max_avail -= alloc_size;
                }
                i--;
@@ -1264,6 +1272,16 @@ static int btrfs_unfreeze(struct super_block *sb)
        return 0;
 }
 
+static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
+{
+       int ret;
+
+       ret = btrfs_dirty_inode(inode);
+       if (ret)
+               printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
+                                  "error %d\n", btrfs_ino(inode), ret);
+}
+
 static const struct super_operations btrfs_super_ops = {
        .drop_inode     = btrfs_drop_inode,
        .evict_inode    = btrfs_evict_inode,
@@ -1271,7 +1289,7 @@ static const struct super_operations btrfs_super_ops = {
        .sync_fs        = btrfs_sync_fs,
        .show_options   = btrfs_show_options,
        .write_inode    = btrfs_write_inode,
-       .dirty_inode    = btrfs_dirty_inode,
+       .dirty_inode    = btrfs_fs_dirty_inode,
        .alloc_inode    = btrfs_alloc_inode,
        .destroy_inode  = btrfs_destroy_inode,
        .statfs         = btrfs_statfs,
index c37433d..f4b839f 100644
@@ -295,6 +295,12 @@ loop_lock:
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
+               /* unplug every 64 requests just for good measure */
+               if (batch_run % 64 == 0) {
+                       blk_finish_plug(&plug);
+                       blk_start_plug(&plug);
+                       sync_pending = 0;
+               }
        }
 
        cond_resched();
@@ -1611,7 +1617,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
                return -EINVAL;
 
-       bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+       bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
                                  root->fs_info->bdev_holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
@@ -3258,7 +3264,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
                 */
                if (atomic_read(&bbio->error) > bbio->max_errors) {
                        err = -EIO;
-               } else if (err) {
+               } else {
                        /*
                         * this bio is actually up to date, we didn't
                         * go over the max number of errors
index 4144caf..173b1d2 100644
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
        snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
 
        /* dirty the head */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_head_snapc == NULL)
                ci->i_head_snapc = ceph_get_snap_context(snapc);
        ++ci->i_wrbuffer_ref_head;
@@ -100,7 +100,7 @@ static int ceph_set_page_dirty(struct page *page)
             ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
             ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
             snapc, snapc->seq, snapc->num_snaps);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        /* now adjust page */
        spin_lock_irq(&mapping->tree_lock);
@@ -391,7 +391,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
        struct ceph_snap_context *snapc = NULL;
        struct ceph_cap_snap *capsnap = NULL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
                     capsnap->context, capsnap->dirty_pages);
@@ -407,7 +407,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return snapc;
 }
 
index 0f327c6..8b53193 100644
@@ -309,7 +309,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
 /*
  * Find ceph_cap for given mds, if any.
  *
- * Called with i_lock held.
+ * Called with i_ceph_lock held.
  */
 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 {
@@ -332,9 +332,9 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 {
        struct ceph_cap *cap;
 
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return cap;
 }
 
@@ -361,15 +361,16 @@ static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
 
 int ceph_get_cap_mds(struct inode *inode)
 {
+       struct ceph_inode_info *ci = ceph_inode(inode);
        int mds;
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        mds = __ceph_get_cap_mds(ceph_inode(inode));
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return mds;
 }
 
 /*
- * Called under i_lock.
+ * Called under i_ceph_lock.
  */
 static void __insert_cap_node(struct ceph_inode_info *ci,
                              struct ceph_cap *new)
@@ -415,7 +416,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
  *
  * If I_FLUSH is set, leave the inode at the front of the list.
  *
- * Caller holds i_lock
+ * Caller holds i_ceph_lock
  *    -> we take mdsc->cap_delay_lock
  */
 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
@@ -457,7 +458,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
 /*
  * Cancel delayed work on cap.
  *
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
  */
 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
@@ -532,14 +533,14 @@ int ceph_add_cap(struct inode *inode,
                wanted |= ceph_caps_for_mode(fmode);
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                if (new_cap) {
                        cap = new_cap;
                        new_cap = NULL;
                } else {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        new_cap = get_cap(mdsc, caps_reservation);
                        if (new_cap == NULL)
                                return -ENOMEM;
@@ -625,7 +626,7 @@ retry:
 
        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        wake_up_all(&ci->i_cap_wq);
        return 0;
 }
@@ -792,7 +793,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
        struct rb_node *p;
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (__cap_is_valid(cap) &&
@@ -801,7 +802,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
                        break;
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("ceph_caps_revoking %p %s = %d\n", inode,
             ceph_cap_string(mask), ret);
        return ret;
@@ -855,7 +856,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
 }
 
 /*
- * called under i_lock
+ * called under i_ceph_lock
  */
 static int __ceph_is_any_caps(struct ceph_inode_info *ci)
 {
@@ -865,7 +866,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
 /*
  * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
  *
- * caller should hold i_lock.
+ * caller should hold i_ceph_lock.
  * caller will not hold session s_mutex if called from destroy_inode.
  */
 void __ceph_remove_cap(struct ceph_cap *cap)
@@ -1028,7 +1029,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
 
 /*
  * Queue cap releases when an inode is dropped from our cache.  Since
- * inode is about to be destroyed, there is no need for i_lock.
+ * inode is about to be destroyed, there is no need for i_ceph_lock.
  */
 void ceph_queue_caps_release(struct inode *inode)
 {
@@ -1049,7 +1050,7 @@ void ceph_queue_caps_release(struct inode *inode)
 
 /*
  * Send a cap msg on the given inode.  Update our caps state, then
- * drop i_lock and send the message.
+ * drop i_ceph_lock and send the message.
  *
  * Make note of max_size reported/requested from mds, revoked caps
  * that have now been implemented.
@@ -1061,13 +1062,13 @@ void ceph_queue_caps_release(struct inode *inode)
  * Return non-zero if delayed release, or we experienced an error
  * such that the caller should requeue + retry later.
  *
- * called with i_lock, then drops it.
+ * called with i_ceph_lock, then drops it.
  * caller should hold snap_rwsem (read), s_mutex.
  */
 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                      int op, int used, int want, int retain, int flushing,
                      unsigned *pflush_tid)
-       __releases(cap->ci->vfs_inode->i_lock)
+       __releases(cap->ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->vfs_inode;
@@ -1170,7 +1171,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                xattr_version = ci->i_xattrs.version;
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
                op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
@@ -1198,13 +1199,13 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
  * Unless @again is true, skip cap_snaps that were already sent to
  * the MDS (i.e., during this session).
  *
- * Called under i_lock.  Takes s_mutex as needed.
+ * Called under i_ceph_lock.  Takes s_mutex as needed.
  */
 void __ceph_flush_snaps(struct ceph_inode_info *ci,
                        struct ceph_mds_session **psession,
                        int again)
-               __releases(ci->vfs_inode->i_lock)
-               __acquires(ci->vfs_inode->i_lock)
+               __releases(ci->i_ceph_lock)
+               __acquires(ci->i_ceph_lock)
 {
        struct inode *inode = &ci->vfs_inode;
        int mds;
@@ -1261,7 +1262,7 @@ retry:
                        session = NULL;
                }
                if (!session) {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        mutex_lock(&mdsc->mutex);
                        session = __ceph_lookup_mds_session(mdsc, mds);
                        mutex_unlock(&mdsc->mutex);
@@ -1275,7 +1276,7 @@ retry:
                         * deletion or migration.  retry, and we'll
                         * get a better @mds value next time.
                         */
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        goto retry;
                }
 
@@ -1285,7 +1286,7 @@ retry:
                        list_del_init(&capsnap->flushing_item);
                list_add_tail(&capsnap->flushing_item,
                              &session->s_cap_snaps_flushing);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
                     inode, capsnap, capsnap->follows, capsnap->flush_tid);
@@ -1302,7 +1303,7 @@ retry:
                next_follows = capsnap->follows + 1;
                ceph_put_cap_snap(capsnap);
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                goto retry;
        }
 
@@ -1322,11 +1323,9 @@ out:
 
 static void ceph_flush_snaps(struct ceph_inode_info *ci)
 {
-       struct inode *inode = &ci->vfs_inode;
-
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __ceph_flush_snaps(ci, NULL, 0);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -1373,7 +1372,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
  * Add dirty inode to the flushing list.  Assigned a seq number so we
  * can wait for caps to flush without starving.
  *
- * Called under i_lock.
+ * Called under i_ceph_lock.
  */
 static int __mark_caps_flushing(struct inode *inode,
                                 struct ceph_mds_session *session)
@@ -1421,9 +1420,9 @@ static int try_nonblocking_invalidate(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        u32 invalidating_gen = ci->i_rdcache_gen;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        invalidate_mapping_pages(&inode->i_data, 0, -1);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (inode->i_data.nrpages == 0 &&
            invalidating_gen == ci->i_rdcache_gen) {
@@ -1470,7 +1469,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
        if (mdsc->stopping)
                is_delayed = 1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                flags |= CHECK_CAPS_FLUSH;
@@ -1480,7 +1479,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                __ceph_flush_snaps(ci, &session, 0);
        goto retry_locked;
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 retry_locked:
        file_wanted = __ceph_caps_file_wanted(ci);
        used = __ceph_caps_used(ci);
@@ -1634,7 +1633,7 @@ ack:
                        if (mutex_trylock(&session->s_mutex) == 0) {
                                dout("inverting session/ino locks on %p\n",
                                     session);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                                if (took_snap_rwsem) {
                                        up_read(&mdsc->snap_rwsem);
                                        took_snap_rwsem = 0;
@@ -1648,7 +1647,7 @@ ack:
                        if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
                                dout("inverting snap/in locks on %p\n",
                                     inode);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                                down_read(&mdsc->snap_rwsem);
                                took_snap_rwsem = 1;
                                goto retry;
@@ -1664,10 +1663,10 @@ ack:
                mds = cap->mds;  /* remember mds, so we don't repeat */
                sent++;
 
-               /* __send_cap drops i_lock */
+               /* __send_cap drops i_ceph_lock */
                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
                                      retain, flushing, NULL);
-               goto retry; /* retake i_lock and restart our cap scan. */
+               goto retry; /* retake i_ceph_lock and restart our cap scan. */
        }
 
        /*
@@ -1681,7 +1680,7 @@ ack:
        else if (!is_delayed || force_requeue)
                __cap_delay_requeue(mdsc, ci);
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (queue_invalidate)
                ceph_queue_invalidate(inode);
@@ -1704,7 +1703,7 @@ static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
        int flushing = 0;
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
                dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
                goto out;
@@ -1716,7 +1715,7 @@ retry:
                int delayed;
 
                if (!session) {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        session = cap->session;
                        mutex_lock(&session->s_mutex);
                        goto retry;
@@ -1727,18 +1726,18 @@ retry:
 
                flushing = __mark_caps_flushing(inode, session);
 
-               /* __send_cap drops i_lock */
+               /* __send_cap drops i_ceph_lock */
                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
                                     cap->issued | cap->implemented, flushing,
                                     flush_tid);
                if (!delayed)
                        goto out_unlocked;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __cap_delay_requeue(mdsc, ci);
        }
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 out_unlocked:
        if (session && unlock_session)
                mutex_unlock(&session->s_mutex);
@@ -1753,7 +1752,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int i, ret = 1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        for (i = 0; i < CEPH_CAP_BITS; i++)
                if ((ci->i_flushing_caps & (1 << i)) &&
                    ci->i_cap_flush_tid[i] <= tid) {
@@ -1761,7 +1760,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
                        ret = 0;
                        break;
                }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -1868,10 +1867,10 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (__ceph_caps_dirty(ci))
                        __cap_delay_requeue_front(mdsc, ci);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        return err;
 }
@@ -1894,7 +1893,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                struct inode *inode = &ci->vfs_inode;
                struct ceph_cap *cap;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
@@ -1904,7 +1903,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
                }
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 }
 
@@ -1921,7 +1920,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                struct ceph_cap *cap;
                int delayed = 0;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p %s\n", inode,
@@ -1932,14 +1931,14 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                                             cap->issued | cap->implemented,
                                             ci->i_flushing_caps, NULL);
                        if (delayed) {
-                               spin_lock(&inode->i_lock);
+                               spin_lock(&ci->i_ceph_lock);
                                __cap_delay_requeue(mdsc, ci);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                        }
                } else {
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        }
 }
@@ -1952,7 +1951,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
        struct ceph_cap *cap;
        int delayed = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = ci->i_auth_cap;
        dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
             ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
@@ -1964,12 +1963,12 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
                                     cap->issued | cap->implemented,
                                     ci->i_flushing_caps, NULL);
                if (delayed) {
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 }
 
@@ -1978,7 +1977,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
  * Take references to capabilities we hold, so that we don't release
  * them to the MDS prematurely.
  *
- * Protected by i_lock.
+ * Protected by i_ceph_lock.
  */
 static void __take_cap_refs(struct ceph_inode_info *ci, int got)
 {
@@ -2016,7 +2015,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
 
        dout("get_cap_refs %p need %s want %s\n", inode,
             ceph_cap_string(need), ceph_cap_string(want));
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /* make sure file is actually open */
        file_wanted = __ceph_caps_file_wanted(ci);
@@ -2077,7 +2076,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
                     ceph_cap_string(have), ceph_cap_string(need));
        }
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("get_cap_refs %p ret %d got %s\n", inode,
             ret, ceph_cap_string(*got));
        return ret;
@@ -2094,7 +2093,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
        int check = 0;
 
        /* do we need to explicitly request a larger max_size? */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if ((endoff >= ci->i_max_size ||
             endoff > (inode->i_size << 1)) &&
            endoff > ci->i_wanted_max_size) {
@@ -2103,7 +2102,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
                ci->i_wanted_max_size = endoff;
                check = 1;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (check)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 }
@@ -2140,9 +2139,9 @@ retry:
  */
 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
 {
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __take_cap_refs(ci, caps);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2160,7 +2159,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
        int last = 0, put = 0, flushsnaps = 0, wake = 0;
        struct ceph_cap_snap *capsnap;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (had & CEPH_CAP_PIN)
                --ci->i_pin_ref;
        if (had & CEPH_CAP_FILE_RD)
@@ -2193,7 +2192,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
                                }
                        }
                }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
             last ? " last" : "", put ? " put" : "");
@@ -2225,7 +2224,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
        int found = 0;
        struct ceph_cap_snap *capsnap = NULL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_wrbuffer_ref -= nr;
        last = !ci->i_wrbuffer_ref;
 
@@ -2274,7 +2273,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                }
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (last) {
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -2291,7 +2290,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
  * actually be a revocation if it specifies a smaller cap set.)
  *
- * caller holds s_mutex and i_lock, we drop both.
+ * caller holds s_mutex and i_ceph_lock, we drop both.
  *
  * return value:
  *  0 - ok
@@ -2302,7 +2301,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
                             struct ceph_mds_session *session,
                             struct ceph_cap *cap,
                             struct ceph_buffer *xattr_buf)
-               __releases(inode->i_lock)
+               __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
@@ -2453,7 +2452,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
        }
        BUG_ON(cap->issued & ~cap->implemented);
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (writeback)
                /*
                 * queue inode for writeback: we can't actually call
@@ -2483,7 +2482,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                                 struct ceph_mds_caps *m,
                                 struct ceph_mds_session *session,
                                 struct ceph_cap *cap)
-       __releases(inode->i_lock)
+       __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -2539,7 +2538,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
        wake_up_all(&ci->i_cap_wq);
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (drop)
                iput(inode);
 }
@@ -2562,7 +2561,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
        dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
             inode, ci, session->s_mds, follows);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                if (capsnap->follows == follows) {
                        if (capsnap->flush_tid != flush_tid) {
@@ -2585,7 +2584,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
                             capsnap, capsnap->follows);
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (drop)
                iput(inode);
 }
@@ -2598,7 +2597,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
 static void handle_cap_trunc(struct inode *inode,
                             struct ceph_mds_caps *trunc,
                             struct ceph_mds_session *session)
-       __releases(inode->i_lock)
+       __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
@@ -2617,7 +2616,7 @@ static void handle_cap_trunc(struct inode *inode,
             inode, mds, seq, truncate_size, truncate_seq);
        queue_trunc = ceph_fill_file_size(inode, issued,
                                          truncate_seq, truncate_size, size);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (queue_trunc)
                ceph_queue_vmtruncate(inode);
@@ -2646,7 +2645,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
        dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
             inode, ci, mds, mseq);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /* make sure we haven't seen a higher mseq */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
@@ -2690,7 +2689,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
        }
        /* else, we already released it */
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2745,9 +2744,9 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
        up_read(&mdsc->snap_rwsem);
 
        /* make sure we re-request max_size, if necessary */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_requested_max_size = 0;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2762,6 +2761,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        struct ceph_mds_client *mdsc = session->s_mdsc;
        struct super_block *sb = mdsc->fsc->sb;
        struct inode *inode;
+       struct ceph_inode_info *ci;
        struct ceph_cap *cap;
        struct ceph_mds_caps *h;
        int mds = session->s_mds;
@@ -2815,6 +2815,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 
        /* lookup ino */
        inode = ceph_find_inode(sb, vino);
+       ci = ceph_inode(inode);
        dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
             vino.snap, inode);
        if (!inode) {
@@ -2844,16 +2845,16 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        }
 
        /* the rest require a cap */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ceph_inode(inode), mds);
        if (!cap) {
                dout(" no cap on %p ino %llx.%llx from mds%d\n",
                     inode, ceph_ino(inode), ceph_snap(inode), mds);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto flush_cap_releases;
        }
 
-       /* note that each of these drops i_lock for us */
+       /* note that each of these drops i_ceph_lock for us */
        switch (op) {
        case CEPH_CAP_OP_REVOKE:
        case CEPH_CAP_OP_GRANT:
@@ -2869,7 +2870,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                break;
 
        default:
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
                       ceph_cap_op_name(op));
        }
@@ -2962,13 +2963,13 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
        struct inode *inode = &ci->vfs_inode;
        int last = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
             ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
        BUG_ON(ci->i_nr_by_mode[fmode] == 0);
        if (--ci->i_nr_by_mode[fmode] == 0)
                last++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (last && ci->i_vino.snap == CEPH_NOSNAP)
                ceph_check_caps(ci, 0, NULL);
@@ -2991,7 +2992,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
        int used, dirty;
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
 
@@ -3046,7 +3047,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                             inode, cap, ceph_cap_string(cap->issued));
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -3061,7 +3062,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
 
        /*
         * force an record for the directory caps if we have a dentry lease.
-        * this is racy (can't take i_lock and d_lock together), but it
+        * this is racy (can't take i_ceph_lock and d_lock together), but it
         * doesn't have to be perfect; the mds will revoke anything we don't
         * release.
         */
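Taken together, the caps.c hunks apply one mechanical rule: any code that inspects or mutates ceph cap state now brackets the __ceph_* helpers with the per-inode ci->i_ceph_lock instead of the VFS inode->i_lock. A minimal sketch of the new pattern (the wrapper name below is hypothetical; ceph_inode() and __ceph_caps_dirty() are taken from the hunks above, which expect the lock to be held):

	static int example_caps_dirty(struct inode *inode)
	{
		struct ceph_inode_info *ci = ceph_inode(inode);
		int dirty;

		spin_lock(&ci->i_ceph_lock);	/* was spin_lock(&inode->i_lock) */
		dirty = __ceph_caps_dirty(ci);	/* helper still assumes the lock is held */
		spin_unlock(&ci->i_ceph_lock);
		return dirty;
	}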
index bca3948..3eeb976 100644 (file)
@@ -281,18 +281,18 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
        }
 
        /* can we use the dcache? */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            ceph_dir_test_complete(inode) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN)
                        return err;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
@@ -428,12 +428,12 @@ more:
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                ceph_dir_set_complete(inode);
                ci->i_max_offset = filp->f_pos;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
@@ -607,7 +607,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);
 
-               spin_lock(&dir->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
@@ -615,13 +615,13 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                    !is_root_ceph_dentry(dir, dentry) &&
                    ceph_dir_test_complete(dir) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
-                       spin_unlock(&dir->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
-               spin_unlock(&dir->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 
        op = ceph_snap(dir) == CEPH_SNAPDIR ?
@@ -841,12 +841,12 @@ static int drop_caps_for_unlink(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return drop;
 }
 
@@ -1015,10 +1015,10 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;
 
-       spin_lock(&dir->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
-       spin_unlock(&dir->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
index ce549d3..ed72428 100644 (file)
@@ -147,9 +147,9 @@ int ceph_open(struct inode *inode, struct file *file)
 
        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }
 
@@ -158,7 +158,7 @@ int ceph_open(struct inode *inode, struct file *file)
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
@@ -168,7 +168,7 @@ int ceph_open(struct inode *inode, struct file *file)
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
@@ -180,10 +180,10 @@ int ceph_open(struct inode *inode, struct file *file)
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
@@ -743,9 +743,9 @@ retry_snap:
                 */
                int dirty;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                ceph_put_cap_refs(ci, got);
 
                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
@@ -764,9 +764,9 @@ retry_snap:
 
        if (ret >= 0) {
                int dirty;
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }
@@ -797,7 +797,8 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
 
        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);
-       if (origin != SEEK_CUR || origin != SEEK_SET) {
+
+       if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
                if (ret < 0) {
                        offset = ret;
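The llseek hunk above replaces a predicate that was always true: for any origin, origin != SEEK_CUR || origin != SEEK_SET holds because origin cannot equal both values at once, so ceph_do_getattr() was issued on every seek. The new test names exactly the origins that need an up-to-date i_size. A minimal user-space sketch of the broken predicate, assuming only the standard SEEK_* values:

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int origin;

		for (origin = SEEK_SET; origin <= SEEK_END; origin++)
			printf("origin=%d -> %d\n", origin,
			       origin != SEEK_CUR || origin != SEEK_SET); /* prints 1 every time */
		return 0;
	}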
index 116f365..87fb132 100644 (file)
@@ -297,6 +297,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
        dout("alloc_inode %p\n", &ci->vfs_inode);
 
+       spin_lock_init(&ci->i_ceph_lock);
+
        ci->i_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
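The allocation hunk is what makes the rest of the conversion safe: the new spinlock is initialized before the inode can be looked up, so every later spin_lock(&ci->i_ceph_lock) operates on an initialized lock. A hedged, condensed sketch of the allocator's shape after the patch (the cache name and the remaining field initializers follow the surrounding fs/ceph/inode.c code, which is only partly shown here):

	struct inode *ceph_alloc_inode(struct super_block *sb)
	{
		struct ceph_inode_info *ci;

		ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
		if (!ci)
			return NULL;

		spin_lock_init(&ci->i_ceph_lock);	/* must precede any spin_lock() on it */
		ci->i_version = 0;			/* remaining fields as in the hunk above */
		return &ci->vfs_inode;
	}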
@@ -583,7 +585,7 @@ static int fill_inode(struct inode *inode,
                               iinfo->xattr_len);
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /*
         * provided version will be odd if inode value is projected,
@@ -680,7 +682,7 @@ static int fill_inode(struct inode *inode,
                        char *sym;
 
                        BUG_ON(symlen != inode->i_size);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
 
                        err = -ENOMEM;
                        sym = kmalloc(symlen+1, GFP_NOFS);
@@ -689,7 +691,7 @@ static int fill_inode(struct inode *inode,
                        memcpy(sym, iinfo->symlink, symlen);
                        sym[symlen] = 0;
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (!ci->i_symlink)
                                ci->i_symlink = sym;
                        else
@@ -715,7 +717,7 @@ static int fill_inode(struct inode *inode,
        }
 
 no_change:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        /* queue truncate if we saw i_size decrease */
        if (queue_trunc)
@@ -750,13 +752,13 @@ no_change:
                                     info->cap.flags,
                                     caps_reservation);
                } else {
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        dout(" %p got snap_caps %s\n", inode,
                             ceph_cap_string(le32_to_cpu(info->cap.caps)));
                        ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                        if (cap_fmode >= 0)
                                __ceph_get_fmode(ci, cap_fmode);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        } else if (cap_fmode >= 0) {
                pr_warning("mds issued no caps on %llx.%llx\n",
@@ -849,19 +851,20 @@ static void ceph_set_dentry_offset(struct dentry *dn)
 {
        struct dentry *dir = dn->d_parent;
        struct inode *inode = dir->d_inode;
+       struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_dentry_info *di;
 
        BUG_ON(!inode);
 
        di = ceph_dentry(dn);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (!ceph_dir_test_complete(inode)) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return;
        }
        di->offset = ceph_inode(inode)->i_max_offset++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        spin_lock(&dir->d_lock);
        spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
@@ -1308,7 +1311,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
        inode->i_size = size;
        inode->i_blocks = (size + (1 << 9) - 1) >> 9;
@@ -1318,7 +1321,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
            (ci->i_reported_size << 1) < ci->i_max_size)
                ret = 1;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -1376,20 +1379,20 @@ static void ceph_invalidate_work(struct work_struct *work)
        u32 orig_gen;
        int check = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("invalidate_pages %p gen %d revoking %d\n", inode,
             ci->i_rdcache_gen, ci->i_rdcache_revoking);
        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
                /* nevermind! */
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto out;
        }
        orig_gen = ci->i_rdcache_gen;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        truncate_inode_pages(&inode->i_data, 0);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (orig_gen == ci->i_rdcache_gen &&
            orig_gen == ci->i_rdcache_revoking) {
                dout("invalidate_pages %p gen %d successful\n", inode,
@@ -1401,7 +1404,7 @@ static void ceph_invalidate_work(struct work_struct *work)
                     inode, orig_gen, ci->i_rdcache_gen,
                     ci->i_rdcache_revoking);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (check)
                ceph_check_caps(ci, 0, NULL);
@@ -1460,10 +1463,10 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
        int wrbuffer_refs, wake = 0;
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_truncate_pending == 0) {
                dout("__do_pending_vmtruncate %p none pending\n", inode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return;
        }
 
@@ -1474,7 +1477,7 @@ retry:
        if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
                dout("__do_pending_vmtruncate %p flushing snaps first\n",
                     inode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                filemap_write_and_wait_range(&inode->i_data, 0,
                                             inode->i_sb->s_maxbytes);
                goto retry;
@@ -1484,15 +1487,15 @@ retry:
        wrbuffer_refs = ci->i_wrbuffer_ref;
        dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
             ci->i_truncate_pending, to);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        truncate_inode_pages(inode->i_mapping, to);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_truncate_pending--;
        if (ci->i_truncate_pending == 0)
                wake = 1;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (wrbuffer_refs == 0)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -1547,7 +1550,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        issued = __ceph_caps_issued(ci, NULL);
        dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
 
@@ -1695,7 +1698,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        }
 
        release &= issued;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (inode_dirty_flags)
                __mark_inode_dirty(inode, inode_dirty_flags);
@@ -1717,7 +1720,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        __ceph_do_pending_vmtruncate(inode);
        return err;
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        ceph_mdsc_put_request(req);
        return err;
 }
index 5a14c29..790914a 100644 (file)
@@ -241,11 +241,11 @@ static long ceph_ioctl_lazyio(struct file *file)
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                ci->i_nr_by_mode[fi->fmode]--;
                fi->fmode |= CEPH_FILE_MODE_LAZY;
                ci->i_nr_by_mode[fi->fmode]++;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                dout("ioctl_layzio: file %p marked lazy\n", file);
 
                ceph_check_caps(ci, 0, NULL);
index 264ab70..6203d80 100644 (file)
@@ -732,21 +732,21 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                }
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = NULL;
        if (mode == USE_AUTH_MDS)
                cap = ci->i_auth_cap;
        if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
                cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
        if (!cap) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto random;
        }
        mds = cap->session->s_mds;
        dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
             inode, ceph_vinop(inode), mds,
             cap == ci->i_auth_cap ? "auth " : "", cap);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return mds;
 
 random:
@@ -951,7 +951,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 
        dout("removing cap %p, ci is %p, inode is %p\n",
             cap, ci, &ci->vfs_inode);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __ceph_remove_cap(cap);
        if (!__ceph_is_any_real_caps(ci)) {
                struct ceph_mds_client *mdsc =
@@ -984,7 +984,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                }
                spin_unlock(&mdsc->cap_dirty_lock);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        while (drop--)
                iput(inode);
        return 0;
@@ -1015,10 +1015,10 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
 
        wake_up_all(&ci->i_cap_wq);
        if (arg) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                ci->i_wanted_max_size = 0;
                ci->i_requested_max_size = 0;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        return 0;
 }
@@ -1151,7 +1151,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        if (session->s_trim_caps <= 0)
                return -1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        mine = cap->issued | cap->implemented;
        used = __ceph_caps_used(ci);
        oissued = __ceph_caps_issued_other(ci, cap);
@@ -1170,7 +1170,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
                __ceph_remove_cap(cap);
        } else {
                /* try to drop referring dentries */
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                d_prune_aliases(inode);
                dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
                     inode, cap, atomic_read(&inode->i_count));
@@ -1178,7 +1178,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        }
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return 0;
 }
 
@@ -1296,7 +1296,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
                                           i_flushing_item);
                        struct inode *inode = &ci->vfs_inode;
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (ci->i_cap_flush_seq <= want_flush_seq) {
                                dout("check_cap_flush still flushing %p "
                                     "seq %lld <= %lld to mds%d\n", inode,
@@ -1304,7 +1304,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
                                     session->s_mds);
                                ret = 0;
                        }
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
@@ -1495,6 +1495,7 @@ retry:
                             pos, temp);
                } else if (stop_on_nosnap && inode &&
                           ceph_snap(inode) == CEPH_NOSNAP) {
+                       spin_unlock(&temp->d_lock);
                        break;
                } else {
                        pos -= temp->d_name.len;
@@ -2011,10 +2012,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ceph_dir_clear_complete(inode);
        ci->i_release_count++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (req->r_dentry)
                ceph_invalidate_dentry_lease(req->r_dentry);
@@ -2422,7 +2423,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        if (err)
                goto out_free;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap->seq = 0;        /* reset cap seq */
        cap->issue_seq = 0;  /* and issue_seq */
 
@@ -2445,7 +2446,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                rec.v1.pathbase = cpu_to_le64(pathbase);
                reclen = sizeof(rec.v1);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (recon_state->flock) {
                int num_fcntl_locks, num_flock_locks;
index 4bb2399..a50ca0e 100644 (file)
@@ -20,7 +20,7 @@
  *
  *         mdsc->snap_rwsem
  *
- *         inode->i_lock
+ *         ci->i_ceph_lock
  *                 mdsc->snap_flush_lock
  *                 mdsc->cap_delay_lock
  *
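The comment hunk above only renames the middle level of the documented lock ordering, but that ordering is what every converted call site must respect: mdsc->snap_rwsem is outermost, ci->i_ceph_lock sits below it, and the mdsc spinlocks nest innermost (ceph_check_caps drops i_ceph_lock before sleeping on snap_rwsem for exactly this reason). A hedged sketch of a path honoring that order, with only the ordering itself taken from the comment:

	down_read(&mdsc->snap_rwsem);
	spin_lock(&ci->i_ceph_lock);
	spin_lock(&mdsc->cap_delay_lock);
	/* ... touch cap state and the delay list ... */
	spin_unlock(&mdsc->cap_delay_lock);
	spin_unlock(&ci->i_ceph_lock);
	up_read(&mdsc->snap_rwsem);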
index e264371..a559c80 100644 (file)
@@ -446,7 +446,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                return;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
 
@@ -528,7 +528,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                kfree(capsnap);
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -537,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
  *
  * If capsnap can now be flushed, add to snap_flush list, and return 1.
  *
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
  */
 int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
                            struct ceph_cap_snap *capsnap)
@@ -739,9 +739,9 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
                inode = &ci->vfs_inode;
                ihold(inode);
                spin_unlock(&mdsc->snap_flush_lock);
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __ceph_flush_snaps(ci, &session, 0);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                iput(inode);
                spin_lock(&mdsc->snap_flush_lock);
        }
@@ -847,7 +847,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                                continue;
                        ci = ceph_inode(inode);
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (!ci->i_snap_realm)
                                goto skip_inode;
                        /*
@@ -876,7 +876,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        oldrealm = ci->i_snap_realm;
                        ci->i_snap_realm = realm;
                        spin_unlock(&realm->inodes_with_caps_lock);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
 
                        ceph_get_snap_realm(mdsc, realm);
                        ceph_put_snap_realm(mdsc, oldrealm);
@@ -885,7 +885,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        continue;
 
 skip_inode:
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        iput(inode);
                }
 
index 8dc73a5..b48f15f 100644 (file)
@@ -383,7 +383,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
        if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
                seq_printf(m, ",rsize=%d", fsopt->rsize);
        if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
-               seq_printf(m, ",rasize=%d", fsopt->rsize);
+               seq_printf(m, ",rasize=%d", fsopt->rasize);
        if (fsopt->congestion_kb != default_congestion_kb())
                seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
        if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
index 01bf189..edcbf37 100644 (file)
@@ -220,7 +220,7 @@ struct ceph_dentry_info {
  * The locking for D_COMPLETE is a bit odd:
  *  - we can clear it at almost any time (see ceph_d_prune)
  *  - it is only meaningful if:
- *    - we hold dir inode i_lock
+ *    - we hold dir inode i_ceph_lock
  *    - we hold dir FILE_SHARED caps
  *    - the dentry D_COMPLETE is set
  */
@@ -250,6 +250,8 @@ struct ceph_inode_xattrs_info {
 struct ceph_inode_info {
        struct ceph_vino i_vino;   /* ceph ino + snap */
 
+       spinlock_t i_ceph_lock;
+
        u64 i_version;
        u32 i_time_warp_seq;
 
@@ -271,7 +273,7 @@ struct ceph_inode_info {
 
        struct ceph_inode_xattrs_info i_xattrs;
 
-       /* capabilities.  protected _both_ by i_lock and cap->session's
+       /* capabilities.  protected _both_ by i_ceph_lock and cap->session's
         * s_mutex. */
        struct rb_root i_caps;           /* cap list */
        struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
@@ -437,18 +439,18 @@ static inline void ceph_i_clear(struct inode *inode, unsigned mask)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_ceph_flags &= ~mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline void ceph_i_set(struct inode *inode, unsigned mask)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_ceph_flags |= mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline bool ceph_i_test(struct inode *inode, unsigned mask)
@@ -456,9 +458,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask)
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool r;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        r = (ci->i_ceph_flags & mask) == mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return r;
 }
 
@@ -508,9 +510,9 @@ extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
 static inline int ceph_caps_issued(struct ceph_inode_info *ci)
 {
        int issued;
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        issued = __ceph_caps_issued(ci, NULL);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return issued;
 }
 
@@ -518,9 +520,9 @@ static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
                                        int touch)
 {
        int r;
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        r = __ceph_caps_issued_mask(ci, mask, touch);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return r;
 }
 
@@ -743,10 +745,9 @@ extern int ceph_add_cap(struct inode *inode,
 extern void __ceph_remove_cap(struct ceph_cap *cap);
 static inline void ceph_remove_cap(struct ceph_cap *cap)
 {
-       struct inode *inode = &cap->ci->vfs_inode;
-       spin_lock(&inode->i_lock);
+       spin_lock(&cap->ci->i_ceph_lock);
        __ceph_remove_cap(cap);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&cap->ci->i_ceph_lock);
 }
 extern void ceph_put_cap(struct ceph_mds_client *mdsc,
                         struct ceph_cap *cap);
index 96c6739..a5e36e4 100644 (file)
@@ -343,8 +343,8 @@ void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
 }
 
 static int __build_xattrs(struct inode *inode)
-       __releases(inode->i_lock)
-       __acquires(inode->i_lock)
+       __releases(ci->i_ceph_lock)
+       __acquires(ci->i_ceph_lock)
 {
        u32 namelen;
        u32 numattr = 0;
@@ -372,7 +372,7 @@ start:
                end = p + ci->i_xattrs.blob->vec.iov_len;
                ceph_decode_32_safe(&p, end, numattr, bad);
                xattr_version = ci->i_xattrs.version;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
                                 GFP_NOFS);
@@ -387,7 +387,7 @@ start:
                                goto bad_lock;
                }
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.version != xattr_version) {
                        /* lost a race, retry */
                        for (i = 0; i < numattr; i++)
@@ -418,7 +418,7 @@ start:
 
        return err;
 bad_lock:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 bad:
        if (xattrs) {
                for (i = 0; i < numattr; i++)
@@ -512,7 +512,7 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
        if (vxattrs)
                vxattr = ceph_match_vxattr(vxattrs, name);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);
 
@@ -520,14 +520,14 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto get_xattr;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                /* get xattrs from mds (if we don't already have them) */
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
                if (err)
                        return err;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (vxattr && vxattr->readonly) {
                err = vxattr->getxattr_cb(ci, value, size);
@@ -558,7 +558,7 @@ get_xattr:
        memcpy(value, xattr->val, xattr->val_len);
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return err;
 }
 
@@ -573,7 +573,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
        u32 len;
        int i;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);
 
@@ -581,13 +581,13 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto list_xattr;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
                if (err)
                        return err;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        err = __build_xattrs(inode);
        if (err < 0)
@@ -619,7 +619,7 @@ list_xattr:
                }
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return err;
 }
 
@@ -739,7 +739,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
        if (!xattr)
                goto out;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 retry:
        issued = __ceph_caps_issued(ci, NULL);
        if (!(issued & CEPH_CAP_XATTR_EXCL))
@@ -752,12 +752,12 @@ retry:
            required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
                struct ceph_buffer *blob = NULL;
 
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                dout(" preaallocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
                        goto out;
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
                ci->i_xattrs.prealloc_blob = blob;
@@ -770,13 +770,13 @@ retry:
        dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
        return err;
 
 do_sync:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        err = ceph_sync_setxattr(dentry, name, value, size, flags);
 out:
        kfree(newname);
@@ -833,7 +833,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
                        return -EOPNOTSUPP;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __build_xattrs(inode);
        issued = __ceph_caps_issued(ci, NULL);
        dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -846,12 +846,12 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
        return err;
 do_sync:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        err = ceph_send_removexattr(dentry, name);
        return err;
 }
index d6a972d..8cd4b52 100644 (file)
@@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
        smb_msg.msg_controllen = 0;
 
        for (total_read = 0; to_read; total_read += length, to_read -= length) {
+               try_to_freeze();
+
                if (server_unresponsive(server)) {
                        total_read = -EAGAIN;
                        break;
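The connect.c hunk adds a freeze point at the top of the cifsd receive loop so that a thread blocked reading from the server socket no longer stalls system suspend. This is the usual pattern for long-running kernel loops; a generic sketch, assuming only <linux/freezer.h> (the loop-body names are hypothetical):

	for (;;) {
		try_to_freeze();		/* park here if the freezer is active */
		if (!receive_one_message())	/* hypothetical helper */
			break;
	}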
index cf0b153..4dd9283 100644 (file)
@@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                                         lock->type, lock->netfid, conf_lock);
 }
 
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (mandatory style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
+ * send a request to the server, or 1 otherwise.
+ */
 static int
 cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
               __u8 type, __u16 netfid, struct file_lock *flock)
@@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
        mutex_unlock(&cinode->lock_mutex);
 }
 
+/*
+ * Set the byte-range lock (mandatory style). Returns:
+ * 1) 0, if we set the lock and don't need to send a request to the server;
+ * 2) 1, if no locks prevent us but we need to send a request to the server;
+ * 3) -EACCES, if there is a lock that prevents us and wait is false.
+ */
 static int
 cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                 bool wait)
@@ -778,6 +791,13 @@ try_again:
        return rc;
 }
 
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (posix style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
+ * send a request to the server, or 1 otherwise.
+ */
 static int
 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
 {
@@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
        return rc;
 }
 
+/*
+ * Set the byte-range lock (posix style). Returns:
+ * 1) 0, if we set the lock and don't need to send a request to the server;
+ * 2) 1, if we need to send a request to the server;
+ * 3) <0, if an error occurs while setting the lock.
+ */
 static int
 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
 {
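
The 0/1 return convention spelled out in the new comments drives the callers' fast path: 0 means the answer (or the updated flock) came from the local lock cache, 1 means a server round trip is still needed. A minimal caller sketch, with the wrapper function assumed purely for illustration; only cifs_posix_lock_test() itself comes from this change:

	static int getlk_sketch(struct file *file, struct file_lock *flock)
	{
		int rc = cifs_posix_lock_test(file, flock);

		if (rc == 0)
			return 0;	/* flock filled in (or set to F_UNLCK) locally */

		/* rc == 1: still need to ask the server about conflicting locks */
		return rc;
	}
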
index 5de03ec..a090bbe 100644 (file)
@@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                                 rc);
                        return rc;
                }
-               cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+               /* FindFirst/Next set last_entry to NULL on malformed reply */
+               if (cifsFile->srch_inf.last_entry)
+                       cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+                                               cifsFile);
        }
 
        while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
@@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                cFYI(1, "calling findnext2");
                rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
                                  &cifsFile->srch_inf);
-               cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+               /* FindFirst/Next set last_entry to NULL on malformed reply */
+               if (cifsFile->srch_inf.last_entry)
+                       cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+                                               cifsFile);
                if (rc)
                        return -ENOENT;
        }
index 7cacba1..80d8508 100644 (file)
@@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
 {
        int rc;
        int len;
-       __u16 wpwd[129];
+       __le16 wpwd[129];
 
        /* Password cannot be longer than 128 characters */
        if (passwd) /* Password must be converted to NT unicode */
@@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
                *wpwd = 0; /* Ensure string is null terminated */
        }
 
-       rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16));
-       memset(wpwd, 0, 129 * sizeof(__u16));
+       rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
+       memset(wpwd, 0, 129 * sizeof(__le16));
 
        return rc;
 }
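
The __u16 to __le16 switch above changes annotations rather than sizes (sizeof is identical); its point is that wpwd holds UTF-16LE code units, since the NT hash is MD4 over the little-endian UTF-16 encoding of the password regardless of host byte order. A minimal sketch of what the type now promises, with the array name assumed for illustration:

	__le16 wpwd_sketch[2];

	wpwd_sketch[0] = cpu_to_le16('a');	/* bytes 0x61 0x00 in memory on any host */
	wpwd_sketch[1] = cpu_to_le16(0);	/* NUL terminator */
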
index ca418aa..9d8715c 100644 (file)
@@ -292,7 +292,7 @@ int __init configfs_inode_init(void)
        return bdi_init(&configfs_backing_dev_info);
 }
 
-void __exit configfs_inode_exit(void)
+void configfs_inode_exit(void)
 {
        bdi_destroy(&configfs_backing_dev_info);
 }
index ecc6217..276e15c 100644 (file)
@@ -143,28 +143,26 @@ static int __init configfs_init(void)
                goto out;
 
        config_kobj = kobject_create_and_add("config", kernel_kobj);
-       if (!config_kobj) {
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-               goto out;
-       }
+       if (!config_kobj)
+               goto out2;
+
+       err = configfs_inode_init();
+       if (err)
+               goto out3;
 
        err = register_filesystem(&configfs_fs_type);
-       if (err) {
-               printk(KERN_ERR "configfs: Unable to register filesystem!\n");
-               kobject_put(config_kobj);
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-               goto out;
-       }
+       if (err)
+               goto out4;
 
-       err = configfs_inode_init();
-       if (err) {
-               unregister_filesystem(&configfs_fs_type);
-               kobject_put(config_kobj);
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-       }
+       return 0;
+out4:
+       printk(KERN_ERR "configfs: Unable to register filesystem!\n");
+       configfs_inode_exit();
+out3:
+       kobject_put(config_kobj);
+out2:
+       kmem_cache_destroy(configfs_dir_cachep);
+       configfs_dir_cachep = NULL;
 out:
        return err;
 }
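
As the reordering suggests, configfs_init() now follows the usual init idiom: register_filesystem() becomes the last step, so a racing mount can never see a half-initialized filesystem, and the error path unwinds with goto labels in exact reverse order of setup. A generic sketch of the pattern, with all helper names as placeholders rather than configfs code:

	static int __init example_init(void)
	{
		int err;

		err = setup_cache();			/* step 1 */
		if (err)
			goto out;
		err = setup_kobject();			/* step 2 */
		if (err)
			goto out_cache;
		err = register_filesystem(&example_fs_type);	/* last: now visible */
		if (err)
			goto out_kobj;
		return 0;
	out_kobj:
		teardown_kobject();
	out_cache:
		teardown_cache();
	out:
		return err;
	}
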
index 10ba92d..89509b5 100644 (file)
@@ -2439,16 +2439,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
 /**
  * prepend_path - Prepend path string to a buffer
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buffer: pointer to the end of the buffer
  * @buflen: pointer to buffer length
  *
  * Caller holds the rename_lock.
- *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
  */
-static int prepend_path(const struct path *path, struct path *root,
+static int prepend_path(const struct path *path,
+                       const struct path *root,
                        char **buffer, int *buflen)
 {
        struct dentry *dentry = path->dentry;
@@ -2483,10 +2481,10 @@ static int prepend_path(const struct path *path, struct path *root,
                dentry = parent;
        }
 
-out:
        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);
 
+out:
        br_read_unlock(vfsmount_lock);
        return error;
 
@@ -2500,15 +2498,17 @@ global_root:
                WARN(1, "Root dentry has weird name <%.*s>\n",
                     (int) dentry->d_name.len, dentry->d_name.name);
        }
-       root->mnt = vfsmnt;
-       root->dentry = dentry;
+       if (!slash)
+               error = prepend(buffer, buflen, "/", 1);
+       if (!error)
+               error = vfsmnt->mnt_ns ? 1 : 2;
        goto out;
 }
 
 /**
  * __d_path - return the path of a dentry
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buf: buffer to return value in
  * @buflen: buffer length
  *
@@ -2519,10 +2519,10 @@ global_root:
  *
  * "buflen" should be positive.
  *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
+ * If the path is not reachable from the supplied root, return %NULL.
  */
-char *__d_path(const struct path *path, struct path *root,
+char *__d_path(const struct path *path,
+              const struct path *root,
               char *buf, int buflen)
 {
        char *res = buf + buflen;
@@ -2533,7 +2533,28 @@ char *__d_path(const struct path *path, struct path *root,
        error = prepend_path(path, root, &res, &buflen);
        write_sequnlock(&rename_lock);
 
-       if (error)
+       if (error < 0)
+               return ERR_PTR(error);
+       if (error > 0)
+               return NULL;
+       return res;
+}
+
+char *d_absolute_path(const struct path *path,
+              char *buf, int buflen)
+{
+       struct path root = {};
+       char *res = buf + buflen;
+       int error;
+
+       prepend(&res, &buflen, "\0", 1);
+       write_seqlock(&rename_lock);
+       error = prepend_path(path, &root, &res, &buflen);
+       write_sequnlock(&rename_lock);
+
+       if (error > 1)
+               error = -EINVAL;
+       if (error < 0)
                return ERR_PTR(error);
        return res;
 }
@@ -2541,8 +2562,9 @@ char *__d_path(const struct path *path, struct path *root,
 /*
  * same as __d_path but appends "(deleted)" for unlinked files.
  */
-static int path_with_deleted(const struct path *path, struct path *root,
-                                char **buf, int *buflen)
+static int path_with_deleted(const struct path *path,
+                            const struct path *root,
+                            char **buf, int *buflen)
 {
        prepend(buf, buflen, "\0", 1);
        if (d_unlinked(path->dentry)) {
@@ -2579,7 +2601,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        /*
@@ -2594,9 +2615,8 @@ char *d_path(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (error)
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error < 0)
                res = ERR_PTR(error);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2617,7 +2637,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        if (path->dentry->d_op && path->dentry->d_op->d_dname)
@@ -2625,9 +2644,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (!error && !path_equal(&tmp, &root))
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error > 0)
                error = prepend_unreachable(&res, &buflen);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2758,19 +2776,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
        write_seqlock(&rename_lock);
        if (!d_unlinked(pwd.dentry)) {
                unsigned long len;
-               struct path tmp = root;
                char *cwd = page + PAGE_SIZE;
                int buflen = PAGE_SIZE;
 
                prepend(&cwd, &buflen, "\0", 1);
-               error = prepend_path(&pwd, &tmp, &cwd, &buflen);
+               error = prepend_path(&pwd, &root, &cwd, &buflen);
                write_sequnlock(&rename_lock);
 
-               if (error)
+               if (error < 0)
                        goto out;
 
                /* Unreachable from current root */
-               if (!path_equal(&tmp, &root)) {
+               if (error > 0) {
                        error = prepend_unreachable(&cwd, &buflen);
                        if (error)
                                goto out;
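
The new d_absolute_path() keeps __d_path()'s convention of returning a pointer into the tail of the caller-supplied buffer, but reports failure via ERR_PTR() instead of modifying a root argument (which prepend_path() now treats as const). A caller sketch, with the wrapper function and printk reporting assumed purely for illustration:

	static int report_abs_path(const struct path *path)
	{
		char *buf = (char *)__get_free_page(GFP_KERNEL);
		char *p;
		int ret = 0;

		if (!buf)
			return -ENOMEM;
		p = d_absolute_path(path, buf, PAGE_SIZE);
		if (IS_ERR(p))
			ret = PTR_ERR(p);	/* e.g. -EINVAL if the mount belongs to no namespace */
		else
			printk(KERN_INFO "absolute path: %s\n", p);
		free_page((unsigned long)buf);
		return ret;
	}
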
index 61fa9e1..607b155 100644 (file)
@@ -1095,7 +1095,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
 
-       neh->eh_depth = cpu_to_le16(neh->eh_depth + 1);
+       neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
        ext4_mark_inode_dirty(handle, inode);
 out:
        brelse(bh);
@@ -2955,7 +2955,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        /* Pre-conditions */
        BUG_ON(!ext4_ext_is_uninitialized(ex));
        BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
-       BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
 
        /*
         * Attempt to transfer newly initialized blocks from the currently
index 848f436..92655fd 100644 (file)
@@ -1339,8 +1339,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                                        clear_buffer_unwritten(bh);
                                }
 
-                               /* skip page if block allocation undone */
-                               if (buffer_delay(bh) || buffer_unwritten(bh))
+                               /*
+                                * skip page if block allocation undone and
+                                * block is dirty
+                                */
+                               if (ext4_bh_delay_or_unwritten(NULL, bh))
                                        skip_page = 1;
                                bh = bh->b_this_page;
                                block_start += bh->b_size;
@@ -2387,7 +2390,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
        pgoff_t index;
        struct inode *inode = mapping->host;
        handle_t *handle;
-       loff_t page_len;
 
        index = pos >> PAGE_CACHE_SHIFT;
 
@@ -2434,13 +2436,6 @@ retry:
                 */
                if (pos + len > inode->i_size)
                        ext4_truncate_failed_write(inode);
-       } else {
-               page_len = pos & (PAGE_CACHE_SIZE - 1);
-               if (page_len > 0) {
-                       ret = ext4_discard_partial_page_buffers_no_lock(handle,
-                               inode, page, pos - page_len, page_len,
-                               EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
-               }
        }
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2483,7 +2478,6 @@ static int ext4_da_write_end(struct file *file,
        loff_t new_i_size;
        unsigned long start, end;
        int write_mode = (int)(unsigned long)fsdata;
-       loff_t page_len;
 
        if (write_mode == FALL_BACK_TO_NONDELALLOC) {
                if (ext4_should_order_data(inode)) {
@@ -2508,7 +2502,7 @@ static int ext4_da_write_end(struct file *file,
         */
 
        new_i_size = pos + copied;
-       if (new_i_size > EXT4_I(inode)->i_disksize) {
+       if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
                if (ext4_da_should_update_i_disksize(page, end)) {
                        down_write(&EXT4_I(inode)->i_data_sem);
                        if (new_i_size > EXT4_I(inode)->i_disksize) {
@@ -2532,16 +2526,6 @@ static int ext4_da_write_end(struct file *file,
        }
        ret2 = generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
-
-       page_len = PAGE_CACHE_SIZE -
-                       ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
-
-       if (page_len > 0) {
-               ret = ext4_discard_partial_page_buffers_no_lock(handle,
-                       inode, page, pos + copied - 1, page_len,
-                       EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
-       }
-
        copied = ret2;
        if (ret2 < 0)
                ret = ret2;
@@ -2781,10 +2765,11 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                  iocb->private, io_end->inode->i_ino, iocb, offset,
                  size);
 
+       iocb->private = NULL;
+
        /* if not aio dio with unwritten extents, just free io and return */
        if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
                ext4_free_io_end(io_end);
-               iocb->private = NULL;
 out:
                if (is_async)
                        aio_complete(iocb, ret, 0);
@@ -2807,7 +2792,6 @@ out:
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 
        /* queue the work to convert unwritten extents to written */
-       iocb->private = NULL;
        queue_work(wq, &io_end->work);
 
        /* XXX: probably should move into the real I/O completion handler */
@@ -3203,26 +3187,8 @@ int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
 
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 
-       if (!page_has_buffers(page)) {
-               /*
-                * If the range to be discarded covers a partial block
-                * we need to get the page buffers.  This is because
-                * partial blocks cannot be released and the page needs
-                * to be updated with the contents of the block before
-                * we write the zeros on top of it.
-                */
-               if ((from & (blocksize - 1)) ||
-                   ((from + length) & (blocksize - 1))) {
-                       create_empty_buffers(page, blocksize, 0);
-               } else {
-                       /*
-                        * If there are no partial blocks,
-                        * there is nothing to update,
-                        * so we can return now
-                        */
-                       return 0;
-               }
-       }
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, blocksize, 0);
 
        /* Find the buffer that contains "offset" */
        bh = page_buffers(page);
index 7ce1d0b..7e106c8 100644 (file)
@@ -385,6 +385,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
                block_end = block_start + blocksize;
                if (block_start >= len) {
+                       /*
+                        * Comments copied from block_write_full_page_endio:
+                        *
+                        * The page straddles i_size.  It must be zeroed out on
+                        * each and every writepage invocation because it may
+                        * be mmapped.  "A file is mapped in multiples of the
+                        * page size.  For a file that is not a multiple of
+                        * the  page size, the remaining memory is zeroed when
+                        * mapped, and writes to that region are not written
+                        * out to the file."
+                        */
+                       zero_user_segment(page, block_start, block_end);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
index 3858767..3e1329e 100644 (file)
@@ -1155,9 +1155,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_puts(seq, ",block_validity");
 
        if (!test_opt(sb, INIT_INODE_TABLE))
-               seq_puts(seq, ",noinit_inode_table");
+               seq_puts(seq, ",noinit_itable");
        else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
-               seq_printf(seq, ",init_inode_table=%u",
+               seq_printf(seq, ",init_itable=%u",
                           (unsigned) sbi->s_li_wait_mult);
 
        ext4_show_quota_options(seq, sb);
@@ -1333,8 +1333,7 @@ enum {
        Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
        Opt_dioread_nolock, Opt_dioread_lock,
-       Opt_discard, Opt_nodiscard,
-       Opt_init_inode_table, Opt_noinit_inode_table,
+       Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
 };
 
 static const match_table_t tokens = {
@@ -1407,9 +1406,9 @@ static const match_table_t tokens = {
        {Opt_dioread_lock, "dioread_lock"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
-       {Opt_init_inode_table, "init_itable=%u"},
-       {Opt_init_inode_table, "init_itable"},
-       {Opt_noinit_inode_table, "noinit_itable"},
+       {Opt_init_itable, "init_itable=%u"},
+       {Opt_init_itable, "init_itable"},
+       {Opt_noinit_itable, "noinit_itable"},
        {Opt_err, NULL},
 };
 
@@ -1892,7 +1891,7 @@ set_qf_format:
                case Opt_dioread_lock:
                        clear_opt(sb, DIOREAD_NOLOCK);
                        break;
-               case Opt_init_inode_table:
+               case Opt_init_itable:
                        set_opt(sb, INIT_INODE_TABLE);
                        if (args[0].from) {
                                if (match_int(&args[0], &option))
@@ -1903,7 +1902,7 @@ set_qf_format:
                                return 0;
                        sbi->s_li_wait_mult = option;
                        break;
-               case Opt_noinit_inode_table:
+               case Opt_noinit_itable:
                        clear_opt(sb, INIT_INODE_TABLE);
                        break;
                default:
index 73c3992..517f211 100644 (file)
@@ -47,17 +47,6 @@ struct wb_writeback_work {
        struct completion *done;        /* set if the caller waits */
 };
 
-const char *wb_reason_name[] = {
-       [WB_REASON_BACKGROUND]          = "background",
-       [WB_REASON_TRY_TO_FREE_PAGES]   = "try_to_free_pages",
-       [WB_REASON_SYNC]                = "sync",
-       [WB_REASON_PERIODIC]            = "periodic",
-       [WB_REASON_LAPTOP_TIMER]        = "laptop_timer",
-       [WB_REASON_FREE_MORE_MEM]       = "free_more_memory",
-       [WB_REASON_FS_FREE_SPACE]       = "fs_free_space",
-       [WB_REASON_FORKER_THREAD]       = "forker_thread"
-};
-
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
@@ -156,6 +145,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  * bdi_start_writeback - start writeback
  * @bdi: the backing device to write from
  * @nr_pages: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Description:
  *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
@@ -1223,6 +1213,7 @@ static void wait_sb_inodes(struct super_block *sb)
  * writeback_inodes_sb_nr -    writeback dirty inodes from given super_block
  * @sb: the superblock
  * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Start writeback on some inodes on this super_block. No guarantees are made
  * on how many (if any) will be written, and this function does not wait
@@ -1251,6 +1242,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
 /**
  * writeback_inodes_sb -       writeback dirty inodes from given super_block
  * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
  *
  * Start writeback on some inodes on this super_block. No guarantees are made
  * on how many (if any) will be written, and this function does not wait
@@ -1265,6 +1257,7 @@ EXPORT_SYMBOL(writeback_inodes_sb);
 /**
  * writeback_inodes_sb_if_idle -       start writeback if none underway
  * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
  *
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
@@ -1285,6 +1278,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
  * writeback_inodes_sb_nr_if_idle -     start writeback if none underway
  * @sb: the superblock
  * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
index 5cb8614..2aaf3ea 100644 (file)
@@ -1512,7 +1512,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        else if (outarg->offset + num > file_size)
                num = file_size - outarg->offset;
 
-       while (num) {
+       while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
                struct page *page;
                unsigned int this_num;
 
@@ -1526,6 +1526,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 
                num -= this_num;
                total_len += this_num;
+               index++;
        }
        req->misc.retrieve_in.offset = outarg->offset;
        req->misc.retrieve_in.size = total_len;
index 594f07a..0c84100 100644 (file)
@@ -1556,7 +1556,7 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
        struct inode *inode = file->f_path.dentry->d_inode;
 
        mutex_lock(&inode->i_mutex);
-       if (origin != SEEK_CUR || origin != SEEK_SET) {
+       if (origin != SEEK_CUR && origin != SEEK_SET) {
                retval = fuse_update_attributes(inode, NULL, file, NULL);
                if (retval)
                        goto exit;
@@ -1567,6 +1567,10 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
                offset += i_size_read(inode);
                break;
        case SEEK_CUR:
+               if (offset == 0) {
+                       retval = file->f_pos;
+                       goto exit;
+               }
                offset += file->f_pos;
                break;
        case SEEK_DATA:
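
The one-character fix above ('||' to '&&') is easy to misread, and the same mix-up is corrected in the NFS llseek hunk further down. Since origin can never equal SEEK_CUR and SEEK_SET at the same time, at least one of the two inequalities always holds, so the old condition ran the attribute revalidation on every llseek:

	origin      origin != SEEK_CUR   origin != SEEK_SET   old (||)   new (&&)
	SEEK_SET    true                 false                true       false
	SEEK_CUR    false                true                 true       false
	SEEK_END    true                 true                 true       true

With '&&', the revalidation runs only for SEEK_END/SEEK_DATA/SEEK_HOLE, the cases where the cached file size actually matters.
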
index 3e6d727..aa83109 100644 (file)
@@ -1138,28 +1138,28 @@ static int __init fuse_fs_init(void)
 {
        int err;
 
-       err = register_filesystem(&fuse_fs_type);
-       if (err)
-               goto out;
-
-       err = register_fuseblk();
-       if (err)
-               goto out_unreg;
-
        fuse_inode_cachep = kmem_cache_create("fuse_inode",
                                              sizeof(struct fuse_inode),
                                              0, SLAB_HWCACHE_ALIGN,
                                              fuse_inode_init_once);
        err = -ENOMEM;
        if (!fuse_inode_cachep)
-               goto out_unreg2;
+               goto out;
+
+       err = register_fuseblk();
+       if (err)
+               goto out2;
+
+       err = register_filesystem(&fuse_fs_type);
+       if (err)
+               goto out3;
 
        return 0;
 
- out_unreg2:
+ out3:
        unregister_fuseblk();
- out_unreg:
-       unregister_filesystem(&fuse_fs_type);
+ out2:
+       kmem_cache_destroy(fuse_inode_cachep);
  out:
        return err;
 }
index 6d3a196..cfc6d44 100644 (file)
@@ -1048,15 +1048,12 @@ static int show_mountinfo(struct seq_file *m, void *v)
        if (err)
                goto out;
        seq_putc(m, ' ');
-       seq_path_root(m, &mnt_path, &root, " \t\n\\");
-       if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
-               /*
-                * Mountpoint is outside root, discard that one.  Ugly,
-                * but less so than trying to do that in iterator in a
-                * race-free way (due to renames).
-                */
-               return SEQ_SKIP;
-       }
+
+       /* mountpoints outside the chroot jail make seq_path_root() return SEQ_SKIP */
+       err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+       if (err)
+               goto out;
+
        seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
        show_mnt_opts(m, mnt);
 
@@ -2776,3 +2773,8 @@ void kern_unmount(struct vfsmount *mnt)
        }
 }
 EXPORT_SYMBOL(kern_unmount);
+
+bool our_mnt(struct vfsmount *mnt)
+{
+       return check_mnt(mnt);
+}
index 5b5fa33..cbd1a61 100644 (file)
@@ -548,7 +548,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 
        error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
        if (error)
-               goto out_bdi;
+               goto out_fput;
 
        server->ncp_filp = ncp_filp;
        server->ncp_sock = sock;
@@ -559,7 +559,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
                error = -EBADF;
                server->info_filp = fget(data.info_fd);
                if (!server->info_filp)
-                       goto out_fput;
+                       goto out_bdi;
                error = -ENOTSOCK;
                sock_inode = server->info_filp->f_path.dentry->d_inode;
                if (!S_ISSOCK(sock_inode->i_mode))
@@ -746,9 +746,9 @@ out_nls:
 out_fput2:
        if (server->info_filp)
                fput(server->info_filp);
-out_fput:
-       bdi_destroy(&server->bdi);
 out_bdi:
+       bdi_destroy(&server->bdi);
+out_fput:
        /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
         * 
         * The previously used put_filp(ncp_filp); was bogus, since
index eca56d4..606ef0f 100644 (file)
@@ -147,7 +147,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
         * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
         * the cached file length
         */
-       if (origin != SEEK_SET || origin != SEEK_CUR) {
+       if (origin != SEEK_SET && origin != SEEK_CUR) {
                struct inode *inode = filp->f_mapping->host;
 
                int retval = nfs_revalidate_file_size(inode, filp);
index be2bbac..d9f4d78 100644 (file)
@@ -39,6 +39,8 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/gss_api.h>
@@ -894,6 +896,8 @@ out:
 
 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
 {
+       if (delegation == NULL)
+               return 0;
        if ((delegation->type & fmode) != fmode)
                return 0;
        if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
@@ -1036,8 +1040,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
                }
                rcu_read_lock();
                delegation = rcu_dereference(nfsi->delegation);
-               if (delegation == NULL ||
-                   !can_open_delegated(delegation, fmode)) {
+               if (!can_open_delegated(delegation, fmode)) {
                        rcu_read_unlock();
                        break;
                }
@@ -1091,7 +1094,12 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
                if (delegation)
                        delegation_flags = delegation->flags;
                rcu_read_unlock();
-               if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+               if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+                       pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+                                       "returning a delegation for "
+                                       "OPEN(CLAIM_DELEGATE_CUR)\n",
+                                       NFS_CLIENT(inode)->cl_server);
+               } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
                        nfs_inode_set_delegation(state->inode,
                                        data->owner->so_cred,
                                        &data->o_res);
@@ -1423,11 +1431,9 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                        goto out_no_action;
                rcu_read_lock();
                delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
-               if (delegation != NULL &&
-                   test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
-                       rcu_read_unlock();
-                       goto out_no_action;
-               }
+               if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+                   can_open_delegated(delegation, data->o_arg.fmode))
+                       goto unlock_no_action;
                rcu_read_unlock();
        }
        /* Update sequence id. */
@@ -1444,6 +1450,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                return;
        rpc_call_start(task);
        return;
+unlock_no_action:
+       rcu_read_unlock();
 out_no_action:
        task->tk_action = NULL;
 
index 39914be..6a7107a 100644 (file)
@@ -1156,11 +1156,13 @@ restart:
                if (status >= 0) {
                        status = nfs4_reclaim_locks(state, ops);
                        if (status >= 0) {
+                               spin_lock(&state->state_lock);
                                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                        if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                                printk("%s: Lock reclaim failed!\n",
                                                        __func__);
                                }
+                               spin_unlock(&state->state_lock);
                                nfs4_put_open_state(state);
                                goto restart;
                        }
@@ -1224,10 +1226,12 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
        clear_bit(NFS_O_RDWR_STATE, &state->flags);
+       spin_lock(&state->state_lock);
        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                lock->ls_seqid.flags = 0;
                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
        }
+       spin_unlock(&state->state_lock);
 }
 
 static void nfs4_reset_seqids(struct nfs_server *server,
@@ -1350,12 +1354,14 @@ static void nfs4_warn_keyexpired(const char *s)
 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 {
        switch (error) {
+               case 0:
+                       break;
                case -NFS4ERR_CB_PATH_DOWN:
                        nfs_handle_cb_pathdown(clp);
-                       return 0;
+                       break;
                case -NFS4ERR_NO_GRACE:
                        nfs4_state_end_reclaim_reboot(clp);
-                       return 0;
+                       break;
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_LEASE_MOVED:
                        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
@@ -1375,13 +1381,15 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
                case -NFS4ERR_SEQ_MISORDERED:
                        set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
                        /* Zero session reset errors */
-                       return 0;
+                       break;
                case -EKEYEXPIRED:
                        /* Nothing we can do */
                        nfs4_warn_keyexpired(clp->cl_hostname);
-                       return 0;
+                       break;
+               default:
+                       return error;
        }
-       return error;
+       return 0;
 }
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1428,7 +1436,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
        struct rpc_cred *cred;
        const struct nfs4_state_maintenance_ops *ops =
                clp->cl_mvops->state_renewal_ops;
-       int status = -NFS4ERR_EXPIRED;
+       int status;
 
        /* Is the client already known to have an expired lease? */
        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
@@ -1438,6 +1446,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
        spin_unlock(&clp->cl_lock);
        if (cred == NULL) {
                cred = nfs4_get_setclientid_cred(clp);
+               status = -ENOKEY;
                if (cred == NULL)
                        goto out;
        }
@@ -1525,16 +1534,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 {
        if (!flags)
                return;
-       else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+       if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
                nfs41_handle_server_reboot(clp);
-       else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+       if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
                            SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                            SEQ4_STATUS_ADMIN_STATE_REVOKED |
                            SEQ4_STATUS_LEASE_MOVED))
                nfs41_handle_state_revoked(clp);
-       else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+       if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
                nfs41_handle_recallable_state_revoked(clp);
-       else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+       if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
                            SEQ4_STATUS_BACKCHANNEL_FAULT |
                            SEQ4_STATUS_CB_PATH_DOWN_SESSION))
                nfs41_handle_cb_path_down(clp);
@@ -1662,10 +1671,10 @@ static void nfs4_state_manager(struct nfs_client *clp)
 
                if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
                        status = nfs4_check_lease(clp);
+                       if (status < 0)
+                               goto out_error;
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                                continue;
-                       if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
-                               goto out_error;
                }
 
                /* Initialize or reset the session */
index 41d6743..ac258be 100644 (file)
@@ -625,6 +625,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
                if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
                        goto out_free;
 
+               if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
+                       goto out_free;
+
                len = argv[n].v_size * argv[n].v_nmembs;
                base = (void __user *)(unsigned long)argv[n].v_base;
                if (len == 0) {
@@ -842,6 +845,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case FS_IOC32_GETVERSION:
                cmd = FS_IOC_GETVERSION;
                break;
+       case NILFS_IOCTL_CHANGE_CPMODE:
+       case NILFS_IOCTL_DELETE_CHECKPOINT:
+       case NILFS_IOCTL_GET_CPINFO:
+       case NILFS_IOCTL_GET_CPSTAT:
+       case NILFS_IOCTL_GET_SUINFO:
+       case NILFS_IOCTL_GET_SUSTAT:
+       case NILFS_IOCTL_GET_VINFO:
+       case NILFS_IOCTL_GET_BDESCS:
+       case NILFS_IOCTL_CLEAN_SEGMENTS:
+       case NILFS_IOCTL_SYNC:
+       case NILFS_IOCTL_RESIZE:
+       case NILFS_IOCTL_SET_ALLOC_RANGE:
+               break;
        default:
                return -ENOIOCTLCMD;
        }
index 5861741..80e4645 100644 (file)
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(i.freeswap),
                K(global_page_state(NR_FILE_DIRTY)),
                K(global_page_state(NR_WRITEBACK)),
-               K(global_page_state(NR_ANON_PAGES)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               K(global_page_state(NR_ANON_PAGES)
                  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
-                 HPAGE_PMD_NR
+                 HPAGE_PMD_NR),
+#else
+               K(global_page_state(NR_ANON_PAGES)),
 #endif
-                 ),
                K(global_page_state(NR_FILE_MAPPED)),
                K(global_page_state(NR_SHMEM)),
                K(global_page_state(NR_SLAB_RECLAIMABLE) +
index 9a8a2b7..03102d9 100644 (file)
@@ -91,20 +91,18 @@ static struct file_system_type proc_fs_type = {
 
 void __init proc_root_init(void)
 {
-       struct vfsmount *mnt;
        int err;
 
        proc_init_inodecache();
        err = register_filesystem(&proc_fs_type);
        if (err)
                return;
-       mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
-       if (IS_ERR(mnt)) {
+       err = pid_ns_prepare_proc(&init_pid_ns);
+       if (err) {
                unregister_filesystem(&proc_fs_type);
                return;
        }
 
-       init_pid_ns.proc_mnt = mnt;
        proc_symlink("mounts", NULL, "self/mounts");
 
        proc_net_init();
@@ -209,5 +207,5 @@ int pid_ns_prepare_proc(struct pid_namespace *ns)
 
 void pid_ns_release_proc(struct pid_namespace *ns)
 {
-       mntput(ns->proc_mnt);
+       kern_unmount(ns->proc_mnt);
 }
index 42b274d..2a30d67 100644 (file)
@@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu)
                idle = kstat_cpu(cpu).cpustat.idle;
                idle = cputime64_add(idle, arch_idle_time(cpu));
        } else
-               idle = usecs_to_cputime(idle_time);
+               idle = nsecs_to_jiffies64(1000 * idle_time);
 
        return idle;
 }
@@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu)
                /* !NO_HZ so we can rely on cpustat.iowait */
                iowait = kstat_cpu(cpu).cpustat.iowait;
        else
-               iowait = usecs_to_cputime(iowait_time);
+               iowait = nsecs_to_jiffies64(1000 * iowait_time);
 
        return iowait;
 }
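
The conversion change above is about units and width: idle_time and iowait_time are gathered in microseconds, so multiplying by 1000 yields nanoseconds, and nsecs_to_jiffies64() keeps the whole calculation in 64 bits instead of funnelling large microsecond counts through the narrower cputime conversion. A worked example, with HZ assumed to be 100 for illustration: one jiffy is then 10 ms = 10^7 ns, so idle_time = 2,500,000 us becomes 1000 * 2,500,000 = 2.5 * 10^9 ns, which nsecs_to_jiffies64() maps to 250 jiffies.
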
index 05d6b0e..dba43c3 100644 (file)
@@ -449,8 +449,6 @@ EXPORT_SYMBOL(seq_path);
 
 /*
  * Same as seq_path, but relative to supplied root.
- *
- * root may be changed, see __d_path().
  */
 int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                  char *esc)
@@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                char *p;
 
                p = __d_path(path, root, buf, size);
+               if (!p)
+                       return SEQ_SKIP;
                res = PTR_ERR(p);
                if (!IS_ERR(p)) {
                        char *end = mangle_path(buf, p, esc);
@@ -474,7 +474,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
        }
        seq_commit(m, res);
 
-       return res < 0 ? res : 0;
+       return res < 0 && res != -ENAMETOOLONG ? res : 0;
 }
 
 /*
index 20403dc..ae0e76b 100644 (file)
@@ -2264,19 +2264,12 @@ static int __init ubifs_init(void)
                return -EINVAL;
        }
 
-       err = register_filesystem(&ubifs_fs_type);
-       if (err) {
-               ubifs_err("cannot register file system, error %d", err);
-               return err;
-       }
-
-       err = -ENOMEM;
        ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
                                sizeof(struct ubifs_inode), 0,
                                SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
                                &inode_slab_ctor);
        if (!ubifs_inode_slab)
-               goto out_reg;
+               return -ENOMEM;
 
        register_shrinker(&ubifs_shrinker_info);
 
@@ -2288,15 +2281,20 @@ static int __init ubifs_init(void)
        if (err)
                goto out_compr;
 
+       err = register_filesystem(&ubifs_fs_type);
+       if (err) {
+               ubifs_err("cannot register file system, error %d", err);
+               goto out_dbg;
+       }
        return 0;
 
+out_dbg:
+       dbg_debugfs_exit();
 out_compr:
        ubifs_compressors_exit();
 out_shrinker:
        unregister_shrinker(&ubifs_shrinker_info);
        kmem_cache_destroy(ubifs_inode_slab);
-out_reg:
-       unregister_filesystem(&ubifs_fs_type);
        return err;
 }
 /* late_initcall to let compressors initialize first */
index c68baeb..d0ab788 100644 (file)
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc(
        int             tryagain;
        int             error;
 
+       ASSERT(ap->length);
+
        mp = ap->ip->i_mount;
        align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
        if (unlikely(align)) {
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate(
        int                     error;
        int                     rt;
 
+       ASSERT(bma->length > 0);
+
        rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
 
        /*
@@ -4849,6 +4853,7 @@ xfs_bmapi_write(
        ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
        ASSERT(!(flags & XFS_BMAPI_IGSTATE));
        ASSERT(tp != NULL);
+       ASSERT(len > 0);
 
        whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
                XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -4918,9 +4923,22 @@ xfs_bmapi_write(
                        bma.eof = eof;
                        bma.conv = !!(flags & XFS_BMAPI_CONVERT);
                        bma.wasdel = wasdelay;
-                       bma.length = len;
                        bma.offset = bno;
 
+                       /*
+                        * There's a 32/64 bit type mismatch between the
+                        * allocation length request (which can be 64 bits in
+                        * length) and the bma length request, which is
+                        * xfs_extlen_t and therefore 32 bits. Hence we have to
+                        * check for 32-bit overflows and handle them here.
+                        */
+                       if (len > (xfs_filblks_t)MAXEXTLEN)
+                               bma.length = MAXEXTLEN;
+                       else
+                               bma.length = len;
+
+                       ASSERT(len > 0);
+                       ASSERT(bma.length > 0);
                        error = xfs_bmapi_allocate(&bma, flags);
                        if (error)
                                goto error0;
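
The clamp introduced above guards a genuine truncation: the requested len is an xfs_filblks_t (64 bits) while bma.length is an xfs_extlen_t (32 bits). As a worked example (values chosen purely for illustration), a request of len = 2^32 blocks assigned straight into the 32-bit field would wrap to 0 and trip the new ASSERT(bma.length > 0); clamping to MAXEXTLEN keeps each xfs_bmapi_allocate() call within a single extent's limit and leaves the remainder for the surrounding mapping loop to pick up on later iterations.
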
index da10897..558910f 100644 (file)
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
        switch (fileid_type) {
        case FILEID_INO32_GEN_PARENT:
                spin_lock(&dentry->d_lock);
-               fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN:
-               fid->i32.ino = inode->i_ino;
+               fid->i32.ino = XFS_I(inode)->i_ino;
                fid->i32.gen = inode->i_generation;
                break;
        case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
                spin_lock(&dentry->d_lock);
-               fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
-               fid64->ino = inode->i_ino;
+               fid64->ino = XFS_I(inode)->i_ino;
                fid64->gen = inode->i_generation;
                break;
        }
index a14cd89..34817ad 100644 (file)
@@ -150,6 +150,117 @@ xlog_grant_add_space(
        } while (head_val != old);
 }
 
+STATIC bool
+xlog_reserveq_wake(
+       struct log              *log,
+       int                     *free_bytes)
+{
+       struct xlog_ticket      *tic;
+       int                     need_bytes;
+
+       list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+               if (tic->t_flags & XLOG_TIC_PERM_RESERV)
+                       need_bytes = tic->t_unit_res * tic->t_cnt;
+               else
+                       need_bytes = tic->t_unit_res;
+
+               if (*free_bytes < need_bytes)
+                       return false;
+               *free_bytes -= need_bytes;
+
+               trace_xfs_log_grant_wake_up(log, tic);
+               wake_up(&tic->t_wait);
+       }
+
+       return true;
+}
+
+STATIC bool
+xlog_writeq_wake(
+       struct log              *log,
+       int                     *free_bytes)
+{
+       struct xlog_ticket      *tic;
+       int                     need_bytes;
+
+       list_for_each_entry(tic, &log->l_writeq, t_queue) {
+               ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+
+               need_bytes = tic->t_unit_res;
+
+               if (*free_bytes < need_bytes)
+                       return false;
+               *free_bytes -= need_bytes;
+
+               trace_xfs_log_regrant_write_wake_up(log, tic);
+               wake_up(&tic->t_wait);
+       }
+
+       return true;
+}
+
+STATIC int
+xlog_reserveq_wait(
+       struct log              *log,
+       struct xlog_ticket      *tic,
+       int                     need_bytes)
+{
+       list_add_tail(&tic->t_queue, &log->l_reserveq);
+
+       do {
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+               xlog_grant_push_ail(log, need_bytes);
+
+               XFS_STATS_INC(xs_sleep_logspace);
+               trace_xfs_log_grant_sleep(log, tic);
+
+               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+               trace_xfs_log_grant_wake(log, tic);
+
+               spin_lock(&log->l_grant_reserve_lock);
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+       } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+
+       list_del_init(&tic->t_queue);
+       return 0;
+shutdown:
+       list_del_init(&tic->t_queue);
+       return XFS_ERROR(EIO);
+}
+
+STATIC int
+xlog_writeq_wait(
+       struct log              *log,
+       struct xlog_ticket      *tic,
+       int                     need_bytes)
+{
+       list_add_tail(&tic->t_queue, &log->l_writeq);
+
+       do {
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+               xlog_grant_push_ail(log, need_bytes);
+
+               XFS_STATS_INC(xs_sleep_logspace);
+               trace_xfs_log_regrant_write_sleep(log, tic);
+
+               xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+               trace_xfs_log_regrant_write_wake(log, tic);
+
+               spin_lock(&log->l_grant_write_lock);
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+       } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+
+       list_del_init(&tic->t_queue);
+       return 0;
+shutdown:
+       list_del_init(&tic->t_queue);
+       return XFS_ERROR(EIO);
+}
+
 static void
 xlog_tic_reset_res(xlog_ticket_t *tic)
 {
@@ -350,8 +461,19 @@ xfs_log_reserve(
                retval = xlog_grant_log_space(log, internal_ticket);
        }
 
+       if (unlikely(retval)) {
+               /*
+                * If we are failing, make sure the ticket doesn't have any
+                * current reservations.  We don't want to add this back
+                * when the ticket/transaction gets cancelled.
+                */
+               internal_ticket->t_curr_res = 0;
+               /* ungrant will give back unit_res * t_cnt. */
+               internal_ticket->t_cnt = 0;
+       }
+
        return retval;
-}      /* xfs_log_reserve */
+}
 
 
 /*
@@ -2481,8 +2603,8 @@ restart:
 /*
  * Atomically get the log space required for a log ticket.
  *
- * Once a ticket gets put onto the reserveq, it will only return after
- * the needed reservation is satisfied.
+ * Once a ticket gets put onto the reserveq, it will only return after the
+ * needed reservation is satisfied.
  *
  * This function is structured so that it has a lock free fast path. This is
  * necessary because every new transaction reservation will come through this
@@ -2490,113 +2612,53 @@ restart:
  * every pass.
  *
  * As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going
- * to add the ticket to the queue and sleep. We can avoid taking the lock if the
- * ticket was never added to the reserveq because the t_queue list head will be
- * empty and we hold the only reference to it so it can safely be checked
- * unlocked.
+ * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
+ * was never added to the reserveq because the t_queue list head will be empty
+ * and we hold the only reference to it so it can safely be checked unlocked.
  */
 STATIC int
-xlog_grant_log_space(xlog_t       *log,
-                    xlog_ticket_t *tic)
+xlog_grant_log_space(
+       struct log              *log,
+       struct xlog_ticket      *tic)
 {
-       int              free_bytes;
-       int              need_bytes;
+       int                     free_bytes, need_bytes;
+       int                     error = 0;
 
-#ifdef DEBUG
-       if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-               panic("grant Recovery problem");
-#endif
+       ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
        trace_xfs_log_grant_enter(log, tic);
 
+       /*
+        * If there are other waiters on the queue then give them a chance at
+        * logspace before us.  Wake up the first waiters; if we do not wake
+        * up all the waiters, then go to sleep waiting for more free space;
+        * otherwise, try to get some space for this transaction.
+        */
        need_bytes = tic->t_unit_res;
        if (tic->t_flags & XFS_LOG_PERM_RESERV)
                need_bytes *= tic->t_ocnt;
-
-       /* something is already sleeping; insert new transaction at end */
-       if (!list_empty_careful(&log->l_reserveq)) {
-               spin_lock(&log->l_grant_reserve_lock);
-               /* recheck the queue now we are locked */
-               if (list_empty(&log->l_reserveq)) {
-                       spin_unlock(&log->l_grant_reserve_lock);
-                       goto redo;
-               }
-               list_add_tail(&tic->t_queue, &log->l_reserveq);
-
-               trace_xfs_log_grant_sleep1(log, tic);
-
-               /*
-                * Gotta check this before going to sleep, while we're
-                * holding the grant lock.
-                */
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
-               /*
-                * If we got an error, and the filesystem is shutting down,
-                * we'll catch it down below. So just continue...
-                */
-               trace_xfs_log_grant_wake1(log, tic);
-       }
-
-redo:
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
-
        free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
-       if (free_bytes < need_bytes) {
+       if (!list_empty_careful(&log->l_reserveq)) {
                spin_lock(&log->l_grant_reserve_lock);
-               if (list_empty(&tic->t_queue))
-                       list_add_tail(&tic->t_queue, &log->l_reserveq);
-
-               trace_xfs_log_grant_sleep2(log, tic);
-
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               xlog_grant_push_ail(log, need_bytes);
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
-               trace_xfs_log_grant_wake2(log, tic);
-               goto redo;
-       }
-
-       if (!list_empty(&tic->t_queue)) {
+               if (!xlog_reserveq_wake(log, &free_bytes) ||
+                   free_bytes < need_bytes)
+                       error = xlog_reserveq_wait(log, tic, need_bytes);
+               spin_unlock(&log->l_grant_reserve_lock);
+       } else if (free_bytes < need_bytes) {
                spin_lock(&log->l_grant_reserve_lock);
-               list_del_init(&tic->t_queue);
+               error = xlog_reserveq_wait(log, tic, need_bytes);
                spin_unlock(&log->l_grant_reserve_lock);
        }
+       if (error)
+               return error;
 
-       /* we've got enough space */
        xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
        xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
        trace_xfs_log_grant_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;
-
-error_return_unlocked:
-       spin_lock(&log->l_grant_reserve_lock);
-error_return:
-       list_del_init(&tic->t_queue);
-       spin_unlock(&log->l_grant_reserve_lock);
-       trace_xfs_log_grant_error(log, tic);
-
-       /*
-        * If we are failing, make sure the ticket doesn't have any
-        * current reservations. We don't want to add this back when
-        * the ticket/transaction gets cancelled.
-        */
-       tic->t_curr_res = 0;
-       tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-       return XFS_ERROR(EIO);
-}      /* xlog_grant_log_space */
-
+}
 
 /*
  * Replenish the byte reservation required by moving the grant write head.
@@ -2605,10 +2667,12 @@ error_return:
  * free fast path.
  */
 STATIC int
-xlog_regrant_write_log_space(xlog_t       *log,
-                            xlog_ticket_t *tic)
+xlog_regrant_write_log_space(
+       struct log              *log,
+       struct xlog_ticket      *tic)
 {
-       int             free_bytes, need_bytes;
+       int                     free_bytes, need_bytes;
+       int                     error = 0;
 
        tic->t_curr_res = tic->t_unit_res;
        xlog_tic_reset_res(tic);
@@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t       *log,
        if (tic->t_cnt > 0)
                return 0;
 
-#ifdef DEBUG
-       if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-               panic("regrant Recovery problem");
-#endif
+       ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
        trace_xfs_log_regrant_write_enter(log, tic);
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
 
-       /* If there are other waiters on the queue then give them a
-        * chance at logspace before us. Wake up the first waiters,
-        * if we do not wake up all the waiters then go to sleep waiting
-        * for more free space, otherwise try to get some space for
-        * this transaction.
+       /*
+        * If there are other waiters on the queue then give them a chance at
+        * logspace before us.  Wake up the first waiters; if we do not wake
+        * up all the waiters, go to sleep waiting for more free space;
+        * otherwise try to get some space for this transaction.
         */
        need_bytes = tic->t_unit_res;
-       if (!list_empty_careful(&log->l_writeq)) {
-               struct xlog_ticket *ntic;
-
-               spin_lock(&log->l_grant_write_lock);
-               free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-               list_for_each_entry(ntic, &log->l_writeq, t_queue) {
-                       ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
-
-                       if (free_bytes < ntic->t_unit_res)
-                               break;
-                       free_bytes -= ntic->t_unit_res;
-                       wake_up(&ntic->t_wait);
-               }
-
-               if (ntic != list_first_entry(&log->l_writeq,
-                                               struct xlog_ticket, t_queue)) {
-                       if (list_empty(&tic->t_queue))
-                               list_add_tail(&tic->t_queue, &log->l_writeq);
-                       trace_xfs_log_regrant_write_sleep1(log, tic);
-
-                       xlog_grant_push_ail(log, need_bytes);
-
-                       XFS_STATS_INC(xs_sleep_logspace);
-                       xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-                       trace_xfs_log_regrant_write_wake1(log, tic);
-               } else
-                       spin_unlock(&log->l_grant_write_lock);
-       }
-
-redo:
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
-
        free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-       if (free_bytes < need_bytes) {
+       if (!list_empty_careful(&log->l_writeq)) {
                spin_lock(&log->l_grant_write_lock);
-               if (list_empty(&tic->t_queue))
-                       list_add_tail(&tic->t_queue, &log->l_writeq);
-
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               xlog_grant_push_ail(log, need_bytes);
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               trace_xfs_log_regrant_write_sleep2(log, tic);
-               xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-
-               trace_xfs_log_regrant_write_wake2(log, tic);
-               goto redo;
-       }
-
-       if (!list_empty(&tic->t_queue)) {
+               if (!xlog_writeq_wake(log, &free_bytes) ||
+                   free_bytes < need_bytes)
+                       error = xlog_writeq_wait(log, tic, need_bytes);
+               spin_unlock(&log->l_grant_write_lock);
+       } else if (free_bytes < need_bytes) {
                spin_lock(&log->l_grant_write_lock);
-               list_del_init(&tic->t_queue);
+               error = xlog_writeq_wait(log, tic, need_bytes);
                spin_unlock(&log->l_grant_write_lock);
        }
 
-       /* we've got enough space */
+       if (error)
+               return error;
+
        xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
        trace_xfs_log_regrant_write_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;
-
-
- error_return_unlocked:
-       spin_lock(&log->l_grant_write_lock);
- error_return:
-       list_del_init(&tic->t_queue);
-       spin_unlock(&log->l_grant_write_lock);
-       trace_xfs_log_regrant_write_error(log, tic);
-
-       /*
-        * If we are failing, make sure the ticket doesn't have any
-        * current reservations. We don't want to add this back when
-        * the ticket/transaction gets cancelled.
-        */
-       tic->t_curr_res = 0;
-       tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-       return XFS_ERROR(EIO);
-}      /* xlog_regrant_write_log_space */
-
+}
 
 /* The first cnt-1 times through here we don't need to
  * move the grant write head because the permanent
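
The rewritten grant paths above move the queue handling into xlog_reserveq_wake()/xlog_reserveq_wait() style helpers, which removes the duplicated redo: retry loops. What follows is a minimal userspace sketch of the FIFO wake pattern those helpers appear to implement; the struct, field, and function names are made up for illustration and are not the XFS definitions.

#include <stdbool.h>
#include <stdio.h>

struct ticket {
        int need_bytes;   /* log space this waiter requires */
        bool woken;       /* set once the waiter may proceed */
};

/* Wake queued tickets in FIFO order while free space lasts.
 * Returns true only if every queued ticket could be woken. */
static bool queue_wake(struct ticket *q, int n, int *free_bytes)
{
        for (int i = 0; i < n; i++) {
                if (q[i].need_bytes > *free_bytes)
                        return false;   /* head of the queue stays blocked */
                *free_bytes -= q[i].need_bytes;
                q[i].woken = true;
        }
        return true;
}

int main(void)
{
        struct ticket q[] = { { 100, false }, { 300, false }, { 200, false } };
        int free_bytes = 350;

        /* Only the first ticket fits; a caller in that situation would then
         * wait, much like the new xlog_*q_wait() helpers do. */
        bool all = queue_wake(q, 3, &free_bytes);
        printf("all woken: %d, free left: %d\n", all, free_bytes);
        return 0;
}
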
index f1d2802..4940357 100644 (file)
@@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
index f4c38d8..2292d1a 100644 (file)
@@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
 __SYSCALL(__NR_setns, sys_setns)
 #define __NR_sendmmsg 269
 __SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+          compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+          compat_sys_process_vm_writev)
 
 #undef __NR_syscalls
-#define __NR_syscalls 270
+#define __NR_syscalls 272
 
 /*
  * All syscalls below here should go away really,
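
As a usage illustration for the two syscalls wired up above, here is a hedged userspace sketch of process_vm_readv(). It assumes a glibc new enough to provide the wrapper (otherwise syscall(__NR_process_vm_readv, ...) would be needed) and simply reads a buffer from the calling process itself; buffer names and sizes are illustrative.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char src[64] = "hello from the same address space";
        char dst[64] = { 0 };

        struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
        struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

        /* Reading from our own pid keeps the example self-contained; a
         * debugger-style user would pass another process's pid here. */
        ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
        if (n < 0) {
                perror("process_vm_readv");
                return 1;
        }
        printf("copied %zd bytes: %s\n", n, dst);
        return 0;
}
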
index 4e4fbb8..14b6cd0 100644 (file)
        {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0, 0, 0}
 
 #define r128_PCI_IDS \
index c7a6d3b..94acd81 100644 (file)
@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
  */
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
                                        spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
-                                                          request_fn_proc *,
-                                                          spinlock_t *, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
                                                      request_fn_proc *, spinlock_t *);
index c86c940..081147d 100644 (file)
@@ -71,7 +71,7 @@ struct timecounter {
 
 /**
  * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @tc:                Pointer to cycle counter.
+ * @cc:                Pointer to cycle counter.
  * @cycles:    Cycles
  *
  * XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc);
  *                        time base as values returned by
  *                        timecounter_read()
  * @tc:                Pointer to time counter.
- * @cycle:     a value returned by tc->cc->read()
+ * @cycle_tstamp:      a value returned by tc->cc->read()
  *
  * Cycle counts that are converted correctly as long as they
  * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,11 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * @mult:              cycle to nanosecond multiplier
  * @shift:             cycle to nanosecond divisor (power of two)
  * @max_idle_ns:       max idle time permitted by the clocksource (nsecs)
- * @maxadj             maximum adjustment value to mult (~11%)
+ * @maxadj:            maximum adjustment value to mult (~11%)
  * @flags:             flags describing special properties
  * @archdata:          arch-specific data
  * @suspend:           suspend function for the clocksource, if necessary
  * @resume:            resume function for the clocksource, if necessary
+ * @cycle_last:                most recent cycle counter value seen by ::read()
  */
 struct clocksource {
        /*
@@ -187,6 +188,7 @@ struct clocksource {
        void (*suspend)(struct clocksource *cs);
        void (*resume)(struct clocksource *cs);
 
+       /* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
        /* Watchdog related data, used by the framework */
        struct list_head wd_list;
@@ -261,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 
 /**
  * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles:    cycles
+ * @mult:      cycle to nanosecond multiplier
+ * @shift:     cycle to nanosecond divisor (power of two)
  *
  * Converts cycles to nanoseconds, using the given mult and shift.
  *
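
Since the kerneldoc fixed above documents the cycles-to-nanoseconds conversion, a tiny standalone sketch of the same fixed-point arithmetic may help: ns = (cycles * mult) >> shift. The mult/shift pair below is an assumption chosen so that one cycle corresponds to exactly one nanosecond (a 1 GHz counter).

#include <stdint.h>
#include <stdio.h>

static inline uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        /* Fixed-point multiply, then shift back down. */
        return (cycles * mult) >> shift;
}

int main(void)
{
        /* mult = 2^24, shift = 24 models a counter running at 1 GHz. */
        printf("%llu ns\n", (unsigned long long)cyc2ns(1000, 1u << 24, 24));
        return 0;
}
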
index 154bf56..66ed067 100644 (file)
@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
 
 extern void __user *compat_alloc_user_space(unsigned long len);
 
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+               const struct compat_iovec __user *lvec,
+               unsigned long liovcnt, const struct compat_iovec __user *rvec,
+               unsigned long riovcnt, unsigned long flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+               const struct compat_iovec __user *lvec,
+               unsigned long liovcnt, const struct compat_iovec __user *rvec,
+               unsigned long riovcnt, unsigned long flags);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
index 4df9261..ed9f74f 100644 (file)
@@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *);
  */
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
 
-extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
 extern char *d_path(const struct path *, char *, int);
 extern char *d_path_with_unreachable(const struct path *, char *, int);
 extern char *dentry_path_raw(struct dentry *, char *, int);
index ef90cbd..57c9a8a 100644 (file)
@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
+extern int intel_iommu_enabled;
 #else
 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
 {
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
 {
 }
 #define dmar_disabled  (1)
+#define intel_iommu_enabled (0)
 #endif
 
 
index e313022..e0bc4ff 100644 (file)
@@ -393,8 +393,8 @@ struct inodes_stat_t {
 #include <linux/semaphore.h>
 #include <linux/fiemap.h>
 #include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
 #include <linux/atomic.h>
+#include <linux/shrinker.h>
 
 #include <asm/byteorder.h>
 
@@ -1942,6 +1942,7 @@ extern int fd_statfs(int, struct kstatfs *);
 extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
 
 extern int current_umask(void);
 
index f549056..87f402c 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
 #define br_lock_init(name)     name##_lock_init()
 
 #define DEFINE_LGLOCK(name)                                            \
                                                                        \
+ DEFINE_SPINLOCK(name##_cpu_lock);                                     \
+ cpumask_t name##_cpus __read_mostly;                                  \
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
  DEFINE_LGLOCK_LOCKDEP(name);                                          \
                                                                        \
+ static int                                                            \
+ name##_lg_cpu_callback(struct notifier_block *nb,                     \
+                               unsigned long action, void *hcpu)       \
+ {                                                                     \
+       switch (action & ~CPU_TASKS_FROZEN) {                           \
+       case CPU_UP_PREPARE:                                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_set((unsigned long)hcpu, name##_cpus);              \
+               spin_unlock(&name##_cpu_lock);                          \
+               break;                                                  \
+       case CPU_UP_CANCELED: case CPU_DEAD:                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_clear((unsigned long)hcpu, name##_cpus);            \
+               spin_unlock(&name##_cpu_lock);                          \
+       }                                                               \
+       return NOTIFY_OK;                                               \
+ }                                                                     \
+ static struct notifier_block name##_lg_cpu_notifier = {               \
+       .notifier_call = name##_lg_cpu_callback,                        \
+ };                                                                    \
  void name##_lock_init(void) {                                         \
        int i;                                                          \
        LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
                lock = &per_cpu(name##_lock, i);                        \
                *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
        }                                                               \
+       register_hotcpu_notifier(&name##_lg_cpu_notifier);              \
+       get_online_cpus();                                              \
+       for_each_online_cpu(i)                                          \
+               cpu_set(i, name##_cpus);                                \
+       put_online_cpus();                                              \
  }                                                                     \
  EXPORT_SYMBOL(name##_lock_init);                                      \
                                                                        \
                                                                        \
  void name##_global_lock_online(void) {                                        \
        int i;                                                          \
-       preempt_disable();                                              \
+       spin_lock(&name##_cpu_lock);                                    \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock_online(void) {                              \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
        }                                                               \
-       preempt_enable();                                               \
+       spin_unlock(&name##_cpu_lock);                                  \
  }                                                                     \
  EXPORT_SYMBOL(name##_global_unlock_online);                           \
                                                                        \
index 25b8086..fd7ff3d 100644 (file)
@@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 #define rounddown_pow_of_two(n)                        \
 (                                              \
        __builtin_constant_p(n) ? (             \
-               (n == 1) ? 0 :                  \
                (1UL << ilog2(n))) :            \
        __rounddown_pow_of_two(n)               \
  )
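
The one-line removal above means rounddown_pow_of_two(1) now evaluates to 1 (1UL << ilog2(1)) rather than 0. A small userspace re-implementation, written only for illustration and not taken from the kernel headers, shows the intended semantics:

#include <stdio.h>

static unsigned long rounddown_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        /* Largest power of two that does not exceed n (n >= 1). */
        while (p * 2 <= n && p * 2 != 0)
                p *= 2;
        return p;
}

int main(void)
{
        printf("%lu %lu %lu\n",
               rounddown_pow_of_two(1),   /* 1, not 0 */
               rounddown_pow_of_two(7),   /* 4 */
               rounddown_pow_of_two(64)); /* 64 */
        return 0;
}
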
index 3dc3a8c..4baadd1 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
+#include <linux/atomic.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
index 415f2db..c8ef9bc 100644 (file)
@@ -218,6 +218,7 @@ struct mmc_card {
 #define MMC_QUIRK_INAND_CMD38  (1<<6)          /* iNAND devices have broken CMD38 */
 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7)          /* Avoid CMD23 for regular multiblock */
 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8)  /* Avoid sending 512 bytes in */
+#define MMC_QUIRK_LONG_READ_TIME (1<<9)                /* Data read time > CSD says */
                                                /* byte mode */
        unsigned int    poweroff_notify_state;  /* eMMC4.5 notify feature */
 #define MMC_NO_POWER_NOTIFICATION      0
@@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
        return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
 }
 
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+       return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
 #define mmc_card_name(c)       ((c)->cid.prod_name)
 #define mmc_card_id(c)         (dev_name(&(c)->dev))
 
index a83833a..07ceb97 100644 (file)
@@ -35,7 +35,7 @@ struct shrinker {
 
        /* These are for internal use */
        struct list_head list;
-       long nr;        /* objs pending delete */
+       atomic_long_t nr_in_batch; /* objs pending delete */
 };
 #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
 extern void register_shrinker(struct shrinker *);
index b1377b9..5fb2c3d 100644 (file)
@@ -254,7 +254,7 @@ unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
 static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client)
 {
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        return icd ? icd->vdev : NULL;
 }
 
@@ -279,6 +279,11 @@ static inline struct soc_camera_device *soc_camera_from_vbq(const struct videobu
        return container_of(vq, struct soc_camera_device, vb_vidq);
 }
 
+static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
+{
+       return (icd->iface << 8) | (icd->devnum + 1);
+}
+
 void soc_camera_lock(struct vb2_queue *vq);
 void soc_camera_unlock(struct vb2_queue *vq);
 
index 6faec1a..75766b4 100644 (file)
@@ -53,6 +53,7 @@ struct dst_entry {
 #define DST_NOHASH             0x0008
 #define DST_NOCACHE            0x0010
 #define DST_NOCOUNT            0x0020
+#define DST_NOPEER             0x0040
 
        short                   error;
        short                   obsolete;
index a094477..57f15a7 100644 (file)
@@ -207,6 +207,7 @@ extern struct flow_cache_object *flow_cache_lookup(
                u8 dir, flow_resolve_t resolver, void *ctx);
 
 extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
index e90e7a9..a15432d 100644 (file)
@@ -241,6 +241,9 @@ extern struct sctp_globals {
         * bits is an indicator of when to send and window update SACK.
         */
        int rwnd_update_shift;
+
+       /* Threshold for autoclose timeout, in seconds. */
+       unsigned long max_autoclose;
 } sctp_globals;
 
 #define sctp_rto_initial               (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
 #define sctp_auth_enable               (sctp_globals.auth_enable)
 #define sctp_checksum_disable          (sctp_globals.checksum_disable)
 #define sctp_rwnd_upd_shift            (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose             (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
index abb6e0f..32e3937 100644 (file)
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take this skb's truesize into account,
+ * so that even a single big packet can come in.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-       return qsize + skb->truesize > sk->sk_rcvbuf;
+       return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
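
A hedged sketch of the sk_rcvqueues_full() change above, using simplified stand-in types rather than struct sock: the incoming packet's truesize no longer counts toward the limit, so one oversized packet can still be queued when the receive queue is otherwise empty.

#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
        unsigned int backlog_len;   /* stands in for sk_backlog.len */
        unsigned int rmem_alloc;    /* stands in for sk_rmem_alloc */
        unsigned int rcvbuf;        /* stands in for sk_rcvbuf */
};

static bool rcvqueues_full(const struct fake_sock *sk, unsigned int skb_truesize)
{
        unsigned int qsize = sk->backlog_len + sk->rmem_alloc;

        (void)skb_truesize;   /* intentionally ignored after the fix */
        return qsize > sk->rcvbuf;
}

int main(void)
{
        struct fake_sock sk = { .backlog_len = 0, .rmem_alloc = 0, .rcvbuf = 4096 };

        /* An 8 KiB packet on an empty queue is now accepted (prints 0). */
        printf("full: %d\n", rcvqueues_full(&sk, 8192));
        return 0;
}
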
index d1e95c6..5a35a2a 100644 (file)
@@ -147,6 +147,7 @@ struct fcoe_ctlr {
        u8 map_dest;
        u8 spma;
        u8 probe_tries;
+       u8 priority;
        u8 dest_addr[ETH_ALEN];
        u8 ctl_src_addr[ETH_ALEN];
 
@@ -301,6 +302,7 @@ struct fcoe_percpu_s {
  * @lport:                    The associated local port
  * @fcoe_pending_queue:               The pending Rx queue of skbs
  * @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @priority:                 Packet priority (DCB)
  * @max_queue_depth:          Max queue depth of pending queue
  * @min_queue_depth:          Min queue depth of pending queue
  * @timer:                    The queue timer
@@ -316,6 +318,7 @@ struct fcoe_port {
        struct fc_lport       *lport;
        struct sk_buff_head   fcoe_pending_queue;
        u8                    fcoe_pending_queue_active;
+       u8                    priority;
        u32                   max_queue_depth;
        u32                   min_queue_depth;
        struct timer_list     timer;
index 7f5fed3..6873c7d 100644 (file)
@@ -103,9 +103,10 @@ enum se_cmd_flags_table {
        SCF_SCSI_NON_DATA_CDB           = 0x00000040,
        SCF_SCSI_CDB_EXCEPTION          = 0x00000080,
        SCF_SCSI_RESERVATION_CONFLICT   = 0x00000100,
-       SCF_SE_CMD_FAILED               = 0x00000400,
+       SCF_FUA                         = 0x00000200,
        SCF_SE_LUN_CMD                  = 0x00000800,
        SCF_SE_ALLOW_EOO                = 0x00001000,
+       SCF_BIDI                        = 0x00002000,
        SCF_SENT_CHECK_CONDITION        = 0x00004000,
        SCF_OVERFLOW_BIT                = 0x00008000,
        SCF_UNDERFLOW_BIT               = 0x00010000,
@@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
        TCM_CHECK_CONDITION_ABORT_CMD           = 0x0d,
        TCM_CHECK_CONDITION_UNIT_ATTENTION      = 0x0e,
        TCM_CHECK_CONDITION_NOT_READY           = 0x0f,
+       TCM_RESERVATION_CONFLICT                = 0x10,
 };
 
 struct se_obj {
@@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
        u16     lu_gp_id;
        int     lu_gp_valid_id;
        u32     lu_gp_members;
-       atomic_t lu_gp_shutdown;
        atomic_t lu_gp_ref_cnt;
        spinlock_t lu_gp_lock;
        struct config_group lu_gp_group;
@@ -422,11 +423,9 @@ struct se_cmd {
        int                     sam_task_attr;
        /* Transport protocol dependent state, see transport_state_table */
        enum transport_state_table t_state;
-       /* Transport specific error status */
-       int                     transport_error_status;
        /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
-       int                     check_release:1;
-       int                     cmd_wait_set:1;
+       unsigned                check_release:1;
+       unsigned                cmd_wait_set:1;
        /* See se_cmd_flags_table */
        u32                     se_cmd_flags;
        u32                     se_ordered_id;
@@ -441,13 +440,10 @@ struct se_cmd {
        /* Used for sense data */
        void                    *sense_buffer;
        struct list_head        se_delayed_node;
-       struct list_head        se_ordered_node;
        struct list_head        se_lun_node;
        struct list_head        se_qf_node;
        struct se_device      *se_dev;
        struct se_dev_entry   *se_deve;
-       struct se_device        *se_obj_ptr;
-       struct se_device        *se_orig_obj_ptr;
        struct se_lun           *se_lun;
        /* Only used for internal passthrough and legacy TCM fabric modules */
        struct se_session       *se_sess;
@@ -463,8 +459,6 @@ struct se_cmd {
        unsigned char           __t_task_cdb[TCM_MAX_COMMAND_SIZE];
        unsigned long long      t_task_lba;
        int                     t_tasks_failed;
-       int                     t_tasks_fua;
-       bool                    t_tasks_bidi;
        u32                     t_tasks_sg_chained_no;
        atomic_t                t_fe_count;
        atomic_t                t_se_count;
@@ -489,14 +483,6 @@ struct se_cmd {
 
        struct work_struct      work;
 
-       /*
-        * Used for pre-registered fabric SGL passthrough WRITE and READ
-        * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
-        * and other HW target mode fabric modules.
-        */
-       struct scatterlist      *t_task_pt_sgl;
-       u32                     t_task_pt_sgl_num;
-
        struct scatterlist      *t_data_sg;
        unsigned int            t_data_nents;
        struct scatterlist      *t_bidi_data_sg;
@@ -562,7 +548,7 @@ struct se_node_acl {
 } ____cacheline_aligned;
 
 struct se_session {
-       int                     sess_tearing_down:1;
+       unsigned                sess_tearing_down:1;
        u64                     sess_bin_isid;
        struct se_node_acl      *se_node_acl;
        struct se_portal_group *se_tpg;
@@ -683,7 +669,6 @@ struct se_subsystem_dev {
        struct t10_reservation t10_pr;
        spinlock_t      se_dev_lock;
        void            *se_dev_su_ptr;
-       struct list_head se_dev_node;
        struct config_group se_dev_group;
        /* For T10 Reservations */
        struct config_group se_dev_pr_group;
@@ -692,9 +677,6 @@ struct se_subsystem_dev {
 } ____cacheline_aligned;
 
 struct se_device {
-       /* Set to 1 if thread is NOT sleeping on thread_sem */
-       u8                      thread_active;
-       u8                      dev_status_timer_flags;
        /* RELATIVE TARGET PORT IDENTIFER Counter */
        u16                     dev_rpti_counter;
        /* Used for SAM Task Attribute ordering */
@@ -719,14 +701,10 @@ struct se_device {
        u64                     write_bytes;
        spinlock_t              stats_lock;
        /* Active commands on this virtual SE device */
-       atomic_t                active_cmds;
        atomic_t                simple_cmds;
        atomic_t                depth_left;
        atomic_t                dev_ordered_id;
-       atomic_t                dev_tur_active;
        atomic_t                execute_tasks;
-       atomic_t                dev_status_thr_count;
-       atomic_t                dev_hoq_count;
        atomic_t                dev_ordered_sync;
        atomic_t                dev_qf_count;
        struct se_obj           dev_obj;
@@ -734,14 +712,9 @@ struct se_device {
        struct se_obj           dev_export_obj;
        struct se_queue_obj     dev_queue_obj;
        spinlock_t              delayed_cmd_lock;
-       spinlock_t              ordered_cmd_lock;
        spinlock_t              execute_task_lock;
-       spinlock_t              state_task_lock;
-       spinlock_t              dev_alua_lock;
        spinlock_t              dev_reservation_lock;
-       spinlock_t              dev_state_lock;
        spinlock_t              dev_status_lock;
-       spinlock_t              dev_status_thr_lock;
        spinlock_t              se_port_lock;
        spinlock_t              se_tmr_lock;
        spinlock_t              qf_cmd_lock;
@@ -753,14 +726,10 @@ struct se_device {
        struct t10_pr_registration *dev_pr_res_holder;
        struct list_head        dev_sep_list;
        struct list_head        dev_tmr_list;
-       struct timer_list       dev_status_timer;
        /* Pointer to descriptor for processing thread */
        struct task_struct      *process_thread;
-       pid_t                   process_thread_pid;
-       struct task_struct              *dev_mgmt_thread;
        struct work_struct      qf_work_queue;
        struct list_head        delayed_cmd_list;
-       struct list_head        ordered_cmd_list;
        struct list_head        execute_task_list;
        struct list_head        state_task_list;
        struct list_head        qf_cmd_list;
@@ -771,8 +740,6 @@ struct se_device {
        struct se_subsystem_api *transport;
        /* Linked list for struct se_hba struct se_device list */
        struct list_head        dev_list;
-       /* Linked list for struct se_global->g_se_dev_list */
-       struct list_head        g_se_dev_list;
 }  ____cacheline_aligned;
 
 struct se_hba {
@@ -834,7 +801,6 @@ struct se_port {
        u32             sep_index;
        struct scsi_port_stats sep_stats;
        /* Used for ALUA Target Port Groups membership */
-       atomic_t        sep_tg_pt_gp_active;
        atomic_t        sep_tg_pt_secondary_offline;
        /* Used for PR ALL_TG_PT=1 */
        atomic_t        sep_tg_pt_ref_cnt;
index c16e943..dac4f2d 100644 (file)
 
 #define PYX_TRANSPORT_STATUS_INTERVAL          5 /* In seconds */
 
-#define PYX_TRANSPORT_SENT_TO_TRANSPORT                0
-#define PYX_TRANSPORT_WRITE_PENDING            1
-
-#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE       -1
-#define PYX_TRANSPORT_HBA_QUEUE_FULL           -2
-#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS     -3
-#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES  -4
-#define PYX_TRANSPORT_INVALID_CDB_FIELD                -5
-#define PYX_TRANSPORT_INVALID_PARAMETER_LIST   -6
-#define PYX_TRANSPORT_LU_COMM_FAILURE          -7
-#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE                -8
-#define PYX_TRANSPORT_WRITE_PROTECTED          -9
-#define PYX_TRANSPORT_RESERVATION_CONFLICT     -10
-#define PYX_TRANSPORT_ILLEGAL_REQUEST          -11
-#define PYX_TRANSPORT_USE_SENSE_REASON         -12
-
-#ifndef SAM_STAT_RESERVATION_CONFLICT
-#define SAM_STAT_RESERVATION_CONFLICT          0x18
-#endif
-
-#define TRANSPORT_PLUGIN_FREE                  0
-#define TRANSPORT_PLUGIN_REGISTERED            1
-
 #define TRANSPORT_PLUGIN_PHBA_PDEV             1
 #define TRANSPORT_PLUGIN_VHBA_PDEV             2
 #define TRANSPORT_PLUGIN_VHBA_VDEV             3
@@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
 extern int transport_handle_cdb_direct(struct se_cmd *);
 extern int transport_generic_handle_cdb_map(struct se_cmd *);
 extern int transport_generic_handle_data(struct se_cmd *);
-extern void transport_new_cmd_failure(struct se_cmd *);
 extern int transport_generic_handle_tmr(struct se_cmd *);
 extern bool target_stop_task(struct se_task *task, unsigned long *flags);
 extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
index b99caa8..99d1d0d 100644 (file)
                {I_REFERENCED,          "I_REFERENCED"}         \
        )
 
+#define WB_WORK_REASON                                                 \
+               {WB_REASON_BACKGROUND,          "background"},          \
+               {WB_REASON_TRY_TO_FREE_PAGES,   "try_to_free_pages"},   \
+               {WB_REASON_SYNC,                "sync"},                \
+               {WB_REASON_PERIODIC,            "periodic"},            \
+               {WB_REASON_LAPTOP_TIMER,        "laptop_timer"},        \
+               {WB_REASON_FREE_MORE_MEM,       "free_more_memory"},    \
+               {WB_REASON_FS_FREE_SPACE,       "fs_free_space"},       \
+               {WB_REASON_FORKER_THREAD,       "forker_thread"}
+
 struct wb_writeback_work;
 
 DECLARE_EVENT_CLASS(writeback_work_class,
@@ -55,7 +65,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
                  __entry->for_kupdate,
                  __entry->range_cyclic,
                  __entry->for_background,
-                 wb_reason_name[__entry->reason]
+                 __print_symbolic(__entry->reason, WB_WORK_REASON)
        )
 );
 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -184,7 +194,8 @@ TRACE_EVENT(writeback_queue_io,
                __entry->older, /* older_than_this in jiffies */
                __entry->age,   /* older_than_this in relative milliseconds */
                __entry->moved,
-               wb_reason_name[__entry->reason])
+               __print_symbolic(__entry->reason, WB_WORK_REASON)
+       )
 );
 
 TRACE_EVENT(global_dirty_state,
index f0b6890..f6f07aa 100644 (file)
@@ -29,8 +29,7 @@ enum xsd_sockmsg_type
     XS_IS_DOMAIN_INTRODUCED,
     XS_RESUME,
     XS_SET_TARGET,
-    XS_RESTRICT,
-    XS_RESET_WATCHES
+    XS_RESTRICT
 };
 
 #define XS_WRITE_NONE "NONE"
index 2e0ecfc..5b4293d 100644 (file)
@@ -1269,7 +1269,7 @@ void mq_clear_sbinfo(struct ipc_namespace *ns)
 
 void mq_put_mnt(struct ipc_namespace *ns)
 {
-       mntput(ns->mq_mnt);
+       kern_unmount(ns->mq_mnt);
 }
 
 static int __init init_mqueue_fs(void)
@@ -1291,11 +1291,9 @@ static int __init init_mqueue_fs(void)
 
        spin_lock_init(&mq_lock);
 
-       init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
-       if (IS_ERR(init_ipc_ns.mq_mnt)) {
-               error = PTR_ERR(init_ipc_ns.mq_mnt);
+       error = mq_init_ns(&init_ipc_ns);
+       if (error)
                goto out_filesystem;
-       }
 
        return 0;
 
index 8b5ce5d..5652101 100644 (file)
@@ -27,11 +27,6 @@ DEFINE_SPINLOCK(mq_lock);
  */
 struct ipc_namespace init_ipc_ns = {
        .count          = ATOMIC_INIT(1),
-#ifdef CONFIG_POSIX_MQUEUE
-       .mq_queues_max   = DFLT_QUEUESMAX,
-       .mq_msg_max      = DFLT_MSGMAX,
-       .mq_msgsize_max  = DFLT_MSGSIZEMAX,
-#endif
        .user_ns = &init_user_ns,
 };
 
index d9d5648..a184470 100644 (file)
@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                        continue;
                /* get old css_set pointer */
                task_lock(tsk);
-               if (tsk->flags & PF_EXITING) {
-                       /* ignore this task if it's going away */
-                       task_unlock(tsk);
-                       continue;
-               }
                oldcg = tsk->cgroups;
                get_css_set(oldcg);
                task_unlock(tsk);
index 9fe58c4..0b1712d 100644 (file)
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
                            struct cpuset, css);
 }
 
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+       return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+       return false;
+}
+#endif
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
        CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
                                        nodemask_t *newmems)
 {
-       bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+       bool need_loop;
 
 repeat:
        /*
@@ -962,6 +975,14 @@ repeat:
                return;
 
        task_lock(tsk);
+       /*
+        * Determine if a loop is necessary if another thread is doing
+        * get_mems_allowed().  If at least one node remains unchanged and
+        * tsk does not have a mempolicy, then an empty nodemask will not be
+        * possible when mems_allowed is larger than a word.
+        */
+       need_loop = task_has_mempolicy(tsk) ||
+                       !nodes_intersects(*newmems, tsk->mems_allowed);
        nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
@@ -981,11 +1002,9 @@ repeat:
 
        /*
         * Allocation of memory is very fast, we needn't sleep when waiting
-        * for the read-side.  No wait is necessary, however, if at least one
-        * node remains unchanged.
+        * for the read-side.
         */
-       while (masks_disjoint &&
-                       ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+       while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
                task_unlock(tsk);
                if (!task_curr(tsk))
                        yield();
index 600c162..58690af 100644 (file)
@@ -2174,11 +2174,11 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       perf_event_sched_in(cpuctx, ctx, task);
-
        if (ctx->nr_events)
                cpuctx->task_ctx = ctx;
 
+       perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
+
        perf_pmu_enable(ctx->pmu);
        perf_ctx_unlock(cpuctx, ctx);
 
@@ -3558,9 +3558,13 @@ static void ring_buffer_wakeup(struct perf_event *event)
 
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
-       list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+       if (!rb)
+               goto unlock;
+
+       list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
                wake_up_all(&event->waitq);
-       }
+
+unlock:
        rcu_read_unlock();
 }
 
index e69434b..b2e08c9 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/sections.h>
 
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       memset(lock, 0, sizeof(*lock));
+       int i;
+
+       kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+       for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+               lock->class_cache[i] = NULL;
 
 #ifdef CONFIG_LOCK_STAT
        lock->cpu = raw_smp_processor_id();
index 1455a0d..7982a0a 100644 (file)
@@ -1293,10 +1293,11 @@ again:
        raw_spin_lock(&logbuf_lock);
        if (con_start != log_end)
                retry = 1;
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
        if (retry && console_trylock())
                goto again;
 
-       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
        if (wake_klogd)
                wake_up_klogd();
 }
index a78ed27..8a39fa3 100644 (file)
@@ -2352,13 +2352,11 @@ again:
                if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
                        continue;
 
-               if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
-                       if (!smt) {
-                               smt = 1;
-                               goto again;
-                       }
+               if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+                       break;
+
+               if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                        break;
-               }
 
                sg = sd->groups;
                do {
@@ -2378,6 +2376,10 @@ next:
                        sg = sg->next;
                } while (sg != sd->groups);
        }
+       if (!smt) {
+               smt = 1;
+               goto again;
+       }
 done:
        rcu_read_unlock();
 
index 6318b51..a650694 100644 (file)
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
 
        fput(file);
 out_putname:
-       putname(pathname);
+       __putname(pathname);
 out:
        return result;
 }
index c436e79..8a46f5d 100644 (file)
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
                struct alarm *alarm;
                ktime_t expired = next->expires;
 
-               if (expired.tv64 >= now.tv64)
+               if (expired.tv64 > now.tv64)
                        break;
 
                alarm = container_of(next, struct alarm, node);
index da2f760..d3ad022 100644 (file)
@@ -647,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
 
 /**
  * __clocksource_updatefreq_scale - Used to update clocksource with new freq
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  * @scale:     Scale factor multiplied against freq to get clocksource hz
  * @freq:      clocksource frequency (cycles per second) divided by scale
  *
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
 
 /**
  * __clocksource_register_scale - Used to install new clocksources
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  * @scale:     Scale factor multiplied against freq to get clocksource hz
  * @freq:      clocksource frequency (cycles per second) divided by scale
  *
@@ -727,7 +727,7 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
 
 /**
  * clocksource_register - Used to install new clocksources
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  *
  * Returns -EBUSY if registration fails, zero otherwise.
  */
@@ -761,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
 
 /**
  * clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs:                clocksource to be changed
+ * @rating:    new rating
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
@@ -772,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
 
 /**
  * clocksource_unregister - remove a registered clocksource
+ * @cs:        clocksource to be unregistered
  */
 void clocksource_unregister(struct clocksource *cs)
 {
@@ -787,6 +790,7 @@ EXPORT_SYMBOL(clocksource_unregister);
 /**
  * sysfs_show_current_clocksources - sysfs interface for current clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing current clocksource.
@@ -807,6 +811,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
 /**
  * sysfs_override_clocksource - interface for manually overriding clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       name of override clocksource
  * @count:     length of buffer
  *
@@ -842,6 +847,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 /**
  * sysfs_show_available_clocksources - sysfs interface for listing clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing registered clocksources
index dbaa624..9c3c62b 100644 (file)
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
        int pid;
 
        rcu_read_lock();
-       pid = task_tgid_vnr(current->real_parent);
+       pid = task_tgid_vnr(rcu_dereference(current->real_parent));
        rcu_read_unlock();
 
        return pid;
index 74c6c7f..fea790a 100644 (file)
@@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 
 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
 {
-       return ((a->dev_addr == a->dev_addr) &&
+       return ((a->dev_addr == b->dev_addr) &&
                (a->dev == b->dev)) ? true : false;
 }
 
index c0018f2..5f0a3c9 100644 (file)
@@ -1828,7 +1828,7 @@ repeat:
                page = __page_cache_alloc(gfp | __GFP_COLD);
                if (!page)
                        return ERR_PTR(-ENOMEM);
-               err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+               err = add_to_page_cache_lru(page, mapping, index, gfp);
                if (unlikely(err)) {
                        page_cache_release(page);
                        if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
  * @gfp:       the page allocator flags to use if allocating
  *
  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
  *
  * If the page does not get brought uptodate, return -EIO.
  */
@@ -2407,7 +2404,6 @@ static ssize_t generic_perform_write(struct file *file,
                                                iov_iter_count(i));
 
 again:
-
                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
@@ -2463,7 +2459,10 @@ again:
                written += copied;
 
                balance_dirty_pages_ratelimited(mapping);
-
+               if (fatal_signal_pending(current)) {
+                       status = -EINTR;
+                       break;
+               }
        } while (iov_iter_count(i));
 
        return written ? written : status;
index 4298aba..36b3d98 100644 (file)
@@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage)
 
 static void khugepaged_alloc_sleep(void)
 {
-       DEFINE_WAIT(wait);
-       add_wait_queue(&khugepaged_wait, &wait);
-       schedule_timeout_interruptible(
-               msecs_to_jiffies(
-                       khugepaged_alloc_sleep_millisecs));
-       remove_wait_queue(&khugepaged_wait, &wait);
+       wait_event_freezable_timeout(khugepaged_wait, false,
+                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 }
 
 #ifndef CONFIG_NUMA
@@ -2313,14 +2309,10 @@ static void khugepaged_loop(void)
                if (unlikely(kthread_should_stop()))
                        break;
                if (khugepaged_has_work()) {
-                       DEFINE_WAIT(wait);
                        if (!khugepaged_scan_sleep_millisecs)
                                continue;
-                       add_wait_queue(&khugepaged_wait, &wait);
-                       schedule_timeout_interruptible(
-                               msecs_to_jiffies(
-                                       khugepaged_scan_sleep_millisecs));
-                       remove_wait_queue(&khugepaged_wait, &wait);
+                       wait_event_freezable_timeout(khugepaged_wait, false,
+                           msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
                } else if (khugepaged_enabled())
                        wait_event_freezable(khugepaged_wait,
                                             khugepaged_wait_event());
index bb28a5f..73f17c0 100644 (file)
@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
index 6aff93c..b63f5f7 100644 (file)
@@ -4907,9 +4907,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                int cpu;
                enable_swap_cgroup();
                parent = NULL;
-               root_mem_cgroup = memcg;
                if (mem_cgroup_soft_limit_tree_init())
                        goto free_out;
+               root_mem_cgroup = memcg;
                for_each_possible_cpu(cpu) {
                        struct memcg_stock_pcp *stock =
                                                &per_cpu(memcg_stock, cpu);
@@ -4948,7 +4948,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        return &memcg->css;
 free_out:
        __mem_cgroup_free(memcg);
-       root_mem_cgroup = NULL;
        return ERR_PTR(error);
 }
 
index 578e291..177aca4 100644 (file)
@@ -871,9 +871,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        if (anon_vma)
                put_anon_vma(anon_vma);
-out:
        unlock_page(hpage);
 
+out:
        if (rc != -EAGAIN) {
                list_del(&hpage->lru);
                put_page(hpage);
index 76f2c5a..069b64e 100644 (file)
@@ -176,7 +176,7 @@ static bool oom_unkillable_task(struct task_struct *p,
 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
                      const nodemask_t *nodemask, unsigned long totalpages)
 {
-       int points;
+       long points;
 
        if (oom_unkillable_task(p, mem, nodemask))
                return 0;
index 7125248..50f0824 100644 (file)
@@ -411,8 +411,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
  *
  * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
  * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- * And the "limit" in the name is not seriously taken as hard limit in
- * balance_dirty_pages().
+ *
+ * Note that balance_dirty_pages() will only seriously take it as a hard limit
+ * when sleeping max_pause per page is not enough to keep the dirty pages under
+ * control. For example, when the device is completely stalled due to some
+ * error condition, or when there are 1000 dd tasks writing to a slow 10MB/s
+ * USB key. In other, normal situations it acts more gently, throttling the
+ * tasks more (rather than completely blocking them) when the bdi dirty pages
+ * go high.
  *
  * It allocates high/low dirty limits to fast/slow devices, in order to prevent
  * - starving fast devices
@@ -594,6 +599,13 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
         */
        if (unlikely(bdi_thresh > thresh))
                bdi_thresh = thresh;
+       /*
+        * It's very possible that bdi_thresh is close to 0, not because the
+        * device is slow, but because it has been inactive for a long time.
+        * Grant such devices a reasonably good (hopefully IO-efficient)
+        * threshold, so that occasional writes won't be blocked and active
+        * writes can ramp up the threshold quickly.
+        */
        bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
        /*
         * scale global setpoint to bdi's:
@@ -977,8 +989,7 @@ static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
         *
         * 8 serves as the safety ratio.
         */
-       if (bdi_dirty)
-               t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+       t = min(t, bdi_dirty * HZ / (8 * bw + 1));
 
        /*
         * The pause time will be settled within range (max_pause/4, max_pause).
@@ -1136,6 +1147,19 @@ pause:
                if (task_ratelimit)
                        break;
 
+               /*
+                * In the case of an unresponsive NFS server whose dirty pages
+                * exceed dirty_thresh, give the other good bdi's a pipe to go
+                * through, so that tasks on them still remain responsive.
+                *
+                * In theory 1 page is enough to keep the consumer-producer
+                * pipe going: the flusher cleans 1 page => the task dirties 1
+                * more page.  However, bdi_dirty has accounting errors, so use
+                * the larger and more IO-friendly bdi_stat_error.
+                */
+               if (bdi_dirty <= bdi_stat_error(bdi))
+                       break;
+
                if (fatal_signal_pending(current))
                        break;
        }
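The bdi_max_pause() change above drops the bdi_dirty guard because the "+ 1" in the divisor already prevents a zero divisor, and the clamp bounds the sleep by the time needed to write back bdi_dirty pages at bandwidth bw with a safety ratio of 8. A minimal userspace sketch of that arithmetic (not the kernel code; HZ, the function name and the sample numbers are assumptions):

#include <stdio.h>

#define HZ 100              /* assumed tick rate for this sketch */
#define SAFETY_RATIO 8

/* Bound a proposed max pause (in jiffies) by the time it would take to
 * write back bdi_dirty pages at bw pages/s; "+ 1" avoids dividing by zero. */
static unsigned long clamp_max_pause(unsigned long t, unsigned long bdi_dirty,
                                     unsigned long bw)
{
        unsigned long limit = bdi_dirty * HZ / (SAFETY_RATIO * bw + 1);

        return t < limit ? t : limit;
}

int main(void)
{
        /* 200 dirty pages, 1000 pages/s of writeback bandwidth */
        printf("%lu jiffies\n", clamp_max_pause(HZ / 5, 200, 1000));
        /* a device with no dirty pages yields a zero pause, no special case */
        printf("%lu jiffies\n", clamp_max_pause(HZ / 5, 0, 1000));
        return 0;
}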
index 9dd443d..2b8ba3a 100644 (file)
@@ -356,8 +356,8 @@ void prep_compound_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
-
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -3377,9 +3377,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        unsigned long block_migratetype;
        int reserve;
 
-       /* Get the start pfn, end pfn and the number of blocks to reserve */
+       /*
+        * Get the start pfn, end pfn and the number of blocks to reserve.
+        * We have to be careful to be aligned to pageblock_nr_pages to
+        * make sure that we always check pfn_valid for the first page in
+        * the block.
+        */
        start_pfn = zone->zone_start_pfn;
        end_pfn = start_pfn + zone->spanned_pages;
+       start_pfn = roundup(start_pfn, pageblock_nr_pages);
        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
 
index 3bb810a..716eb4a 100644 (file)
@@ -1023,9 +1023,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
                if (!is_vmalloc_addr(addr))
                        return __pa(addr);
                else
-                       return page_to_phys(vmalloc_to_page(addr));
+                       return page_to_phys(vmalloc_to_page(addr)) +
+                              offset_in_page(addr);
        } else
-               return page_to_phys(pcpu_addr_to_page(addr));
+               return page_to_phys(pcpu_addr_to_page(addr)) +
+                      offset_in_page(addr);
 }
 
 /**
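The per_cpu_ptr_to_phys() fix adds the in-page offset back, since page_to_phys() only yields the physical address of the page frame itself. A userspace sketch of the idea, with an assumed 4 KiB page size and invented addresses:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL      /* assumed page size */

/* byte offset of an address inside its page */
static unsigned long offset_in_page(uintptr_t addr)
{
        return addr & (PAGE_SIZE - 1);
}

int main(void)
{
        uintptr_t vaddr = 0x4a2c1234;     /* hypothetical virtual address */
        uintptr_t page_phys = 0x1a2b3000; /* pretend page_to_phys() result */

        /* without adding the offset, anything not page aligned maps wrongly */
        printf("phys = %#lx\n", (unsigned long)(page_phys + offset_in_page(vaddr)));
        return 0;
}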
index 3231bf3..27be2f0 100644 (file)
@@ -1290,7 +1290,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
                unsigned long align, unsigned long flags, unsigned long start,
                unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
-       static struct vmap_area *va;
+       struct vmap_area *va;
        struct vm_struct *area;
 
        BUG_ON(in_interrupt());
@@ -1633,6 +1633,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                goto fail;
 
        addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+       if (!addr)
+               return NULL;
 
        /*
         * In this function, newly allocated vm_struct is not added
index a1893c0..f54a05b 100644 (file)
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
  */
 void register_shrinker(struct shrinker *shrinker)
 {
-       shrinker->nr = 0;
+       atomic_long_set(&shrinker->nr_in_batch, 0);
        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
        up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
-               unsigned long total_scan;
-               unsigned long max_pass;
+               long total_scan;
+               long max_pass;
                int shrink_ret = 0;
                long nr;
                long new_nr;
                long batch_size = shrinker->batch ? shrinker->batch
                                                  : SHRINK_BATCH;
 
+               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+               if (max_pass <= 0)
+                       continue;
+
                /*
                 * copy the current shrinker scan count into a local variable
                 * and zero it so that other concurrent shrinker invocations
                 * don't also do this scanning work.
                 */
-               do {
-                       nr = shrinker->nr;
-               } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+               nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
 
                total_scan = nr;
-               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
                delta = (4 * nr_pages_scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                 * manner that handles concurrent updates. If we exhausted the
                 * scan, there is no need to do an update.
                 */
-               do {
-                       nr = shrinker->nr;
-                       new_nr = total_scan + nr;
-                       if (total_scan <= 0)
-                               break;
-               } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+               if (total_scan > 0)
+                       new_nr = atomic_long_add_return(total_scan,
+                                       &shrinker->nr_in_batch);
+               else
+                       new_nr = atomic_long_read(&shrinker->nr_in_batch);
 
                trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
        }
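The shrinker change replaces the cmpxchg retry loops with a single atomic exchange that takes over the deferred scan count, plus an atomic add that returns any unfinished work. A minimal C11 sketch of that pattern (the half-done split and the function name are illustrative assumptions, not the kernel's logic):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_in_batch;    /* work deferred by earlier, partial scans */

static long shrink_once(long new_work)
{
        /* take over everything deferred so far and zero the counter, so
         * concurrent callers do not scan the same backlog twice */
        long total_scan = atomic_exchange(&nr_in_batch, 0) + new_work;
        long done = total_scan / 2;           /* pretend we only got half done */
        long leftover = total_scan - done;

        /* return the unfinished part for the next invocation */
        if (leftover > 0)
                atomic_fetch_add(&nr_in_batch, leftover);
        return done;
}

int main(void)
{
        printf("scanned %ld\n", shrink_once(128));   /* 64, defers 64 */
        printf("scanned %ld\n", shrink_once(128));   /* 96 of the 64 + 128 total */
        return 0;
}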
index e0af723..c1c597e 100644 (file)
@@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                goto encrypt;
 
 auth:
-       if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+       if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;
 
        if (!hci_conn_auth(conn, sec_level, auth_type))
index 5ea94a1..17b5b1c 100644 (file)
@@ -2152,7 +2152,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
        void *ptr = req->data;
        int type, olen;
        unsigned long val;
-       struct l2cap_conf_rfc rfc;
+       struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
 
        BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
 
@@ -2271,6 +2271,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
                }
        }
 
+       /* Use sane default values in case a misbehaving remote device
+        * did not send an RFC option.
+        */
+       rfc.mode = chan->mode;
+       rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+       rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+       rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+       BT_ERR("Expected RFC option was not found, using defaults");
+
 done:
        switch (rfc.mode) {
        case L2CAP_MODE_ERTM:
index 4e32e18..2d28dfe 100644 (file)
@@ -1146,6 +1146,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
                        if (list_empty(&s->dlcs)) {
                                s->state = BT_DISCONN;
                                rfcomm_send_disc(s, 0);
+                               rfcomm_session_clear_timer(s);
                        }
 
                        break;
index d6ec372..fa8b8f7 100644 (file)
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
        return NULL;
 }
 
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+       return dst->dev->mtu;
+}
+
 static struct dst_ops fake_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
        .update_pmtu =          fake_update_pmtu,
        .cow_metrics =          fake_cow_metrics,
        .neigh_lookup =         fake_neigh_lookup,
+       .mtu =                  fake_mtu,
 };
 
 /*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
        rt->dst.dev = br->dev;
        rt->dst.path = &rt->dst;
        dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-       rt->dst.flags   = DST_NOXFRM;
+       rt->dst.flags   = DST_NOXFRM | DST_NOPEER;
        rt->dst.ops = &fake_dst_ops;
 }
 
index 42599e3..3a94eae 100644 (file)
@@ -477,7 +477,6 @@ int crush_do_rule(struct crush_map *map,
        int i, j;
        int numrep;
        int firstn;
-       int rc = -1;
 
        BUG_ON(ruleno >= map->max_rules);
 
@@ -491,23 +490,18 @@ int crush_do_rule(struct crush_map *map,
         * that this may or may not correspond to the specific types
         * referenced by the crush rule.
         */
-       if (force >= 0) {
-               if (force >= map->max_devices ||
-                   map->device_parents[force] == 0) {
-                       /*dprintk("CRUSH: forcefed device dne\n");*/
-                       rc = -1;  /* force fed device dne */
-                       goto out;
-               }
-               if (!is_out(map, weight, force, x)) {
-                       while (1) {
-                               force_context[++force_pos] = force;
-                               if (force >= 0)
-                                       force = map->device_parents[force];
-                               else
-                                       force = map->bucket_parents[-1-force];
-                               if (force == 0)
-                                       break;
-                       }
+       if (force >= 0 &&
+           force < map->max_devices &&
+           map->device_parents[force] != 0 &&
+           !is_out(map, weight, force, x)) {
+               while (1) {
+                       force_context[++force_pos] = force;
+                       if (force >= 0)
+                               force = map->device_parents[force];
+                       else
+                               force = map->bucket_parents[-1-force];
+                       if (force == 0)
+                               break;
                }
        }
 
@@ -600,10 +594,7 @@ int crush_do_rule(struct crush_map *map,
                        BUG_ON(1);
                }
        }
-       rc = result_len;
-
-out:
-       return rc;
+       return result_len;
 }
 
 
index 8ae42de..e318c7e 100644 (file)
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
        put_online_cpus();
 }
 
+static void flow_cache_flush_task(struct work_struct *work)
+{
+       flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+       schedule_work(&flow_cache_flush_work);
+}
+
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
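flow_cache_flush_deferred() exists so callers that must not sleep can push the blocking flush onto a workqueue. A rough userspace analogue of the same deferral using a detached pthread (all names are assumptions; link with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void flow_cache_flush(void)
{
        usleep(1000);               /* stands in for the expensive, sleeping flush */
        puts("cache flushed");
}

static void *flush_worker(void *arg)
{
        (void)arg;
        flow_cache_flush();
        return NULL;
}

/* analogue of flow_cache_flush_deferred(): callable where blocking is not
 * allowed, because the real work runs in another context */
static void flow_cache_flush_deferred(void)
{
        pthread_t t;

        if (pthread_create(&t, NULL, flush_worker, NULL) == 0)
                pthread_detach(t);
}

int main(void)
{
        flow_cache_flush_deferred();
        sleep(1);                   /* give the worker time to run in this demo */
        return 0;
}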
index c71c434..385aefe 100644 (file)
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
        if (count) {
                int i;
 
-               if (count > 1<<30) {
+               if (count > INT_MAX)
+                       return -EINVAL;
+               count = roundup_pow_of_two(count);
+               if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+                               / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
-               count = roundup_pow_of_two(count);
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
                if (!table)
                        return -ENOMEM;
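The store_rps_dev_flow_table_cnt() fix rejects counts above INT_MAX before rounding up to a power of two, then checks that the table-size multiplication cannot overflow. A standalone sketch of the same guard; the structure layouts and sizes here are invented for illustration:

#include <limits.h>
#include <stdio.h>

struct flow_entry { unsigned int hash; };
struct flow_table { unsigned long mask; struct flow_entry flows[]; };

static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long r = 1;

        while (r < n)
                r <<= 1;
        return r;
}

/* return the allocation size for 'count' entries, or 0 if it would overflow */
static unsigned long table_size(unsigned long count)
{
        if (count > INT_MAX)
                return 0;
        count = roundup_pow_of_two(count);
        if (count > (ULONG_MAX - sizeof(struct flow_table)) /
                    sizeof(struct flow_entry))
                return 0;
        return sizeof(struct flow_table) + count * sizeof(struct flow_entry);
}

int main(void)
{
        printf("%lu bytes\n", table_size(1000));      /* rounds up to 1024 entries */
        printf("%lu (rejected)\n", table_size((unsigned long)INT_MAX + 1));
        return 0;
}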
index 4ed7b1d..b23f174 100644 (file)
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
-       /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-          number of warnings when compiling with -W --ANK
-        */
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf) {
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
                atomic_inc(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
index 0da2afc..99ec116 100644 (file)
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
                }
        }
 
+       /* no point in waiting if we could not bring up at least one device */
+       if (!ic_first_dev)
+               goto have_carrier;
+
        /* wait for a carrier on at least one device */
        start = jiffies;
        while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
index 46af623..94cdbc5 100644 (file)
@@ -91,6 +91,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 
 static int ip_rt_max_size;
 static int ip_rt_gc_timeout __read_mostly      = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_redirect_number __read_mostly = 9;
 static int ip_rt_redirect_load __read_mostly   = HZ / 50;
@@ -133,6 +135,9 @@ static int ip_rt_min_advmss __read_mostly   = 256;
 static int rt_chain_length_max __read_mostly   = 20;
 static int redirect_genid;
 
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
 /*
  *     Interface to generic destination cache.
  */
@@ -830,6 +835,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        return ONE;
 }
 
+static void rt_check_expire(void)
+{
+       static unsigned int rover;
+       unsigned int i = rover, goal;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
+       unsigned long samples = 0;
+       unsigned long sum = 0, sum2 = 0;
+       unsigned long delta;
+       u64 mult;
+
+       delta = jiffies - expires_ljiffies;
+       expires_ljiffies = jiffies;
+       mult = ((u64)delta) << rt_hash_log;
+       if (ip_rt_gc_timeout > 1)
+               do_div(mult, ip_rt_gc_timeout);
+       goal = (unsigned int)mult;
+       if (goal > rt_hash_mask)
+               goal = rt_hash_mask + 1;
+       for (; goal > 0; goal--) {
+               unsigned long tmo = ip_rt_gc_timeout;
+               unsigned long length;
+
+               i = (i + 1) & rt_hash_mask;
+               rthp = &rt_hash_table[i].chain;
+
+               if (need_resched())
+                       cond_resched();
+
+               samples++;
+
+               if (rcu_dereference_raw(*rthp) == NULL)
+                       continue;
+               length = 0;
+               spin_lock_bh(rt_hash_lock_addr(i));
+               while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+                       prefetch(rth->dst.rt_next);
+                       if (rt_is_expired(rth)) {
+                               *rthp = rth->dst.rt_next;
+                               rt_free(rth);
+                               continue;
+                       }
+                       if (rth->dst.expires) {
+                               /* Entry is expired even if it is in use */
+                               if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+                                       tmo >>= 1;
+                                       rthp = &rth->dst.rt_next;
+                                       /*
+                                        * We only count entries on
+                                        * a chain with equal hash inputs once
+                                        * so that entries for different QOS
+                                        * levels, and other non-hash input
+                                        * attributes don't unfairly skew
+                                        * the length computation
+                                        */
+                                       length += has_noalias(rt_hash_table[i].chain, rth);
+                                       continue;
+                               }
+                       } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+                               goto nofree;
+
+                       /* Cleanup aged off entries. */
+                       *rthp = rth->dst.rt_next;
+                       rt_free(rth);
+               }
+               spin_unlock_bh(rt_hash_lock_addr(i));
+               sum += length;
+               sum2 += length*length;
+       }
+       if (samples) {
+               unsigned long avg = sum / samples;
+               unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+               rt_chain_length_max = max_t(unsigned long,
+                                       ip_rt_gc_elasticity,
+                                       (avg + 4*sd) >> FRACT_BITS);
+       }
+       rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * We call rt_check_expire() to scan part of the hash table.
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+       rt_check_expire();
+       schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -1271,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
        struct rtable *rt = (struct rtable *) dst;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                if (rt->peer == NULL)
                        rt_bind_peer(rt, rt->rt_dst, 1);
 
@@ -1282,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
                        iph->id = htons(inet_getid(rt->peer, more));
                        return;
                }
-       } else
+       } else if (!rt)
                printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
                       __builtin_return_address(0));
 
@@ -3178,6 +3274,13 @@ static ctl_table ipv4_route_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "gc_interval",
+               .data           = &ip_rt_gc_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
        {
                .procname       = "redirect_load",
                .data           = &ip_rt_redirect_load,
@@ -3388,6 +3491,11 @@ int __init ip_rt_init(void)
        devinet_init();
        ip_fib_init();
 
+       INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+       expires_ljiffies = jiffies;
+       schedule_delayed_work(&expires_work,
+               net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
        if (ip_rt_proc_init())
                printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
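The restored rt_check_expire() re-derives rt_chain_length_max from the mean and standard deviation of the sampled chain lengths, roughly avg + 4*sd converted back from fixed point. A sketch of just that statistics step; FRACT_BITS, the sample lengths and the elasticity floor are assumptions (chain lengths are counted in 1 << FRACT_BITS units, like the kernel's ONE):

#include <stdio.h>

#define FRACT_BITS 3    /* assumed fixed-point shift, as in the routing cache */

static unsigned long int_sqrt(unsigned long x)
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= x)
                r++;
        return r;
}

/* cap the hash chain length at roughly avg + 4 * stddev, in whole entries */
static unsigned long chain_length_max(const unsigned long *len, int n,
                                      unsigned long elasticity)
{
        unsigned long sum = 0, sum2 = 0, avg, sd, cap;
        int i;

        for (i = 0; i < n; i++) {
                sum  += len[i];
                sum2 += len[i] * len[i];
        }
        avg = sum / n;
        sd  = int_sqrt(sum2 / n - avg * avg);
        cap = (avg + 4 * sd) >> FRACT_BITS;
        return cap > elasticity ? cap : elasticity;
}

int main(void)
{
        /* chain lengths sampled in units of 1 << FRACT_BITS */
        unsigned long lengths[] = { 8, 40, 16, 96, 24, 8, 64, 32 };

        printf("max chain length: %lu\n", chain_length_max(lengths, 8, 8));
        return 0;
}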
index 84d0bd5..ec56271 100644 (file)
@@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
        static atomic_t ipv6_fragmentation_id;
        int old, new;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                struct inet_peer *peer;
 
                if (!rt->rt6i_peer)
index 3399dd3..b582a0a 100644 (file)
@@ -728,7 +728,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
                int attempts = !in_softirq();
 
                if (!(rt->rt6i_flags&RTF_GATEWAY)) {
-                       if (rt->rt6i_dst.plen != 128 &&
+                       if (ort->rt6i_dst.plen != 128 &&
                            ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                                rt->rt6i_flags |= RTF_ANYCAST;
                        ipv6_addr_copy(&rt->rt6i_gateway, daddr);
index dfd3a64..a18e6c3 100644 (file)
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
                copied += used;
                len -= used;
 
+               /* For non-stream protocols we get one packet per recvmsg call */
+               if (sk->sk_type != SOCK_STREAM)
+                       goto copy_uaddr;
+
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, 0);
                        *seq = 0;
                }
 
-               /* For non stream protcols we get one packet per recvmsg call */
-               if (sk->sk_type != SOCK_STREAM)
-                       goto copy_uaddr;
-
                /* Partial read */
                if (used + offset < skb->len)
                        continue;
@@ -857,6 +857,12 @@ copy_uaddr:
        }
        if (llc_sk(sk)->cmsg_flags)
                llc_cmsg_rcv(msg, skb);
+
+       if (!(flags & MSG_PEEK)) {
+                       sk_eat_skb(sk, skb, 0);
+                       *seq = 0;
+       }
+
        goto out;
 }
 
index 5b13850..9ddf1c3 100644 (file)
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
                break;
        }
 
-       if (sinfo->count.to)
+       if (sinfo->count.to >= sinfo->count.from)
                return what <= sinfo->count.to && what >= sinfo->count.from;
-       else
-               return what >= sinfo->count.from;
+       else /* inverted */
+               return what < sinfo->count.to || what > sinfo->count.from;
 }
 
 static int connbytes_mt_check(const struct xt_mtchk_param *par)
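The connbytes fix treats from > to as an inverted range: instead of ignoring 'to', the match now succeeds only outside (to, from). A small sketch of the resulting predicate with made-up byte counts:

#include <stdbool.h>
#include <stdio.h>

/* match 'what' against [from, to]; when from > to the range is inverted and
 * only values outside (to, from) match */
static bool connbytes_match(unsigned long long what,
                            unsigned long long from, unsigned long long to)
{
        if (to >= from)
                return what <= to && what >= from;
        return what < to || what > from;
}

int main(void)
{
        printf("%d\n", connbytes_match(500, 100, 1000));  /* 1: inside the range */
        printf("%d\n", connbytes_match(500, 1000, 100));  /* 0: inside the hole  */
        printf("%d\n", connbytes_match(50, 1000, 100));   /* 1: below the hole   */
        return 0;
}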
index 3925c65..ea66034 100644 (file)
@@ -69,7 +69,7 @@ static int __nci_request(struct nci_dev *ndev,
        __u32 timeout)
 {
        int rc = 0;
-       unsigned long completion_rc;
+       long completion_rc;
 
        ndev->req_status = NCI_REQ_PEND;
 
index 82a6f34..3891702 100644 (file)
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        if (snaplen > res)
                snaplen = res;
 
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf)
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto drop_n_acct;
 
        if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (po->tp_version <= TPACKET_V2) {
                if (macoff + snaplen > po->rx_ring.frame_size) {
                        if (po->copy_thresh &&
-                               atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-                               < (unsigned)sk->sk_rcvbuf) {
+                           atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                                if (skb_shared(skb)) {
                                        copy_skb = skb_clone(skb, GFP_ATOMIC);
                                } else {
index f88256c..28de430 100644 (file)
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;
 
-       if (nla_len(opt) < sizeof(*qopt))
+       if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
 
        qopt = nla_data(opt);
index eb3b9a8..a4ab207 100644 (file)
@@ -488,7 +488,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
                return -EINVAL;
 
        s = sizeof(struct disttable) + n * sizeof(s16);
-       d = kmalloc(s, GFP_KERNEL);
+       d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
        if (!d)
                d = vmalloc(s);
        if (!d)
@@ -501,9 +501,10 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
        root_lock = qdisc_root_sleeping_lock(sch);
 
        spin_lock_bh(root_lock);
-       dist_free(q->delay_dist);
-       q->delay_dist = d;
+       swap(q->delay_dist, d);
        spin_unlock_bh(root_lock);
+
+       dist_free(d);
        return 0;
 }
 
index 152b5b3..acd2edb 100644 (file)
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-               (unsigned long)sp->autoclose * HZ;
+               min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
 
        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
index 08b3cea..817174e 100644 (file)
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
        /* Keep track of how many bytes are in flight to the receiver. */
        asoc->outqueue.outstanding_bytes += datasize;
 
-       /* Update our view of the receiver's rwnd. Include sk_buff overhead
-        * while updating peer.rwnd so that it reduces the chances of a
-        * receiver running out of receive buffer space even when receive
-        * window is still open. This can happen when a sender is sending
-        * sending small messages.
-        */
-       datasize += sizeof(struct sk_buff);
+       /* Update our view of the receiver's rwnd. */
        if (datasize < rwnd)
                rwnd -= datasize;
        else
index 14c2b06..cfeb1d4 100644 (file)
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                                        chunk->transport->flight_size -=
                                                        sctp_data_size(chunk);
                                q->outstanding_bytes -= sctp_data_size(chunk);
-                               q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                                       sizeof(struct sk_buff));
+                               q->asoc->peer.rwnd += sctp_data_size(chunk);
                        }
                        continue;
                }
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                         * (Section 7.2.4)), add the data size of those
                         * chunks to the rwnd.
                         */
-                       q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                               sizeof(struct sk_buff));
+                       q->asoc->peer.rwnd += sctp_data_size(chunk);
                        q->outstanding_bytes -= sctp_data_size(chunk);
                        if (chunk->transport)
                                transport->flight_size -= sctp_data_size(chunk);
index 61b9fca..6f6ad86 100644 (file)
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_max_instreams              = SCTP_DEFAULT_INSTREAMS;
        sctp_max_outstreams             = SCTP_DEFAULT_OUTSTREAMS;
 
+       /* Initialize maximum autoclose timeout. */
+       sctp_max_autoclose              = INT_MAX / HZ;
+
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
 
index 13bf5fc..54a7cd2 100644 (file)
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
                return -EINVAL;
        if (copy_from_user(&sp->autoclose, optval, optlen))
                return -EFAULT;
-       /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-       sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
 
        return 0;
 }
index 6b39529..60ffbd0 100644 (file)
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+       (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+       ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 
 extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
                .extra1         = &one,
                .extra2         = &rwnd_scale_max,
        },
+       {
+               .procname       = "max_autoclose",
+               .data           = &sctp_max_autoclose,
+               .maxlen         = sizeof(unsigned long),
+               .mode           = 0644,
+               .proc_handler   = &proc_doulongvec_minmax,
+               .extra1         = &max_autoclose_min,
+               .extra2         = &max_autoclose_max,
+       },
 
        { /* sentinel */ }
 };
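Together, the sctp changes clamp a user-supplied autoclose value (in seconds) against sctp_max_autoclose so that the later multiplication by HZ cannot overflow the jiffies timer. A minimal sketch of that clamping; the HZ value and helper names are assumptions:

#include <limits.h>
#include <stdio.h>

#define HZ 250                              /* assumed tick rate */

static unsigned long sctp_max_autoclose;

static void sctp_init_max_autoclose(void)
{
        /* cap so that seconds * HZ cannot exceed INT_MAX */
        sctp_max_autoclose = INT_MAX / HZ;
}

/* convert user-supplied autoclose seconds to jiffies, clamped so the
 * multiplication by HZ cannot overflow */
static unsigned long autoclose_jiffies(unsigned long autoclose_sec)
{
        unsigned long sec = autoclose_sec < sctp_max_autoclose
                          ? autoclose_sec : sctp_max_autoclose;

        return sec * HZ;
}

int main(void)
{
        sctp_init_max_autoclose();
        printf("max_autoclose = %lu s\n", sctp_max_autoclose);
        printf("huge request  -> %lu jiffies\n", autoclose_jiffies(ULONG_MAX));
        return 0;
}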
index d12ffa5..00a1a2a 100644 (file)
@@ -590,6 +590,27 @@ void rpc_prepare_task(struct rpc_task *task)
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 }
 
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+       /* Initialize retry counters */
+       task->tk_garb_retry = 2;
+       task->tk_cred_retry = 2;
+       task->tk_rebind_retry = 2;
+
+       /* starting timestamp */
+       task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+       task->tk_timeouts = 0;
+       task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+       rpc_init_task_statistics(task);
+}
+
 /*
  * Helper that calls task->tk_ops->rpc_call_done if it exists
  */
@@ -602,6 +623,7 @@ void rpc_exit_task(struct rpc_task *task)
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
+                       rpc_reset_task_statistics(task);
                }
        }
 }
@@ -804,11 +826,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
        task->tk_calldata = task_setup_data->callback_data;
        INIT_LIST_HEAD(&task->tk_task);
 
-       /* Initialize retry counters */
-       task->tk_garb_retry = 2;
-       task->tk_cred_retry = 2;
-       task->tk_rebind_retry = 2;
-
        task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
        task->tk_owner = current->tgid;
 
@@ -818,8 +835,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
        if (task->tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;
 
-       /* starting timestamp */
-       task->tk_start = ktime_get();
+       rpc_init_task_statistics(task);
 
        dprintk("RPC:       new task initialized, procpid %u\n",
                                task_pid_nr(current));
index f4385e4..c64c0ef 100644 (file)
@@ -995,13 +995,11 @@ out_init_req:
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-       if (xprt_dynamic_free_slot(xprt, req))
-               return;
-
-       memset(req, 0, sizeof(*req));   /* mark unused */
-
        spin_lock(&xprt->reserve_lock);
-       list_add(&req->rq_list, &xprt->free);
+       if (!xprt_dynamic_free_slot(xprt, req)) {
+               memset(req, 0, sizeof(*req));   /* mark unused */
+               list_add(&req->rq_list, &xprt->free);
+       }
        rpc_wake_up_next(&xprt->backlog);
        spin_unlock(&xprt->reserve_lock);
 }
index 2118d64..9049a5c 100644 (file)
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
 {
        struct dst_entry *head, *next;
 
-       flow_cache_flush();
-
        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
        head = xfrm_policy_sk_bundles;
        xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
        }
 }
 
+static void xfrm_garbage_collect(struct net *net)
+{
+       flow_cache_flush();
+       __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+       flow_cache_flush_deferred();
+       __xfrm_garbage_collect(net);
+}
+
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
        do {
@@ -2422,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                if (likely(dst_ops->neigh_lookup == NULL))
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
                if (likely(afinfo->garbage_collect == NULL))
-                       afinfo->garbage_collect = __xfrm_garbage_collect;
+                       afinfo->garbage_collect = xfrm_garbage_collect_deferred;
                xfrm_policy_afinfo[afinfo->family] = afinfo;
        }
        write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2516,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
 
        switch (event) {
        case NETDEV_DOWN:
-               __xfrm_garbage_collect(dev_net(dev));
+               xfrm_garbage_collect(dev_net(dev));
        }
        return NOTIFY_DONE;
 }
index ba573fe..914833d 100644 (file)
@@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            --directory=$(srctree) --directory=$(objtree)           \
            --output $(obj)/config.pot
        $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
-       $(Q)ln -fs Kconfig.x86 arch/um/Kconfig
-       $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`;    \
+       $(Q)(for i in `ls $(srctree)/arch/*/Kconfig      \
+           $(srctree)/arch/*/um/Kconfig`;               \
            do                                           \
                echo "  GEN $$i";                        \
                $(obj)/kxgettext $$i                     \
@@ -69,7 +69,6 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            done )
        $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
            --output $(obj)/linux.pot
-       $(Q)rm -f $(srctree)/arch/um/Kconfig
        $(Q)rm -f $(obj)/config.pot
 
 PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
index 36cc0cc..b566eba 100644 (file)
@@ -57,23 +57,44 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
 static int d_namespace_path(struct path *path, char *buf, int buflen,
                            char **name, int flags)
 {
-       struct path root, tmp;
        char *res;
-       int connected, error = 0;
+       int error = 0;
+       int connected = 1;
+
+       if (path->mnt->mnt_flags & MNT_INTERNAL) {
+               /* it's not mounted anywhere */
+               res = dentry_path(path->dentry, buf, buflen);
+               *name = res;
+               if (IS_ERR(res)) {
+                       *name = buf;
+                       return PTR_ERR(res);
+               }
+               if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+                   strncmp(*name, "/sys/", 5) == 0) {
+                       /* TODO: convert over to using a per namespace
+                        * control instead of hard coded /proc
+                        */
+                       return prepend(name, *name - buf, "/proc", 5);
+               }
+               return 0;
+       }
 
-       /* Get the root we want to resolve too, released below */
+       /* resolve paths relative to chroot? */
        if (flags & PATH_CHROOT_REL) {
-               /* resolve paths relative to chroot */
+               struct path root;
                get_fs_root(current->fs, &root);
-       } else {
-               /* resolve paths relative to namespace */
-               root.mnt = current->nsproxy->mnt_ns->root;
-               root.dentry = root.mnt->mnt_root;
-               path_get(&root);
+               res = __d_path(path, &root, buf, buflen);
+               if (res && !IS_ERR(res)) {
+                       /* everything's fine */
+                       *name = res;
+                       path_put(&root);
+                       goto ok;
+               }
+               path_put(&root);
+               connected = 0;
        }
 
-       tmp = root;
-       res = __d_path(path, &tmp, buf, buflen);
+       res = d_absolute_path(path, buf, buflen);
 
        *name = res;
        /* handle error conditions - and still allow a partial path to
@@ -84,7 +105,10 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
+       if (!our_mnt(path->mnt))
+               connected = 0;
 
+ok:
        /* Handle two cases:
         * 1. A deleted dentry && profile is not allowing mediation of deleted
         * 2. On some filesystems, newly allocated dentries appear to the
@@ -97,10 +121,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                        goto out;
        }
 
-       /* Determine if the path is connected to the expected root */
-       connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt;
-
-       /* If the path is not connected,
+       /* If the path is not connected to the expected root,
         * check if it is a sysctl and handle specially else remove any
         * leading / that __d_path may have returned.
         * Unless
@@ -112,17 +133,9 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
         *     namespace root.
         */
        if (!connected) {
-               /* is the disconnect path a sysctl? */
-               if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
-                   strncmp(*name, "/sys/", 5) == 0) {
-                       /* TODO: convert over to using a per namespace
-                        * control instead of hard coded /proc
-                        */
-                       error = prepend(name, *name - buf, "/proc", 5);
-               } else if (!(flags & PATH_CONNECT_PATH) &&
+               if (!(flags & PATH_CONNECT_PATH) &&
                           !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
-                            (tmp.mnt == current->nsproxy->mnt_ns->root &&
-                             tmp.dentry == tmp.mnt->mnt_root))) {
+                            our_mnt(path->mnt))) {
                        /* disconnected path, don't return pathname starting
                         * with '/'
                         */
@@ -133,8 +146,6 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
 out:
-       path_put(&root);
-
        return error;
 }
 
index 5dd5b14..8738def 100644 (file)
@@ -27,20 +27,35 @@ static int evmkey_len = MAX_KEY_SIZE;
 
 struct crypto_shash *hmac_tfm;
 
+static DEFINE_MUTEX(mutex);
+
 static struct shash_desc *init_desc(void)
 {
        int rc;
        struct shash_desc *desc;
 
        if (hmac_tfm == NULL) {
+               mutex_lock(&mutex);
+               if (hmac_tfm)
+                       goto out;
                hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(hmac_tfm)) {
                        pr_err("Can not allocate %s (reason: %ld)\n",
                               evm_hmac, PTR_ERR(hmac_tfm));
                        rc = PTR_ERR(hmac_tfm);
                        hmac_tfm = NULL;
+                       mutex_unlock(&mutex);
+                       return ERR_PTR(rc);
+               }
+               rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+               if (rc) {
+                       crypto_free_shash(hmac_tfm);
+                       hmac_tfm = NULL;
+                       mutex_unlock(&mutex);
                        return ERR_PTR(rc);
                }
+out:
+               mutex_unlock(&mutex);
        }
 
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
@@ -51,11 +66,7 @@ static struct shash_desc *init_desc(void)
        desc->tfm = hmac_tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-       rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
-       if (rc)
-               goto out;
        rc = crypto_shash_init(desc);
-out:
        if (rc) {
                kfree(desc);
                return ERR_PTR(rc);
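The EVM change serializes lazy allocation of hmac_tfm and performs the one-time setkey under the same mutex, so concurrent first users cannot race. A userspace sketch of the double-checked initialization pattern with pthreads (all names invented; strictly portable code would also want an atomic load or pthread_once for the unlocked first check):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct hmac_ctx { int key_set; };

static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct hmac_ctx *hmac_tfm;          /* lazily created singleton */

static struct hmac_ctx *get_hmac_tfm(void)
{
        if (hmac_tfm == NULL) {            /* fast path; see note in the lead-in */
                pthread_mutex_lock(&init_mutex);
                if (hmac_tfm == NULL) {    /* re-check under the lock */
                        struct hmac_ctx *tfm = calloc(1, sizeof(*tfm));

                        if (tfm) {
                                tfm->key_set = 1;   /* one-time "setkey" also done here */
                                hmac_tfm = tfm;
                        }
                }
                pthread_mutex_unlock(&init_mutex);
        }
        return hmac_tfm;
}

int main(void)
{
        struct hmac_ctx *tfm = get_hmac_tfm();

        if (tfm)
                printf("tfm %p key_set=%d\n", (void *)tfm, tfm->key_set);
        return 0;
}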
index 0b62bd1..7b9eb1f 100644 (file)
@@ -123,7 +123,9 @@ static void sel_netport_insert(struct sel_netport *port)
        if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
                struct sel_netport *tail;
                tail = list_entry(
-                       rcu_dereference(sel_netport_hash[idx].list.prev),
+                       rcu_dereference_protected(
+                               sel_netport_hash[idx].list.prev,
+                               lockdep_is_held(&sel_netport_lock)),
                        struct sel_netport, list);
                list_del_rcu(&tail->list);
                kfree_rcu(tail, rcu);
index 738bbdf..d9f3ced 100644 (file)
@@ -101,9 +101,8 @@ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
 {
        char *pos = ERR_PTR(-ENOMEM);
        if (buflen >= 256) {
-               struct path ns_root = { };
                /* go to whatever namespace root we are under */
-               pos = __d_path(path, &ns_root, buffer, buflen - 1);
+               pos = d_absolute_path(path, buffer, buflen - 1);
                if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
                        struct inode *inode = path->dentry->d_inode;
                        if (inode && S_ISDIR(inode->i_mode)) {
@@ -294,8 +293,16 @@ char *tomoyo_realpath_from_path(struct path *path)
                        pos = tomoyo_get_local_path(path->dentry, buf,
                                                    buf_len - 1);
                /* Get absolute name for the rest. */
-               else
+               else {
                        pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+                       /*
+                        * Fall back to local name if absolute name is not
+                        * available.
+                        */
+                       if (pos == ERR_PTR(-EINVAL))
+                               pos = tomoyo_get_local_path(path->dentry, buf,
+                                                           buf_len - 1);
+               }
 encode:
                if (IS_ERR(pos))
                        continue;
index 6e5adde..73516f6 100644 (file)
@@ -899,6 +899,10 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
                /* AC97 v2.2 specifications says minimum 1 us. */
                udelay(2);
                gpio_set_value(chip->reset_pin, 1);
+       } else {
+               ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
+               udelay(2);
+               ac97c_writel(chip, MR, AC97C_MR_ENA);
        }
 }
 
index 7d98240..c2f79e6 100644 (file)
@@ -2507,6 +2507,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
        SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+       SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
@@ -2970,7 +2971,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* SCH */
        { PCI_DEVICE(0x8086, 0x811b),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE},
+         AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+       /* ICH */
        { PCI_DEVICE(0x8086, 0x2668),
          .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
          AZX_DCAPS_BUFSIZE },  /* ICH6 */
index cbde019..1d07e8f 100644 (file)
@@ -297,6 +297,8 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
        imux = &spec->input_mux[mux_idx];
        if (!imux->num_items && mux_idx > 0)
                imux = &spec->input_mux[0];
+       if (!imux->num_items)
+               return 0;
 
        if (idx >= imux->num_items)
                idx = imux->num_items - 1;
@@ -2629,6 +2631,8 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
        case AUTO_PIN_SPEAKER_OUT:
                if (cfg->line_outs == 1)
                        return "Speaker";
+               if (cfg->line_outs == 2)
+                       return ch ? "Bass Speaker" : "Speaker";
                break;
        case AUTO_PIN_HP_OUT:
                /* for multi-io case, only the primary out */
@@ -2902,7 +2906,7 @@ static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
                if (!nid)
                        continue;
                if (found_in_nid_list(nid, spec->multiout.dac_nids,
-                                     spec->multiout.num_dacs))
+                                     ARRAY_SIZE(spec->private_dac_nids)))
                        continue;
                if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
                                      ARRAY_SIZE(spec->multiout.hp_out_nid)))
@@ -2923,6 +2927,7 @@ static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
        return 0;
 }
 
+/* return 0 if no possible DAC is found, 1 if one or more found */
 static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
                                    const hda_nid_t *pins, hda_nid_t *dacs)
 {
@@ -2940,7 +2945,7 @@ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
                if (!dacs[i])
                        dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
        }
-       return 0;
+       return 1;
 }
 
 static int alc_auto_fill_multi_ios(struct hda_codec *codec,
@@ -2950,7 +2955,7 @@ static int alc_auto_fill_multi_ios(struct hda_codec *codec,
 static int alc_auto_fill_dac_nids(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       const struct auto_pin_cfg *cfg = &spec->autocfg;
+       struct auto_pin_cfg *cfg = &spec->autocfg;
        bool redone = false;
        int i;
 
@@ -2961,6 +2966,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        spec->multiout.extra_out_nid[0] = 0;
        memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
        spec->multiout.dac_nids = spec->private_dac_nids;
+       spec->multi_ios = 0;
 
        /* fill hard-wired DACs first */
        if (!redone) {
@@ -2994,10 +3000,12 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        for (i = 0; i < cfg->line_outs; i++) {
                if (spec->private_dac_nids[i])
                        spec->multiout.num_dacs++;
-               else
+               else {
                        memmove(spec->private_dac_nids + i,
                                spec->private_dac_nids + i + 1,
                                sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
+                       spec->private_dac_nids[cfg->line_outs - 1] = 0;
+               }
        }
 
        if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
@@ -3019,9 +3027,28 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        if (cfg->line_out_type != AUTO_PIN_HP_OUT)
                alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
                                 spec->multiout.hp_out_nid);
-       if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
-               alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins,
-                                spec->multiout.extra_out_nid);
+       if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+               int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
+                                       cfg->speaker_pins,
+                                       spec->multiout.extra_out_nid);
+               /* if no speaker volume is assigned, try again as the primary
+                * output
+                */
+               if (!err && cfg->speaker_outs > 0 &&
+                   cfg->line_out_type == AUTO_PIN_HP_OUT) {
+                       cfg->hp_outs = cfg->line_outs;
+                       memcpy(cfg->hp_pins, cfg->line_out_pins,
+                              sizeof(cfg->hp_pins));
+                       cfg->line_outs = cfg->speaker_outs;
+                       memcpy(cfg->line_out_pins, cfg->speaker_pins,
+                              sizeof(cfg->speaker_pins));
+                       cfg->speaker_outs = 0;
+                       memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
+                       cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
+                       redone = false;
+                       goto again;
+               }
+       }
 
        return 0;
 }
@@ -3171,7 +3198,8 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
 }
 
 static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
-                                    hda_nid_t dac, const char *pfx)
+                                    hda_nid_t dac, const char *pfx,
+                                    int cidx)
 {
        struct alc_spec *spec = codec->spec;
        hda_nid_t sw, vol;
@@ -3187,15 +3215,15 @@ static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
                if (is_ctl_used(spec->sw_ctls, val))
                        return 0; /* already created */
                mark_ctl_usage(spec->sw_ctls, val);
-               return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val);
+               return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val);
        }
 
        sw = alc_look_for_out_mute_nid(codec, pin, dac);
        vol = alc_look_for_out_vol_nid(codec, pin, dac);
-       err = alc_auto_add_stereo_vol(codec, pfx, 0, vol);
+       err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol);
        if (err < 0)
                return err;
-       err = alc_auto_add_stereo_sw(codec, pfx, 0, sw);
+       err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw);
        if (err < 0)
                return err;
        return 0;
@@ -3236,16 +3264,21 @@ static int alc_auto_create_extra_outs(struct hda_codec *codec, int num_pins,
                hda_nid_t dac = *dacs;
                if (!dac)
                        dac = spec->multiout.dac_nids[0];
-               return alc_auto_create_extra_out(codec, *pins, dac, pfx);
+               return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
        }
 
        if (dacs[num_pins - 1]) {
                /* OK, we have a multi-output system with individual volumes */
                for (i = 0; i < num_pins; i++) {
-                       snprintf(name, sizeof(name), "%s %s",
-                                pfx, channel_name[i]);
-                       err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
-                                                       name);
+                       if (num_pins >= 3) {
+                               snprintf(name, sizeof(name), "%s %s",
+                                        pfx, channel_name[i]);
+                               err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+                                                               name, 0);
+                       } else {
+                               err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+                                                               pfx, i);
+                       }
                        if (err < 0)
                                return err;
                }
index d8d2f9d..616678f 100644 (file)
@@ -215,6 +215,7 @@ struct sigmatel_spec {
        unsigned int gpio_mute;
        unsigned int gpio_led;
        unsigned int gpio_led_polarity;
+       unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */
        unsigned int vref_led;
 
        /* stream */
@@ -4318,12 +4319,10 @@ static void stac_store_hints(struct hda_codec *codec)
                spec->eapd_switch = val;
        get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
        if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
-               if (spec->gpio_led <= 8) {
-                       spec->gpio_mask |= spec->gpio_led;
-                       spec->gpio_dir |= spec->gpio_led;
-                       if (spec->gpio_led_polarity)
-                               spec->gpio_data |= spec->gpio_led;
-               }
+               spec->gpio_mask |= spec->gpio_led;
+               spec->gpio_dir |= spec->gpio_led;
+               if (spec->gpio_led_polarity)
+                       spec->gpio_data |= spec->gpio_led;
        }
 }
 
@@ -4443,7 +4442,7 @@ static int stac92xx_init(struct hda_codec *codec)
                /* power on when no jack detection is available */
                /* or when the VREF is used for controlling LED */
                if (!spec->hp_detect ||
-                   (spec->gpio_led > 8 && spec->gpio_led == nid)) {
+                   spec->vref_mute_led_nid == nid) {
                        stac_toggle_power_map(codec, nid, 1);
                        continue;
                }
@@ -4915,8 +4914,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
                        if (sscanf(dev->name, "HP_Mute_LED_%d_%x",
                                  &spec->gpio_led_polarity,
                                  &spec->gpio_led) == 2) {
-                               if (spec->gpio_led < 4)
+                               unsigned int max_gpio;
+                               max_gpio = snd_hda_param_read(codec, codec->afg,
+                                                             AC_PAR_GPIO_CAP);
+                               max_gpio &= AC_GPIO_IO_COUNT;
+                               if (spec->gpio_led < max_gpio)
                                        spec->gpio_led = 1 << spec->gpio_led;
+                               else
+                                       spec->vref_mute_led_nid = spec->gpio_led;
                                return 1;
                        }
                        if (sscanf(dev->name, "HP_Mute_LED_%d",
@@ -4924,6 +4929,12 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
                                set_hp_led_gpio(codec);
                                return 1;
                        }
+                       /* BIOS bug: unfilled OEM string */
+                       if (strstr(dev->name, "HP_Mute_LED_P_G")) {
+                               set_hp_led_gpio(codec);
+                               spec->gpio_led_polarity = 1;
+                               return 1;
+                       }
                }
 
                /*
@@ -5045,15 +5056,12 @@ static int stac92xx_pre_resume(struct hda_codec *codec)
        struct sigmatel_spec *spec = codec->spec;
 
        /* sync mute LED */
-       if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
-                       stac_gpio_set(codec, spec->gpio_mask,
-                                       spec->gpio_dir, spec->gpio_data);
-               } else {
-                       stac_vrefout_set(codec,
-                                       spec->gpio_led, spec->vref_led);
-               }
-       }
+       if (spec->vref_mute_led_nid)
+               stac_vrefout_set(codec, spec->vref_mute_led_nid,
+                                spec->vref_led);
+       else if (spec->gpio_led)
+               stac_gpio_set(codec, spec->gpio_mask,
+                             spec->gpio_dir, spec->gpio_data);
        return 0;
 }
 
@@ -5064,7 +5072,7 @@ static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg,
        struct sigmatel_spec *spec = codec->spec;
 
        if (power_state == AC_PWRST_D3) {
-               if (spec->gpio_led > 8) {
+               if (spec->vref_mute_led_nid) {
                        /* with vref-out pin used for mute led control
                         * codec AFG is prevented from D3 state
                         */
@@ -5117,7 +5125,7 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
                }
        }
        /*polarity defines *not* muted state level*/
-       if (spec->gpio_led <= 8) {
+       if (!spec->vref_mute_led_nid) {
                if (muted)
                        spec->gpio_data &= ~spec->gpio_led; /* orange */
                else
@@ -5135,7 +5143,8 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
                muted_lvl = spec->gpio_led_polarity ?
                                AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ;
                spec->vref_led = muted ? muted_lvl : notmtd_lvl;
-               stac_vrefout_set(codec, spec->gpio_led, spec->vref_led);
+               stac_vrefout_set(codec, spec->vref_mute_led_nid,
+                                spec->vref_led);
        }
        return 0;
 }
@@ -5649,7 +5658,7 @@ again:
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
+               if (!spec->vref_mute_led_nid) {
                        spec->gpio_mask |= spec->gpio_led;
                        spec->gpio_dir |= spec->gpio_led;
                        spec->gpio_data |= spec->gpio_led;
@@ -5962,7 +5971,7 @@ again:
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
+               if (!spec->vref_mute_led_nid) {
                        spec->gpio_mask |= spec->gpio_led;
                        spec->gpio_dir |= spec->gpio_led;
                        spec->gpio_data |= spec->gpio_led;
index a391e62..28dfafb 100644
@@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
 static int enable = 1;
+static int codecs = 1;
 
 module_param(index, int, 0444);
 MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
@@ -48,6 +49,8 @@ module_param(id, charp, 0444);
 MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
 module_param(enable, bool, 0444);
 MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
+module_param(codecs, int, 0444);
+MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)");
 
 static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
@@ -140,6 +143,9 @@ struct sis7019 {
        dma_addr_t silence_dma_addr;
 };
 
+/* These values are also used by the module param 'codecs' to indicate
+ * which codecs should be present.
+ */
 #define SIS_PRIMARY_CODEC_PRESENT      0x0001
 #define SIS_SECONDARY_CODEC_PRESENT    0x0002
 #define SIS_TERTIARY_CODEC_PRESENT     0x0004
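The new codecs module parameter is interpreted against these same bits, so its value names the codecs the driver should wait for after the cold reset; the probe hunk further down masks it to bits 0-2 and falls back to the primary codec when the result is empty. A small illustrative helper mirroring that sanitisation (the function name is hypothetical):

    /* Sketch: codecs=1 (default) waits for the primary codec only,
     * codecs=3 for primary + secondary, codecs=7 for all three.
     * Anything outside bits 0-2 is dropped; 0 falls back to the primary.
     */
    static u16 sanitize_codecs_param(int value)
    {
            u16 mask = value & (SIS_PRIMARY_CODEC_PRESENT |
                                SIS_SECONDARY_CODEC_PRESENT |
                                SIS_TERTIARY_CODEC_PRESENT);

            return mask ? mask : SIS_PRIMARY_CODEC_PRESENT;
    }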
@@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis)
 {
        unsigned long io = sis->ioport;
        void __iomem *ioaddr = sis->ioaddr;
+       unsigned long timeout;
        u16 status;
        int count;
        int i;
@@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis)
        while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
                udelay(1);
 
+       /* Command complete, we can let go of the semaphore now.
+        */
+       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
+       if (!count)
+               return -EIO;
+
        /* Now that we've finished the reset, find out what's attached.
+        * There are some codec/board combinations that take an extremely
+        * long time to come up. 350+ ms has been observed in the field,
+        * so we'll give them up to 500ms.
         */
-       status = inl(io + SIS_AC97_STATUS);
-       if (status & SIS_AC97_STATUS_CODEC_READY)
-               sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC2_READY)
-               sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC3_READY)
-               sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
-
-       /* All done, let go of the semaphore, and check for errors
+       sis->codecs_present = 0;
+       timeout = msecs_to_jiffies(500) + jiffies;
+       while (time_before_eq(jiffies, timeout)) {
+               status = inl(io + SIS_AC97_STATUS);
+               if (status & SIS_AC97_STATUS_CODEC_READY)
+                       sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC2_READY)
+                       sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC3_READY)
+                       sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
+
+               if (sis->codecs_present == codecs)
+                       break;
+
+               msleep(1);
+       }
+
+       /* All done, check for errors.
         */
-       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
-       if (!sis->codecs_present || !count)
+       if (!sis->codecs_present) {
+               printk(KERN_ERR "sis7019: could not find any codecs\n");
                return -EIO;
+       }
+
+       if (sis->codecs_present != codecs) {
+               printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
+                      sis->codecs_present, codecs);
+       }
 
        /* Let the hardware know that the audio driver is alive,
         * and enable PCM slots on the AC-link for L/R playback (3 & 4) and
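Codec detection now polls the AC97 status register until every codec requested via codecs reports ready, or 500 ms elapse; a shortfall only warns as long as at least one codec was found. The same jiffies-based idiom, reduced to a standalone sketch with illustrative names:

    /* Sketch: poll read_status() until all 'wanted' bits have been seen or
     * timeout_ms expires, sleeping ~1 ms between reads.  Returns the subset
     * of wanted bits that actually showed up; the caller decides whether a
     * shortfall is fatal.
     */
    static u16 wait_for_ready_bits(u16 (*read_status)(void *ctx), void *ctx,
                                   u16 wanted, unsigned int timeout_ms)
    {
            unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
            u16 seen = 0;

            while (time_before_eq(jiffies, timeout)) {
                    seen |= read_status(ctx);
                    if ((seen & wanted) == wanted)
                            break;
                    msleep(1);
            }
            return seen & wanted;
    }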
@@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci,
        if (!enable)
                goto error_out;
 
+       /* The user can specify which codecs should be present so that we
+        * can wait for them to show up if they are slow to recover from
+        * the AC97 cold reset. We default to a single codec, the primary.
+        *
+        * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
+        */
+       codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
+                 SIS_TERTIARY_CODEC_PRESENT;
+       if (!codecs)
+               codecs = SIS_PRIMARY_CODEC_PRESENT;
+
        rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
        if (rc < 0)
                goto error_out;
index 4584514..fa787d4 100644
@@ -33,7 +33,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_CX20442
        select SND_SOC_DA7210 if I2C
        select SND_SOC_DFBMCS320
-       select SND_SOC_JZ4740_CODEC if SOC_JZ4740
+       select SND_SOC_JZ4740_CODEC
        select SND_SOC_LM4857 if I2C
        select SND_SOC_MAX98088 if I2C
        select SND_SOC_MAX98095 if I2C
index e373f8f..3e1f4e1 100644
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <linux/delay.h>
 
index c5ca8cf..0441893 100644
@@ -863,13 +863,13 @@ static struct i2c_driver uda1380_i2c_driver = {
 
 static int __init uda1380_modinit(void)
 {
-       int ret;
+       int ret = 0;
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
        ret = i2c_add_driver(&uda1380_i2c_driver);
        if (ret != 0)
                pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
 #endif
-       return 0;
+       return ret;
 }
 module_init(uda1380_modinit);
 
index 0293763..5a14d5c 100644
@@ -60,6 +60,8 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
        }
 
        if (memcmp(fw->data, "WMFW", 4) != 0) {
+               memcpy(&data32, fw->data, sizeof(data32));
+               data32 = be32_to_cpu(data32);
                dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
                        name, data32);
                goto err;
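Before this change the error path printed data32 without ever filling it; the fix copies the first four bytes of the image and byte-swaps them, so the message shows the actual big-endian magic that failed the "WMFW" check. The same pattern as a tiny self-contained sketch, assuming a struct firmware obtained from request_firmware(); the helper name is hypothetical:

    /* Sketch: report the big-endian magic word of a rejected firmware blob.
     * memcpy() is used because fw->data need not be 32-bit aligned.
     */
    static void report_bad_fw_magic(struct device *dev, const struct firmware *fw)
    {
            u32 magic;

            if (fw->size < sizeof(magic))
                    return;
            memcpy(&magic, fw->data, sizeof(magic));
            dev_err(dev, "firmware has bad file magic %08x\n", be32_to_cpu(magic));
    }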
index 6c29885..d0c545b 100644
@@ -1325,15 +1325,15 @@ SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
-                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
-                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+                       adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+                       adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
 };
 
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
index 645c980..a33b04d 100644
@@ -1968,6 +1968,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
                break;
        case 24576000:
                ratediv = WM8996_SYSCLK_DIV;
+               wm8996->sysclk /= 2;
        case 12288000:
                snd_soc_update_bits(codec, WM8996_AIF_RATE,
                                    WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
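The missing break here is deliberate: the 24.576 MHz case sets the divider and then falls through to the 12.288 MHz handling, and the added line halves the stored sysclk so later rate calculations match what the hardware is actually fed. The relevant excerpt of that switch with the fall-through spelled out (a reduced sketch of the function above, not self-contained):

    switch (wm8996->sysclk) {
    case 24576000:
            ratediv = WM8996_SYSCLK_DIV;
            wm8996->sysclk /= 2;    /* hardware divides the source by two */
            /* fall through: from here on it behaves like 12.288 MHz */
    case 12288000:
            snd_soc_update_bits(codec, WM8996_AIF_RATE,
                                WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
            break;
    }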
index b133bfc..7383917 100644
@@ -28,7 +28,7 @@ config SND_MXC_SOC_WM1133_EV1
 
 config SND_SOC_MX27VIS_AIC32X4
        tristate "SoC audio support for Visstrim M10 boards"
-       depends on MACH_IMX27_VISSTRIM_M10
+       depends on MACH_IMX27_VISSTRIM_M10 && I2C
        select SND_SOC_TLV320AIC32X4
        select SND_MXC_SOC_MX2
        help
index 8f49e16..c62d715 100644
@@ -12,6 +12,7 @@ config SND_KIRKWOOD_SOC_I2S
 config SND_KIRKWOOD_SOC_OPENRD
        tristate "SoC Audio support for Kirkwood Openrd Client"
        depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+       depends on I2C
        select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_CS42L51
        help
@@ -20,7 +21,7 @@ config SND_KIRKWOOD_SOC_OPENRD
 
 config SND_KIRKWOOD_SOC_T5325
        tristate "SoC Audio support for HP t5325"
-       depends on SND_KIRKWOOD_SOC && MACH_T5325
+       depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
        select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_ALC5623
        help
index dea5aa4..f39d7dd 100644
@@ -357,3 +357,6 @@ static void __exit snd_mxs_pcm_exit(void)
        platform_driver_unregister(&mxs_pcm_driver);
 }
 module_exit(snd_mxs_pcm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-pcm-audio");
index 7fbeaec..1c57f66 100644
@@ -171,3 +171,4 @@ module_exit(mxs_sgtl5000_exit);
 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
 MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-sgtl5000");
index ffd2242..a0f7d3c 100644
@@ -151,6 +151,7 @@ config SND_SOC_ZYLONITE
 config SND_SOC_RAUMFELD
        tristate "SoC Audio support Raumfeld audio adapter"
        depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
+       depends on I2C && SPI_MASTER
        select SND_PXA_SOC_SSP
        select SND_SOC_CS4270
        select SND_SOC_AK4104
@@ -159,7 +160,7 @@ config SND_SOC_RAUMFELD
 
 config SND_PXA2XX_SOC_HX4700
        tristate "SoC Audio support for HP iPAQ hx4700"
-       depends on SND_PXA2XX_SOC && MACH_H4700
+       depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_AK4641
        help
index 65c1248..c664e33 100644
@@ -209,9 +209,10 @@ static int __devinit hx4700_audio_probe(struct platform_device *pdev)
        snd_soc_card_hx4700.dev = &pdev->dev;
        ret = snd_soc_register_card(&snd_soc_card_hx4700);
        if (ret)
-               return ret;
+               gpio_free_array(hx4700_audio_gpios,
+                               ARRAY_SIZE(hx4700_audio_gpios));
 
-       return 0;
+       return ret;
 }
 
 static int __devexit hx4700_audio_remove(struct platform_device *pdev)
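The probe fix above keeps the audio GPIOs from leaking when card registration fails: instead of returning straight away, the error path releases the array and then propagates the error. The acquire/undo pairing as a short sketch, assuming gpiolib's gpio_request_array()/gpio_free_array() and that the array was requested earlier in this probe function (a reduced excerpt, not self-contained):

    ret = gpio_request_array(hx4700_audio_gpios,
                             ARRAY_SIZE(hx4700_audio_gpios));
    if (ret)
            return ret;

    snd_soc_card_hx4700.dev = &pdev->dev;
    ret = snd_soc_register_card(&snd_soc_card_hx4700);
    if (ret)
            /* undo the request so a later re-probe can claim the GPIOs again */
            gpio_free_array(hx4700_audio_gpios, ARRAY_SIZE(hx4700_audio_gpios));

    return ret;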
index 1826acf..8e523fd 100644
@@ -101,7 +101,6 @@ static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_codec *codec = rtd->codec;
        struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int err;
 
        /* These endpoints are not being used. */
        snd_soc_dapm_nc_pin(dapm, "LINPUT2");
@@ -131,7 +130,7 @@ static struct snd_soc_card snd_soc_machine_jive = {
        .dai_link       = &jive_dai,
        .num_links      = 1,
 
-       .dapm_widgtets  = wm8750_dapm_widgets,
+       .dapm_widgets   = wm8750_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
        .dapm_routes    = audio_map,
        .num_dapm_routes = ARRAY_SIZE(audio_map),
index 3a0dbfc..8bd1dc5 100644
@@ -12,6 +12,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <sound/soc.h>
 
 static struct snd_soc_card smdk2443;
index 0c12b98..4220bb0 100644
@@ -58,7 +58,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
 }
 EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
 
-static struct snd_soc_platform_driver dummy_platform;
+static const struct snd_pcm_hardware dummy_dma_hardware = {
+       .formats                = 0xffffffff,
+       .channels_min           = 1,
+       .channels_max           = UINT_MAX,
+
+       /* Random values to keep userspace happy when checking constraints */
+       .info                   = SNDRV_PCM_INFO_INTERLEAVED |
+                                 SNDRV_PCM_INFO_BLOCK_TRANSFER,
+       .buffer_bytes_max       = 128*1024,
+       .period_bytes_min       = PAGE_SIZE,
+       .period_bytes_max       = PAGE_SIZE*2,
+       .periods_min            = 2,
+       .periods_max            = 128,
+};
+
+static int dummy_dma_open(struct snd_pcm_substream *substream)
+{
+       snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+
+       return 0;
+}
+
+static struct snd_pcm_ops dummy_dma_ops = {
+       .open           = dummy_dma_open,
+       .ioctl          = snd_pcm_lib_ioctl,
+};
+
+static struct snd_soc_platform_driver dummy_platform = {
+       .ops = &dummy_dma_ops,
+};
 
 static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
 {
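The dummy platform used to register an empty snd_soc_platform_driver; with the open callback above it now installs permissive hardware limits, so a card that has no real DMA backend still lets userspace refine its hw_params. A hypothetical machine-driver link showing how such a card would point at this platform; every name here except "snd-soc-dummy" is made up for illustration:

    /* Sketch: a dai_link relying on the dummy platform registered by this file. */
    static struct snd_soc_dai_link example_dai_link = {
            .name           = "example-link",
            .stream_name    = "example-stream",
            .cpu_dai_name   = "example-cpu-dai",      /* hypothetical CPU DAI */
            .codec_dai_name = "example-codec-dai",    /* hypothetical codec DAI */
            .codec_name     = "example-codec",        /* hypothetical codec device */
            .platform_name  = "snd-soc-dummy",
    };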
index 7d98676..955930e 100644
@@ -463,7 +463,8 @@ static int run_perf_stat(int argc __used, const char **argv)
 
        list_for_each_entry(counter, &evsel_list->entries, node) {
                if (create_perf_stat_counter(counter, first) < 0) {
-                       if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
+                       if (errno == EINVAL || errno == ENOSYS ||
+                           errno == ENOENT || errno == EOPNOTSUPP) {
                                if (verbose)
                                        ui__warning("%s event is not supported by the kernel.\n",
                                                    event_name(counter));
index bcd05d0..33c17a2 100644
@@ -388,7 +388,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
                /*
                 * write event string as passed on cmdline
                 */
-               ret = do_write_string(fd, attr->name);
+               ret = do_write_string(fd, event_name(attr));
                if (ret < 0)
                        return ret;
                /*