Merge branch 'davem.r8169.fixes' of git://violet.fr.zoreil.com/romieu/linux
author David S. Miller <davem@davemloft.net>
Tue, 20 Sep 2011 18:42:45 +0000 (14:42 -0400)
committer David S. Miller <davem@davemloft.net>
Tue, 20 Sep 2011 18:42:45 +0000 (14:42 -0400)
320 files changed:
Documentation/ABI/testing/sysfs-class-scsi_host [new file with mode: 0644]
Documentation/DocBook/media/v4l/controls.xml
Documentation/cgroups/memory.txt
Documentation/feature-removal-schedule.txt
Documentation/hwmon/max16065
Documentation/ioctl/ioctl-number.txt
Documentation/kernel-parameters.txt
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/arm/Kconfig
arch/arm/include/asm/hardware/cache-l2x0.h
arch/arm/include/asm/pmu.h
arch/arm/kernel/pmu.c
arch/arm/kernel/relocate_kernel.S
arch/arm/kernel/setup.c
arch/arm/kernel/smp_twd.c
arch/arm/mach-at91/at91sam9261.c
arch/arm/mach-cns3xxx/include/mach/entry-macro.S
arch/arm/mach-cns3xxx/include/mach/system.h
arch/arm/mach-cns3xxx/include/mach/uncompress.h
arch/arm/mach-cns3xxx/pcie.c
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/include/mach/psc.h
arch/arm/mach-davinci/sleep.S
arch/arm/mach-ep93xx/include/mach/ts72xx.h
arch/arm/mach-exynos4/clock.c
arch/arm/mach-exynos4/cpu.c
arch/arm/mach-exynos4/include/mach/irqs.h
arch/arm/mach-exynos4/include/mach/regs-pmu.h
arch/arm/mach-exynos4/irq-eint.c
arch/arm/mach-exynos4/mach-universal_c210.c
arch/arm/mach-exynos4/setup-usb-phy.c
arch/arm/mach-footbridge/Kconfig
arch/arm/mach-footbridge/dc21285.c
arch/arm/mach-imx/mach-cpuimx27.c
arch/arm/mach-imx/mach-cpuimx35.c
arch/arm/mach-imx/mach-eukrea_cpuimx25.c
arch/arm/mach-integrator/integrator_ap.c
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/clock44xx_data.c
arch/arm/mach-omap2/clockdomain.c
arch/arm/mach-omap2/omap_hwmod_2430_data.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/powerdomain.c
arch/arm/mach-orion5x/dns323-setup.c
arch/arm/mach-orion5x/pci.c
arch/arm/mach-prima2/clock.c
arch/arm/mach-prima2/irq.c
arch/arm/mach-prima2/rstc.c
arch/arm/mach-prima2/timer.c
arch/arm/mach-realview/include/mach/system.h
arch/arm/mach-s3c64xx/pm.c
arch/arm/mach-s5p64x0/irq-eint.c
arch/arm/mach-s5pv210/pm.c
arch/arm/mach-vexpress/v2m.c
arch/arm/mm/abort-macro.S
arch/arm/mm/cache-l2x0.c
arch/arm/mm/init.c
arch/arm/mm/proc-arm920.S
arch/arm/mm/proc-arm926.S
arch/arm/mm/proc-sa1100.S
arch/arm/mm/proc-v6.S
arch/arm/mm/proc-v7.S
arch/arm/mm/proc-xsc3.S
arch/arm/plat-omap/omap_device.c
arch/arm/plat-s5p/clock.c
arch/arm/plat-s5p/irq-gpioint.c
arch/arm/plat-samsung/include/plat/backlight.h
arch/arm/plat-samsung/irq-vic-timer.c
arch/arm/tools/mach-types
arch/openrisc/include/asm/dma-mapping.h
arch/openrisc/include/asm/sigcontext.h
arch/openrisc/kernel/dma.c
arch/openrisc/kernel/signal.c
arch/parisc/kernel/syscall_table.S
arch/powerpc/boot/dts/p1023rds.dts
arch/powerpc/configs/85xx/p1023rds_defconfig
arch/powerpc/configs/corenet32_smp_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/configs/mpc85xx_defconfig
arch/powerpc/configs/mpc85xx_smp_defconfig
arch/powerpc/include/asm/systbl.h
arch/sparc/kernel/setup_64.c
arch/um/Kconfig.x86
arch/um/Makefile
arch/um/drivers/line.c
arch/um/drivers/xterm.c
arch/um/include/asm/ptrace-generic.h
arch/um/include/shared/line.h
arch/um/include/shared/registers.h
arch/um/kernel/process.c
arch/um/kernel/ptrace.c
arch/um/os-Linux/registers.c
arch/um/os-Linux/skas/mem.c
arch/um/os-Linux/skas/process.c
arch/um/sys-i386/asm/ptrace.h
arch/um/sys-i386/ptrace.c
arch/um/sys-i386/shared/sysdep/ptrace.h
arch/um/sys-x86_64/ptrace.c
arch/um/sys-x86_64/shared/sysdep/ptrace.h
arch/x86/include/asm/alternative-asm.h
arch/x86/include/asm/alternative.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/pvclock.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/pci/acpi.c
arch/x86/xen/mmu.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
arch/x86/xen/time.c
arch/x86/xen/xen-asm_32.S
drivers/acpi/acpica/acconfig.h
drivers/acpi/apei/Kconfig
drivers/acpi/apei/apei-base.c
drivers/base/regmap/regmap.c
drivers/bluetooth/btusb.c
drivers/bluetooth/btwilink.c
drivers/cpufreq/pcc-cpufreq.c
drivers/dma/ste_dma40.c
drivers/firewire/ohci.c
drivers/gpio/gpio-generic.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nv04_crtc.c
drivers/gpu/drm/nouveau/nv50_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_clocks.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/hid-wacom.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/coretemp.c
drivers/hwmon/max16065.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwmon/pmbus/ucd9000.c
drivers/hwmon/pmbus/ucd9200.c
drivers/i2c/busses/i2c-pxa-pci.c
drivers/i2c/busses/i2c-tegra.c
drivers/input/keyboard/adp5588-keys.c
drivers/input/misc/cm109.c
drivers/input/mouse/bcm5974.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/wacom_w8001.c
drivers/iommu/amd_iommu.c
drivers/iommu/dmar.c
drivers/leds/ledtrig-timer.c
drivers/md/linear.h
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb/dvb-usb/vp7045.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/nuvoton-cir.h
drivers/media/video/gspca/ov519.c
drivers/media/video/gspca/sonixj.c
drivers/media/video/pwc/pwc-v4l.c
drivers/media/video/via-camera.c
drivers/mfd/max8997.c
drivers/mfd/omap-usb-host.c
drivers/mfd/tps65910-irq.c
drivers/mfd/twl4030-madc.c
drivers/mfd/wm8350-gpio.c
drivers/misc/pti.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/host.h
drivers/mmc/core/sd.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sh_mobile_sdhi.c
drivers/mtd/ubi/debug.h
drivers/net/arm/am79c961a.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/pci/hotplug/pcihp_slot.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/rtc/rtc-ep93xx.c
drivers/rtc/rtc-imxdi.c
drivers/rtc/rtc-lib.c
drivers/rtc/rtc-s3c.c
drivers/rtc/rtc-twl.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/hpsa.c
drivers/scsi/isci/host.c
drivers/scsi/isci/host.h
drivers/scsi/isci/init.c
drivers/scsi/isci/phy.c
drivers/scsi/isci/registers.h
drivers/scsi/isci/request.c
drivers/scsi/isci/unsolicited_frame_control.c
drivers/scsi/isci/unsolicited_frame_control.h
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/Kconfig
drivers/staging/comedi/drivers/ni_labpc.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_cdb.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tcm_fc.h
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/target/tcm_fc/tfc_io.c
drivers/tty/serial/crisv10.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-ring.c
drivers/video/backlight/backlight.c
drivers/xen/events.c
fs/9p/v9fs_vfs.h
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/block_dev.c
fs/btrfs/btrfs_inode.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/transaction.c
fs/btrfs/xattr.c
fs/ceph/mds_client.c
fs/ceph/super.c
fs/ext4/ext4.h
fs/ext4/inode.c
fs/ext4/page-io.c
fs/fuse/dev.c
fs/fuse/inode.c
fs/hfsplus/super.c
fs/hfsplus/wrapper.c
fs/namei.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4renewd.c
fs/nfs/nfs4state.c
fs/nfs/super.c
fs/nfs/write.c
fs/ubifs/debug.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_super.c
include/linux/basic_mmio_gpio.h
include/linux/memcontrol.h
include/linux/mfd/wm8994/pdata.h
include/linux/perf_event.h
include/linux/regulator/consumer.h
include/linux/swap.h
include/net/9p/9p.h
include/net/cfg80211.h
include/net/tcp.h
kernel/events/core.c
kernel/irq/chip.c
kernel/sched.c
kernel/taskstats.c
kernel/time/alarmtimer.c
kernel/tsacct.c
kernel/workqueue.c
lib/Makefile
lib/sha1.c
mm/filemap.c
mm/memcontrol.c
mm/mempolicy.c
mm/slub.c
mm/vmalloc.c
mm/vmscan.c
mm/vmstat.c
net/9p/trans_virtio.c
net/bluetooth/hci_event.c
net/ceph/msgpool.c
net/ceph/osd_client.c
net/ipv4/tcp_input.c
net/wireless/reg.c
net/wireless/sme.c
sound/core/pcm_lib.c
sound/pci/hda/hda_codec.c
sound/pci/hda/patch_cirrus.c
sound/soc/blackfin/bf5xx-ad193x.c
sound/soc/fsl/mpc5200_dma.c
sound/soc/imx/imx-pcm-fiq.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/soc-cache.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-jack.c
tools/perf/arch/arm/util/dwarf-regs.c

diff --git a/Documentation/ABI/testing/sysfs-class-scsi_host b/Documentation/ABI/testing/sysfs-class-scsi_host
new file mode 100644 (file)
index 0000000..29a4f89
--- /dev/null
@@ -0,0 +1,13 @@
+What:          /sys/class/scsi_host/hostX/isci_id
+Date:          June 2011
+Contact:       Dave Jiang <dave.jiang@intel.com>
+Description:
+               This file contains the enumerated host ID for the Intel
+               SCU controller. The Intel(R) C600 Series Chipset SATA/SAS
+               Storage Control Unit embeds up to two 4-port controllers in
+               a single PCI device.  The controllers are enumerated in order
+               which usually means the lowest number scsi_host corresponds
+               with the first controller, but this association is not
+               guaranteed.  The 'isci_id' attribute unambiguously identifies
+               the controller index: '0' for the first controller,
+               '1' for the second.
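
A minimal userspace sketch of how this attribute could be consumed, assuming only the sysfs layout described above; the host number, helper name and error handling are illustrative and not taken from the driver:

	/* read_isci_id.c - sketch only: map a scsi_host to its SCU controller index */
	#include <stdio.h>
	#include <stdlib.h>

	static int read_isci_id(int host)
	{
		char path[128], buf[16];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/class/scsi_host/host%d/isci_id", host);
		f = fopen(path, "r");
		if (!f)
			return -1;		/* not an isci host, or attribute absent */
		if (!fgets(buf, sizeof(buf), f)) {
			fclose(f);
			return -1;
		}
		fclose(f);
		return atoi(buf);		/* 0 = first controller, 1 = second */
	}

	int main(void)
	{
		int id = read_isci_id(0);	/* e.g. host0 */

		if (id < 0)
			printf("host0: no isci_id attribute\n");
		else
			printf("host0 is controller %d\n", id);
		return 0;
	}
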
index 8516401..23fdf79 100644 (file)
@@ -1455,7 +1455,7 @@ Applicable to the H264 encoder.</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-h264-vui-sar-idc">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC</constant>&nbsp;</entry>
                <entry>enum&nbsp;v4l2_mpeg_video_h264_vui_sar_idc</entry>
              </row>
@@ -1561,7 +1561,7 @@ Applicable to the H264 encoder.</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-h264-level">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LEVEL</constant>&nbsp;</entry>
                <entry>enum&nbsp;v4l2_mpeg_video_h264_level</entry>
              </row>
@@ -1641,7 +1641,7 @@ Possible values are:</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-mpeg4-level">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL</constant>&nbsp;</entry>
                <entry>enum&nbsp;v4l2_mpeg_video_mpeg4_level</entry>
              </row>
@@ -1689,9 +1689,9 @@ Possible values are:</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-h264-profile">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_PROFILE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_h264_profile</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_h264_profile</entry>
              </row>
              <row><entry spanname="descr">The profile information for H264.
 Applicable to the H264 encoder.
@@ -1774,9 +1774,9 @@ Possible values are:</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-mpeg4-profile">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_mpeg4_profile</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_mpeg4_profile</entry>
              </row>
              <row><entry spanname="descr">The profile information for MPEG4.
 Applicable to the MPEG4 encoder.
@@ -1820,9 +1820,9 @@ Applicable to the encoder.
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-multi-slice-mode">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_multi_slice_mode</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_multi_slice_mode</entry>
              </row>
              <row><entry spanname="descr">Determines how the encoder should handle division of frame into slices.
 Applicable to the encoder.
@@ -1868,9 +1868,9 @@ Applicable to the encoder.</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-h264-loop-filter-mode">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_h264_loop_filter_mode</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_h264_loop_filter_mode</entry>
              </row>
              <row><entry spanname="descr">Loop filter mode for H264 encoder.
 Possible values are:</entry>
@@ -1913,9 +1913,9 @@ Applicable to the H264 encoder.</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-h264-entropy-mode">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_h264_symbol_mode</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_h264_entropy_mode</entry>
              </row>
              <row><entry spanname="descr">Entropy coding mode for H264 - CABAC/CAVALC.
 Applicable to the H264 encoder.
@@ -2140,9 +2140,9 @@ previous frames. Applicable to the H264 encoder.</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-header-mode">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_HEADER_MODE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_header_mode</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_header_mode</entry>
              </row>
              <row><entry spanname="descr">Determines whether the header is returned as the first buffer or is
 it returned together with the first frame. Applicable to encoders.
@@ -2320,9 +2320,9 @@ Valid only when H.264 and macroblock level RC is enabled (<constant>V4L2_CID_MPE
 Applicable to the H264 encoder.</entry>
              </row>
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-mfc51-video-frame-skip-mode">
                <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_mfc51_frame_skip_mode</entry>
+               <entry>enum&nbsp;v4l2_mpeg_mfc51_video_frame_skip_mode</entry>
              </row>
              <row><entry spanname="descr">
 Indicates in what conditions the encoder should skip frames. If encoding a frame would cause the encoded stream to be larger then
@@ -2361,9 +2361,9 @@ the stream will meet tight bandwidth contraints. Applicable to encoders.
 </entry>
              </row>
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-mfc51-video-force-frame-type">
                <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_mfc51_force_frame_type</entry>
+               <entry>enum&nbsp;v4l2_mpeg_mfc51_video_force_frame_type</entry>
              </row>
              <row><entry spanname="descr">Force a frame type for the next queued buffer. Applicable to encoders.
 Possible values are:</entry>
index 6f3c598..06eb6d9 100644 (file)
@@ -380,7 +380,7 @@ will be charged as a new owner of it.
 
 5.2 stat file
 
-5.2.1 memory.stat file includes following statistics
+memory.stat file includes following statistics
 
 # per-memory cgroup local status
 cache          - # of bytes of page cache memory.
@@ -438,89 +438,6 @@ Note:
         file_mapped is accounted only when the memory cgroup is owner of page
         cache.)
 
-5.2.2 memory.vmscan_stat
-
-memory.vmscan_stat includes statistics information for memory scanning and
-freeing, reclaiming. The statistics shows memory scanning information since
-memory cgroup creation and can be reset to 0 by writing 0 as
-
- #echo 0 > ../memory.vmscan_stat
-
-This file contains following statistics.
-
-[param]_[file_or_anon]_pages_by_[reason]_[under_heararchy]
-[param]_elapsed_ns_by_[reason]_[under_hierarchy]
-
-For example,
-
-  scanned_file_pages_by_limit indicates the number of scanned
-  file pages at vmscan.
-
-Now, 3 parameters are supported
-
-  scanned - the number of pages scanned by vmscan
-  rotated - the number of pages activated at vmscan
-  freed   - the number of pages freed by vmscan
-
-If "rotated" is high against scanned/freed, the memcg seems busy.
-
-Now, 2 reason are supported
-
-  limit - the memory cgroup's limit
-  system - global memory pressure + softlimit
-           (global memory pressure not under softlimit is not handled now)
-
-When under_hierarchy is added in the tail, the number indicates the
-total memcg scan of its children and itself.
-
-elapsed_ns is a elapsed time in nanosecond. This may include sleep time
-and not indicates CPU usage. So, please take this as just showing
-latency.
-
-Here is an example.
-
-# cat /cgroup/memory/A/memory.vmscan_stat
-scanned_pages_by_limit 9471864
-scanned_anon_pages_by_limit 6640629
-scanned_file_pages_by_limit 2831235
-rotated_pages_by_limit 4243974
-rotated_anon_pages_by_limit 3971968
-rotated_file_pages_by_limit 272006
-freed_pages_by_limit 2318492
-freed_anon_pages_by_limit 962052
-freed_file_pages_by_limit 1356440
-elapsed_ns_by_limit 351386416101
-scanned_pages_by_system 0
-scanned_anon_pages_by_system 0
-scanned_file_pages_by_system 0
-rotated_pages_by_system 0
-rotated_anon_pages_by_system 0
-rotated_file_pages_by_system 0
-freed_pages_by_system 0
-freed_anon_pages_by_system 0
-freed_file_pages_by_system 0
-elapsed_ns_by_system 0
-scanned_pages_by_limit_under_hierarchy 9471864
-scanned_anon_pages_by_limit_under_hierarchy 6640629
-scanned_file_pages_by_limit_under_hierarchy 2831235
-rotated_pages_by_limit_under_hierarchy 4243974
-rotated_anon_pages_by_limit_under_hierarchy 3971968
-rotated_file_pages_by_limit_under_hierarchy 272006
-freed_pages_by_limit_under_hierarchy 2318492
-freed_anon_pages_by_limit_under_hierarchy 962052
-freed_file_pages_by_limit_under_hierarchy 1356440
-elapsed_ns_by_limit_under_hierarchy 351386416101
-scanned_pages_by_system_under_hierarchy 0
-scanned_anon_pages_by_system_under_hierarchy 0
-scanned_file_pages_by_system_under_hierarchy 0
-rotated_pages_by_system_under_hierarchy 0
-rotated_anon_pages_by_system_under_hierarchy 0
-rotated_file_pages_by_system_under_hierarchy 0
-freed_pages_by_system_under_hierarchy 0
-freed_anon_pages_by_system_under_hierarchy 0
-freed_file_pages_by_system_under_hierarchy 0
-elapsed_ns_by_system_under_hierarchy 0
-
 5.3 swappiness
 
 Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
index c4a6e14..4dc4654 100644 (file)
@@ -592,3 +592,11 @@ Why:    In 3.0, we can now autodetect internal 3G device and already have
        interface that was used by acer-wmi driver. It will replaced by
        information log when acer-wmi initial.
 Who:    Lee, Chun-Yi <jlee@novell.com>
+
+----------------------------
+What:  The XFS nodelaylog mount option
+When:  3.3
+Why:   The delaylog mode that has been the default since 2.6.39 has proven
+       stable, and the old code is in the way of additional improvements in
+       the log code.
+Who:   Christoph Hellwig <hch@lst.de>
index 44b4f61..c11f64a 100644 (file)
@@ -62,6 +62,13 @@ can be safely used to identify the chip. You will have to instantiate
 the devices explicitly. Please see Documentation/i2c/instantiating-devices for
 details.
 
+WARNING: Do not access chip registers using the i2cdump command, and do not use
+any of the i2ctools commands on a command register (0xa5 to 0xac). The chips
+supported by this driver interpret any access to a command register (including
+read commands) as a request to execute the command in question. This may result in
+power loss, board resets, and/or Flash corruption. Worst case, your board may
+turn into a brick.
+
 
 Sysfs entries
 -------------
index 845a191..54078ed 100644 (file)
@@ -319,4 +319,6 @@ Code  Seq#(hex)     Include File            Comments
                                        <mailto:thomas@winischhofer.net>
 0xF4   00-1F   video/mbxfb.h           mbxfb
                                        <mailto:raph@8d.com>
+0xF6   all     LTTng                   Linux Trace Toolkit Next Generation
+                                       <mailto:mathieu.desnoyers@efficios.com>
 0xFD   all     linux/dm-ioctl.h
index 614d038..854ed5c 100644 (file)
@@ -2086,9 +2086,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Override pmtimer IOPort with a hex value.
                        e.g. pmtmr=0x508
 
-       pnp.debug       [PNP]
-                       Enable PNP debug messages.  This depends on the
-                       CONFIG_PNP_DEBUG_MESSAGES option.
+       pnp.debug=1     [PNP]
+                       Enable PNP debug messages (depends on the
+                       CONFIG_PNP_DEBUG_MESSAGES option).  Change at run-time
+                       via /sys/module/pnp/parameters/debug.  We always show
+                       current resource usage; turning this on also shows
+                       possible settings and some assignment information.
 
        pnpacpi=        [ACPI]
                        { off }
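
As a sketch only (the path is taken from the text above; root access and a kernel built with CONFIG_PNP_DEBUG_MESSAGES are assumed), the run-time knob can be flipped from a small program:

	/* pnp_debug_on.c - sketch: enable PNP debug messages at run time */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/module/pnp/parameters/debug", "w");

		if (!f) {
			perror("/sys/module/pnp/parameters/debug");
			return 1;
		}
		fputs("1\n", f);	/* write "0" to turn the extra messages off again */
		return fclose(f) ? 1 : 0;
	}
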
index 959ff41..ae8820e 100644 (file)
@@ -3259,6 +3259,17 @@ F:       Documentation/input/multi-touch-protocol.txt
 F:     drivers/input/input-mt.c
 K:     \b(ABS|SYN)_MT_
 
+INTEL C600 SERIES SAS CONTROLLER DRIVER
+M:     Intel SCU Linux support <intel-linux-scu@intel.com>
+M:     Dan Williams <dan.j.williams@intel.com>
+M:     Dave Jiang <dave.jiang@intel.com>
+M:     Ed Nadolski <edmund.nadolski@intel.com>
+L:     linux-scsi@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git
+S:     Maintained
+F:     drivers/scsi/isci/
+F:     firmware/isci/
+
 INTEL IDLE DRIVER
 M:     Len Brown <lenb@kernel.org>
 L:     linux-pm@lists.linux-foundation.org
@@ -4772,7 +4783,7 @@ F:        drivers/net/wireless/orinoco/
 
 OSD LIBRARY and FILESYSTEM
 M:     Boaz Harrosh <bharrosh@panasas.com>
-M:     Benny Halevy <bhalevy@panasas.com>
+M:     Benny Halevy <bhalevy@tonian.com>
 L:     osd-dev@open-osd.org
 W:     http://open-osd.org
 T:     git git://git.open-osd.org/open-osd.git
@@ -7198,6 +7209,9 @@ W:        http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
 S:     Supported
 F:     Documentation/hwmon/wm83??
 F:     drivers/leds/leds-wm83*.c
+F:     drivers/input/misc/wm831x-on.c
+F:     drivers/input/touchscreen/wm831x-ts.c
+F:     drivers/input/touchscreen/wm97*.c
 F:     drivers/mfd/wm8*.c
 F:     drivers/power/wm83*.c
 F:     drivers/rtc/rtc-wm83*.c
@@ -7207,6 +7221,7 @@ F:        drivers/watchdog/wm83*_wdt.c
 F:     include/linux/mfd/wm831x/
 F:     include/linux/mfd/wm8350/
 F:     include/linux/mfd/wm8400*
+F:     include/linux/wm97xx.h
 F:     include/sound/wm????.h
 F:     sound/soc/codecs/wm*
 
index c3e90c5..522fa47 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = "Divemaster Edition"
 
 # *DOCUMENTATION*
index 60cde53..8bb9362 100644 (file)
@@ -51,7 +51,7 @@ config GENERIC_CMOS_UPDATE
         def_bool y
 
 config GENERIC_GPIO
-       def_bool y
+       bool
 
 config ZONE_DMA
        bool
index 5ebc5d9..3269576 100644 (file)
@@ -1271,6 +1271,18 @@ config ARM_ERRATA_754327
          This workaround defines cpu_relax() as smp_mb(), preventing correctly
          written polling loops from denying visibility of updates to memory.
 
+config ARM_ERRATA_364296
+       bool "ARM errata: Possible cache data corruption with hit-under-miss enabled"
+       depends on CPU_V6 && !SMP
+       help
+         This option enables the workaround for the 364296 ARM1136
+         r0p2 erratum (possible cache data corruption with
+         hit-under-miss enabled). It sets the undocumented bit 31 in
+         the auxiliary control register and the FI bit in the control
+         register, thus disabling hit-under-miss without putting the
+         processor into full low interrupt latency mode. ARM11MPCore
+         is not affected.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
index 16bd480..99a6ed7 100644 (file)
 #define L2X0_CLEAN_INV_LINE_PA         0x7F0
 #define L2X0_CLEAN_INV_LINE_IDX                0x7F8
 #define L2X0_CLEAN_INV_WAY             0x7FC
-#define L2X0_LOCKDOWN_WAY_D            0x900
-#define L2X0_LOCKDOWN_WAY_I            0x904
+/*
+ * The lockdown registers repeat 8 times for L310, the L210 has only one
+ * D and one I lockdown register at 0x0900 and 0x0904.
+ */
+#define L2X0_LOCKDOWN_WAY_D_BASE       0x900
+#define L2X0_LOCKDOWN_WAY_I_BASE       0x904
+#define L2X0_LOCKDOWN_STRIDE           0x08
 #define L2X0_TEST_OPERATION            0xF00
 #define L2X0_LINE_DATA                 0xF10
 #define L2X0_LINE_TAG                  0xF30
@@ -64,7 +69,7 @@
 #define L2X0_AUX_CTRL_MASK                     0xc0000fff
 #define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT      16
 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT           17
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK            (0x3 << 17)
+#define L2X0_AUX_CTRL_WAY_SIZE_MASK            (0x7 << 17)
 #define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT     22
 #define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT                26
 #define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT                27
index 67c70a3..b7e82c4 100644 (file)
@@ -41,7 +41,7 @@ struct arm_pmu_platdata {
  * encoded error on failure.
  */
 extern struct platform_device *
-reserve_pmu(enum arm_pmu_type device);
+reserve_pmu(enum arm_pmu_type type);
 
 /**
  * release_pmu() - Relinquish control of the performance counters
@@ -62,26 +62,26 @@ release_pmu(enum arm_pmu_type type);
  * the actual hardware initialisation.
  */
 extern int
-init_pmu(enum arm_pmu_type device);
+init_pmu(enum arm_pmu_type type);
 
 #else /* CONFIG_CPU_HAS_PMU */
 
 #include <linux/err.h>
 
 static inline struct platform_device *
-reserve_pmu(enum arm_pmu_type device)
+reserve_pmu(enum arm_pmu_type type)
 {
        return ERR_PTR(-ENODEV);
 }
 
 static inline int
-release_pmu(struct platform_device *pdev)
+release_pmu(enum arm_pmu_type type)
 {
        return -ENODEV;
 }
 
 static inline int
-init_pmu(enum arm_pmu_type device)
+init_pmu(enum arm_pmu_type type)
 {
        return -ENODEV;
 }
index 2b70709..c53474f 100644 (file)
@@ -31,7 +31,7 @@ static int __devinit pmu_register(struct platform_device *pdev,
 {
        if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
                pr_warning("received registration request for unknown "
-                               "device %d\n", type);
+                               "PMU device type %d\n", type);
                return -EINVAL;
        }
 
@@ -112,17 +112,17 @@ static int __init register_pmu_driver(void)
 device_initcall(register_pmu_driver);
 
 struct platform_device *
-reserve_pmu(enum arm_pmu_type device)
+reserve_pmu(enum arm_pmu_type type)
 {
        struct platform_device *pdev;
 
-       if (test_and_set_bit_lock(device, &pmu_lock)) {
+       if (test_and_set_bit_lock(type, &pmu_lock)) {
                pdev = ERR_PTR(-EBUSY);
-       } else if (pmu_devices[device] == NULL) {
-               clear_bit_unlock(device, &pmu_lock);
+       } else if (pmu_devices[type] == NULL) {
+               clear_bit_unlock(type, &pmu_lock);
                pdev = ERR_PTR(-ENODEV);
        } else {
-               pdev = pmu_devices[device];
+               pdev = pmu_devices[type];
        }
 
        return pdev;
@@ -130,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device)
 EXPORT_SYMBOL_GPL(reserve_pmu);
 
 int
-release_pmu(enum arm_pmu_type device)
+release_pmu(enum arm_pmu_type type)
 {
-       if (WARN_ON(!pmu_devices[device]))
+       if (WARN_ON(!pmu_devices[type]))
                return -EINVAL;
-       clear_bit_unlock(device, &pmu_lock);
+       clear_bit_unlock(type, &pmu_lock);
        return 0;
 }
 EXPORT_SYMBOL_GPL(release_pmu);
@@ -182,17 +182,17 @@ init_cpu_pmu(void)
 }
 
 int
-init_pmu(enum arm_pmu_type device)
+init_pmu(enum arm_pmu_type type)
 {
        int err = 0;
 
-       switch (device) {
+       switch (type) {
        case ARM_PMU_DEVICE_CPU:
                err = init_cpu_pmu();
                break;
        default:
-               pr_warning("attempt to initialise unknown device %d\n",
-                               device);
+               pr_warning("attempt to initialise PMU of unknown "
+                          "type %d\n", type);
                err = -EINVAL;
        }
 
index 9cf4cbf..d0cdedf 100644 (file)
@@ -57,7 +57,8 @@ relocate_new_kernel:
        mov r0,#0
        ldr r1,kexec_mach_type
        ldr r2,kexec_boot_atags
-       mov pc,lr
+ ARM(  mov pc, lr      )
+ THUMB(        bx lr           )
 
        .align
 
index 70bca64..e514c76 100644 (file)
@@ -280,18 +280,19 @@ static void __init cacheid_init(void)
        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
+                       arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if ((cachetype & (3 << 14)) == 1 << 14)
                                cacheid |= CACHEID_ASID_TAGGED;
-                       else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
-                               cacheid |= CACHEID_VIPT_I_ALIASING;
-               } else if (cachetype & (1 << 23)) {
-                       cacheid = CACHEID_VIPT_ALIASING;
                } else {
-                       cacheid = CACHEID_VIPT_NONALIASING;
-                       if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
-                               cacheid |= CACHEID_VIPT_I_ALIASING;
+                       arch = CPU_ARCH_ARMv6;
+                       if (cachetype & (1 << 23))
+                               cacheid = CACHEID_VIPT_ALIASING;
+                       else
+                               cacheid = CACHEID_VIPT_NONALIASING;
                }
+               if (cpu_has_aliasing_icache(arch))
+                       cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }
index 2c277d4..01c1862 100644 (file)
@@ -137,8 +137,8 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
        clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
        clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
 
+       clockevents_register_device(clk);
+
        /* Make sure our local interrupt controller has this enabled */
        gic_enable_ppi(clk->irq);
-
-       clockevents_register_device(clk);
 }
index d522b47..6c8e3b5 100644 (file)
@@ -157,7 +157,7 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
-       CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc1_clk),
+       CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
        CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
        CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
        CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
index 6bd83ed..d87bfc3 100644 (file)
@@ -8,7 +8,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <mach/hardware.h>
 #include <asm/hardware/entry-macro-gic.S>
 
                .macro  disable_fiq
index 58bb03a..4f16c9b 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <linux/io.h>
 #include <asm/proc-fns.h>
-#include <mach/hardware.h>
 
 static inline void arch_idle(void)
 {
index de8ead9..a91b605 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <mach/cns3xxx.h>
 
 #define AMBA_UART_DR(base)     (*(volatile unsigned char *)((base) + 0x00))
index 06fd25d..0f8fca4 100644 (file)
@@ -49,7 +49,7 @@ static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
        return &cns3xxx_pcie[root->domain];
 }
 
-static struct cns3xxx_pcie *pdev_to_cnspci(struct pci_dev *dev)
+static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
 {
        return sysdata_to_cnspci(dev->sysdata);
 }
index bd53945..008d514 100644 (file)
@@ -115,6 +115,32 @@ static struct spi_board_info da850evm_spi_info[] = {
        },
 };
 
+#ifdef CONFIG_MTD
+static void da850_evm_m25p80_notify_add(struct mtd_info *mtd)
+{
+       char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
+       size_t retlen;
+
+       if (!strcmp(mtd->name, "MAC-Address")) {
+               mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
+               if (retlen == ETH_ALEN)
+                       pr_info("Read MAC addr from SPI Flash: %pM\n",
+                               mac_addr);
+       }
+}
+
+static struct mtd_notifier da850evm_spi_notifier = {
+       .add    = da850_evm_m25p80_notify_add,
+};
+
+static void da850_evm_setup_mac_addr(void)
+{
+       register_mtd_user(&da850evm_spi_notifier);
+}
+#else
+static void da850_evm_setup_mac_addr(void) { }
+#endif
+
 static struct mtd_partition da850_evm_norflash_partition[] = {
        {
                .name           = "bootloaders + env",
@@ -1244,6 +1270,8 @@ static __init void da850_evm_init(void)
        if (ret)
                pr_warning("da850_evm_init: sata registration failed: %d\n",
                                ret);
+
+       da850_evm_setup_mac_addr();
 }
 
 #ifdef CONFIG_SERIAL_8250_CONSOLE
index 47fd0bc..fa59c09 100644 (file)
 #define PSC_STATE_DISABLE      2
 #define PSC_STATE_ENABLE       3
 
-#define MDSTAT_STATE_MASK      0x1f
+#define MDSTAT_STATE_MASK      0x3f
 #define MDCTL_FORCE            BIT(31)
 
 #ifndef __ASSEMBLER__
index fb5e72b..5f1e045 100644 (file)
@@ -217,7 +217,11 @@ ddr2clk_stop_done:
 ENDPROC(davinci_ddr_psc_config)
 
 CACHE_FLUSH:
-       .word   arm926_flush_kern_cache_all
+#ifdef CONFIG_CPU_V6
+       .word   v6_flush_kern_cache_all
+#else
+       .word   arm926_flush_kern_cache_all
+#endif
 
 ENTRY(davinci_cpu_suspend_sz)
        .word   . - davinci_cpu_suspend
index 0eabec6..f1397a1 100644 (file)
@@ -6,7 +6,7 @@
  * TS72xx memory map:
  *
  * virt                phys            size
- * febff000    22000000        4K      model number register
+ * febff000    22000000        4K      model number register (bits 0-2)
  * febfe000    22400000        4K      options register
  * febfd000    22800000        4K      options register #2
  * febf9000    10800000        4K      TS-5620 RTC index register
@@ -20,6 +20,9 @@
 #define TS72XX_MODEL_TS7200            0x00
 #define TS72XX_MODEL_TS7250            0x01
 #define TS72XX_MODEL_TS7260            0x02
+#define TS72XX_MODEL_TS7300            0x03
+#define TS72XX_MODEL_TS7400            0x04
+#define TS72XX_MODEL_MASK              0x07
 
 
 #define TS72XX_OPTIONS_PHYS_BASE       0x22400000
 
 #ifndef __ASSEMBLY__
 
+static inline int ts72xx_model(void)
+{
+       return __raw_readb(TS72XX_MODEL_VIRT_BASE) & TS72XX_MODEL_MASK;
+}
+
 static inline int board_is_ts7200(void)
 {
-       return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7200;
+       return ts72xx_model() == TS72XX_MODEL_TS7200;
 }
 
 static inline int board_is_ts7250(void)
 {
-       return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7250;
+       return ts72xx_model() == TS72XX_MODEL_TS7250;
 }
 
 static inline int board_is_ts7260(void)
 {
-       return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7260;
+       return ts72xx_model() == TS72XX_MODEL_TS7260;
+}
+
+static inline int board_is_ts7300(void)
+{
+       return ts72xx_model()  == TS72XX_MODEL_TS7300;
+}
+
+static inline int board_is_ts7400(void)
+{
+       return ts72xx_model() == TS72XX_MODEL_TS7400;
 }
 
 static inline int is_max197_installed(void)
index 851dea0..1561b03 100644 (file)
@@ -520,7 +520,7 @@ static struct clk init_clocks_off[] = {
                .ctrlbit        = (1 << 21),
        }, {
                .name           = "ac97",
-               .id             = -1,
+               .devname        = "samsung-ac97",
                .enable         = exynos4_clk_ip_peril_ctrl,
                .ctrlbit        = (1 << 27),
        }, {
index 2d8a40c..746d6fc 100644 (file)
 #include <plat/exynos4.h>
 #include <plat/adc-core.h>
 #include <plat/sdhci.h>
-#include <plat/devs.h>
 #include <plat/fb-core.h>
 #include <plat/fimc-core.h>
 #include <plat/iic-core.h>
+#include <plat/reset.h>
 
 #include <mach/regs-irq.h>
+#include <mach/regs-pmu.h>
 
 extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
                         unsigned int irq_start);
@@ -128,6 +129,11 @@ static void exynos4_idle(void)
        local_irq_enable();
 }
 
+static void exynos4_sw_reset(void)
+{
+       __raw_writel(0x1, S5P_SWRESET);
+}
+
 /*
  * exynos4_map_io
  *
@@ -241,5 +247,8 @@ int __init exynos4_init(void)
        /* set idle function */
        pm_idle = exynos4_idle;
 
+       /* set sw_reset function */
+       s5p_reset_hook = exynos4_sw_reset;
+
        return sysdev_register(&exynos4_sysdev);
 }
index 934d2a4..f8952f8 100644 (file)
@@ -80,9 +80,8 @@
 #define IRQ_HSMMC3             IRQ_SPI(76)
 #define IRQ_DWMCI              IRQ_SPI(77)
 
-#define IRQ_MIPICSI0           IRQ_SPI(78)
-
-#define IRQ_MIPICSI1           IRQ_SPI(80)
+#define IRQ_MIPI_CSIS0         IRQ_SPI(78)
+#define IRQ_MIPI_CSIS1         IRQ_SPI(80)
 
 #define IRQ_ONENAND_AUDI       IRQ_SPI(82)
 #define IRQ_ROTATOR            IRQ_SPI(83)
index fa49bbb..cdf9b47 100644 (file)
@@ -29,6 +29,8 @@
 #define S5P_USE_STANDBY_WFE1                   (1 << 25)
 #define S5P_USE_MASK                           ((0x3 << 16) | (0x3 << 24))
 
+#define S5P_SWRESET                            S5P_PMUREG(0x0400)
+
 #define S5P_WAKEUP_STAT                                S5P_PMUREG(0x0600)
 #define S5P_EINT_WAKEUP_MASK                   S5P_PMUREG(0x0604)
 #define S5P_WAKEUP_MASK                                S5P_PMUREG(0x0608)
index 9d87d2a..badb8c6 100644 (file)
@@ -23,6 +23,8 @@
 
 #include <mach/regs-gpio.h>
 
+#include <asm/mach/irq.h>
+
 static DEFINE_SPINLOCK(eint_lock);
 
 static unsigned int eint0_15_data[16];
@@ -184,8 +186,11 @@ static inline void exynos4_irq_demux_eint(unsigned int start)
 
 static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
 {
+       struct irq_chip *chip = irq_get_chip(irq);
+       chained_irq_enter(chip, desc);
        exynos4_irq_demux_eint(IRQ_EINT(16));
        exynos4_irq_demux_eint(IRQ_EINT(24));
+       chained_irq_exit(chip, desc);
 }
 
 static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
@@ -193,6 +198,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
        u32 *irq_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);
 
+       chained_irq_enter(chip, desc);
        chip->irq_mask(&desc->irq_data);
 
        if (chip->irq_ack)
@@ -201,6 +207,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
        generic_handle_irq(*irq_data);
 
        chip->irq_unmask(&desc->irq_data);
+       chained_irq_exit(chip, desc);
 }
 
 int __init exynos4_init_irq_eint(void)
index 0e280d1..b3b5d89 100644 (file)
@@ -79,7 +79,7 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
 };
 
 static struct regulator_consumer_supply max8952_consumer =
-       REGULATOR_SUPPLY("vddarm", NULL);
+       REGULATOR_SUPPLY("vdd_arm", NULL);
 
 static struct max8952_platform_data universal_max8952_pdata __initdata = {
        .gpio_vid0      = EXYNOS4_GPX0(3),
@@ -105,7 +105,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = {
 };
 
 static struct regulator_consumer_supply lp3974_buck1_consumer =
-       REGULATOR_SUPPLY("vddint", NULL);
+       REGULATOR_SUPPLY("vdd_int", NULL);
 
 static struct regulator_consumer_supply lp3974_buck2_consumer =
        REGULATOR_SUPPLY("vddg3d", NULL);
index 0883c1b..39aca04 100644 (file)
@@ -82,7 +82,7 @@ static int exynos4_usb_phy1_init(struct platform_device *pdev)
 
        rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK);
        writel(rstcon, EXYNOS4_RSTCON);
-       udelay(50);
+       udelay(80);
 
        clk_disable(otg_clk);
        clk_put(otg_clk);
index dc26fff..c8e7afc 100644 (file)
@@ -62,6 +62,7 @@ config ARCH_EBSA285_HOST
 config ARCH_NETWINDER
        bool "NetWinder"
        select CLKSRC_I8253
+       select CLKEVT_I8253
        select FOOTBRIDGE_HOST
        select ISA
        select ISA_DMA
index 1331fff..18c32a5 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
+#include <video/vga.h>
 
 #include <asm/irq.h>
 #include <asm/system.h>
index 87887ac..f851fe9 100644 (file)
@@ -310,7 +310,7 @@ static struct sys_timer eukrea_cpuimx27_timer = {
        .init = eukrea_cpuimx27_timer_init,
 };
 
-MACHINE_START(CPUIMX27, "EUKREA CPUIMX27")
+MACHINE_START(EUKREA_CPUIMX27, "EUKREA CPUIMX27")
        .boot_params = MX27_PHYS_OFFSET + 0x100,
        .map_io = mx27_map_io,
        .init_early = imx27_init_early,
index f39a478..4bd083b 100644 (file)
@@ -192,7 +192,7 @@ struct sys_timer eukrea_cpuimx35_timer = {
        .init   = eukrea_cpuimx35_timer_init,
 };
 
-MACHINE_START(EUKREA_CPUIMX35, "Eukrea CPUIMX35")
+MACHINE_START(EUKREA_CPUIMX35SD, "Eukrea CPUIMX35")
        /* Maintainer: Eukrea Electromatique */
        .boot_params = MX3x_PHYS_OFFSET + 0x100,
        .map_io = mx35_map_io,
index da36da5..2442d5d 100644 (file)
@@ -161,7 +161,7 @@ static struct sys_timer eukrea_cpuimx25_timer = {
        .init   = eukrea_cpuimx25_timer_init,
 };
 
-MACHINE_START(EUKREA_CPUIMX25, "Eukrea CPUIMX25")
+MACHINE_START(EUKREA_CPUIMX25SD, "Eukrea CPUIMX25")
        /* Maintainer: Eukrea Electromatique */
        .boot_params = MX25_PHYS_OFFSET + 0x100,
        .map_io = mx25_map_io,
index 2fbbdd5..fcf0ae9 100644 (file)
@@ -337,15 +337,15 @@ static unsigned long timer_reload;
 static void integrator_clocksource_init(u32 khz)
 {
        void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
-       u32 ctrl = TIMER_CTRL_ENABLE;
+       u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
 
        if (khz >= 1500) {
                khz /= 16;
-               ctrl = TIMER_CTRL_DIV16;
+               ctrl |= TIMER_CTRL_DIV16;
        }
 
-       writel(ctrl, base + TIMER_CTRL);
        writel(0xffff, base + TIMER_LOAD);
+       writel(ctrl, base + TIMER_CTRL);
 
        clocksource_mmio_init(base + TIMER_VALUE, "timer2",
                khz * 1000, 200, 16, clocksource_mmio_readl_down);
index ffd55b1..b9b8446 100644 (file)
@@ -3078,6 +3078,7 @@ static struct clk gpt12_fck = {
        .name           = "gpt12_fck",
        .ops            = &clkops_null,
        .parent         = &secure_32k_fck,
+       .clkdm_name     = "wkup_clkdm",
        .recalc         = &followparent_recalc,
 };
 
@@ -3085,6 +3086,7 @@ static struct clk wdt1_fck = {
        .name           = "wdt1_fck",
        .ops            = &clkops_null,
        .parent         = &secure_32k_fck,
+       .clkdm_name     = "wkup_clkdm",
        .recalc         = &followparent_recalc,
 };
 
index 2af0e3f..c0b6fbd 100644 (file)
@@ -3376,10 +3376,18 @@ int __init omap4xxx_clk_init(void)
        } else if (cpu_is_omap446x()) {
                cpu_mask = RATE_IN_4460;
                cpu_clkflg = CK_446X;
+       } else {
+               return 0;
        }
 
        clk_init(&omap2_clk_functions);
-       omap2_clk_disable_clkdm_control();
+
+       /*
+        * Must stay commented until all OMAP SoC drivers are
+        * converted to runtime PM, or drivers may start crashing
+        *
+        * omap2_clk_disable_clkdm_control();
+        */
 
        for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks);
                                                                          c++)
index ab7db08..8f08906 100644 (file)
@@ -747,6 +747,7 @@ int clkdm_wakeup(struct clockdomain *clkdm)
        spin_lock_irqsave(&clkdm->lock, flags);
        clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
        ret = arch_clkdm->clkdm_wakeup(clkdm);
+       ret |= pwrdm_state_switch(clkdm->pwrdm.ptr);
        spin_unlock_irqrestore(&clkdm->lock, flags);
        return ret;
 }
@@ -818,6 +819,7 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
        spin_lock_irqsave(&clkdm->lock, flags);
        clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
        arch_clkdm->clkdm_deny_idle(clkdm);
+       pwrdm_state_switch(clkdm->pwrdm.ptr);
        spin_unlock_irqrestore(&clkdm->lock, flags);
 }
 
index 16743c7..408193d 100644 (file)
@@ -192,6 +192,7 @@ static struct omap_hwmod_addr_space omap2430_usbhsotg_addrs[] = {
                .pa_end         = OMAP243X_HS_BASE + SZ_4K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /*  l4_core ->usbhsotg  interface */
index 3feb359..472bf22 100644 (file)
@@ -130,7 +130,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
                } else {
                        hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]);
                        clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
-                       pwrdm_wait_transition(pwrdm);
                        sleep_switch = FORCEWAKEUP_SWITCH;
                }
        }
@@ -156,7 +155,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
                return ret;
        }
 
-       pwrdm_wait_transition(pwrdm);
        pwrdm_state_switch(pwrdm);
 err:
        return ret;
index 9af0847..ef71fdd 100644 (file)
@@ -195,28 +195,35 @@ static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused)
 
 /**
  * pwrdm_init - set up the powerdomain layer
- * @pwrdm_list: array of struct powerdomain pointers to register
+ * @pwrdms: array of struct powerdomain pointers to register
  * @custom_funcs: func pointers for arch specific implementations
  *
- * Loop through the array of powerdomains @pwrdm_list, registering all
- * that are available on the current CPU. If pwrdm_list is supplied
- * and not null, all of the referenced powerdomains will be
- * registered.  No return value.  XXX pwrdm_list is not really a
- * "list"; it is an array.  Rename appropriately.
+ * Loop through the array of powerdomains @pwrdms, registering all
+ * that are available on the current CPU.  Also, program all
+ * powerdomain target state as ON; this is to prevent domains from
+ * hitting low power states (if bootloader has target states set to
+ * something other than ON) and potentially even losing context while
+ * PM is not fully initialized.  The PM late init code can then program
+ * the desired target state for all the power domains.  No return
+ * value.
  */
-void pwrdm_init(struct powerdomain **pwrdm_list, struct pwrdm_ops *custom_funcs)
+void pwrdm_init(struct powerdomain **pwrdms, struct pwrdm_ops *custom_funcs)
 {
        struct powerdomain **p = NULL;
+       struct powerdomain *temp_p;
 
        if (!custom_funcs)
                WARN(1, "powerdomain: No custom pwrdm functions registered\n");
        else
                arch_pwrdm = custom_funcs;
 
-       if (pwrdm_list) {
-               for (p = pwrdm_list; *p; p++)
+       if (pwrdms) {
+               for (p = pwrdms; *p; p++)
                        _pwrdm_register(*p);
        }
+
+       list_for_each_entry(temp_p, &pwrdm_list, node)
+               pwrdm_set_next_pwrst(temp_p, PWRDM_POWER_ON);
 }
 
 /**
index a6eddae..c105556 100644 (file)
@@ -77,7 +77,7 @@ static int __init dns323_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        /*
         * Check for devices with hard-wired IRQs.
         */
-       irq = orion5x_pci_map_irq(const dev, slot, pin);
+       irq = orion5x_pci_map_irq(dev, slot, pin);
        if (irq != -1)
                return irq;
 
index 28b8760..bc4a920 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/mbus.h>
+#include <video/vga.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
 #include <plat/pcie.h>
index f9a2aaf..615a4e7 100644 (file)
@@ -481,6 +481,7 @@ static void __init sirfsoc_clk_init(void)
 
 static struct of_device_id clkc_ids[] = {
        { .compatible = "sirf,prima2-clkc" },
+       {},
 };
 
 void __init sirfsoc_of_clk_init(void)
index c3404cb..7af254d 100644 (file)
@@ -51,6 +51,7 @@ static __init void sirfsoc_irq_init(void)
 
 static struct of_device_id intc_ids[]  = {
        { .compatible = "sirf,prima2-intc" },
+       {},
 };
 
 void __init sirfsoc_of_irq_init(void)
index d074786..492cfa8 100644 (file)
@@ -19,6 +19,7 @@ static DEFINE_MUTEX(rstc_lock);
 
 static struct of_device_id rstc_ids[]  = {
        { .compatible = "sirf,prima2-rstc" },
+       {},
 };
 
 static int __init sirfsoc_of_rstc_init(void)
index 44027f3..ed7ec48 100644 (file)
@@ -190,6 +190,7 @@ static void __init sirfsoc_timer_init(void)
 
 static struct of_device_id timer_ids[] = {
        { .compatible = "sirf,prima2-tick" },
+       {},
 };
 
 static void __init sirfsoc_of_timer_map(void)
index a30f2e3..6657ff2 100644 (file)
@@ -44,6 +44,7 @@ static inline void arch_reset(char mode, const char *cmd)
         */
        if (realview_reset)
                realview_reset(mode);
+       dsb();
 }
 
 #endif
index 8bad643..055e285 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/suspend.h>
 #include <linux/serial_core.h>
 #include <linux/io.h>
+#include <linux/gpio.h>
 
 #include <mach/map.h>
 #include <mach/irqs.h>
index 69ed454..fe7380f 100644 (file)
@@ -129,7 +129,7 @@ static int s5p64x0_alloc_gc(void)
        }
 
        ct = gc->chip_types;
-       ct->chip.irq_ack = irq_gc_ack;
+       ct->chip.irq_ack = irq_gc_ack_set_bit;
        ct->chip.irq_mask = irq_gc_mask_set_bit;
        ct->chip.irq_unmask = irq_gc_mask_clr_bit;
        ct->chip.irq_set_type = s5p64x0_irq_eint_set_type;
index 309e388..f149d27 100644 (file)
@@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = {
        SAVE_ITEM(S3C2410_TCNTO(0)),
 };
 
-void s5pv210_cpu_suspend(unsigned long arg)
+static int s5pv210_cpu_suspend(unsigned long arg)
 {
        unsigned long tmp;
 
index 9e6b93b..d0d267a 100644 (file)
@@ -318,6 +318,10 @@ static struct clk v2m_sp804_clk = {
        .rate   = 1000000,
 };
 
+static struct clk v2m_ref_clk = {
+       .rate   = 32768,
+};
+
 static struct clk dummy_apb_pclk;
 
 static struct clk_lookup v2m_lookups[] = {
@@ -348,6 +352,9 @@ static struct clk_lookup v2m_lookups[] = {
        }, {    /* CLCD */
                .dev_id         = "mb:clcd",
                .clk            = &osc1_clk,
+       }, {    /* SP805 WDT */
+               .dev_id         = "mb:wdt",
+               .clk            = &v2m_ref_clk,
        }, {    /* SP804 timers */
                .dev_id         = "sp804",
                .con_id         = "v2m-timer0",
index 52162d5..2cbf68e 100644 (file)
@@ -17,7 +17,7 @@
        cmp     \tmp, # 0x5600                  @ Is it ldrsb?
        orreq   \tmp, \tmp, #1 << 11            @ Set L-bit if yes
        tst     \tmp, #1 << 11                  @ L = 0 -> write
-       orreq   \psr, \psr, #1 << 11            @ yes.
+       orreq   \fsr, \fsr, #1 << 11            @ yes.
        b       do_DataAbort
 not_thumb:
        .endm
index 44c0867..9ecfdb5 100644 (file)
@@ -277,6 +277,25 @@ static void l2x0_disable(void)
        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void __init l2x0_unlock(__u32 cache_id)
+{
+       int lockregs;
+       int i;
+
+       if (cache_id == L2X0_CACHE_ID_PART_L310)
+               lockregs = 8;
+       else
+               /* L210 and unknown types */
+               lockregs = 1;
+
+       for (i = 0; i < lockregs; i++) {
+               writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+                              i * L2X0_LOCKDOWN_STRIDE);
+               writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+                              i * L2X0_LOCKDOWN_STRIDE);
+       }
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
        __u32 aux;
@@ -328,6 +347,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
         * accessing the below registers will fault.
         */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+               /* Make sure that I&D is not locked down when starting */
+               l2x0_unlock(cache_id);
 
                /* l2x0 controller is disabled */
                writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
index 91bca35..cc7e2d8 100644 (file)
@@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-       return memblock_is_memory(pfn << PAGE_SHIFT);
+       return memblock_is_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
index 92bd102..2e6849b 100644 (file)
@@ -379,7 +379,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm920_suspend_size
-.equ   cpu_arm920_suspend_size, 4 * 3
+.equ   cpu_arm920_suspend_size, 4 * 4
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm920_do_suspend)
        stmfd   sp!, {r4 - r7, lr}
index 2bbcf05..cd8f79c 100644 (file)
@@ -394,7 +394,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm926_suspend_size
-.equ   cpu_arm926_suspend_size, 4 * 3
+.equ   cpu_arm926_suspend_size, 4 * 4
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm926_do_suspend)
        stmfd   sp!, {r4 - r7, lr}
index 07219c2..69e7f2e 100644 (file)
@@ -182,11 +182,11 @@ ENDPROC(cpu_sa1100_do_suspend)
 
 ENTRY(cpu_sa1100_do_resume)
        ldmia   r0, {r4 - r7}                   @ load cp regs
-       mov     r1, #0
-       mcr     p15, 0, r1, c8, c7, 0           @ flush I+D TLBs
-       mcr     p15, 0, r1, c7, c7, 0           @ flush I&D cache
-       mcr     p15, 0, r1, c9, c0, 0           @ invalidate RB
-       mcr     p15, 0, r1, c9, c0, 5           @ allow user space to use RB
+       mov     ip, #0
+       mcr     p15, 0, ip, c8, c7, 0           @ flush I+D TLBs
+       mcr     p15, 0, ip, c7, c7, 0           @ flush I&D cache
+       mcr     p15, 0, ip, c9, c0, 0           @ invalidate RB
+       mcr     p15, 0, ip, c9, c0, 5           @ allow user space to use RB
 
        mcr     p15, 0, r4, c3, c0, 0           @ domain ID
        mcr     p15, 0, r5, c2, c0, 0           @ translation table base addr
index 219138d..a923aa0 100644 (file)
@@ -223,6 +223,22 @@ __v6_setup:
        mrc     p15, 0, r0, c1, c0, 0           @ read control register
        bic     r0, r0, r5                      @ clear bits them
        orr     r0, r0, r6                      @ set them
+#ifdef CONFIG_ARM_ERRATA_364296
+       /*
+        * Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data
+        * corruption with hit-under-miss enabled). The conditional code below
+        * (setting the undocumented bit 31 in the auxiliary control register
+        * and the FI bit in the control register) disables hit-under-miss
+        * without putting the processor into full low interrupt latency mode.
+        */
+       ldr     r6, =0x4107b362                 @ id for ARM1136 r0p2
+       mrc     p15, 0, r5, c0, c0, 0           @ get processor id
+       teq     r5, r6                          @ check for the faulty core
+       mrceq   p15, 0, r5, c1, c0, 1           @ load aux control reg
+       orreq   r5, r5, #(1 << 31)              @ set the undocumented bit 31
+       mcreq   p15, 0, r5, c1, c0, 1           @ write aux control reg
+       orreq   r0, r0, #(1 << 21)              @ low interrupt latency configuration
+#endif
        mov     pc, lr                          @ return to head.S:__ret
 
        /*
index a30e785..9049c07 100644 (file)
@@ -66,6 +66,7 @@ ENDPROC(cpu_v7_proc_fin)
 ENTRY(cpu_v7_reset)
        mrc     p15, 0, r1, c1, c0, 0           @ ctrl register
        bic     r1, r1, #0x1                    @ ...............m
+ THUMB(        bic     r1, r1, #1 << 30 )              @ SCTLR.TE (Thumb exceptions)
        mcr     p15, 0, r1, c1, c0, 0           @ disable MMU
        isb
        mov     pc, r0
@@ -247,13 +248,16 @@ ENTRY(cpu_v7_do_resume)
        mcr     p15, 0, r7, c2, c0, 0   @ TTB 0
        mcr     p15, 0, r8, c2, c0, 1   @ TTB 1
        mcr     p15, 0, ip, c2, c0, 2   @ TTB control register
-       mcr     p15, 0, r10, c1, c0, 1  @ Auxiliary control register
+       mrc     p15, 0, r4, c1, c0, 1   @ Read Auxiliary control register
+       teq     r4, r10                 @ Is it already set?
+       mcrne   p15, 0, r10, c1, c0, 1  @ No, so write it
        mcr     p15, 0, r11, c1, c0, 2  @ Co-processor access control
        ldr     r4, =PRRR               @ PRRR
        ldr     r5, =NMRR               @ NMRR
        mcr     p15, 0, r4, c10, c2, 0  @ write PRRR
        mcr     p15, 0, r5, c10, c2, 1  @ write NMRR
        isb
+       dsb
        mov     r0, r9                  @ control register
        mov     r2, r7, lsr #14         @ get TTB0 base
        mov     r2, r2, lsl #14
index 28c72a2..755e1bf 100644 (file)
@@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
        .align
 
 .globl cpu_xsc3_suspend_size
-.equ   cpu_xsc3_suspend_size, 4 * 8
+.equ   cpu_xsc3_suspend_size, 4 * 7
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xsc3_do_suspend)
        stmfd   sp!, {r4 - r10, lr}
@@ -418,12 +418,12 @@ ENTRY(cpu_xsc3_do_suspend)
        mrc     p15, 0, r9, c1, c0, 1   @ auxiliary control reg
        mrc     p15, 0, r10, c1, c0, 0  @ control reg
        bic     r4, r4, #2              @ clear frequency change bit
-       stmia   r0, {r1, r4 - r10}      @ store v:p offset + cp regs
+       stmia   r0, {r4 - r10}          @ store cp regs
        ldmia   sp!, {r4 - r10, pc}
 ENDPROC(cpu_xsc3_do_suspend)
 
 ENTRY(cpu_xsc3_do_resume)
-       ldmia   r0, {r1, r4 - r10}      @ load v:p offset + cp regs
+       ldmia   r0, {r4 - r10}          @ load cp regs
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0   @ invalidate I & D caches, BTB
        mcr     p15, 0, ip, c7, c10, 4  @ drain write (&fill) buffer
index 9a6a538..02609ee 100644 (file)
@@ -615,6 +615,9 @@ static int _od_resume_noirq(struct device *dev)
 
        return pm_generic_resume_noirq(dev);
 }
+#else
+#define _od_suspend_noirq NULL
+#define _od_resume_noirq NULL
 #endif
 
 static struct dev_pm_domain omap_device_pm_domain = {
index 02af235..5f84a3f 100644 (file)
@@ -192,7 +192,7 @@ unsigned long s5p_spdif_get_rate(struct clk *clk)
        if (IS_ERR(pclk))
                return -EINVAL;
 
-       rate = pclk->ops->get_rate(clk);
+       rate = pclk->ops->get_rate(pclk);
        clk_put(pclk);
 
        return rate;
index 327ab9f..f71078e 100644 (file)
@@ -23,6 +23,8 @@
 #include <plat/gpio-core.h>
 #include <plat/gpio-cfg.h>
 
+#include <asm/mach/irq.h>
+
 #define GPIO_BASE(chip)                (((unsigned long)(chip)->base) & 0xFFFFF000u)
 
 #define CON_OFFSET             0x700
@@ -81,6 +83,9 @@ static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
        int group, pend_offset, mask_offset;
        unsigned int pend, mask;
 
+       struct irq_chip *chip = irq_get_chip(irq);
+       chained_irq_enter(chip, desc);
+
        for (group = 0; group < bank->nr_groups; group++) {
                struct s3c_gpio_chip *chip = bank->chips[group];
                if (!chip)
@@ -102,6 +107,7 @@ static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
                        pend &= ~BIT(offset);
                }
        }
+       chained_irq_exit(chip, desc);
 }
 
 static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
index 51d8da8..ad530c7 100644 (file)
@@ -20,7 +20,7 @@ struct samsung_bl_gpio_info {
        int func;
 };
 
-extern void samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
+extern void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
        struct platform_pwm_backlight_data *bl_data);
 
 #endif /* __ASM_PLAT_BACKLIGHT_H */
index f714d06..51583cd 100644 (file)
 #include <plat/irq-vic-timer.h>
 #include <plat/regs-timer.h>
 
+#include <asm/mach/irq.h>
+
 static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc)
 {
+       struct irq_chip *chip = irq_get_chip(irq);
+       chained_irq_enter(chip, desc);
        generic_handle_irq((int)desc->irq_data.handler_data);
+       chained_irq_exit(chip, desc);
 }
 
 /* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */
index fff68d0..62cc8f9 100644 (file)
@@ -351,7 +351,7 @@ centro                      MACH_CENTRO             CENTRO                  1944
 nokia_rx51             MACH_NOKIA_RX51         NOKIA_RX51              1955
 omap_zoom2             MACH_OMAP_ZOOM2         OMAP_ZOOM2              1967
 cpuat9260              MACH_CPUAT9260          CPUAT9260               1973
-eukrea_cpuimx27                MACH_CPUIMX27           CPUIMX27                1975
+eukrea_cpuimx27                MACH_EUKREA_CPUIMX27    EUKREA_CPUIMX27         1975
 acs5k                  MACH_ACS5K              ACS5K                   1982
 snapper_9260           MACH_SNAPPER_9260       SNAPPER_9260            1987
 dsm320                 MACH_DSM320             DSM320                  1988
@@ -476,8 +476,8 @@ cns3420vb           MACH_CNS3420VB          CNS3420VB               2776
 omap4_panda            MACH_OMAP4_PANDA        OMAP4_PANDA             2791
 ti8168evm              MACH_TI8168EVM          TI8168EVM               2800
 teton_bga              MACH_TETON_BGA          TETON_BGA               2816
-eukrea_cpuimx25sd      MACH_EUKREA_CPUIMX25    EUKREA_CPUIMX25         2820
-eukrea_cpuimx35sd      MACH_EUKREA_CPUIMX35    EUKREA_CPUIMX35         2821
+eukrea_cpuimx25sd      MACH_EUKREA_CPUIMX25SD  EUKREA_CPUIMX25SD       2820
+eukrea_cpuimx35sd      MACH_EUKREA_CPUIMX35SD  EUKREA_CPUIMX35SD       2821
 eukrea_cpuimx51sd      MACH_EUKREA_CPUIMX51SD  EUKREA_CPUIMX51SD       2822
 eukrea_cpuimx51                MACH_EUKREA_CPUIMX51    EUKREA_CPUIMX51         2823
 smdkc210               MACH_SMDKC210           SMDKC210                2838
index 052f877..60b4722 100644 (file)
@@ -31,7 +31,6 @@
 
 #define DMA_ERROR_CODE         (~(dma_addr_t)0x0)
 
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
@@ -47,6 +46,12 @@ dma_addr_t or1k_map_page(struct device *dev, struct page *page,
 void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
                     size_t size, enum dma_data_direction dir,
                     struct dma_attrs *attrs);
+int or1k_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir,
+               struct dma_attrs *attrs);
+void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
+                  int nents, enum dma_data_direction dir,
+                  struct dma_attrs *attrs);
 void or1k_sync_single_for_cpu(struct device *dev,
                              dma_addr_t dma_handle, size_t size,
                              enum dma_data_direction dir);
@@ -98,6 +103,51 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
        debug_dma_unmap_page(dev, addr, size, dir, true);
 }
 
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+                                  int nents, enum dma_data_direction dir)
+{
+       int i, ents;
+       struct scatterlist *s;
+
+       for_each_sg(sg, s, nents, i)
+               kmemcheck_mark_initialized(sg_virt(s), s->length);
+       BUG_ON(!valid_dma_direction(dir));
+       ents = or1k_map_sg(dev, sg, nents, dir, NULL);
+       debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+       return ents;
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+                                     int nents, enum dma_data_direction dir)
+{
+       BUG_ON(!valid_dma_direction(dir));
+       debug_dma_unmap_sg(dev, sg, nents, dir);
+       or1k_unmap_sg(dev, sg, nents, dir, NULL);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+                                     size_t offset, size_t size,
+                                     enum dma_data_direction dir)
+{
+       dma_addr_t addr;
+
+       kmemcheck_mark_initialized(page_address(page) + offset, size);
+       BUG_ON(!valid_dma_direction(dir));
+       addr = or1k_map_page(dev, page, offset, size, dir, NULL);
+       debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+       return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+                                 size_t size, enum dma_data_direction dir)
+{
+       BUG_ON(!valid_dma_direction(dir));
+       or1k_unmap_page(dev, addr, size, dir, NULL);
+       debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
@@ -119,7 +169,12 @@ static inline void dma_sync_single_for_device(struct device *dev,
 static inline int dma_supported(struct device *dev, u64 dma_mask)
 {
        /* Support 32 bit DMA mask exclusively */
-       return dma_mask == 0xffffffffULL;
+       return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return 0;
 }
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
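
For reference, DMA_BIT_MASK(32) evaluates to the same 0xffffffff the old literal spelled out, just without hard-coding the width. A rough user-space sketch of the macro's shape (an approximation of the kernel header, shown only to make the equivalence concrete):

#include <stdio.h>

/* approximate shape of the kernel's DMA_BIT_MASK(); the 64-bit special case
 * avoids the undefined behaviour of shifting a 64-bit value by 64 */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("DMA_BIT_MASK(32) = 0x%llx\n", DMA_BIT_MASK(32)); /* 0xffffffff */
	printf("DMA_BIT_MASK(64) = 0x%llx\n", DMA_BIT_MASK(64)); /* all ones   */
	return 0;
}
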
index 54a5c50..b79c2b1 100644 (file)
 
 /* This struct is saved by setup_frame in signal.c, to keep the current
    context while a signal handler is executed. It's restored by sys_sigreturn.
-
-   To keep things simple, we use pt_regs here even though normally you just
-   specify the list of regs to save. Then we can use copy_from_user on the
-   entire regs instead of a bunch of get_user's as well...
 */
 
 struct sigcontext {
-       struct pt_regs regs;  /* needs to be first */
+       struct user_regs_struct regs;  /* needs to be first */
        unsigned long oldmask;
-       unsigned long usp;    /* usp before stacking this gunk on it */
 };
 
 #endif /* __ASM_OPENRISC_SIGCONTEXT_H */
index 968d3ee..f1c8ee2 100644 (file)
@@ -154,6 +154,33 @@ void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
        /* Nothing special to do here... */
 }
 
+int or1k_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
+                                              s->length, dir, NULL);
+       }
+
+       return nents;
+}
+
+void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
+                  int nents, enum dma_data_direction dir,
+                  struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
+       }
+}
+
 void or1k_sync_single_for_cpu(struct device *dev,
                              dma_addr_t dma_handle, size_t size,
                              enum dma_data_direction dir)
@@ -187,5 +214,4 @@ static int __init dma_init(void)
 
        return 0;
 }
-
 fs_initcall(dma_init);
index 5f759c7..95207ab 100644 (file)
@@ -52,31 +52,25 @@ struct rt_sigframe {
 static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
 {
        unsigned int err = 0;
-       unsigned long old_usp;
 
        /* Always make any pending restarted system call return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-       /* restore the regs from &sc->regs (same as sc, since regs is first)
+       /*
+        * Restore the regs from &sc->regs.
         * (sc is already checked for VERIFY_READ since the sigframe was
         *  checked in sys_sigreturn previously)
         */
-
-       if (__copy_from_user(regs, sc, sizeof(struct pt_regs)))
+       if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
+               goto badframe;
+       if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
+               goto badframe;
+       if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
                goto badframe;
 
        /* make sure the SM-bit is cleared so user-mode cannot fool us */
        regs->sr &= ~SPR_SR_SM;
 
-       /* restore the old USP as it was before we stacked the sc etc.
-        * (we cannot just pop the sigcontext since we aligned the sp and
-        *  stuff after pushing it)
-        */
-
-       err |= __get_user(old_usp, &sc->usp);
-
-       regs->sp = old_usp;
-
        /* TODO: the other ports use regs->orig_XX to disable syscall checks
         * after this completes, but we don't use that mechanism. maybe we can
         * use it now ?
@@ -137,18 +131,17 @@ static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
                            unsigned long mask)
 {
        int err = 0;
-       unsigned long usp = regs->sp;
 
-       /* copy the regs. they are first in sc so we can use sc directly */
+       /* copy the regs */
 
-       err |= __copy_to_user(sc, regs, sizeof(struct pt_regs));
+       err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
+       err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
+       err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
 
        /* then some other stuff */
 
        err |= __put_user(mask, &sc->oldmask);
 
-       err |= __put_user(usp, &sc->usp);
-
        return err;
 }
 
index e66366f..3735abd 100644 (file)
        ENTRY_SAME(ni_syscall)          /* query_module */
        ENTRY_SAME(poll)
        /* structs contain pointers and an in_addr... */
-       ENTRY_COMP(nfsservctl)
+       ENTRY_SAME(ni_syscall)          /* was nfsservctl */
        ENTRY_SAME(setresgid)           /* 170 */
        ENTRY_SAME(getresgid)
        ENTRY_SAME(prctl)
index bfa96aa..d9b7767 100644 (file)
                        #size-cells = <1>;
                        compatible = "cfi-flash";
                        reg = <0x0 0x0 0x02000000>;
-                       bank-width = <1>;
+                       bank-width = <2>;
                        device-width = <1>;
                        partition@0 {
                                label = "ramdisk";
index 980ff8f..3ff5a81 100644 (file)
@@ -171,3 +171,4 @@ CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=y
 CONFIG_CRYPTO_AES=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
index 10562a5..4311d02 100644 (file)
@@ -185,3 +185,4 @@ CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=y
 CONFIG_CRYPTO_AES=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
index d322835..c92c204 100644 (file)
@@ -100,5 +100,8 @@ CONFIG_DEBUG_INFO=y
 CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_VIRQ_DEBUG=y
 CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_AES=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_TALITOS=y
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
index fcd85d2..a3467bf 100644 (file)
@@ -139,6 +139,7 @@ CONFIG_SND=y
 CONFIG_SND_INTEL8X0=y
 # CONFIG_SND_PPC is not set
 # CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
 CONFIG_HID_A4TECH=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_BELKIN=y
index 908c941..9693f6e 100644 (file)
@@ -140,6 +140,7 @@ CONFIG_SND=y
 CONFIG_SND_INTEL8X0=y
 # CONFIG_SND_PPC is not set
 # CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
 CONFIG_HID_A4TECH=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_BELKIN=y
index f6736b7..fa0d27a 100644 (file)
@@ -171,7 +171,7 @@ SYSCALL_SPU(setresuid)
 SYSCALL_SPU(getresuid)
 SYSCALL(ni_syscall)
 SYSCALL_SPU(poll)
-COMPAT_SYS(nfsservctl)
+SYSCALL(ni_syscall)
 SYSCALL_SPU(setresgid)
 SYSCALL_SPU(getresgid)
 COMPAT_SYS_SPU(prctl)
index 3e9daea..3c5bb78 100644 (file)
@@ -440,8 +440,14 @@ static void __init init_sparc64_elf_hwcap(void)
                        cap |= AV_SPARC_VIS;
                if (tlb_type == cheetah || tlb_type == cheetah_plus)
                        cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
-               if (tlb_type == cheetah_plus)
-                       cap |= AV_SPARC_POPC;
+               if (tlb_type == cheetah_plus) {
+                       unsigned long impl, ver;
+
+                       __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
+                       impl = ((ver >> 32) & 0xffff);
+                       if (impl == PANTHER_IMPL)
+                               cap |= AV_SPARC_POPC;
+               }
                if (tlb_type == hypervisor) {
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
                                cap |= AV_SPARC_ASI_BLK_INIT;
index d31ecf3..21bebe6 100644 (file)
@@ -10,6 +10,10 @@ config CMPXCHG_LOCAL
        bool
        default n
 
+config CMPXCHG_DOUBLE
+       bool
+       default n
+
 source "arch/x86/Kconfig.cpu"
 
 endmenu
index fab8121..c0f712c 100644 (file)
@@ -41,7 +41,7 @@ KBUILD_CPPFLAGS += -I$(srctree)/$(ARCH_DIR)/sys-$(SUBARCH)
 KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \
        $(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap      \
        -Din6addr_loopback=kernel_in6addr_loopback \
-       -Din6addr_any=kernel_in6addr_any
+       -Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr
 
 KBUILD_AFLAGS += $(ARCH_INCLUDE)
 
index d51c404..364c8a1 100644 (file)
@@ -399,8 +399,8 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
  * is done under a spinlock.  Checking whether the device is in use is
  * line->tty->count > 1, also under the spinlock.
  *
- * tty->count serves to decide whether the device should be enabled or
- * disabled on the host.  If it's equal to 1, then we are doing the
+ * line->count serves to decide whether the device should be enabled or
+ * disabled on the host.  If it's equal to 0, then we are doing the
  * first open or last close.  Otherwise, open and close just return.
  */
 
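
The rewritten comment above describes a driver-private open count protected by count_lock: a 0 -> 1 transition marks the first open and a 1 -> 0 transition the last close. A minimal sketch of that pattern, with invented names (my_line, my_line_open, my_line_close); it only illustrates the counting, not the UML line driver itself:

#include <linux/spinlock.h>

struct my_line {
	spinlock_t count_lock;
	unsigned long count;		/* opens currently outstanding */
};

/* returns 1 when the caller must perform first-open setup, 0 otherwise */
static int my_line_open(struct my_line *line)
{
	int first;

	spin_lock(&line->count_lock);
	first = (line->count++ == 0);
	spin_unlock(&line->count_lock);
	return first;
}

/* returns 1 when the caller must perform last-close teardown, 0 otherwise */
static int my_line_close(struct my_line *line)
{
	int last;

	spin_lock(&line->count_lock);
	last = (--line->count == 0);
	spin_unlock(&line->count_lock);
	return last;
}
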
@@ -414,16 +414,16 @@ int line_open(struct line *lines, struct tty_struct *tty)
                goto out_unlock;
 
        err = 0;
-       if (tty->count > 1)
+       if (line->count++)
                goto out_unlock;
 
-       spin_unlock(&line->count_lock);
-
+       BUG_ON(tty->driver_data);
        tty->driver_data = line;
        line->tty = tty;
 
+       spin_unlock(&line->count_lock);
        err = enable_chan(line);
-       if (err)
+       if (err) /* line_close() will be called by our caller */
                return err;
 
        INIT_DELAYED_WORK(&line->task, line_timer_cb);
@@ -436,7 +436,7 @@ int line_open(struct line *lines, struct tty_struct *tty)
        chan_window_size(&line->chan_list, &tty->winsize.ws_row,
                         &tty->winsize.ws_col);
 
-       return err;
+       return 0;
 
 out_unlock:
        spin_unlock(&line->count_lock);
@@ -460,17 +460,16 @@ void line_close(struct tty_struct *tty, struct file * filp)
        flush_buffer(line);
 
        spin_lock(&line->count_lock);
-       if (!line->valid)
-               goto out_unlock;
+       BUG_ON(!line->valid);
 
-       if (tty->count > 1)
+       if (--line->count)
                goto out_unlock;
 
-       spin_unlock(&line->count_lock);
-
        line->tty = NULL;
        tty->driver_data = NULL;
 
+       spin_unlock(&line->count_lock);
+
        if (line->sigio) {
                unregister_winch(tty);
                line->sigio = 0;
@@ -498,7 +497,7 @@ static int setup_one_line(struct line *lines, int n, char *init, int init_prio,
 
        spin_lock(&line->count_lock);
 
-       if (line->tty != NULL) {
+       if (line->count) {
                *error_out = "Device is already open";
                goto out;
        }
@@ -722,41 +721,53 @@ struct winch {
        int pid;
        struct tty_struct *tty;
        unsigned long stack;
+       struct work_struct work;
 };
 
-static void free_winch(struct winch *winch, int free_irq_ok)
+static void __free_winch(struct work_struct *work)
 {
-       if (free_irq_ok)
-               free_irq(WINCH_IRQ, winch);
-
-       list_del(&winch->list);
+       struct winch *winch = container_of(work, struct winch, work);
+       free_irq(WINCH_IRQ, winch);
 
        if (winch->pid != -1)
                os_kill_process(winch->pid, 1);
-       if (winch->fd != -1)
-               os_close_file(winch->fd);
        if (winch->stack != 0)
                free_stack(winch->stack, 0);
        kfree(winch);
 }
 
+static void free_winch(struct winch *winch)
+{
+       int fd = winch->fd;
+       winch->fd = -1;
+       if (fd != -1)
+               os_close_file(fd);
+       list_del(&winch->list);
+       __free_winch(&winch->work);
+}
+
 static irqreturn_t winch_interrupt(int irq, void *data)
 {
        struct winch *winch = data;
        struct tty_struct *tty;
        struct line *line;
+       int fd = winch->fd;
        int err;
        char c;
 
-       if (winch->fd != -1) {
-               err = generic_read(winch->fd, &c, NULL);
+       if (fd != -1) {
+               err = generic_read(fd, &c, NULL);
                if (err < 0) {
                        if (err != -EAGAIN) {
+                               winch->fd = -1;
+                               list_del(&winch->list);
+                               os_close_file(fd);
                                printk(KERN_ERR "winch_interrupt : "
                                       "read failed, errno = %d\n", -err);
                                printk(KERN_ERR "fd %d is losing SIGWINCH "
                                       "support\n", winch->tty_fd);
-                               free_winch(winch, 0);
+                               INIT_WORK(&winch->work, __free_winch);
+                               schedule_work(&winch->work);
                                return IRQ_HANDLED;
                        }
                        goto out;
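
The error path above can no longer tear the winch down synchronously: free_irq() waits for the running handler, so calling it from inside winch_interrupt() would deadlock, and the cleanup is instead handed to a work item that runs in process context. A hedged sketch of that deferral pattern, using invented names (my_ctx, my_release, my_handler) rather than the driver's own:

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_ctx {
	int irq;
	bool broken;			/* set when the source hits a fatal error */
	struct work_struct work;
};

static void my_release(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	free_irq(ctx->irq, ctx);	/* safe here: we run in process context */
	kfree(ctx);
}

static irqreturn_t my_handler(int irq, void *data)
{
	struct my_ctx *ctx = data;

	if (ctx->broken) {
		/* cannot free_irq() from the handler itself; defer the teardown */
		INIT_WORK(&ctx->work, my_release);
		schedule_work(&ctx->work);
	}
	return IRQ_HANDLED;
}
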
@@ -828,7 +839,7 @@ static void unregister_winch(struct tty_struct *tty)
        list_for_each_safe(ele, next, &winch_handlers) {
                winch = list_entry(ele, struct winch, list);
                if (winch->tty == tty) {
-                       free_winch(winch, 1);
+                       free_winch(winch);
                        break;
                }
        }
@@ -844,7 +855,7 @@ static void winch_cleanup(void)
 
        list_for_each_safe(ele, next, &winch_handlers) {
                winch = list_entry(ele, struct winch, list);
-               free_winch(winch, 1);
+               free_winch(winch);
        }
 
        spin_unlock(&winch_handler_lock);
index 8ac7146..2e1de57 100644 (file)
@@ -123,6 +123,7 @@ static int xterm_open(int input, int output, int primary, void *d,
                err = -errno;
                printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n",
                       errno);
+               close(fd);
                return err;
        }
        close(fd);
index ae084ad..1a7d275 100644 (file)
@@ -42,10 +42,6 @@ extern long subarch_ptrace(struct task_struct *child, long request,
        unsigned long addr, unsigned long data);
 extern unsigned long getreg(struct task_struct *child, int regno);
 extern int putreg(struct task_struct *child, int regno, unsigned long value);
-extern int get_fpregs(struct user_i387_struct __user *buf,
-                     struct task_struct *child);
-extern int set_fpregs(struct user_i387_struct __user *buf,
-                     struct task_struct *child);
 
 extern int arch_copy_tls(struct task_struct *new);
 extern void clear_flushed_tls(struct task_struct *task);
index 72f4f25..63df3ca 100644 (file)
@@ -33,6 +33,7 @@ struct line_driver {
 struct line {
        struct tty_struct *tty;
        spinlock_t count_lock;
+       unsigned long count;
        int valid;
 
        char *init_str;
index b0b4589..f1e0aa5 100644 (file)
@@ -16,7 +16,7 @@ extern int restore_fpx_registers(int pid, unsigned long *fp_regs);
 extern int save_registers(int pid, struct uml_pt_regs *regs);
 extern int restore_registers(int pid, struct uml_pt_regs *regs);
 extern int init_registers(int pid);
-extern void get_safe_registers(unsigned long *regs);
+extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs);
 extern unsigned long get_thread_reg(int reg, jmp_buf *buf);
 extern int get_fp_registers(int pid, unsigned long *regs);
 extern int put_fp_registers(int pid, unsigned long *regs);
index fab4371..21c1ae7 100644 (file)
@@ -202,7 +202,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        }
        else {
-               get_safe_registers(p->thread.regs.regs.gp);
+               get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
                p->thread.request.u.thread = current->thread.request.u.thread;
                handler = new_thread_handler;
        }
index 701b672..c9da32b 100644 (file)
@@ -50,23 +50,11 @@ long arch_ptrace(struct task_struct *child, long request,
        void __user *vp = p;
 
        switch (request) {
-       /* read word at location addr. */
-       case PTRACE_PEEKTEXT:
-       case PTRACE_PEEKDATA:
-               ret = generic_ptrace_peekdata(child, addr, data);
-               break;
-
        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR:
                ret = peek_user(child, addr, data);
                break;
 
-       /* write the word at location addr. */
-       case PTRACE_POKETEXT:
-       case PTRACE_POKEDATA:
-               ret = generic_ptrace_pokedata(child, addr, data);
-               break;
-
        /* write the word at location addr in the USER area */
        case PTRACE_POKEUSR:
                ret = poke_user(child, addr, data);
@@ -106,16 +94,6 @@ long arch_ptrace(struct task_struct *child, long request,
                ret = 0;
                break;
        }
-#endif
-#ifdef PTRACE_GETFPREGS
-       case PTRACE_GETFPREGS: /* Get the child FPU state. */
-               ret = get_fpregs(vp, child);
-               break;
-#endif
-#ifdef PTRACE_SETFPREGS
-       case PTRACE_SETFPREGS: /* Set the child FPU state. */
-               ret = set_fpregs(vp, child);
-               break;
 #endif
        case PTRACE_GET_THREAD_AREA:
                ret = ptrace_get_thread_area(child, addr, vp);
@@ -153,12 +131,6 @@ long arch_ptrace(struct task_struct *child, long request,
                ret = -EIO;
                break;
        }
-#endif
-#ifdef PTRACE_ARCH_PRCTL
-       case PTRACE_ARCH_PRCTL:
-               /* XXX Calls ptrace on the host - needs some SMP thinking */
-               ret = arch_prctl(child, data, (void __user *) addr);
-               break;
 #endif
        default:
                ret = ptrace_request(child, request, addr, data);
index 830fe6a..b866b9e 100644 (file)
@@ -8,6 +8,8 @@
 #include <string.h>
 #include <sys/ptrace.h>
 #include "sysdep/ptrace.h"
+#include "sysdep/ptrace_user.h"
+#include "registers.h"
 
 int save_registers(int pid, struct uml_pt_regs *regs)
 {
@@ -32,6 +34,7 @@ int restore_registers(int pid, struct uml_pt_regs *regs)
 /* This is set once at boot time and not changed thereafter */
 
 static unsigned long exec_regs[MAX_REG_NR];
+static unsigned long exec_fp_regs[FP_SIZE];
 
 int init_registers(int pid)
 {
@@ -42,10 +45,14 @@ int init_registers(int pid)
                return -errno;
 
        arch_init_registers(pid);
+       get_fp_registers(pid, exec_fp_regs);
        return 0;
 }
 
-void get_safe_registers(unsigned long *regs)
+void get_safe_registers(unsigned long *regs, unsigned long *fp_regs)
 {
        memcpy(regs, exec_regs, sizeof(exec_regs));
+
+       if (fp_regs)
+               memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs));
 }
index d261f17..e771398 100644 (file)
@@ -39,7 +39,7 @@ static unsigned long syscall_regs[MAX_REG_NR];
 
 static int __init init_syscall_regs(void)
 {
-       get_safe_registers(syscall_regs);
+       get_safe_registers(syscall_regs, NULL);
        syscall_regs[REGS_IP_INDEX] = STUB_CODE +
                ((unsigned long) &batch_syscall_stub -
                 (unsigned long) &__syscall_stub_start);
index d6e0a22..dee0e8c 100644 (file)
@@ -373,6 +373,9 @@ void userspace(struct uml_pt_regs *regs)
                if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
                        fatal_sigsegv();
 
+               if (put_fp_registers(pid, regs->fp))
+                       fatal_sigsegv();
+
                /* Now we set local_using_sysemu to be used for one loop */
                local_using_sysemu = get_using_sysemu();
 
@@ -399,6 +402,12 @@ void userspace(struct uml_pt_regs *regs)
                        fatal_sigsegv();
                }
 
+               if (get_fp_registers(pid, regs->fp)) {
+                       printk(UM_KERN_ERR "userspace -  get_fp_registers failed, "
+                              "errno = %d\n", errno);
+                       fatal_sigsegv();
+               }
+
                UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */
 
                if (WIFSTOPPED(status)) {
@@ -457,10 +466,11 @@ void userspace(struct uml_pt_regs *regs)
 }
 
 static unsigned long thread_regs[MAX_REG_NR];
+static unsigned long thread_fp_regs[FP_SIZE];
 
 static int __init init_thread_regs(void)
 {
-       get_safe_registers(thread_regs);
+       get_safe_registers(thread_regs, thread_fp_regs);
        /* Set parent's instruction pointer to start of clone-stub */
        thread_regs[REGS_IP_INDEX] = STUB_CODE +
                                (unsigned long) stub_clone_handler -
@@ -503,6 +513,13 @@ int copy_context_skas0(unsigned long new_stack, int pid)
                return err;
        }
 
+       err = put_fp_registers(pid, thread_fp_regs);
+       if (err < 0) {
+               printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers "
+                      "failed, pid = %d, err = %d\n", pid, err);
+               return err;
+       }
+
        /* set a well known return code for detection of child write failure */
        child_data->err = 12345678;
 
index 0273e4d..5d2a591 100644 (file)
  */
 struct user_desc;
 
-extern int get_fpxregs(struct user_fxsr_struct __user *buf,
-                      struct task_struct *child);
-extern int set_fpxregs(struct user_fxsr_struct __user *buf,
-                      struct task_struct *tsk);
-
 extern int ptrace_get_thread_area(struct task_struct *child, int idx,
                                   struct user_desc __user *user_desc);
 
index d23b2d3..3375c27 100644 (file)
@@ -145,7 +145,7 @@ int peek_user(struct task_struct *child, long addr, long data)
        return put_user(tmp, (unsigned long __user *) data);
 }
 
-int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
        int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
        struct user_i387_struct fpregs;
@@ -161,7 +161,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
        return n;
 }
 
-int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
        int n, cpu = ((struct thread_info *) child->stack)->cpu;
        struct user_i387_struct fpregs;
@@ -174,7 +174,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
                                    (unsigned long *) &fpregs);
 }
 
-int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
+static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
 {
        int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
        struct user_fxsr_struct fpregs;
@@ -190,7 +190,7 @@ int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
        return n;
 }
 
-int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
+static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
 {
        int n, cpu = ((struct thread_info *) child->stack)->cpu;
        struct user_fxsr_struct fpregs;
@@ -206,5 +206,23 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
 long subarch_ptrace(struct task_struct *child, long request,
                    unsigned long addr, unsigned long data)
 {
-       return -EIO;
+       int ret = -EIO;
+       void __user *datap = (void __user *) data;
+       switch (request) {
+       case PTRACE_GETFPREGS: /* Get the child FPU state. */
+               ret = get_fpregs(datap, child);
+               break;
+       case PTRACE_SETFPREGS: /* Set the child FPU state. */
+               ret = set_fpregs(datap, child);
+               break;
+       case PTRACE_GETFPXREGS: /* Get the child FPU state. */
+               ret = get_fpxregs(datap, child);
+               break;
+       case PTRACE_SETFPXREGS: /* Set the child FPU state. */
+               ret = set_fpxregs(datap, child);
+               break;
+       default:
+               ret = -EIO;
+       }
+       return ret;
 }
index d50e62e..c398a50 100644 (file)
@@ -53,6 +53,7 @@ extern int sysemu_supported;
 
 struct uml_pt_regs {
        unsigned long gp[MAX_REG_NR];
+       unsigned long fp[HOST_FPX_SIZE];
        struct faultinfo faultinfo;
        long syscall;
        int is_user;
index f436136..4005506 100644 (file)
@@ -145,7 +145,7 @@ int is_syscall(unsigned long addr)
        return instr == 0x050f;
 }
 
-int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
        int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
        long fpregs[HOST_FP_SIZE];
@@ -162,7 +162,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
        return n;
 }
 
-int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
        int n, cpu = ((struct thread_info *) child->stack)->cpu;
        long fpregs[HOST_FP_SIZE];
@@ -182,12 +182,16 @@ long subarch_ptrace(struct task_struct *child, long request,
        void __user *datap = (void __user *) data;
 
        switch (request) {
-       case PTRACE_GETFPXREGS: /* Get the child FPU state. */
+       case PTRACE_GETFPREGS: /* Get the child FPU state. */
                ret = get_fpregs(datap, child);
                break;
-       case PTRACE_SETFPXREGS: /* Set the child FPU state. */
+       case PTRACE_SETFPREGS: /* Set the child FPU state. */
                ret = set_fpregs(datap, child);
                break;
+       case PTRACE_ARCH_PRCTL:
+               /* XXX Calls ptrace on the host - needs some SMP thinking */
+               ret = arch_prctl(child, data, (void __user *) addr);
+               break;
        }
 
        return ret;
index fdba545..8ee8f8e 100644 (file)
@@ -85,6 +85,7 @@
 
 struct uml_pt_regs {
        unsigned long gp[MAX_REG_NR];
+       unsigned long fp[HOST_FP_SIZE];
        struct faultinfo faultinfo;
        long syscall;
        int is_user;
index 4554cc6..091508b 100644 (file)
@@ -16,7 +16,6 @@
 #endif
 
 .macro altinstruction_entry orig alt feature orig_len alt_len
-       .align 8
        .long \orig - .
        .long \alt - .
        .word \feature
index 23fb6d7..37ad100 100644 (file)
@@ -48,9 +48,6 @@ struct alt_instr {
        u16 cpuid;              /* cpuid bit set for replacement */
        u8  instrlen;           /* length of original instruction */
        u8  replacementlen;     /* length of new instruction, <= instrlen */
-#ifdef CONFIG_X86_64
-       u32 pad2;
-#endif
 };
 
 extern void alternative_instructions(void);
@@ -83,7 +80,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
                                                                        \
       "661:\n\t" oldinstr "\n662:\n"                                   \
       ".section .altinstructions,\"a\"\n"                              \
-      _ASM_ALIGN "\n"                                                  \
       "         .long 661b - .\n"                      /* label           */   \
       "         .long 663f - .\n"                      /* new instruction */   \
       "         .word " __stringify(feature) "\n"      /* feature bit     */   \
index 4258aac..88b23a4 100644 (file)
@@ -332,7 +332,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                asm goto("1: jmp %l[t_no]\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
-                        _ASM_ALIGN "\n"
                         " .long 1b - .\n"
                         " .long 0\n"           /* no replacement */
                         " .word %P0\n"         /* feature bit */
@@ -350,7 +349,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                asm volatile("1: movb $0,%0\n"
                             "2:\n"
                             ".section .altinstructions,\"a\"\n"
-                            _ASM_ALIGN "\n"
                             " .long 1b - .\n"
                             " .long 3f - .\n"
                             " .word %P1\n"             /* feature bit */
index a518c0a..c59cc97 100644 (file)
@@ -44,7 +44,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
                : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
        __asm__ (
-               "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+               "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
                : [lo]"=a"(product),
                  [hi]"=d"(tmp)
                : "0"(delta),
index 4ee3abf..cfa62ec 100644 (file)
@@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
        perf_callchain_store(entry, regs->ip);
 
+       if (!current->mm)
+               return;
+
        if (perf_callchain_user32(regs, entry))
                return;
 
index c953302..039d913 100644 (file)
@@ -365,8 +365,13 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
         */
        if (bus) {
                struct pci_bus *child;
-               list_for_each_entry(child, &bus->children, node)
-                       pcie_bus_configure_settings(child, child->self->pcie_mpss);
+               list_for_each_entry(child, &bus->children, node) {
+                       struct pci_dev *self = child->self;
+                       if (!self)
+                               continue;
+
+                       pcie_bus_configure_settings(child, self->pcie_mpss);
+               }
        }
 
        if (!bus)
index 20a6142..3dd53f9 100644 (file)
@@ -1721,10 +1721,8 @@ void __init xen_setup_machphys_mapping(void)
                machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
        }
 #ifdef CONFIG_X86_32
-       if ((machine_to_phys_mapping + machine_to_phys_nr)
-           < machine_to_phys_mapping)
-               machine_to_phys_nr = (unsigned long *)NULL
-                                    - machine_to_phys_mapping;
+       WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
+               < machine_to_phys_mapping);
 #endif
 }
 
index df118a8..46d6d21 100644 (file)
@@ -184,6 +184,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
                                        PFN_UP(start_pci), PFN_DOWN(last));
        return identity;
 }
+
+static unsigned long __init xen_get_max_pages(void)
+{
+       unsigned long max_pages = MAX_DOMAIN_PAGES;
+       domid_t domid = DOMID_SELF;
+       int ret;
+
+       ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+       if (ret > 0)
+               max_pages = ret;
+       return min(max_pages, MAX_DOMAIN_PAGES);
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -292,6 +305,14 @@ char * __init xen_memory_setup(void)
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
+       extra_limit = xen_get_max_pages();
+       if (max_pfn + extra_pages > extra_limit) {
+               if (extra_limit > max_pfn)
+                       extra_pages = extra_limit - max_pfn;
+               else
+                       extra_pages = 0;
+       }
+
        extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
 
        /*
index e79dbb9..041d4fe 100644 (file)
@@ -32,6 +32,7 @@
 #include <xen/page.h>
 #include <xen/events.h>
 
+#include <xen/hvc-console.h>
 #include "xen-ops.h"
 #include "mmu.h"
 
@@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
        unsigned cpu;
        unsigned int i;
 
+       if (skip_ioapic_setup) {
+               char *m = (max_cpus == 0) ?
+                       "The nosmp parameter is incompatible with Xen; " \
+                       "use Xen dom0_max_vcpus=1 parameter" :
+                       "The noapic parameter is incompatible with Xen";
+
+               xen_raw_printk(m);
+               panic(m);
+       }
        xen_init_lock_cpu(0);
 
        smp_store_cpu_info(0);
@@ -522,7 +532,6 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
        WARN_ON(xen_smp_intr_init(0));
 
        xen_init_lock_cpu(0);
-       xen_init_spinlocks();
 }
 
 static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
index 5158c50..163b467 100644 (file)
@@ -168,9 +168,10 @@ cycle_t xen_clocksource_read(void)
         struct pvclock_vcpu_time_info *src;
        cycle_t ret;
 
-       src = &get_cpu_var(xen_vcpu)->time;
+       preempt_disable_notrace();
+       src = &__get_cpu_var(xen_vcpu)->time;
        ret = pvclock_clocksource_read(src);
-       put_cpu_var(xen_vcpu);
+       preempt_enable_notrace();
        return ret;
 }
 
index 22a2093..b040b0e 100644 (file)
@@ -113,11 +113,13 @@ xen_iret_start_crit:
 
        /*
         * If there's something pending, mask events again so we can
-        * jump back into xen_hypervisor_callback
+        * jump back into xen_hypervisor_callback. Otherwise do not
+        * touch XEN_vcpu_info_mask.
         */
-       sete XEN_vcpu_info_mask(%eax)
+       jne 1f
+       movb $1, XEN_vcpu_info_mask(%eax)
 
-       popl %eax
+1:     popl %eax
 
        /*
         * From this point on the registers are restored and the stack
index bc533dd..f895a24 100644 (file)
 
 /* Maximum sleep allowed via Sleep() operator */
 
-#define ACPI_MAX_SLEEP                  20000  /* Two seconds */
+#define ACPI_MAX_SLEEP                  2000   /* Two seconds */
 
 /******************************************************************************
  *
index c34aa51..e3f4787 100644 (file)
@@ -13,6 +13,7 @@ config ACPI_APEI_GHES
        bool "APEI Generic Hardware Error Source"
        depends on ACPI_APEI && X86
        select ACPI_HED
+       select IRQ_WORK
        select LLIST
        select GENERIC_ALLOCATOR
        help
index 8041248..6154036 100644 (file)
@@ -618,7 +618,7 @@ int apei_osc_setup(void)
        };
 
        capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-       capbuf[OSC_SUPPORT_TYPE] = 0;
+       capbuf[OSC_SUPPORT_TYPE] = 1;
        capbuf[OSC_CONTROL_TYPE] = 0;
 
        if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
index 0eef4da..20663f8 100644 (file)
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
        map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
        if (map->work_buf == NULL) {
                ret = -ENOMEM;
-               goto err_bus;
+               goto err_map;
        }
 
        return map;
 
-err_bus:
-       module_put(map->bus->owner);
 err_map:
        kfree(map);
 err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
 void regmap_exit(struct regmap *map)
 {
        kfree(map->work_buf);
-       module_put(map->bus->owner);
        kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
index 3ef4760..9cbac6b 100644 (file)
@@ -72,9 +72,15 @@ static struct usb_device_id btusb_table[] = {
        /* Apple MacBookAir3,1, MacBookAir3,2 */
        { USB_DEVICE(0x05ac, 0x821b) },
 
+       /* Apple MacBookAir4,1 */
+       { USB_DEVICE(0x05ac, 0x821f) },
+
        /* Apple MacBookPro8,2 */
        { USB_DEVICE(0x05ac, 0x821a) },
 
+       /* Apple MacMini5,1 */
+       { USB_DEVICE(0x05ac, 0x8281) },
+
        /* AVM BlueFRITZ! USB v2.0 */
        { USB_DEVICE(0x057c, 0x3800) },
 
index 65d27af..04d353f 100644 (file)
@@ -124,6 +124,13 @@ static long st_receive(void *priv_data, struct sk_buff *skb)
 /* ------- Interfaces to HCI layer ------ */
 /* protocol structure registered with shared transport */
 static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
+       {
+               .chnl_id = HCI_EVENT_PKT, /* HCI Events */
+               .hdr_len = sizeof(struct hci_event_hdr),
+               .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
+               .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
+               .reserve = 8,
+       },
        {
                .chnl_id = HCI_ACLDATA_PKT, /* ACL */
                .hdr_len = sizeof(struct hci_acl_hdr),
@@ -138,13 +145,6 @@ static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
                .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
                .reserve = 8,
        },
-       {
-               .chnl_id = HCI_EVENT_PKT, /* HCI Events */
-               .hdr_len = sizeof(struct hci_event_hdr),
-               .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
-               .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
-               .reserve = 8,
-       },
 };
 
 /* Called from HCI core to initialize the device */
@@ -240,7 +240,7 @@ static int ti_st_close(struct hci_dev *hdev)
        if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
                return 0;
 
-       for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+       for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
                err = st_unregister(&ti_st_proto[i]);
                if (err)
                        BT_ERR("st_unregister(%d) failed with error %d",
index 7b0603e..cdc02ac 100644 (file)
@@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu)
        pr = per_cpu(processors, cpu);
        pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
 
+       if (!pr)
+               return -ENODEV;
+
        status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return -ENODEV;
index cd3a7c7..467e4dc 100644 (file)
@@ -174,8 +174,10 @@ struct d40_base;
  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
  * transfer and call client callback.
  * @client: Client owned descriptor list.
+ * @pending_queue: Submitted jobs, to be issued by issue_pending()
  * @active: Active descriptor.
  * @queue: Queued jobs.
+ * @prepare_queue: Prepared jobs.
  * @dma_cfg: The client configuration of this dma channel.
  * @configured: whether the dma_cfg configuration is valid
  * @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
        struct list_head                 pending_queue;
        struct list_head                 active;
        struct list_head                 queue;
+       struct list_head                 prepare_queue;
        struct stedma40_chan_cfg         dma_cfg;
        bool                             configured;
        struct d40_base                 *base;
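
The new kerneldoc entries above add pending_queue and prepare_queue to the lists a descriptor can sit on, and the rest of the patch moves descriptors between them so terminate_all can find and free everything. A rough sketch of that kind of multi-stage queueing, with invented names and only the list movement shown (all list heads are assumed to have been set up with INIT_LIST_HEAD()):

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_desc {
	struct list_head node;
};

struct my_chan {
	spinlock_t lock;
	struct list_head prepare_queue;	/* prep_*() ran, not yet submitted */
	struct list_head pending_queue;	/* submitted, awaiting issue_pending() */
	struct list_head active;	/* currently owned by the hardware */
};

/* tx_submit(): prepared -> pending */
static void my_submit(struct my_chan *c, struct my_desc *d)
{
	spin_lock(&c->lock);
	list_move_tail(&d->node, &c->pending_queue);
	spin_unlock(&c->lock);
}

/* issue_pending(): pending -> active (hardware start omitted) */
static void my_issue_pending(struct my_chan *c)
{
	spin_lock(&c->lock);
	list_splice_tail_init(&c->pending_queue, &c->active);
	spin_unlock(&c->lock);
}
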
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 
                list_for_each_entry_safe(d, _d, &d40c->client, node)
                        if (async_tx_test_ack(&d->txd)) {
-                               d40_pool_lli_free(d40c, d);
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
        return d;
 }
 
+/* remove desc from current queue and add it to the pending_queue */
 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
 {
+       d40_desc_remove(desc);
+       desc->is_in_client_list = false;
        list_add_tail(&desc->node, &d40c->pending_queue);
 }
 
@@ -803,6 +808,7 @@ done:
 static void d40_term_all(struct d40_chan *d40c)
 {
        struct d40_desc *d40d;
+       struct d40_desc *_d;
 
        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
                d40_desc_free(d40c, d40d);
        }
 
+       /* Release client owned descriptors */
+       if (!list_empty(&d40c->client))
+               list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
+                       d40_desc_remove(d40d);
+                       d40_desc_free(d40c, d40d);
+               }
+
+       /* Release descriptors in prepare queue */
+       if (!list_empty(&d40c->prepare_queue))
+               list_for_each_entry_safe(d40d, _d,
+                                        &d40c->prepare_queue, node) {
+                       d40_desc_remove(d40d);
+                       d40_desc_free(d40c, d40d);
+               }
+
        d40c->pending_tx = 0;
        d40c->busy = false;
 }
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
 
        if (!d40d->cyclic) {
                if (async_tx_test_ack(&d40d->txd)) {
-                       d40_pool_lli_free(d40c, d40d);
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                } else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
        u32 event;
        struct d40_phy_res *phy = d40c->phy_chan;
        bool is_src;
-       struct d40_desc *d;
-       struct d40_desc *_d;
-
 
        /* Terminate all queued and active transfers */
        d40_term_all(d40c);
 
-       /* Release client owned descriptors */
-       if (!list_empty(&d40c->client))
-               list_for_each_entry_safe(d, _d, &d40c->client, node) {
-                       d40_pool_lli_free(d40c, d);
-                       d40_desc_remove(d);
-                       d40_desc_free(d40c, d);
-               }
-
        if (phy == NULL) {
                chan_err(d40c, "phy == null\n");
                return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
                goto err;
        }
 
+       /*
+        * add descriptor to the prepare queue in order to be able
+        * to free them later in terminate_all
+        */
+       list_add_tail(&desc->node, &chan->prepare_queue);
+
        spin_unlock_irqrestore(&chan->lock, flags);
 
        return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
                INIT_LIST_HEAD(&d40c->queue);
                INIT_LIST_HEAD(&d40c->pending_queue);
                INIT_LIST_HEAD(&d40c->client);
+               INIT_LIST_HEAD(&d40c->prepare_queue);
 
                tasklet_init(&d40c->tasklet, dma_tasklet,
                             (unsigned long) d40c);
index 57cd3a4..fd7170a 100644 (file)
@@ -290,6 +290,9 @@ static const struct {
        {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER},
 
+       {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
+               QUIRK_NO_MSI},
+
        {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER},
 
index 231714d..4e24436 100644 (file)
@@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,
        return 0;
 }
 
-int __devexit bgpio_remove(struct bgpio_chip *bgc)
+int bgpio_remove(struct bgpio_chip *bgc)
 {
        int err = gpiochip_remove(&bgc->gc);
 
@@ -361,15 +361,10 @@ int __devexit bgpio_remove(struct bgpio_chip *bgc)
 }
 EXPORT_SYMBOL_GPL(bgpio_remove);
 
-int __devinit bgpio_init(struct bgpio_chip *bgc,
-                        struct device *dev,
-                        unsigned long sz,
-                        void __iomem *dat,
-                        void __iomem *set,
-                        void __iomem *clr,
-                        void __iomem *dirout,
-                        void __iomem *dirin,
-                        bool big_endian)
+int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
+              unsigned long sz, void __iomem *dat, void __iomem *set,
+              void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
+              bool big_endian)
 {
        int ret;
 
index 82db185..fe738f0 100644 (file)
@@ -499,6 +499,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
        mutex_lock(&dev->mode_config.mutex);
        drm_mode_object_put(dev, &connector->base);
        list_del(&connector->head);
+       dev->mode_config.num_connector--;
        mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -529,6 +530,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
        mutex_lock(&dev->mode_config.mutex);
        drm_mode_object_put(dev, &encoder->base);
        list_del(&encoder->head);
+       dev->mode_config.num_encoder--;
        mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
index 802b61a..f7c6854 100644 (file)
@@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
 {
        printk(KERN_ERR "panic occurred, switching back to text console\n");
        return drm_fb_helper_force_kernel_mode();
-       return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_panic);
 
index 8d02d87..c919cfc 100644 (file)
@@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
                nouveau_gpuobj_ref(NULL, &obj);
                if (ret)
                        return ret;
-       } else {
+       } else
+       if (USE_SEMA(dev)) {
                /* map fence bo into channel's vm */
                ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
                                         &chan->fence.vma);
index c444cad..2706cb3 100644 (file)
@@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                return -ENOMEM;
 
        nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
-       if (!nvbe->ttm_alloced)
+       if (!nvbe->ttm_alloced) {
+               kfree(nvbe->pages);
+               nvbe->pages = NULL;
                return -ENOMEM;
+       }
 
        nvbe->nr_pages = 0;
        while (num_pages--) {
@@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
                        nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-                       dma_offset += NV_CTXDMA_PAGE_SIZE;
+                       offset_l += NV_CTXDMA_PAGE_SIZE;
                }
        }
 
index 118261d..5e45398 100644 (file)
@@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
-       struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+       struct drm_framebuffer *drm_fb;
+       struct nouveau_framebuffer *fb;
        int arb_burst, arb_lwm;
        int ret;
 
+       NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+
+       /* no fb bound */
+       if (!atomic && !crtc->fb) {
+               NV_DEBUG_KMS(dev, "No FB bound\n");
+               return 0;
+       }
+
+
        /* If atomic, we want to switch to the fb we were passed, so
         * now we update pointers to do that.  (We don't pin; just
         * assume we're already pinned and update the base address.)
@@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
                drm_fb = passed_fb;
                fb = nouveau_framebuffer(passed_fb);
        } else {
+               drm_fb = crtc->fb;
+               fb = nouveau_framebuffer(crtc->fb);
                /* If not atomic, we can go ahead and pin, and unpin the
                 * old fb we were passed.
                 */
index 46ad59e..5d98907 100644 (file)
@@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
        struct drm_device *dev = nv_crtc->base.dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+       struct drm_framebuffer *drm_fb;
+       struct nouveau_framebuffer *fb;
        int ret;
 
        NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
+       /* no fb bound */
+       if (!atomic && !crtc->fb) {
+               NV_DEBUG_KMS(dev, "No FB bound\n");
+               return 0;
+       }
+
        /* If atomic, we want to switch to the fb we were passed, so
         * now we update pointers to do that.  (We don't pin; just
         * assume we're already pinned and update the base address.)
@@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
                drm_fb = passed_fb;
                fb = nouveau_framebuffer(passed_fb);
        } else {
+               drm_fb = crtc->fb;
+               fb = nouveau_framebuffer(crtc->fb);
                /* If not atomic, we can go ahead and pin, and unpin the
                 * old fb we were passed.
                 */
index fb5fa08..e8a7467 100644 (file)
@@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 
+void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+{
+       u16 ctl, v;
+       int cap, err;
+
+       cap = pci_pcie_cap(rdev->pdev);
+       if (!cap)
+               return;
+
+       err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
+       if (err)
+               return;
+
+       v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
+
+       /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
+        * to avoid hangs or performance issues
+        */
+       if ((v == 0) || (v == 6) || (v == 7)) {
+               ctl &= ~PCI_EXP_DEVCTL_READRQ;
+               ctl |= (2 << 12);
+               pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
+       }
+}
+
 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
 {
        /* enable the pflip int */
@@ -1357,6 +1382,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
                                 SOFT_RESET_PA |
                                 SOFT_RESET_SH |
                                 SOFT_RESET_VGT |
+                                SOFT_RESET_SPI |
                                 SOFT_RESET_SX));
        RREG32(GRBM_SOFT_RESET);
        mdelay(15);
@@ -1378,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       WREG32(CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
@@ -1400,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
        evergreen_cp_start(rdev);
        rdev->cp.ready = true;
@@ -1862,6 +1888,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
+       evergreen_fix_pci_max_read_req_size(rdev);
+
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
 
        cc_gc_shader_pipe_config |=
@@ -3143,21 +3171,23 @@ int evergreen_suspend(struct radeon_device *rdev)
 }
 
 int evergreen_copy_blit(struct radeon_device *rdev,
-                       uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_pages, struct radeon_fence *fence)
+                       uint64_t src_offset,
+                       uint64_t dst_offset,
+                       unsigned num_gpu_pages,
+                       struct radeon_fence *fence)
 {
        int r;
 
        mutex_lock(&rdev->r600_blit.mutex);
        rdev->r600_blit.vb_ib = NULL;
-       r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        if (r) {
                if (rdev->r600_blit.vb_ib)
                        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
                mutex_unlock(&rdev->r600_blit.mutex);
                return r;
        }
-       evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+       evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        evergreen_blit_done_copy(rdev, fence);
        mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
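
For reference, evergreen_fix_pci_max_read_req_size() above relies on the standard PCIe Device Control encoding: bits 14:12 hold a 3-bit value v and the maximum read request size is 128 << v bytes, with 6 and 7 being reserved encodings. A minimal stand-alone sketch of the same decode-and-clamp logic (the macros mirror PCI_EXP_DEVCTL_READRQ but are redefined locally so the example builds on its own; the 512-byte clamp target is the one used in the hunk):

    #include <stdio.h>
    #include <stdint.h>

    #define DEVCTL_READRQ_MASK  0x7000   /* bits 14:12, as PCI_EXP_DEVCTL_READRQ */
    #define DEVCTL_READRQ_SHIFT 12

    /* Clamp an out-of-range max-read-request encoding to 2 (512 bytes). */
    static uint16_t fix_readrq(uint16_t devctl)
    {
            uint16_t v = (devctl & DEVCTL_READRQ_MASK) >> DEVCTL_READRQ_SHIFT;

            if (v == 0 || v == 6 || v == 7) {
                    devctl &= ~DEVCTL_READRQ_MASK;
                    devctl |= 2 << DEVCTL_READRQ_SHIFT;
            }
            return devctl;
    }

    int main(void)
    {
            uint16_t ctl = 0x7000;   /* reserved encoding 7 */

            ctl = fix_readrq(ctl);
            printf("max read request = %u bytes\n",
                   128u << ((ctl & DEVCTL_READRQ_MASK) >> DEVCTL_READRQ_SHIFT));
            return 0;                /* prints 512 */
    }
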
index 44c4750..99fbd79 100644 (file)
@@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
 extern void evergreen_mc_program(struct radeon_device *rdev);
 extern void evergreen_irq_suspend(struct radeon_device *rdev);
 extern int evergreen_mc_init(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
 
 #define EVERGREEN_PFP_UCODE_SIZE 1120
 #define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -669,6 +670,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
+       evergreen_fix_pci_max_read_req_size(rdev);
+
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
@@ -1159,6 +1162,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
                                 SOFT_RESET_PA |
                                 SOFT_RESET_SH |
                                 SOFT_RESET_VGT |
+                                SOFT_RESET_SPI |
                                 SOFT_RESET_SX));
        RREG32(GRBM_SOFT_RESET);
        mdelay(15);
@@ -1183,7 +1187,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB0_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB0_WPTR, rdev->cp.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1203,7 +1208,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
 
        rdev->cp.rptr = RREG32(CP_RB0_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB0_WPTR);
 
        /* ring1  - compute only */
        /* Set ring buffer size */
@@ -1216,7 +1220,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB1_WPTR, 0);
+       rdev->cp1.wptr = 0;
+       WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1228,7 +1233,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
 
        rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
-       rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
 
        /* ring2 - compute only */
        /* Set ring buffer size */
@@ -1241,7 +1245,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB2_WPTR, 0);
+       rdev->cp2.wptr = 0;
+       WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1253,7 +1258,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
 
        rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
-       rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
 
        /* start the rings */
        cayman_cp_start(rdev);
index f2204cb..5b1837b 100644 (file)
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
-                  unsigned num_pages,
+                  unsigned num_gpu_pages,
                   struct radeon_fence *fence)
 {
        uint32_t cur_pages;
-       uint32_t stride_bytes = PAGE_SIZE;
+       uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
        uint32_t pitch;
        uint32_t stride_pixels;
        unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
        /* radeon pitch is /64 */
        pitch = stride_bytes / 64;
        stride_pixels = stride_bytes / 4;
-       num_loops = DIV_ROUND_UP(num_pages, 8191);
+       num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
 
        /* Ask for enough room for blit + flush + fence */
        ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
                DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
                return -EINVAL;
        }
-       while (num_pages > 0) {
-               cur_pages = num_pages;
+       while (num_gpu_pages > 0) {
+               cur_pages = num_gpu_pages;
                if (cur_pages > 8191) {
                        cur_pages = 8191;
                }
-               num_pages -= cur_pages;
+               num_gpu_pages -= cur_pages;
 
                /* pages are in Y direction - height
                   page width in X direction - width */
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
                radeon_ring_write(rdev, 0);
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-               radeon_ring_write(rdev, num_pages);
-               radeon_ring_write(rdev, num_pages);
+               radeon_ring_write(rdev, cur_pages);
+               radeon_ring_write(rdev, cur_pages);
                radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
        }
        radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        /* Force read & write ptr to 0 */
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
-       WREG32(RADEON_CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        WREG32(RADEON_CP_RB_CNTL, tmp);
        udelay(10);
        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
-       /* protect against crazy HW on resume */
-       rdev->cp.wptr &= rdev->cp.ptr_mask;
        /* Set cp mode to bus mastering & enable cp*/
        WREG32(RADEON_CP_CSQ_MODE,
               REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
index f240583..a1f3ba0 100644 (file)
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
 int r200_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
-                 unsigned num_pages,
+                 unsigned num_gpu_pages,
                  struct radeon_fence *fence)
 {
        uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
        int r = 0;
 
        /* radeon pitch is /64 */
-       size = num_pages << PAGE_SHIFT;
+       size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
        r = radeon_ring_lock(rdev, num_loops * 4 + 64);
        if (r) {
index aa5571b..720dd99 100644 (file)
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       WREG32(CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
        r600_cp_start(rdev);
        rdev->cp.ready = true;
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
-                  uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_pages, struct radeon_fence *fence)
+                  uint64_t src_offset,
+                  uint64_t dst_offset,
+                  unsigned num_gpu_pages,
+                  struct radeon_fence *fence)
 {
        int r;
 
        mutex_lock(&rdev->r600_blit.mutex);
        rdev->r600_blit.vb_ib = NULL;
-       r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        if (r) {
                if (rdev->r600_blit.vb_ib)
                        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
                mutex_unlock(&rdev->r600_blit.mutex);
                return r;
        }
-       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        r600_blit_done_copy(rdev, fence);
        mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
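
The CP-resume hunks above (evergreen_cp_resume, cayman_cp_resume, r100_cp_init, r600_cp_resume) all switch to the same pattern: zero the driver's cached write pointer first and program the hardware register from that cached value, instead of writing 0 and reading the register back later, which on resume can pick up a stale value and leave the cached copy and the hardware out of sync. A minimal sketch of the idea, with an invented mmio_write() standing in for WREG32():

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t wptr_reg;                            /* stand-in for CP_RB_WPTR */
    static void mmio_write(uint32_t v) { wptr_reg = v; }

    struct cp_ring { uint32_t wptr; };

    static void cp_ring_resume(struct cp_ring *cp)
    {
            /* Decide the value in software, then program the register from it;
             * the cached copy can never disagree with what was written. */
            cp->wptr = 0;
            mmio_write(cp->wptr);
    }

    int main(void)
    {
            struct cp_ring cp = { 0xdeadbeef };

            cp_ring_resume(&cp);
            printf("cached wptr=%u, register=%u\n", cp.wptr, wptr_reg);
            return 0;
    }
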
index 32807ba..c1e056b 100644 (file)
@@ -322,6 +322,7 @@ union radeon_gart_table {
 
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
 
 struct radeon_gart {
        dma_addr_t                      table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
        int (*copy_blit)(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
-                        unsigned num_pages,
+                        unsigned num_gpu_pages,
                         struct radeon_fence *fence);
        int (*copy_dma)(struct radeon_device *rdev,
                        uint64_t src_offset,
                        uint64_t dst_offset,
-                       unsigned num_pages,
+                       unsigned num_gpu_pages,
                        struct radeon_fence *fence);
        int (*copy)(struct radeon_device *rdev,
                    uint64_t src_offset,
                    uint64_t dst_offset,
-                   unsigned num_pages,
+                   unsigned num_gpu_pages,
                    struct radeon_fence *fence);
        uint32_t (*get_engine_clock)(struct radeon_device *rdev);
        void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
index 3d7a0d7..3dedaa0 100644 (file)
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
 int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
-                  unsigned num_pages,
+                  unsigned num_gpu_pages,
                   struct radeon_fence *fence);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 extern int r200_copy_dma(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
-                        unsigned num_pages,
+                        unsigned num_gpu_pages,
                         struct radeon_fence *fence);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_pages, struct radeon_fence *fence);
+                  unsigned num_gpu_pages, struct radeon_fence *fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
                        uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_pages, struct radeon_fence *fence);
+                       unsigned num_gpu_pages, struct radeon_fence *fence);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
index dcd0863..b6e18c8 100644 (file)
@@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev)
                } else {
                        DRM_INFO("Using generic clock info\n");
 
+                       /* may need to be per card */
+                       rdev->clock.max_pixel_clock = 35000;
+
                        if (rdev->flags & RADEON_IS_IGP) {
                                p1pll->reference_freq = 1432;
                                p2pll->reference_freq = 1432;
index e0138b6..6367524 100644 (file)
@@ -3298,6 +3298,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
            rdev->pdev->subsystem_device == 0x30a4)
                return;
 
+       /* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
+        * - it hangs on resume inside the dynclk 1 table.
+        */
+       if (rdev->family == CHIP_RS480 &&
+           rdev->pdev->subsystem_vendor == 0x103c &&
+           rdev->pdev->subsystem_device == 0x30ae)
+               return;
+
        /* DYN CLK 1 */
        table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
        if (table)
index 4f0c1ec..c4b8741 100644 (file)
@@ -1297,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
                if (!radeon_dig_connector->edp_on)
                        atombios_set_edp_panel_power(connector,
                                                     ATOM_TRANSMITTER_ACTION_POWER_OFF);
-       } else {
-               /* need to setup ddc on the bridge */
-               if (radeon_connector_encoder_is_dp_bridge(connector)) {
+       } else if (radeon_connector_encoder_is_dp_bridge(connector)) {
+               /* DP bridges are always DP */
+               radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+               /* get the DPCD from the bridge */
+               radeon_dp_getdpcd(radeon_connector);
+
+               if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+                       ret = connector_status_connected;
+               else {
+                       /* need to setup ddc on the bridge */
                        if (encoder)
                                radeon_atom_ext_encoder_setup_ddc(encoder);
+                       if (radeon_ddc_probe(radeon_connector,
+                                            radeon_connector->requires_extended_probe))
+                               ret = connector_status_connected;
+               }
+
+               if ((ret == connector_status_disconnected) &&
+                   radeon_connector->dac_load_detect) {
+                       struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+                       struct drm_encoder_helper_funcs *encoder_funcs;
+                       if (encoder) {
+                               encoder_funcs = encoder->helper_private;
+                               ret = encoder_funcs->detect(encoder, connector);
+                       }
                }
+       } else {
                radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
                if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
                        ret = connector_status_connected;
@@ -1318,16 +1339,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
                                        ret = connector_status_connected;
                        }
                }
-
-               if ((ret == connector_status_disconnected) &&
-                   radeon_connector->dac_load_detect) {
-                       struct drm_encoder *encoder = radeon_best_single_encoder(connector);
-                       struct drm_encoder_helper_funcs *encoder_funcs;
-                       if (encoder) {
-                               encoder_funcs = encoder->helper_private;
-                               ret = encoder_funcs->detect(encoder, connector);
-                       }
-               }
        }
 
        radeon_connector_update_scratch_regs(connector, ret);
index 1a85894..6adb3e5 100644 (file)
@@ -473,8 +473,8 @@ pflip_cleanup:
        spin_lock_irqsave(&dev->event_lock, flags);
        radeon_crtc->unpin_work = NULL;
 unlock_free:
-       drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
        spin_unlock_irqrestore(&dev->event_lock, flags);
+       drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
        radeon_fence_unref(&work->fence);
        kfree(work);
 
@@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
                radeon_router_select_ddc_port(radeon_connector);
 
        if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
-           (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+           (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
+           radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
                struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
                if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
                     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
-                       radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
-       }
-       if (!radeon_connector->ddc_bus)
-               return -1;
-       if (!radeon_connector->edid) {
-               radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+                       radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+                                                             &dig->dp_i2c_bus->adapter);
+               else if (radeon_connector->ddc_bus && !radeon_connector->edid)
+                       radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+                                                             &radeon_connector->ddc_bus->adapter);
+       } else {
+               if (radeon_connector->ddc_bus && !radeon_connector->edid)
+                       radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+                                                             &radeon_connector->ddc_bus->adapter);
        }
 
        if (!radeon_connector->edid) {
index 9b86fb0..0b5468b 100644 (file)
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
                DRM_ERROR("Trying to move memory with CP turned off.\n");
                return -EINVAL;
        }
-       r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
+       BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+       r = radeon_copy(rdev, old_start, new_start,
+                       new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+                       fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                      evict, no_wait_reserve, no_wait_gpu, new_mem);
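
The multiplication added to radeon_move_blit() converts TTM's count of CPU pages into GPU pages via PAGE_SIZE / RADEON_GPU_PAGE_SIZE, which the new BUILD_BUG_ON guarantees is a whole number. A short sketch of that arithmetic, assuming a 4 KiB GPU page and showing both a 4 KiB and a 64 KiB CPU page size:

    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096u   /* RADEON_GPU_PAGE_SIZE */

    /* Valid only when cpu_page_size is a multiple of GPU_PAGE_SIZE, which is
     * exactly what the BUILD_BUG_ON in the hunk asserts for PAGE_SIZE. */
    static unsigned int cpu_to_gpu_pages(unsigned int num_cpu_pages,
                                         unsigned int cpu_page_size)
    {
            return num_cpu_pages * (cpu_page_size / GPU_PAGE_SIZE);
    }

    int main(void)
    {
            printf("%u\n", cpu_to_gpu_pages(8, 4096));    /* 4 KiB pages:   8   */
            printf("%u\n", cpu_to_gpu_pages(8, 65536));   /* 64 KiB pages:  128 */
            return 0;
    }
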
index a4d38d8..ef06194 100644 (file)
@@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (bo->ttm == NULL) {
-                       ret = ttm_bo_add_ttm(bo, false);
+                       bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+                       ret = ttm_bo_add_ttm(bo, zero);
                        if (ret)
                                goto out_err;
                }
index 7d27d2b..7484e1b 100644 (file)
 #define USB_DEVICE_ID_PENPOWER         0x00f4
 
 #define USB_VENDOR_ID_GREENASIA                0x0e8f
+#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD        0x3013
 
 #define USB_VENDOR_ID_GRETAGMACBETH    0x0971
 #define USB_DEVICE_ID_GRETAGMACBETH_HUEY       0x2005
index 0ec91c1..f0fbd7b 100644 (file)
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
 #define NO_TOUCHES -1
 #define SINGLE_TOUCH_UP -2
 
+/* Touch surface information. Dimension is in hundredths of a mm, min and max
+ * are in device units. */
+#define MOUSE_DIMENSION_X (float)9056
+#define MOUSE_MIN_X -1100
+#define MOUSE_MAX_X 1258
+#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
+#define MOUSE_DIMENSION_Y (float)5152
+#define MOUSE_MIN_Y -1589
+#define MOUSE_MAX_Y 2047
+#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))
+
+#define TRACKPAD_DIMENSION_X (float)13000
+#define TRACKPAD_MIN_X -2909
+#define TRACKPAD_MAX_X 3167
+#define TRACKPAD_RES_X \
+       ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
+#define TRACKPAD_DIMENSION_Y (float)11000
+#define TRACKPAD_MIN_Y -2456
+#define TRACKPAD_MAX_Y 2565
+#define TRACKPAD_RES_Y \
+       ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
+
 /**
  * struct magicmouse_sc - Tracks Magic Mouse-specific data.
  * @input: Input device through which we report events.
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
                 * inverse of the reported Y.
                 */
                if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
-                       input_set_abs_params(input, ABS_MT_POSITION_X, -1100,
-                               1358, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_Y, -1589,
-                               2047, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_X,
+                               MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_Y,
+                               MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
+
+                       input_abs_set_res(input, ABS_MT_POSITION_X,
+                               MOUSE_RES_X);
+                       input_abs_set_res(input, ABS_MT_POSITION_Y,
+                               MOUSE_RES_Y);
                } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
-                       input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0);
-                       input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_X, -2909,
-                               3167, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_Y, -2456,
-                               2565, 4, 0);
+                       input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
+                               TRACKPAD_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
+                               TRACKPAD_MAX_Y, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_X,
+                               TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_Y,
+                               TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
+
+                       input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
+                       input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
+                       input_abs_set_res(input, ABS_MT_POSITION_X,
+                               TRACKPAD_RES_X);
+                       input_abs_set_res(input, ABS_MT_POSITION_Y,
+                               TRACKPAD_RES_Y);
                }
 
                input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
        }
        report->size = 6;
 
+       /*
+        * Some devices respond with 'invalid report id' when the feature
+        * report switching them into multitouch mode is sent to them.
+        *
+        * This results in -EIO from the _raw low-level transport callback,
+        * but there seems to be no other way of switching the mode.
+        * Thus the super-ugly hacky success check below.
+        */
        ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
                        HID_FEATURE_REPORT);
-       if (ret != sizeof(feature)) {
+       if (ret != -EIO && ret != sizeof(feature)) {
                hid_err(hdev, "unable to request touch data (%d)\n", ret);
                goto err_stop_hw;
        }
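
The *_RES_* macros introduced in the first hunk for this device express resolution as device units per millimetre: axis span divided by the physical dimension, which is stored in hundredths of a millimetre (hence the /100). As a worked example for the mouse X axis: (1258 - (-1100)) / (9056 / 100) ≈ 26 units/mm. The same arithmetic stand-alone, using only constants copied from the hunk:

    #include <stdio.h>

    #define MOUSE_DIMENSION_X 9056.0f   /* hundredths of a mm */
    #define MOUSE_MIN_X       (-1100)
    #define MOUSE_MAX_X       1258

    int main(void)
    {
            float res_x = (MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100);

            printf("mouse X resolution: %.1f units/mm\n", res_x);   /* ~26.0 */
            return 0;
    }
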
index 0688832..72ca689 100644 (file)
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
        if (ret) {
                hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
                         ret);
-               /*
-                * battery attribute is not critical for the tablet, but if it
-                * failed then there is no need to create ac attribute
-                */
-               goto move_on;
+               goto err_battery;
        }
 
        wdata->ac.properties = wacom_ac_props;
@@ -371,18 +367,14 @@ static int wacom_probe(struct hid_device *hdev,
        if (ret) {
                hid_warn(hdev,
                         "can't create ac battery attribute, err: %d\n", ret);
-               /*
-                * ac attribute is not critical for the tablet, but if it
-                * failed then we don't want to battery attribute to exist
-                */
-               power_supply_unregister(&wdata->battery);
+               goto err_ac;
        }
-
-move_on:
 #endif
        hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
        input = hidinput->input;
 
+       __set_bit(INPUT_PROP_POINTER, input->propbit);
+
        /* Basics */
        input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
 
@@ -416,6 +408,13 @@ move_on:
 
        return 0;
 
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+err_ac:
+       power_supply_unregister(&wdata->battery);
+err_battery:
+       device_remove_file(&hdev->dev, &dev_attr_speed);
+       hid_hw_stop(hdev);
+#endif
 err_free:
        kfree(wdata);
        return ret;
@@ -426,6 +425,7 @@ static void wacom_remove(struct hid_device *hdev)
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
        struct wacom_data *wdata = hid_get_drvdata(hdev);
 #endif
+       device_remove_file(&hdev->dev, &dev_attr_speed);
        hid_hw_stop(hdev);
 
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
index 4bdb5d4..3146fdc 100644 (file)
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
 
        { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
        { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
index 59d83e8..4112576 100644 (file)
@@ -601,7 +601,12 @@ static int create_core_data(struct platform_data *pdata,
        err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
        if (!err) {
                tdata->attr_size += MAX_THRESH_ATTRS;
-               tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+               tdata->tmin = tdata->tjmax -
+                             ((eax & THERM_MASK_THRESHOLD0) >>
+                              THERM_SHIFT_THRESHOLD0) * 1000;
+               tdata->ttarget = tdata->tjmax -
+                                ((eax & THERM_MASK_THRESHOLD1) >>
+                                 THERM_SHIFT_THRESHOLD1) * 1000;
        }
 
        pdata->core_data[attr_no] = tdata;
index d94a24f..dd2d7b9 100644 (file)
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
 
 static inline int ADC_TO_CURR(int adc, int gain)
 {
-       return adc * 1400000 / gain * 255;
+       return adc * 1400000 / (gain * 255);
 }
 
 /*
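
The one-line ADC_TO_CURR() change above is a pure operator-precedence fix: adc * 1400000 / gain * 255 groups left to right as ((adc * 1400000) / gain) * 255, roughly 255 * 255 times too large, whereas the intended scale factor divides by gain * 255. A tiny demonstration with made-up input values:

    #include <stdio.h>

    int main(void)
    {
            long long adc = 100, gain = 4;

            long long wrong = adc * 1400000 / gain * 255;     /* ((adc*1400000)/gain)*255 */
            long long right = adc * 1400000 / (gain * 255);   /* the fixed expression     */

            printf("wrong=%lld right=%lld\n", wrong, right);
            return 0;
    }
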
index a561c3a..397fc59 100644 (file)
@@ -978,6 +978,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,
 struct pmbus_limit_attr {
        u16 reg;                /* Limit register */
        bool update;            /* True if register needs updates */
+       bool low;               /* True if low limit; for limits with compare
+                                  functions only */
        const char *attr;       /* Attribute name */
        const char *alarm;      /* Alarm attribute name */
        u32 sbit;               /* Alarm attribute status bit */
@@ -1029,7 +1031,8 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
                                if (attr->compare) {
                                        pmbus_add_boolean_cmp(data, name,
                                                l->alarm, index,
-                                               cbase, cindex,
+                                               l->low ? cindex : cbase,
+                                               l->low ? cbase : cindex,
                                                attr->sbase + page, l->sbit);
                                } else {
                                        pmbus_add_boolean_reg(data, name,
@@ -1366,11 +1369,13 @@ static const struct pmbus_sensor_attr power_attributes[] = {
 static const struct pmbus_limit_attr temp_limit_attrs[] = {
        {
                .reg = PMBUS_UT_WARN_LIMIT,
+               .low = true,
                .attr = "min",
                .alarm = "min_alarm",
                .sbit = PB_TEMP_UT_WARNING,
        }, {
                .reg = PMBUS_UT_FAULT_LIMIT,
+               .low = true,
                .attr = "lcrit",
                .alarm = "lcrit_alarm",
                .sbit = PB_TEMP_UT_FAULT,
@@ -1399,11 +1404,13 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
 static const struct pmbus_limit_attr temp_limit_attrs23[] = {
        {
                .reg = PMBUS_UT_WARN_LIMIT,
+               .low = true,
                .attr = "min",
                .alarm = "min_alarm",
                .sbit = PB_TEMP_UT_WARNING,
        }, {
                .reg = PMBUS_UT_FAULT_LIMIT,
+               .low = true,
                .attr = "lcrit",
                .alarm = "lcrit_alarm",
                .sbit = PB_TEMP_UT_FAULT,
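
The new low flag matters because compare-style alarms are direction sensitive: a high limit should trip when the measured value rises above the limit, a low limit when it falls below it, so the two operands handed to the comparison have to be swapped for the "min"/"lcrit" attributes. A rough stand-alone illustration of that swap (the register plumbing is replaced by plain integers; limit_alarm() and cmp_alarm() are invented names, not pmbus API):

    #include <stdio.h>
    #include <stdbool.h>

    /* The comparison trips when its first operand exceeds its second. */
    static bool cmp_alarm(int a, int b) { return a > b; }

    static bool limit_alarm(int value, int limit, bool low)
    {
            /* High limit: value compared against limit.
             * Low limit: operands swapped, limit compared against value. */
            return low ? cmp_alarm(limit, value) : cmp_alarm(value, limit);
    }

    int main(void)
    {
            printf("over-temperature alarm:  %d\n", limit_alarm(95, 85, false));  /* 1 */
            printf("under-temperature alarm: %d\n", limit_alarm(5, 10, true));    /* 1 */
            return 0;
    }
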
index ace1c73..d0ddb60 100644 (file)
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
        block_buffer[ret] = '\0';
        dev_info(&client->dev, "Device ID %s\n", block_buffer);
 
-       mid = NULL;
-       for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
-               mid = &ucd9000_id[i];
+       for (mid = ucd9000_id; mid->name[0]; mid++) {
                if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
                        break;
        }
-       if (!mid || !strlen(mid->name)) {
+       if (!mid->name[0]) {
                dev_err(&client->dev, "Unsupported device\n");
                return -ENODEV;
        }
index ffcc1cf..c65e9da 100644 (file)
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
        block_buffer[ret] = '\0';
        dev_info(&client->dev, "Device ID %s\n", block_buffer);
 
-       mid = NULL;
-       for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
-               mid = &ucd9200_id[i];
+       for (mid = ucd9200_id; mid->name[0]; mid++) {
                if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
                        break;
        }
-       if (!mid || !strlen(mid->name)) {
+       if (!mid->name[0]) {
                dev_err(&client->dev, "Unsupported device\n");
                return -ENODEV;
        }
index 6659d26..b73da6c 100644 (file)
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
                return -EINVAL;
        }
        sds = kzalloc(sizeof(*sds), GFP_KERNEL);
-       if (!sds)
+       if (!sds) {
+               ret = -ENOMEM;
                goto err_mem;
+       }
 
        for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
                sds->pdev[i] = add_i2c_device(dev, i);
                if (IS_ERR(sds->pdev[i])) {
+                       ret = PTR_ERR(sds->pdev[i]);
                        while (--i >= 0)
                                platform_device_unregister(sds->pdev[i]);
                        goto err_dev_add;
index 2440b74..3c94c4a 100644 (file)
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
 
        /* Rounds down to not include partial word at the end of buf */
        words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
-       if (words_to_transfer > tx_fifo_avail)
-               words_to_transfer = tx_fifo_avail;
 
-       i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
-
-       buf += words_to_transfer * BYTES_PER_FIFO_WORD;
-       buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
-       tx_fifo_avail -= words_to_transfer;
+       /* It's very common to have < 4 bytes, so optimize that case. */
+       if (words_to_transfer) {
+               if (words_to_transfer > tx_fifo_avail)
+                       words_to_transfer = tx_fifo_avail;
+
+               /*
+                * Update state before writing to FIFO.  If this causes us
+                * to finish writing all bytes (AKA buf_remaining goes to 0) we
+                * have a potential for an interrupt (PACKET_XFER_COMPLETE is
+                * not maskable).  We need to make sure that the isr sees
+                * buf_remaining as 0 and doesn't call us back re-entrantly.
+                */
+               buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+               tx_fifo_avail -= words_to_transfer;
+               i2c_dev->msg_buf_remaining = buf_remaining;
+               i2c_dev->msg_buf = buf +
+                       words_to_transfer * BYTES_PER_FIFO_WORD;
+               barrier();
+
+               i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+               buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+       }
 
        /*
         * If there is a partial word at the end of buf, handle it manually to
@@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
        if (tx_fifo_avail > 0 && buf_remaining > 0) {
                BUG_ON(buf_remaining > 3);
                memcpy(&val, buf, buf_remaining);
+
+               /* Again update state before writing to the FIFO so the isr sees it. */
+               i2c_dev->msg_buf_remaining = 0;
+               i2c_dev->msg_buf = NULL;
+               barrier();
+
                i2c_writel(i2c_dev, val, I2C_TX_FIFO);
-               buf_remaining = 0;
-               tx_fifo_avail--;
        }
 
-       BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
-       i2c_dev->msg_buf_remaining = buf_remaining;
-       i2c_dev->msg_buf = buf;
        return 0;
 }
 
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
                        tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
        }
 
-       if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
-                       !i2c_dev->msg_buf_remaining)
+       if (status & I2C_INT_PACKET_XFER_COMPLETE) {
+               BUG_ON(i2c_dev->msg_buf_remaining);
                complete(&i2c_dev->msg_complete);
+       }
 
        i2c_writel(i2c_dev, status, I2C_INT_STATUS);
        if (i2c_dev->is_dvc)
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 
 static u32 tegra_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm tegra_i2c_algo = {
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
 }
 #endif
 
+#if defined(CONFIG_OF)
+/* Match table for of_platform binding */
+static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
+       { .compatible = "nvidia,tegra20-i2c", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
+#else
+#define tegra_i2c_of_match NULL
+#endif
+
 static struct platform_driver tegra_i2c_driver = {
        .probe   = tegra_i2c_probe,
        .remove  = tegra_i2c_remove,
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
        .driver  = {
                .name  = "tegra-i2c",
                .owner = THIS_MODULE,
+               .of_match_table = tegra_i2c_of_match,
        },
 };
 
index 7b404e5..e34eeb8 100644 (file)
@@ -668,4 +668,3 @@ module_exit(adp5588_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("ADP5588/87 Keypad driver");
-MODULE_ALIAS("platform:adp5588-keys");
index b09c7d1..ab86051 100644 (file)
@@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on)
                                le16_to_cpu(dev->ctl_req->wIndex),
                                dev->ctl_data,
                                USB_PKT_LEN, USB_CTRL_SET_TIMEOUT);
-       if (error && error != EINTR)
+       if (error < 0 && error != -EINTR)
                err("%s: usb_control_msg() failed %d", __func__, error);
 }
 
index da28018..5ec617e 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI   0x0245
 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO    0x0246
 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS    0x0247
+/* MacbookAir4,1 (unibody, July 2011) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI  0x0249
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO   0x024a
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS   0x024b
 /* MacbookAir4,2 (unibody, July 2011) */
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI   0x024c
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO    0x024d
@@ -112,6 +116,10 @@ static const struct usb_device_id bcm5974_table[] = {
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
+       /* MacbookAir4,1 */
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
        /* MacbookAir4,2 */
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
@@ -334,6 +342,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
                { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
        },
+       {
+               USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI,
+               USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO,
+               USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
+               HAS_INTEGRATED_BUTTON,
+               0x84, sizeof(struct bt_data),
+               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+               { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+               { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+               { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+       },
        {}
 };
 
index d27c9d9..958b4eb 100644 (file)
@@ -229,13 +229,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
                                                        get_unaligned_le16(&report[i + 3]);
                                                i += 4;
                                        }
-                               } else if (usage == WCM_DIGITIZER) {
-                                       /* max pressure isn't reported
-                                       features->pressure_max = (unsigned short)
-                                                       (report[i+4] << 8  | report[i + 3]);
-                                       */
-                                       features->pressure_max = 255;
-                                       i += 4;
                                }
                                break;
 
@@ -291,13 +284,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
                                pen = 1;
                                i++;
                                break;
-
-                       case HID_USAGE_UNDEFINED:
-                               if (usage == WCM_DESKTOP && finger) /* capacity */
-                                       features->pressure_max =
-                                               get_unaligned_le16(&report[i + 3]);
-                               i += 4;
-                               break;
                        }
                        break;
 
index c1c2f7b..0dc97ec 100644 (file)
@@ -800,25 +800,26 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
        int i;
 
        for (i = 0; i < 2; i++) {
-               int p = data[9 * i + 2];
-               bool touch = p && !wacom->shared->stylus_in_proximity;
+               int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
+               bool touch = data[offset + 3] & 0x80;
 
-               input_mt_slot(input, i);
-               input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
                /*
                 * Touch events need to be disabled while stylus is
                 * in proximity because user's hand is resting on touchpad
                 * and sending unwanted events.  User expects tablet buttons
                 * to continue working though.
                 */
+               touch = touch && !wacom->shared->stylus_in_proximity;
+
+               input_mt_slot(input, i);
+               input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
                if (touch) {
-                       int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff;
-                       int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff;
+                       int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff;
+                       int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff;
                        if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
                                x <<= 5;
                                y <<= 5;
                        }
-                       input_report_abs(input, ABS_MT_PRESSURE, p);
                        input_report_abs(input, ABS_MT_POSITION_X, x);
                        input_report_abs(input, ABS_MT_POSITION_Y, y);
                }
@@ -1056,10 +1057,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                             features->x_fuzz, 0);
        input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
                             features->y_fuzz, 0);
-       input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
-                            features->pressure_fuzz, 0);
 
        if (features->device_type == BTN_TOOL_PEN) {
+               input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
+                            features->pressure_fuzz, 0);
+
                /* penabled devices have fixed resolution for each model */
                input_abs_set_res(input_dev, ABS_X, features->x_resolution);
                input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
@@ -1098,6 +1100,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
                __set_bit(BTN_STYLUS, input_dev->keybit);
                __set_bit(BTN_STYLUS2, input_dev->keybit);
+
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                break;
 
        case WACOM_21UX2:
@@ -1126,6 +1130,9 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                }
 
                input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
                wacom_setup_cintiq(wacom_wac);
                break;
 
@@ -1150,6 +1157,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                /* fall through */
 
        case INTUOS:
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
                wacom_setup_intuos(wacom_wac);
                break;
 
@@ -1165,6 +1174,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
 
                input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
                wacom_setup_intuos(wacom_wac);
+
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                break;
 
        case TABLETPC2FG:
@@ -1183,26 +1194,40 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
        case TABLETPC:
                __clear_bit(ABS_MISC, input_dev->absbit);
 
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
                if (features->device_type != BTN_TOOL_PEN)
                        break;  /* no need to process stylus stuff */
 
                /* fall through */
 
        case PL:
-       case PTU:
        case DTU:
                __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+               __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
                __set_bit(BTN_STYLUS, input_dev->keybit);
                __set_bit(BTN_STYLUS2, input_dev->keybit);
+
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+               break;
+
+       case PTU:
+               __set_bit(BTN_STYLUS2, input_dev->keybit);
                /* fall through */
 
        case PENPARTNER:
+               __set_bit(BTN_TOOL_PEN, input_dev->keybit);
                __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+               __set_bit(BTN_STYLUS, input_dev->keybit);
+
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                break;
 
        case BAMBOO_PT:
                __clear_bit(ABS_MISC, input_dev->absbit);
 
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
                if (features->device_type == BTN_TOOL_DOUBLETAP) {
                        __set_bit(BTN_LEFT, input_dev->keybit);
                        __set_bit(BTN_FORWARD, input_dev->keybit);
index c14412e..9941d39 100644 (file)
@@ -383,6 +383,8 @@ static int w8001_setup(struct w8001 *w8001)
        dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
        strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));
 
+       __set_bit(INPUT_PROP_DIRECT, dev->propbit);
+
        /* penabled? */
        error = w8001_command(w8001, W8001_CMD_QUERY, true);
        if (!error) {
index a14f8dc..0e4227f 100644 (file)
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
  * Writes the command to the IOMMUs command buffer and informs the
  * hardware about the new command.
  */
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+                                   struct iommu_cmd *cmd,
+                                   bool sync)
 {
        u32 left, tail, head, next_tail;
        unsigned long flags;
@@ -639,13 +641,18 @@ again:
        copy_cmd_to_buffer(iommu, cmd, tail);
 
        /* We need to sync now to make sure all commands are processed */
-       iommu->need_sync = true;
+       iommu->need_sync = sync;
 
        spin_unlock_irqrestore(&iommu->lock, flags);
 
        return 0;
 }
 
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+{
+       return iommu_queue_command_sync(iommu, cmd, true);
+}
+
 /*
  * This function queues a completion wait command into the command
  * buffer of an IOMMU
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
        build_completion_wait(&cmd, (u64)&sem);
 
-       ret = iommu_queue_command(iommu, &cmd);
+       ret = iommu_queue_command_sync(iommu, &cmd, false);
        if (ret)
                return ret;
 
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
 static void domain_flush_devices(struct protection_domain *domain)
 {
        struct iommu_dev_data *dev_data;
-       unsigned long flags;
-
-       spin_lock_irqsave(&domain->lock, flags);
 
        list_for_each_entry(dev_data, &domain->dev_list, list)
                device_flush_dte(dev_data);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 /****************************************************************************
index 3dc9bef..6dcc7e2 100644 (file)
@@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
                return ret;
        }
 
-       ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+       ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                printk(KERN_ERR "IOMMU: can't request irq\n");
        return ret;
index d87c9d0..328c64c 100644 (file)
@@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev,
 
        if (count == size) {
                led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
+               led_cdev->blink_delay_on = state;
                ret = count;
        }
 
@@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev,
 
        if (count == size) {
                led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
+               led_cdev->blink_delay_off = state;
                ret = count;
        }
 
index 0ce29b6..2f2da05 100644 (file)
@@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t;
 
 struct linear_private_data
 {
+       struct rcu_head         rcu;
        sector_t                array_sectors;
        dev_info_t              disks[0];
-       struct rcu_head         rcu;
 };
 
 
index 8e221a2..5404b22 100644 (file)
@@ -848,7 +848,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
        bio->bi_end_io = super_written;
 
        atomic_inc(&mddev->pending_writes);
-       submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
+       submit_bio(WRITE_FLUSH_FUA, bio);
 }
 
 void md_super_wait(mddev_t *mddev)
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
                        ret = 0;
        }
        rdev->sectors = rdev->sb_start;
+       /* Limit to 4TB as metadata cannot record more than that */
+       if (rdev->sectors >= (2ULL << 32))
+               rdev->sectors = (2ULL << 32) - 2;
 
-       if (rdev->sectors < sb->size * 2 && sb->level > 1)
+       if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
                /* "this cannot possibly happen" ... */
                ret = -EINVAL;
 
@@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->clevel[0] = 0;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
-               mddev->dev_sectors = sb->size * 2;
+               mddev->dev_sectors = ((sector_t)sb->size) * 2;
                mddev->events = ev1;
                mddev->bitmap_info.offset = 0;
                mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
        rdev->sb_start = calc_dev_sboffset(rdev);
        if (!num_sectors || num_sectors > rdev->sb_start)
                num_sectors = rdev->sb_start;
+       /* Limit to 4TB as metadata cannot record more than that.
+        * 4TB == 2^32 KB, or 2*2^32 sectors.
+        */
+       if (num_sectors >= (2ULL << 32))
+               num_sectors = (2ULL << 32) - 2;
        md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                       rdev->sb_page);
        md_super_wait(rdev->mddev);
@@ -1738,6 +1746,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->level = cpu_to_le32(mddev->level);
        sb->layout = cpu_to_le32(mddev->layout);
 
+       if (test_bit(WriteMostly, &rdev->flags))
+               sb->devflags |= WriteMostly1;
+       else
+               sb->devflags &= ~WriteMostly1;
+
        if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
                sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
                sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -2561,7 +2574,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
        int err = -EINVAL;
        if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
                md_error(rdev->mddev, rdev);
-               err = 0;
+               if (test_bit(Faulty, &rdev->flags))
+                       err = 0;
+               else
+                       err = -EBUSY;
        } else if (cmd_match(buf, "remove")) {
                if (rdev->raid_disk >= 0)
                        err = -EBUSY;
@@ -2584,7 +2600,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                err = 0;
        } else if (cmd_match(buf, "-blocked")) {
                if (!test_bit(Faulty, &rdev->flags) &&
-                   test_bit(BlockedBadBlocks, &rdev->flags)) {
+                   rdev->badblocks.unacked_exist) {
                        /* metadata handler doesn't understand badblocks,
                         * so we need to fail the device
                         */
@@ -5983,6 +5999,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
                return -ENODEV;
 
        md_error(mddev, rdev);
+       if (!test_bit(Faulty, &rdev->flags))
+               return -EBUSY;
        return 0;
 }
 
index 32323f0..f4622dd 100644
@@ -1099,12 +1099,11 @@ read_again:
                bio_list_add(&conf->pending_bio_list, mbio);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
-       r1_bio_write_done(r1_bio);
-
-       /* In case raid1d snuck in to freeze_array */
-       wake_up(&conf->wait_barrier);
-
+       /* Mustn't call r1_bio_write_done before this next test,
+        * as it could result in the bio being freed.
+        */
        if (sectors_handled < (bio->bi_size >> 9)) {
+               r1_bio_write_done(r1_bio);
                /* We need another r1_bio.  It has already been counted
                 * in bio->bi_phys_segments
                 */
@@ -1117,6 +1116,11 @@ read_again:
                goto retry_write;
        }
 
+       r1_bio_write_done(r1_bio);
+
+       /* In case raid1d snuck in to freeze_array */
+       wake_up(&conf->wait_barrier);
+
        if (do_sync || !bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
 
index 8b29cd4..d7a8468 100644
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
        md_write_end(r10_bio->mddev);
 }
 
+static void one_write_done(r10bio_t *r10_bio)
+{
+       if (atomic_dec_and_test(&r10_bio->remaining)) {
+               if (test_bit(R10BIO_WriteError, &r10_bio->state))
+                       reschedule_retry(r10_bio);
+               else {
+                       close_write(r10_bio);
+                       if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+                               reschedule_retry(r10_bio);
+                       else
+                               raid_end_bio_io(r10_bio);
+               }
+       }
+}
+
 static void raid10_end_write_request(struct bio *bio, int error)
 {
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
         * Let's see if all mirrored write operations have finished
         * already.
         */
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               if (test_bit(R10BIO_WriteError, &r10_bio->state))
-                       reschedule_retry(r10_bio);
-               else {
-                       close_write(r10_bio);
-                       if (test_bit(R10BIO_MadeGood, &r10_bio->state))
-                               reschedule_retry(r10_bio);
-                       else
-                               raid_end_bio_io(r10_bio);
-               }
-       }
+       one_write_done(r10_bio);
        if (dec_rdev)
                rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
@@ -1127,20 +1132,12 @@ retry_write:
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
 
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               /* This matches the end of raid10_end_write_request() */
-               bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
-                               r10_bio->sectors,
-                               !test_bit(R10BIO_Degraded, &r10_bio->state),
-                               0);
-               md_write_end(mddev);
-               raid_end_bio_io(r10_bio);
-       }
-
-       /* In case raid10d snuck in to freeze_array */
-       wake_up(&conf->wait_barrier);
+       /* Don't remove the bias on 'remaining' (one_write_done) until
+        * after checking if we need to go around again.
+        */
 
        if (sectors_handled < (bio->bi_size >> 9)) {
+               one_write_done(r10_bio);
                /* We need another r10_bio.  It has already been counted
                 * in bio->bi_phys_segments.
                 */
@@ -1154,6 +1151,10 @@ retry_write:
                r10_bio->state = 0;
                goto retry_write;
        }
+       one_write_done(r10_bio);
+
+       /* In case raid10d snuck in to freeze_array */
+       wake_up(&conf->wait_barrier);
 
        if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
index dbae459..43709fa 100644
@@ -3336,7 +3336,7 @@ static void handle_stripe(struct stripe_head *sh)
 
 finish:
        /* wait for this device to become unblocked */
-       if (unlikely(s.blocked_rdev))
+       if (conf->mddev->external && unlikely(s.blocked_rdev))
                md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
 
        if (s.handle_bad_blocks)
index 3db89e3..536c16c 100644
@@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties;
 static int vp7045_usb_probe(struct usb_interface *intf,
                const struct usb_device_id *id)
 {
-       struct dvb_usb_device *d;
-       int ret = dvb_usb_device_init(intf, &vp7045_properties,
-                                  THIS_MODULE, &d, adapter_nr);
-       if (ret)
-               return ret;
-
-       d->priv = kmalloc(20, GFP_KERNEL);
-       if (!d->priv) {
-               dvb_usb_device_exit(intf);
-               return -ENOMEM;
-       }
-
-       return ret;
-}
-
-static void vp7045_usb_disconnect(struct usb_interface *intf)
-{
-       struct dvb_usb_device *d = usb_get_intfdata(intf);
-       kfree(d->priv);
-       dvb_usb_device_exit(intf);
+       return dvb_usb_device_init(intf, &vp7045_properties,
+                                  THIS_MODULE, NULL, adapter_nr);
 }
 
 static struct usb_device_id vp7045_usb_table [] = {
@@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table);
 static struct dvb_usb_device_properties vp7045_properties = {
        .usb_ctrl = CYPRESS_FX2,
        .firmware = "dvb-usb-vp7045-01.fw",
-       .size_of_priv = sizeof(u8 *),
+       .size_of_priv = 20,
 
        .num_adapters = 1,
        .adapter = {
@@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = {
 static struct usb_driver vp7045_usb_driver = {
        .name           = "dvb_usb_vp7045",
        .probe          = vp7045_usb_probe,
-       .disconnect     = vp7045_usb_disconnect,
+       .disconnect     = dvb_usb_device_exit,
        .id_table       = vp7045_usb_table,
 };
 
index eae05b5..144f3f5 100644
@@ -618,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
 static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
 {
        DEFINE_IR_RAW_EVENT(rawir);
-       unsigned int count;
        u32 carrier;
        u8 sample;
        int i;
@@ -631,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
        if (nvt->carrier_detect_enabled)
                carrier = nvt_rx_carrier_detect(nvt);
 
-       count = nvt->pkts;
-       nvt_dbg_verbose("Processing buffer of len %d", count);
+       nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
 
        init_ir_raw_event(&rawir);
 
-       for (i = 0; i < count; i++) {
-               nvt->pkts--;
+       for (i = 0; i < nvt->pkts; i++) {
                sample = nvt->buf[i];
 
                rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
                rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
                                          * SAMPLE_PERIOD);
 
-               if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
-                       if (nvt->rawir.pulse == rawir.pulse)
-                               nvt->rawir.duration += rawir.duration;
-                       else {
-                               nvt->rawir.duration = rawir.duration;
-                               nvt->rawir.pulse = rawir.pulse;
-                       }
-                       continue;
-               }
-
-               rawir.duration += nvt->rawir.duration;
+               nvt_dbg("Storing %s with duration %d",
+                       rawir.pulse ? "pulse" : "space", rawir.duration);
 
-               init_ir_raw_event(&nvt->rawir);
-               nvt->rawir.duration = 0;
-               nvt->rawir.pulse = rawir.pulse;
-
-               if (sample == BUF_PULSE_BIT)
-                       rawir.pulse = false;
-
-               if (rawir.duration) {
-                       nvt_dbg("Storing %s with duration %d",
-                               rawir.pulse ? "pulse" : "space",
-                               rawir.duration);
-
-                       ir_raw_event_store_with_filter(nvt->rdev, &rawir);
-               }
+               ir_raw_event_store_with_filter(nvt->rdev, &rawir);
 
                /*
                 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
                 * indicates end of IR signal, but new data incoming. In both
                 * cases, it means we're ready to call ir_raw_event_handle
                 */
-               if ((sample == BUF_PULSE_BIT) && nvt->pkts) {
+               if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
                        nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
                        ir_raw_event_handle(nvt->rdev);
                }
        }
 
+       nvt->pkts = 0;
+
        nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
        ir_raw_event_handle(nvt->rdev);
 
-       if (nvt->pkts) {
-               nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
-               nvt->pkts = 0;
-       }
-
        nvt_dbg_verbose("%s done", __func__);
 }
 
@@ -1048,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 
        spin_lock_init(&nvt->nvt_lock);
        spin_lock_init(&nvt->tx.lock);
-       init_ir_raw_event(&nvt->rawir);
 
        ret = -EBUSY;
        /* now claim resources */
index 1241fc8..0d5e087 100644
@@ -67,7 +67,6 @@ static int debug;
 struct nvt_dev {
        struct pnp_dev *pdev;
        struct rc_dev *rdev;
-       struct ir_raw_event rawir;
 
        spinlock_t nvt_lock;
 
index 0800433..18305c8 100644
@@ -2858,7 +2858,6 @@ static void ov7xx0_configure(struct sd *sd)
                        case 0x60:
                                PDEBUG(D_PROBE, "Sensor is a OV7660");
                                sd->sensor = SEN_OV7660;
-                               sd->invert_led = 0;
                                break;
                        default:
                                PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
@@ -3337,7 +3336,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
        case BRIDGE_OV519:
                cam->cam_mode = ov519_vga_mode;
                cam->nmodes = ARRAY_SIZE(ov519_vga_mode);
-               sd->invert_led = !sd->invert_led;
                break;
        case BRIDGE_OVFX2:
                cam->cam_mode = ov519_vga_mode;
@@ -5005,24 +5003,24 @@ static const struct sd_desc sd_desc = {
 /* -- module initialisation -- */
 static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
-       {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
-       {USB_DEVICE(0x041e, 0x405f),
+       {USB_DEVICE(0x041e, 0x4052),
                .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+       {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
-       {USB_DEVICE(0x041e, 0x4064),
-               .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+       {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 },
-       {USB_DEVICE(0x041e, 0x4068),
+       {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
+       {USB_DEVICE(0x045e, 0x028c),
                .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
-       {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
-       {USB_DEVICE(0x054c, 0x0155),
-               .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+       {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
        {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
-       {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
-       {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
+       {USB_DEVICE(0x05a9, 0x0519),
+               .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+       {USB_DEVICE(0x05a9, 0x0530),
+               .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
        {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 },
        {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
index 81b8a60..c477ad1 100644
@@ -2386,7 +2386,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
                reg_w1(gspca_dev, 0x01, 0x22);
                msleep(100);
                reg01 = SCL_SEL_OD | S_PDN_INV;
-               reg17 &= MCK_SIZE_MASK;
+               reg17 &= ~MCK_SIZE_MASK;
                reg17 |= 0x04;          /* clock / 4 */
                break;
        }
@@ -2532,6 +2532,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
                if (!mode) {                    /* if 640x480 */
                        reg17 &= ~MCK_SIZE_MASK;
                        reg17 |= 0x04;          /* clock / 4 */
+               } else {
+                       reg01 &= ~SYS_SEL_48M;  /* clk 24Mz */
+                       reg17 &= ~MCK_SIZE_MASK;
+                       reg17 |= 0x02;          /* clock / 2 */
                }
                break;
        case SENSOR_OV7630:
index e9a0e94..8c70e64 100644
@@ -338,7 +338,7 @@ int pwc_init_controls(struct pwc_device *pdev)
        if (pdev->restore_factory)
                pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;
 
-       if (!pdev->features & FEATURE_MOTOR_PANTILT)
+       if (!(pdev->features & FEATURE_MOTOR_PANTILT))
                return hdl->error;
 
        /* Motor pan / tilt / reset */
index 85d3048..bb7f17f 100644
@@ -1332,6 +1332,8 @@ static __devinit bool viacam_serial_is_enabled(void)
        struct pci_bus *pbus = pci_find_bus(0, 0);
        u8 cbyte;
 
+       if (!pbus)
+               return false;
        pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
                        VIACAM_SERIAL_CREG, &cbyte);
        if ((cbyte & VIACAM_SERIAL_BIT) == 0)
index 5d1fca0..f83103b 100644
@@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
        max8997->dev = &i2c->dev;
        max8997->i2c = i2c;
        max8997->type = id->driver_data;
+       max8997->irq = i2c->irq;
 
        if (!pdata)
                goto err;
 
+       max8997->irq_base = pdata->irq_base;
+       max8997->ono = pdata->ono;
        max8997->wakeup = pdata->wakeup;
 
        mutex_init(&max8997->iolock);
@@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
 
        pm_runtime_set_active(max8997->dev);
 
+       max8997_irq_init(max8997);
+
        mfd_add_devices(max8997->dev, -1, max8997_devs,
                        ARRAY_SIZE(max8997_devs),
                        NULL, 0);
index 29601e7..86e1458 100644
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
                                | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
                                | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
 
-                       reg |= (1 << (i + 1));
                } else
                        continue;
 
index 2bfad5c..a56be93 100644
@@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
        switch (tps65910_chip_id(tps65910)) {
        case TPS65910:
                tps65910->irq_num = TPS65910_NUM_IRQ;
+               break;
        case TPS65911:
                tps65910->irq_num = TPS65911_NUM_IRQ;
+               break;
        }
 
        /* Register with genirq */
index b5d598c..7cbf2aa 100644
@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
        u8 ch_msb, ch_lsb;
        int ret;
 
-       if (!req)
+       if (!req || !twl4030_madc)
                return -EINVAL;
+
        mutex_lock(&twl4030_madc->lock);
        if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
                ret = -EINVAL;
@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
        if (!madc)
                return -ENOMEM;
 
+       madc->dev = &pdev->dev;
+
        /*
         * Phoenix provides 2 interrupt lines. The first one is connected to
         * the OMAP. The other one can be connected to the other processor such
index ebf99be..d584f6b 100644
@@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)
        return ret;
 }
 
-static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
+static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
 {
        if (db == WM8350_GPIO_DEBOUNCE_ON)
                return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE,
@@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,
                goto err;
        if (gpio_set_polarity(wm8350, gpio, pol))
                goto err;
-       if (gpio_set_debounce(wm8350, gpio, debounce))
+       if (wm8350_gpio_set_debounce(wm8350, gpio, debounce))
                goto err;
        if (gpio_set_dir(wm8350, gpio, dir))
                goto err;
index 06df187..0b56e3f 100644
@@ -165,6 +165,11 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc,
 static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
                                             const char *thread_name)
 {
+       /*
+        * Since we access the comm member in current's task_struct, we only
+        * need to be as large as what 'comm' in that structure is.
+        */
+       char comm[TASK_COMM_LEN];
        struct pti_masterchannel mccontrol = {.master = CONTROL_ID,
                                              .channel = 0};
        const char *thread_name_p;
@@ -172,13 +177,6 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
        u8 control_frame[CONTROL_FRAME_LEN];
 
        if (!thread_name) {
-               /*
-                * Since we access the comm member in current's task_struct,
-                * we only need to be as large as what 'comm' in that
-                * structure is.
-                */
-               char comm[TASK_COMM_LEN];
-
                if (!in_interrupt())
                        get_task_comm(comm, current);
                else
index 91a0a74..b27b940 100644
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
                if (mrq->done)
                        mrq->done(mrq);
 
-               mmc_host_clk_gate(host);
+               mmc_host_clk_release(host);
        }
 }
 
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
                        mrq->stop->mrq = mrq;
                }
        }
-       mmc_host_clk_ungate(host);
+       mmc_host_clk_hold(host);
        led_trigger_event(host->led, LED_FULL);
        host->ops->request(host, mrq);
 }
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
  */
 void mmc_set_chip_select(struct mmc_host *host, int mode)
 {
+       mmc_host_clk_hold(host);
        host->ios.chip_select = mode;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
  * Sets the host clock to the highest possible frequency that
  * is below "hz".
  */
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
 {
        WARN_ON(hz < host->f_min);
 
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
        mmc_set_ios(host);
 }
 
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+       mmc_host_clk_hold(host);
+       __mmc_set_clock(host, hz);
+       mmc_host_clk_release(host);
+}
+
 #ifdef CONFIG_MMC_CLKGATE
 /*
  * This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
        if (host->clk_old) {
                BUG_ON(host->ios.clock);
                /* This call will also set host->clk_gated to false */
-               mmc_set_clock(host, host->clk_old);
+               __mmc_set_clock(host, host->clk_old);
        }
 }
 
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
  */
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 {
+       mmc_host_clk_hold(host);
        host->ios.bus_mode = mode;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
  */
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 {
+       mmc_host_clk_hold(host);
        host->ios.bus_width = width;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
 
                ocr &= 3 << bit;
 
+               mmc_host_clk_hold(host);
                host->ios.vdd = bit;
                mmc_set_ios(host);
+               mmc_host_clk_release(host);
        } else {
                pr_warning("%s: host doesn't support card's voltages\n",
                                mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
  */
 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 {
+       mmc_host_clk_hold(host);
        host->ios.timing = timing;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
  */
 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
 {
+       mmc_host_clk_hold(host);
        host->ios.drv_type = drv_type;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
 {
        int bit;
 
+       mmc_host_clk_hold(host);
+
        /* If ocr is set, we use it */
        if (host->ocr)
                bit = ffs(host->ocr) - 1;
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
         * time required to reach a stable voltage.
         */
        mmc_delay(10);
+
+       mmc_host_clk_release(host);
 }
 
 static void mmc_power_off(struct mmc_host *host)
 {
+       mmc_host_clk_hold(host);
+
        host->ios.clock = 0;
        host->ios.vdd = 0;
 
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);
+
+       mmc_host_clk_release(host);
 }
 
 /*
index b29d3e8..793d0a0 100644
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
 }
 
 /**
- *     mmc_host_clk_ungate - ungate hardware MCI clocks
+ *     mmc_host_clk_hold - ungate hardware MCI clocks
  *     @host: host to ungate.
  *
  *     Makes sure the host ios.clock is restored to a non-zero value
  *     past this call. Increase clock reference count and ungate clock
  *     if we're the first user.
  */
-void mmc_host_clk_ungate(struct mmc_host *host)
+void mmc_host_clk_hold(struct mmc_host *host)
 {
        unsigned long flags;
 
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
 }
 
 /**
- *     mmc_host_clk_gate - gate off hardware MCI clocks
+ *     mmc_host_clk_release - gate off hardware MCI clocks
  *     @host: host to gate.
  *
  *     Calls the host driver with ios.clock set to zero as often as possible
  *     in order to gate off hardware MCI clocks. Decrease clock reference
  *     count and schedule disabling of clock.
  */
-void mmc_host_clk_gate(struct mmc_host *host)
+void mmc_host_clk_release(struct mmc_host *host)
 {
        unsigned long flags;
 
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
        host->clk_requests--;
        if (mmc_host_may_gate_card(host->card) &&
            !host->clk_requests)
-               schedule_work(&host->clk_gate_work);
+               queue_work(system_nrt_wq, &host->clk_gate_work);
        spin_unlock_irqrestore(&host->clk_lock, flags);
 }
 
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
        if (cancel_work_sync(&host->clk_gate_work))
                mmc_host_clk_gate_delayed(host);
        if (host->clk_gated)
-               mmc_host_clk_ungate(host);
+               mmc_host_clk_hold(host);
        /* There should be only one user now */
        WARN_ON(host->clk_requests > 1);
 }
index de199f9..fb8a5cd 100644
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
 #ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_ungate(struct mmc_host *host);
-void mmc_host_clk_gate(struct mmc_host *host);
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
 unsigned int mmc_host_clk_rate(struct mmc_host *host);
 
 #else
-static inline void mmc_host_clk_ungate(struct mmc_host *host)
+static inline void mmc_host_clk_hold(struct mmc_host *host)
 {
 }
 
-static inline void mmc_host_clk_gate(struct mmc_host *host)
+static inline void mmc_host_clk_release(struct mmc_host *host)
 {
 }
 
index 633975f..0370e03 100644
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
        return 0;
 }
 
-static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+static void sd_update_bus_speed_mode(struct mmc_card *card)
 {
-       unsigned int bus_speed = 0, timing = 0;
-       int err;
-
        /*
         * If the host doesn't support any of the UHS-I modes, fallback on
         * default speed.
         */
        if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
-           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
-               return 0;
+           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
+               card->sd_bus_speed = 0;
+               return;
+       }
 
        if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
            (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
-                       bus_speed = UHS_SDR104_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR104;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
        } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
                   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
-                       bus_speed = UHS_DDR50_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_DDR50;
-                       card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+                       card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
                    SD_MODE_UHS_SDR50)) {
-                       bus_speed = UHS_SDR50_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR50;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
                   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
-                       bus_speed = UHS_SDR25_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR25;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
                    MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
                    SD_MODE_UHS_SDR12)) {
-                       bus_speed = UHS_SDR12_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR12;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+       }
+}
+
+static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+{
+       int err;
+       unsigned int timing = 0;
+
+       switch (card->sd_bus_speed) {
+       case UHS_SDR104_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR104;
+               card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+               break;
+       case UHS_DDR50_BUS_SPEED:
+               timing = MMC_TIMING_UHS_DDR50;
+               card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+               break;
+       case UHS_SDR50_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR50;
+               card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+               break;
+       case UHS_SDR25_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR25;
+               card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+               break;
+       case UHS_SDR12_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR12;
+               card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+               break;
+       default:
+               return 0;
        }
 
-       card->sd_bus_speed = bus_speed;
-       err = mmc_sd_switch(card, 1, 0, bus_speed, status);
+       err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
        if (err)
                return err;
 
-       if ((status[16] & 0xF) != bus_speed)
+       if ((status[16] & 0xF) != card->sd_bus_speed)
                printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
                        mmc_hostname(card->host));
        else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
                mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
        }
 
+       /*
+        * Select the bus speed mode depending on host
+        * and card capability.
+        */
+       sd_update_bus_speed_mode(card);
+
        /* Set the driver strength for the card */
        err = sd_select_driver_type(card, status);
        if (err)
                goto out;
 
-       /* Set bus speed mode of the card */
-       err = sd_set_bus_speed_mode(card, status);
+       /* Set current limit for the card */
+       err = sd_set_current_limit(card, status);
        if (err)
                goto out;
 
-       /* Set current limit for the card */
-       err = sd_set_current_limit(card, status);
+       /* Set bus speed mode of the card */
+       err = sd_set_bus_speed_mode(card, status);
        if (err)
                goto out;
 
index 0e9780f..4dc0028 100644
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/gpio.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
index 2bd7bf4..fe886d6 100644
@@ -302,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
                ctrl &= ~SDHCI_CTRL_8BITBUS;
                break;
        default:
+               ctrl &= ~SDHCI_CTRL_4BITBUS;
+               ctrl &= ~SDHCI_CTRL_8BITBUS;
                break;
        }
 
index 774f643..0c4a672 100644
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
        mmc_data->hclk = clk_get_rate(priv->clk);
        mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
        mmc_data->get_cd = sh_mobile_sdhi_get_cd;
-       if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
-               mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
        mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
        if (p) {
                mmc_data->flags = p->tmio_flags;
+               if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
+                       mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
                mmc_data->ocr_mask = p->tmio_ocr_mask;
                mmc_data->capabilities |= p->tmio_caps;
 
index 65b5b76..64fbb00 100644
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
 
 #define ubi_dbg_msg(fmt, ...) do {                                           \
        if (0)                                                               \
-               pr_debug(fmt "\n", ##__VA_ARGS__);                           \
+               printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__);                  \
 } while (0)
 
 #define dbg_msg(fmt, ...)  ubi_dbg_msg(fmt, ##__VA_ARGS__)
index 52fe21e..3b1416e 100644
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
        struct net_device *dev = (struct net_device *)data;
        struct dev_priv *priv = netdev_priv(dev);
        unsigned int lnkstat, carrier;
+       unsigned long flags;
 
+       spin_lock_irqsave(&priv->chip_lock, flags);
        lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
+       spin_unlock_irqrestore(&priv->chip_lock, flags);
        carrier = netif_carrier_ok(dev);
 
        if (lnkstat && !carrier) {
index 26f1ab8..e293a79 100644
@@ -1632,7 +1632,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
        u32 cmd, beacon0_valid, beacon1_valid;
 
        if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
-           !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
+           !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) &&
+           !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
                return;
 
        /* This is the bottom half of the asynchronous beacon update. */
index 3774dd0..ef9ad79 100644
@@ -1901,17 +1901,19 @@ static void ipw2100_down(struct ipw2100_priv *priv)
 
 /* Called by register_netdev() */
 static int ipw2100_net_init(struct net_device *dev)
+{
+       struct ipw2100_priv *priv = libipw_priv(dev);
+
+       return ipw2100_up(priv, 1);
+}
+
+static int ipw2100_wdev_init(struct net_device *dev)
 {
        struct ipw2100_priv *priv = libipw_priv(dev);
        const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
        struct wireless_dev *wdev = &priv->ieee->wdev;
-       int ret;
        int i;
 
-       ret = ipw2100_up(priv, 1);
-       if (ret)
-               return ret;
-
        memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
 
        /* fill-out priv->ieee->bg_band */
@@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
                       "Error calling register_netdev.\n");
                goto fail;
        }
+       registered = 1;
+
+       err = ipw2100_wdev_init(dev);
+       if (err)
+               goto fail;
 
        mutex_lock(&priv->action_mutex);
-       registered = 1;
 
        IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
 
@@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
 
       fail_unlock:
        mutex_unlock(&priv->action_mutex);
-
+       wiphy_unregister(priv->ieee->wdev.wiphy);
+       kfree(priv->ieee->bg_band.channels);
       fail:
        if (dev) {
                if (registered)
index 87813c3..4ffebed 100644
@@ -11424,17 +11424,24 @@ static void ipw_bg_down(struct work_struct *work)
 
 /* Called by register_netdev() */
 static int ipw_net_init(struct net_device *dev)
+{
+       int rc = 0;
+       struct ipw_priv *priv = libipw_priv(dev);
+
+       mutex_lock(&priv->mutex);
+       if (ipw_up(priv))
+               rc = -EIO;
+       mutex_unlock(&priv->mutex);
+
+       return rc;
+}
+
+static int ipw_wdev_init(struct net_device *dev)
 {
        int i, rc = 0;
        struct ipw_priv *priv = libipw_priv(dev);
        const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
        struct wireless_dev *wdev = &priv->ieee->wdev;
-       mutex_lock(&priv->mutex);
-
-       if (ipw_up(priv)) {
-               rc = -EIO;
-               goto out;
-       }
 
        memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
 
@@ -11519,13 +11526,9 @@ static int ipw_net_init(struct net_device *dev)
        set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
 
        /* With that information in place, we can now register the wiphy... */
-       if (wiphy_register(wdev->wiphy)) {
+       if (wiphy_register(wdev->wiphy))
                rc = -EIO;
-               goto out;
-       }
-
 out:
-       mutex_unlock(&priv->mutex);
        return rc;
 }
 
@@ -11832,14 +11835,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
                goto out_remove_sysfs;
        }
 
+       err = ipw_wdev_init(net_dev);
+       if (err) {
+               IPW_ERROR("failed to register wireless device\n");
+               goto out_unregister_netdev;
+       }
+
 #ifdef CONFIG_IPW2200_PROMISCUOUS
        if (rtap_iface) {
                err = ipw_prom_alloc(priv);
                if (err) {
                        IPW_ERROR("Failed to register promiscuous network "
                                  "device (error %d).\n", err);
-                       unregister_netdev(priv->net_dev);
-                       goto out_remove_sysfs;
+                       wiphy_unregister(priv->ieee->wdev.wiphy);
+                       kfree(priv->ieee->a_band.channels);
+                       kfree(priv->ieee->bg_band.channels);
+                       goto out_unregister_netdev;
                }
        }
 #endif
@@ -11851,6 +11862,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
 
        return 0;
 
+      out_unregister_netdev:
+       unregister_netdev(priv->net_dev);
       out_remove_sysfs:
        sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
       out_release_irq:
index b0ae4de..f9c3cd9 100644
@@ -2140,7 +2140,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
                    IEEE80211_HW_SPECTRUM_MGMT |
                    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
 
+       /*
+        * Including the following line will crash some AP's.  This
+        * workaround removes the stimulus which causes the crash until
+        * the AP software can be fixed.
        hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+        */
 
        hw->flags |= IEEE80211_HW_SUPPORTS_PS |
                     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
index ef67f67..0019dfd 100644
@@ -3697,14 +3697,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
        rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
 
        /* Apparently the data is read from end to start */
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3,
-                                       (u32 *)&rt2x00dev->eeprom[i]);
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2,
-                                       (u32 *)&rt2x00dev->eeprom[i + 2]);
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1,
-                                       (u32 *)&rt2x00dev->eeprom[i + 4]);
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0,
-                                       (u32 *)&rt2x00dev->eeprom[i + 6]);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
+       /* The returned value is in CPU order, but eeprom is le */
+       rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
+       *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
+       *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
+       *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
 
        mutex_unlock(&rt2x00dev->csr_mutex);
 }
@@ -3870,19 +3871,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
                return -ENODEV;
        }
 
-       if (!rt2x00_rf(rt2x00dev, RF2820) &&
-           !rt2x00_rf(rt2x00dev, RF2850) &&
-           !rt2x00_rf(rt2x00dev, RF2720) &&
-           !rt2x00_rf(rt2x00dev, RF2750) &&
-           !rt2x00_rf(rt2x00dev, RF3020) &&
-           !rt2x00_rf(rt2x00dev, RF2020) &&
-           !rt2x00_rf(rt2x00dev, RF3021) &&
-           !rt2x00_rf(rt2x00dev, RF3022) &&
-           !rt2x00_rf(rt2x00dev, RF3052) &&
-           !rt2x00_rf(rt2x00dev, RF3320) &&
-           !rt2x00_rf(rt2x00dev, RF5370) &&
-           !rt2x00_rf(rt2x00dev, RF5390)) {
-               ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
+       switch (rt2x00dev->chip.rf) {
+       case RF2820:
+       case RF2850:
+       case RF2720:
+       case RF2750:
+       case RF3020:
+       case RF2020:
+       case RF3021:
+       case RF3022:
+       case RF3052:
+       case RF3320:
+       case RF5370:
+       case RF5390:
+               break;
+       default:
+               ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n",
+                     rt2x00dev->chip.rf);
                return -ENODEV;
        }
 
index 753b21a..3ffd9c1 100644
@@ -169,7 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
                        (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
                return;
 
-       pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss);
+       if (dev->bus && dev->bus->self)
+               pcie_bus_configure_settings(dev->bus,
+                                           dev->bus->self->pcie_mpss);
 
        memset(&hpp, 0, sizeof(hpp));
        ret = pci_get_hp_params(dev, &hpp);
index 0ce6742..4e84fd4 100644
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
 
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
 
 /*
  * The default CLS is used if arch didn't set CLS explicitly and not
index 8473727..f3f94a5 100644
@@ -1351,7 +1351,8 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)
         * will occur as normal.
         */
        if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
-           dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
+            (dev->bus->self &&
+             dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))
                *smpss = 0;
 
        if (*smpss > dev->pcie_mpss)
@@ -1396,34 +1397,37 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
 
 static void pcie_write_mrrs(struct pci_dev *dev, int mps)
 {
-       int rc, mrrs;
+       int rc, mrrs, dev_mpss;
 
-       if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
-               int dev_mpss = 128 << dev->pcie_mpss;
+       /* In the "safe" case, do not configure the MRRS.  There appear to be
+        * issues with setting MRRS to 0 on a number of devices.
+        */
 
-               /* For Max performance, the MRRS must be set to the largest
-                * supported value.  However, it cannot be configured larger
-                * than the MPS the device or the bus can support.  This assumes
-                * that the largest MRRS available on the device cannot be
-                * smaller than the device MPSS.
-                */
-               mrrs = mps < dev_mpss ? mps : dev_mpss;
-       } else
-               /* In the "safe" case, configure the MRRS for fairness on the
-                * bus by making all devices have the same size
-                */
-               mrrs = mps;
+       if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
+               return;
 
+       dev_mpss = 128 << dev->pcie_mpss;
+
+       /* For Max performance, the MRRS must be set to the largest supported
+        * value.  However, it cannot be configured larger than the MPS the
+        * device or the bus can support.  This assumes that the largest MRRS
+        * available on the device cannot be smaller than the device MPSS.
+        */
+       mrrs = min(mps, dev_mpss);
 
        /* MRRS is a R/W register.  Invalid values can be written, but a
-        * subsiquent read will verify if the value is acceptable or not.
+        * subsequent read will verify if the value is acceptable or not.
         * If the MRRS value provided is not acceptable (e.g., too large),
         * shrink the value until it is acceptable to the HW.
         */
        while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
+               dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
+                        " to %d.  If any issues are encountered, please try "
+                        "running with pci=pcie_bus_safe\n", mrrs);
                rc = pcie_set_readrq(dev, mrrs);
                if (rc)
-                       dev_err(&dev->dev, "Failed attempting to set the MRRS\n");
+                       dev_err(&dev->dev,
+                               "Failed attempting to set the MRRS\n");
 
                mrrs /= 2;
        }
@@ -1436,13 +1440,13 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
        if (!pci_is_pcie(dev))
                return 0;
 
-       dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+       dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
                 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
 
        pcie_write_mps(dev, mps);
        pcie_write_mrrs(dev, mps);
 
-       dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+       dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
                 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
 
        return 0;
@@ -1456,9 +1460,6 @@ void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
 {
        u8 smpss = mpss;
 
-       if (!bus->self)
-               return;
-
        if (!pci_is_pcie(bus->self))
                return;
 
index 335551d..14a42a1 100644
@@ -36,6 +36,7 @@
  */
 struct ep93xx_rtc {
        void __iomem    *mmio_base;
+       struct rtc_device *rtc;
 };
 
 static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
 {
        struct ep93xx_rtc *ep93xx_rtc;
        struct resource *res;
-       struct rtc_device *rtc;
        int err;
 
        ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
                return -ENXIO;
 
        pdev->dev.platform_data = ep93xx_rtc;
-       platform_set_drvdata(pdev, rtc);
+       platform_set_drvdata(pdev, ep93xx_rtc);
 
-       rtc = rtc_device_register(pdev->name,
+       ep93xx_rtc->rtc = rtc_device_register(pdev->name,
                                &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
-       if (IS_ERR(rtc)) {
-               err = PTR_ERR(rtc);
+       if (IS_ERR(ep93xx_rtc->rtc)) {
+               err = PTR_ERR(ep93xx_rtc->rtc);
                goto exit;
        }
 
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
        return 0;
 
 fail:
-       rtc_device_unregister(rtc);
+       rtc_device_unregister(ep93xx_rtc->rtc);
 exit:
        platform_set_drvdata(pdev, NULL);
        pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:
 
 static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
 {
-       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
 
        sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
        platform_set_drvdata(pdev, NULL);
-       rtc_device_unregister(rtc);
+       rtc_device_unregister(ep93xx_rtc->rtc);
        pdev->dev.platform_data = NULL;
 
        return 0;
index 2dd3c01..d93a960 100644
@@ -35,6 +35,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/rtc.h>
+#include <linux/sched.h>
 #include <linux/workqueue.h>
 
 /* DryIce Register Definitions */
index 075f170..c4cf057 100644
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
        time -= tm->tm_hour * 3600;
        tm->tm_min = time / 60;
        tm->tm_sec = time - tm->tm_min * 60;
+
+       tm->tm_isdst = 0;
 }
 EXPORT_SYMBOL(rtc_time_to_tm);
 
index 4e7c04e..7639ab9 100644
@@ -51,6 +51,27 @@ static enum s3c_cpu_type s3c_rtc_cpu_type;
 
 static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
 
+static void s3c_rtc_alarm_clk_enable(bool enable)
+{
+       static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock);
+       static bool alarm_clk_enabled;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags);
+       if (enable) {
+               if (!alarm_clk_enabled) {
+                       clk_enable(rtc_clk);
+                       alarm_clk_enabled = true;
+               }
+       } else {
+               if (alarm_clk_enabled) {
+                       clk_disable(rtc_clk);
+                       alarm_clk_enabled = false;
+               }
+       }
+       spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags);
+}
+
 /* IRQ Handlers */
 
 static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
@@ -64,6 +85,9 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
                writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);
 
        clk_disable(rtc_clk);
+
+       s3c_rtc_alarm_clk_enable(false);
+
        return IRQ_HANDLED;
 }
 
@@ -97,6 +121,8 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
        writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
        clk_disable(rtc_clk);
 
+       s3c_rtc_alarm_clk_enable(enabled);
+
        return 0;
 }
 
index 9a81f77..20687d5 100644
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
        int res;
        u8 rd_reg;
 
-#ifdef CONFIG_LOCKDEP
-       /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
-        * we don't want and can't tolerate.  Although it might be
-        * friendlier not to borrow this thread context...
-        */
-       local_irq_enable();
-#endif
-
        res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
        if (res)
                goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
 static int __devinit twl_rtc_probe(struct platform_device *pdev)
 {
        struct rtc_device *rtc;
-       int ret = 0;
+       int ret = -EINVAL;
        int irq = platform_get_irq(pdev, 0);
        u8 rd_reg;
 
        if (irq <= 0)
-               return -EINVAL;
-
-       rtc = rtc_device_register(pdev->name,
-                                 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
-       if (IS_ERR(rtc)) {
-               ret = PTR_ERR(rtc);
-               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
-                       PTR_ERR(rtc));
-               goto out0;
-
-       }
-
-       platform_set_drvdata(pdev, rtc);
+               goto out1;
 
        ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
        if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        if (ret < 0)
                goto out1;
 
-       ret = request_irq(irq, twl_rtc_interrupt,
-                               IRQF_TRIGGER_RISING,
-                               dev_name(&rtc->dev), rtc);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "IRQ is not free.\n");
-               goto out1;
-       }
-
        if (twl_class_is_6030()) {
                twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
                        REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        /* Check RTC module status, Enable if it is off */
        ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
        if (ret < 0)
-               goto out2;
+               goto out1;
 
        if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
                dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
                rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
                ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
                if (ret < 0)
-                       goto out2;
+                       goto out1;
        }
 
        /* init cached IRQ enable bits */
        ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
        if (ret < 0)
+               goto out1;
+
+       rtc = rtc_device_register(pdev->name,
+                                 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
+       if (IS_ERR(rtc)) {
+               ret = PTR_ERR(rtc);
+               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
+                       PTR_ERR(rtc));
+               goto out1;
+       }
+
+       ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
+                                  IRQF_TRIGGER_RISING,
+                                  dev_name(&rtc->dev), rtc);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "IRQ is not free.\n");
                goto out2;
+       }
 
-       return ret;
+       platform_set_drvdata(pdev, rtc);
+       return 0;
 
 out2:
-       free_irq(irq, rtc);
-out1:
        rtc_device_unregister(rtc);
-out0:
+out1:
        return ret;
 }
 
index 9ae80cd..dba72a4 100644
@@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
        nopout_wqe->itt = ((u16)task->itt |
                           (ISCSI_TASK_TYPE_MPATH <<
                            ISCSI_TMF_REQUEST_TYPE_SHIFT));
-       nopout_wqe->ttt = nopout_hdr->ttt;
+       nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt);
        nopout_wqe->flags = 0;
        if (!unsol)
                nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
index ba710e3..5d0e9a2 100644
@@ -432,6 +432,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
        u8 flogi_maddr[ETH_ALEN];
        const struct net_device_ops *ops;
 
+       rtnl_lock();
+
        /*
         * Don't listen for Ethernet packets anymore.
         * synchronize_net() ensures that the packet handlers are not running
@@ -461,6 +463,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
                                        " specific feature for LLD.\n");
        }
 
+       rtnl_unlock();
+
        /* Release the self-reference taken during fcoe_interface_create() */
        fcoe_interface_put(fcoe);
 }
@@ -1951,11 +1955,8 @@ static void fcoe_destroy_work(struct work_struct *work)
        fcoe_if_destroy(port->lport);
 
        /* Do not tear down the fcoe interface for NPIV port */
-       if (!npiv) {
-               rtnl_lock();
+       if (!npiv)
                fcoe_interface_cleanup(fcoe);
-               rtnl_unlock();
-       }
 
        mutex_unlock(&fcoe_config_mutex);
 }
@@ -2009,8 +2010,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
                printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
                       netdev->name);
                rc = -EIO;
+               rtnl_unlock();
                fcoe_interface_cleanup(fcoe);
-               goto out_nodev;
+               goto out_nortnl;
        }
 
        /* Make this the "master" N_Port */
@@ -2027,6 +2029,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 
 out_nodev:
        rtnl_unlock();
+out_nortnl:
        mutex_unlock(&fcoe_config_mutex);
        return rc;
 }
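
fcoe_interface_cleanup() now takes the rtnl lock itself, so fcoe_destroy_work() drops its explicit rtnl_lock()/rtnl_unlock() pair and fcoe_create() must release the lock before calling the cleanup helper on its error path (hence the new out_nortnl label). A small userspace sketch of that "cleanup helper owns the lock" shape, with a pthread mutex standing in for rtnl and hypothetical function names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for rtnl */

    /* the cleanup helper owns the lock: callers must NOT hold cfg_lock */
    static void iface_cleanup(void)
    {
        pthread_mutex_lock(&cfg_lock);
        puts("tear down packet handlers under cfg_lock");
        pthread_mutex_unlock(&cfg_lock);
    }

    static int iface_create(void)
    {
        int failed = 1;                        /* pretend controller creation failed */

        pthread_mutex_lock(&cfg_lock);
        if (failed) {
            pthread_mutex_unlock(&cfg_lock);   /* drop the lock first ... */
            iface_cleanup();                   /* ... because cleanup retakes it */
            return -1;
        }
        pthread_mutex_unlock(&cfg_lock);
        return 0;
    }

    int main(void) { iface_create(); return 0; }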
index ec61bdb..b200b73 100644 (file)
@@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
        BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;
+
+       /*
+        * New physical devices won't have target/lun assigned yet
+        * so we need to preserve the values in the slot we are replacing.
+        */
+       if (new_entry->target == -1) {
+               new_entry->target = h->dev[entry]->target;
+               new_entry->lun = h->dev[entry]->lun;
+       }
+
        h->dev[entry] = new_entry;
        added[*nadded] = new_entry;
        (*nadded)++;
@@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
 }
 
 static int hpsa_update_device_info(struct ctlr_info *h,
-       unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
+       unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
+       unsigned char *is_OBDR_device)
 {
-#define OBDR_TAPE_INQ_SIZE 49
+
+#define OBDR_SIG_OFFSET 43
+#define OBDR_TAPE_SIG "$DR-10"
+#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
+#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
+
        unsigned char *inq_buff;
+       unsigned char *obdr_sig;
 
        inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
        if (!inq_buff)
@@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,
        else
                this_device->raid_level = RAID_UNKNOWN;
 
+       if (is_OBDR_device) {
+               /* See if this is a One-Button-Disaster-Recovery device
+                * by looking for "$DR-10" at offset 43 in inquiry data.
+                */
+               obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
+               *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
+                                       strncmp(obdr_sig, OBDR_TAPE_SIG,
+                                               OBDR_SIG_LEN) == 0);
+       }
+
        kfree(inq_buff);
        return 0;
 
@@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
                return 0;
        }
 
-       if (hpsa_update_device_info(h, scsi3addr, this_device))
+       if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
                return 0;
        (*nmsa2xxx_enclosures)++;
        hpsa_set_bus_target_lun(this_device, bus, target, 0);
@@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
         */
        struct ReportLUNdata *physdev_list = NULL;
        struct ReportLUNdata *logdev_list = NULL;
-       unsigned char *inq_buff = NULL;
        u32 nphysicals = 0;
        u32 nlogicals = 0;
        u32 ndev_allocated = 0;
@@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                GFP_KERNEL);
        physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
        logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
-       inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
        tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
 
-       if (!currentsd || !physdev_list || !logdev_list ||
-               !inq_buff || !tmpdevice) {
+       if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
                dev_err(&h->pdev->dev, "out of memory\n");
                goto out;
        }
@@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
        /* adjust our table of devices */
        nmsa2xxx_enclosures = 0;
        for (i = 0; i < nphysicals + nlogicals + 1; i++) {
-               u8 *lunaddrbytes;
+               u8 *lunaddrbytes, is_OBDR = 0;
 
                /* Figure out where the LUN ID info is coming from */
                lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
@@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                        continue;
 
                /* Get device type, vendor, model, device id */
-               if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
+               if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
+                                                       &is_OBDR))
                        continue; /* skip it if we can't talk to it. */
                figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
                        tmpdevice);
@@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                hpsa_set_bus_target_lun(this_device, bus, target, lun);
 
                switch (this_device->devtype) {
-               case TYPE_ROM: {
+               case TYPE_ROM:
                        /* We don't *really* support actual CD-ROM devices,
                         * just "One Button Disaster Recovery" tape drive
                         * which temporarily pretends to be a CD-ROM drive.
@@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                         * device by checking for "$DR-10" in bytes 43-48 of
                         * the inquiry data.
                         */
-                               char obdr_sig[7];
-#define OBDR_TAPE_SIG "$DR-10"
-                               strncpy(obdr_sig, &inq_buff[43], 6);
-                               obdr_sig[6] = '\0';
-                               if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
-                                       /* Not OBDR device, ignore it. */
-                                       break;
-                       }
-                       ncurrent++;
+                       if (is_OBDR)
+                               ncurrent++;
                        break;
                case TYPE_DISK:
                        if (i < nphysicals)
@@ -1947,7 +1965,6 @@ out:
        for (i = 0; i < ndev_allocated; i++)
                kfree(currentsd[i]);
        kfree(currentsd);
-       kfree(inq_buff);
        kfree(physdev_list);
        kfree(logdev_list);
 }
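
The hpsa changes fold One-Button Disaster Recovery detection into hpsa_update_device_info(): the INQUIRY buffer is sized to just cover the "$DR-10" signature at byte offset 43, callers that do not care pass NULL for is_OBDR_device, and the TYPE_ROM case simply tests the returned flag. A standalone sketch of the signature check on a raw inquiry buffer (userspace, fabricated buffer contents):

    #include <stdio.h>
    #include <string.h>

    #define OBDR_SIG_OFFSET    43
    #define OBDR_TAPE_SIG      "$DR-10"
    #define OBDR_SIG_LEN       (sizeof(OBDR_TAPE_SIG) - 1)
    #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)   /* 49 bytes */

    static int is_obdr(const unsigned char *inq)
    {
        /* bytes 43..48 of the INQUIRY data carry the OBDR signature */
        return memcmp(inq + OBDR_SIG_OFFSET, OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0;
    }

    int main(void)
    {
        unsigned char inq[OBDR_TAPE_INQ_SIZE] = { 0 };

        memcpy(inq + OBDR_SIG_OFFSET, OBDR_TAPE_SIG, OBDR_SIG_LEN);
        printf("OBDR device: %s\n", is_obdr(inq) ? "yes" : "no");
        return 0;
    }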
index 26072f1..6981b77 100644 (file)
@@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost)
                        break;
 
                case SCU_COMPLETION_TYPE_EVENT:
+                       sci_controller_event_completion(ihost, ent);
+                       break;
+
                case SCU_COMPLETION_TYPE_NOTIFY: {
                        event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
                                       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
@@ -1091,6 +1094,7 @@ static void isci_host_completion_routine(unsigned long data)
        struct isci_request *request;
        struct isci_request *next_request;
        struct sas_task     *task;
+       u16 active;
 
        INIT_LIST_HEAD(&completed_request_list);
        INIT_LIST_HEAD(&errored_request_list);
@@ -1181,6 +1185,13 @@ static void isci_host_completion_routine(unsigned long data)
                }
        }
 
+       /* the coalescence timeout doubles at each encoding step, so
+        * update it based on the ilog2 value of the outstanding requests
+        */
+       active = isci_tci_active(ihost);
+       writel(SMU_ICC_GEN_VAL(NUMBER, active) |
+              SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
+              &ihost->smu_registers->interrupt_coalesce_control);
 }
 
 /**
@@ -1471,7 +1482,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
        struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
 
        /* set the default interrupt coalescence number and timeout value. */
-       sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+       sci_controller_set_interrupt_coalescence(ihost, 0, 0);
 }
 
 static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
index 062101a..9f33831 100644 (file)
@@ -369,6 +369,9 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
 #define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
 #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
 
+/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
+#define ISCI_COALESCE_BASE 9
+
 /* expander attached sata devices require 3 rnc slots */
 static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
 {
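
With ISCI_COALESCE_BASE defined as 9 (roughly 3 to 5us of interrupt delay per encoding step, per the comment) and the ready-state default now left dynamic, the completion routine reprograms the coalesce-control register as base plus ilog2 of the outstanding command count, so the delay grows logarithmically with queue depth. A userspace sketch of that encoding, with ilog2 approximated by a shift loop:

    #include <stdio.h>

    #define ISCI_COALESCE_BASE 9    /* 9 == 3 to 5us interrupt delay per command */

    static unsigned int ilog2_u32(unsigned int v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned int active;

        /* each encoding step doubles the timeout, so depth maps through ilog2 */
        for (active = 1; active <= 256; active <<= 1)
            printf("active=%3u -> timer encoding %u\n",
                   active, ISCI_COALESCE_BASE + ilog2_u32(active));
        return 0;
    }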
index 61e0d09..29aa34e 100644 (file)
 #include <linux/firmware.h>
 #include <linux/efi.h>
 #include <asm/string.h>
+#include <scsi/scsi_host.h>
 #include "isci.h"
 #include "task.h"
 #include "probe_roms.h"
 
+#define MAJ 1
+#define MIN 0
+#define BUILD 0
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+       __stringify(BUILD)
+
+MODULE_VERSION(DRV_VERSION);
+
 static struct scsi_transport_template *isci_transport_template;
 
 static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
@@ -113,6 +122,22 @@ unsigned char max_concurr_spinup = 1;
 module_param(max_concurr_spinup, byte, 0);
 MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
 
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+       struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+struct device_attribute *isci_host_attrs[] = {
+       &dev_attr_isci_id,
+       NULL
+};
+
 static struct scsi_host_template isci_sht = {
 
        .module                         = THIS_MODULE,
@@ -138,6 +163,7 @@ static struct scsi_host_template isci_sht = {
        .slave_alloc                    = sas_slave_alloc,
        .target_destroy                 = sas_target_destroy,
        .ioctl                          = sas_ioctl,
+       .shost_attrs                    = isci_host_attrs,
 };
 
 static struct sas_domain_function_template isci_transport_ops  = {
@@ -232,17 +258,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
        return 0;
 }
 
-static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
-       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
-       struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
-
-       return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
-}
-
-static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
-
 static void isci_unregister(struct isci_host *isci_host)
 {
        struct Scsi_Host *shost;
@@ -251,7 +266,6 @@ static void isci_unregister(struct isci_host *isci_host)
                return;
 
        shost = isci_host->shost;
-       device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
 
        sas_unregister_ha(&isci_host->sas_ha);
 
@@ -415,14 +429,8 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
        if (err)
                goto err_shost_remove;
 
-       err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
-       if (err)
-               goto err_unregister_ha;
-
        return isci_host;
 
- err_unregister_ha:
-       sas_unregister_ha(&(isci_host->sas_ha));
  err_shost_remove:
        scsi_remove_host(shost);
  err_shost:
@@ -540,7 +548,8 @@ static __init int isci_init(void)
 {
        int err;
 
-       pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
+       pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
+               DRV_NAME, DRV_VERSION);
 
        isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
        if (!isci_transport_template)
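
Instead of creating and removing the isci_id sysfs file by hand around host registration, the attribute is now listed in a NULL-terminated device_attribute array wired into scsi_host_template.shost_attrs, so the SCSI midlayer manages its lifetime together with the Scsi_Host. A kernel-style sketch of that pattern with placeholder names (not the isci code itself):

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <scsi/scsi_host.h>

    static ssize_t demo_show_id(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        struct Scsi_Host *shost = class_to_shost(dev);

        return snprintf(buf, PAGE_SIZE, "%u\n", shost->host_no);
    }
    static DEVICE_ATTR(demo_id, S_IRUGO, demo_show_id, NULL);

    static struct device_attribute *demo_host_attrs[] = {
        &dev_attr_demo_id,
        NULL,                           /* array must be NULL-terminated */
    };

    static struct scsi_host_template demo_sht = {
        .module      = THIS_MODULE,
        .name        = "demo",
        .shost_attrs = demo_host_attrs, /* no device_create_file() needed */
    };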
index 79313a7..430fc8f 100644 (file)
@@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
        u32 parity_count = 0;
        u32 llctl, link_rate;
        u32 clksm_value = 0;
+       u32 sp_timeouts = 0;
 
        iphy->link_layer_registers = reg;
 
@@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
        llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
        writel(llctl, &iphy->link_layer_registers->link_layer_control);
 
+       sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+
+       /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
+       sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
+
+       /* Set RATE_CHANGE timeout value to 0x3B (59us).  This ensures SCU can
+        * lock with 3Gb drive when SCU max rate is set to 1.5Gb.
+        */
+       sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
+
+       writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+
        if (is_a2(ihost->pdev)) {
                /* Program the max ARB time for the PHY to 700us so we inter-operate with
                 * the PMC expander which shuts down PHYs if the expander PHY generates too
index 9b266c7..00afc73 100644 (file)
@@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers {
 #define SCU_AFE_XCVRCR_OFFSET       0x00DC
 #define SCU_AFE_LUTCR_OFFSET        0x00E0
 
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT          (0UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK           (0x000000FFUL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT                 (8UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK                  (0x0000FF00UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT         (16UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK          (0x00FF0000UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT              (24UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK               (0xFF000000UL)
+
+#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value)
+
 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT                  (0)
 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK                   (0x00000003)
 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1                   (0)
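
The new SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_* shift/mask pairs plug into the driver's existing SCU_GEN_VALUE-style helper, and the phy init path uses them for a read-modify-write: clear the RATE_CHANGE byte (default 0x36, 54us) and program 0x3B (59us). A self-contained userspace sketch of the same mask/shift manipulation on a plain 32-bit value (the register contents are invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define RATE_CHANGE_SHIFT 24
    #define RATE_CHANGE_MASK  0xFF000000UL

    /* same shape as the driver's GEN_VAL helpers: shift into the field, mask the rest */
    #define GEN_VAL(val) (((uint32_t)(val) << RATE_CHANGE_SHIFT) & RATE_CHANGE_MASK)

    int main(void)
    {
        uint32_t sp_timeouts = 0x36002710;  /* pretend readl(): RATE_CHANGE = 0x36 (54us) */

        sp_timeouts &= ~GEN_VAL(0xFF);      /* clear the old encoding */
        sp_timeouts |= GEN_VAL(0x3B);       /* program 0x3B (59us) */
        printf("sas_phy_timeouts = 0x%08x\n", sp_timeouts);   /* 0x3b002710 */
        return 0;
    }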
index a46e07a..b5d3a8c 100644 (file)
@@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq)
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                return SCI_SUCCESS;
        case SCI_REQ_TASK_WAIT_TC_RESP:
+               /* The task frame was already confirmed to have been
+                * sent by the SCU HW.  Since the state machine is
+                * now only waiting for the task response itself,
+                * abort the request and complete it immediately
+                * and don't wait for the task response.
+                */
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_ABORTING:
-               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
-               return SCI_SUCCESS;
+               /* If a request has a termination requested twice, return
+                * a failure indication, since HW confirmation of the first
+                * abort is still outstanding.
+                */
        case SCI_REQ_COMPLETED:
        default:
                dev_warn(&ireq->owning_controller->pdev->dev,
@@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion(
        }
 }
 
-static void isci_request_process_stp_response(struct sas_task *task,
-                                             void *response_buffer)
+static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
 {
-       struct dev_to_host_fis *d2h_reg_fis = response_buffer;
        struct task_status_struct *ts = &task->task_status;
        struct ata_task_resp *resp = (void *)&ts->buf[0];
 
-       resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
-       memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
+       resp->frame_len = sizeof(*fis);
+       memcpy(resp->ending_fis, fis, sizeof(*fis));
        ts->buf_valid_size = sizeof(*resp);
 
-       /**
-        * If the device fault bit is set in the status register, then
+       /* If the device fault bit is set in the status register, then
         * set the sense data and return.
         */
-       if (d2h_reg_fis->status & ATA_DF)
+       if (fis->status & ATA_DF)
                ts->stat = SAS_PROTO_RESPONSE;
        else
                ts->stat = SAM_STAT_GOOD;
@@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
 {
        struct sas_task *task = isci_request_access_task(request);
        struct ssp_response_iu *resp_iu;
-       void *resp_buf;
        unsigned long task_flags;
        struct isci_remote_device *idev = isci_lookup_device(task->dev);
        enum service_response response       = SAS_TASK_UNDELIVERED;
@@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
                                task);
 
                        if (sas_protocol_ata(task->task_proto)) {
-                               resp_buf = &request->stp.rsp;
-                               isci_request_process_stp_response(task,
-                                                                 resp_buf);
+                               isci_process_stp_response(task, &request->stp.rsp);
                        } else if (SAS_PROTOCOL_SSP == task->task_proto) {
 
                                /* crack the iu response buffer. */
index e9e1e2a..16f88ab 100644 (file)
@@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
         */
        buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
        header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
-       size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
+       size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
 
        /*
         * The Unsolicited Frame buffers are set at the start of the UF
index 31cb950..75d8966 100644 (file)
@@ -214,7 +214,7 @@ struct sci_uf_address_table_array {
         * starting address of the UF address table.
         * 64-bit pointers are required by the hardware.
         */
-       dma_addr_t *array;
+       u64 *array;
 
        /**
         * This field specifies the physical address location for the UF
index 01ff082..d261e98 100644 (file)
@@ -494,6 +494,9 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
         */
        error = lport->tt.frame_send(lport, fp);
 
+       if (fh->fh_type == FC_TYPE_BLS)
+               return error;
+
        /*
         * Update the exchange and sequence flags,
         * assuming all frames for the sequence have been sent.
@@ -575,42 +578,35 @@ static void fc_seq_set_resp(struct fc_seq *sp,
 }
 
 /**
- * fc_seq_exch_abort() - Abort an exchange and sequence
- * @req_sp:    The sequence to be aborted
+ * fc_exch_abort_locked() - Abort an exchange
+ * @ep:        The exchange to be aborted
  * @timer_msec: The period of time to wait before aborting
  *
- * Generally called because of a timeout or an abort from the upper layer.
+ * Locking notes:  Called with exch lock held
+ *
+ * Return value: 0 on success else error code
  */
-static int fc_seq_exch_abort(const struct fc_seq *req_sp,
-                            unsigned int timer_msec)
+static int fc_exch_abort_locked(struct fc_exch *ep,
+                               unsigned int timer_msec)
 {
        struct fc_seq *sp;
-       struct fc_exch *ep;
        struct fc_frame *fp;
        int error;
 
-       ep = fc_seq_exch(req_sp);
-
-       spin_lock_bh(&ep->ex_lock);
        if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
-           ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
-               spin_unlock_bh(&ep->ex_lock);
+           ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
                return -ENXIO;
-       }
 
        /*
         * Send the abort on a new sequence if possible.
         */
        sp = fc_seq_start_next_locked(&ep->seq);
-       if (!sp) {
-               spin_unlock_bh(&ep->ex_lock);
+       if (!sp)
                return -ENOMEM;
-       }
 
        ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
        if (timer_msec)
                fc_exch_timer_set_locked(ep, timer_msec);
-       spin_unlock_bh(&ep->ex_lock);
 
        /*
         * If not logged into the fabric, don't send ABTS but leave
@@ -632,6 +628,28 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
        return error;
 }
 
+/**
+ * fc_seq_exch_abort() - Abort an exchange and sequence
+ * @req_sp:    The sequence to be aborted
+ * @timer_msec: The period of time to wait before aborting
+ *
+ * Generally called because of a timeout or an abort from the upper layer.
+ *
+ * Return value: 0 on success else error code
+ */
+static int fc_seq_exch_abort(const struct fc_seq *req_sp,
+                            unsigned int timer_msec)
+{
+       struct fc_exch *ep;
+       int error;
+
+       ep = fc_seq_exch(req_sp);
+       spin_lock_bh(&ep->ex_lock);
+       error = fc_exch_abort_locked(ep, timer_msec);
+       spin_unlock_bh(&ep->ex_lock);
+       return error;
+}
+
 /**
  * fc_exch_timeout() - Handle exchange timer expiration
  * @work: The work_struct identifying the exchange that timed out
@@ -1715,6 +1733,7 @@ static void fc_exch_reset(struct fc_exch *ep)
        int rc = 1;
 
        spin_lock_bh(&ep->ex_lock);
+       fc_exch_abort_locked(ep, 0);
        ep->state |= FC_EX_RST_CLEANUP;
        if (cancel_delayed_work(&ep->timeout_work))
                atomic_dec(&ep->ex_refcnt);     /* drop hold for timer */
@@ -1962,6 +1981,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
        struct fc_exch *ep;
        struct fc_seq *sp = NULL;
        struct fc_frame_header *fh;
+       struct fc_fcp_pkt *fsp = NULL;
        int rc = 1;
 
        ep = fc_exch_alloc(lport, fp);
@@ -1984,8 +2004,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
        fc_exch_setup_hdr(ep, fp, ep->f_ctl);
        sp->cnt++;
 
-       if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
+       if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
+               fsp = fr_fsp(fp);
                fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
+       }
 
        if (unlikely(lport->tt.frame_send(lport, fp)))
                goto err;
@@ -1999,7 +2021,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
        spin_unlock_bh(&ep->ex_lock);
        return sp;
 err:
-       fc_fcp_ddp_done(fr_fsp(fp));
+       if (fsp)
+               fc_fcp_ddp_done(fsp);
        rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
        if (!rc)
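
fc_exch_abort_locked() keeps the original abort body minus the locking, fc_seq_exch_abort() shrinks to a take-lock/call/drop-lock wrapper, and fc_exch_reset() can now abort an exchange while already holding ep->ex_lock. A compact userspace illustration of that locked/unlocked split, with hypothetical names and a pthread mutex in place of the exchange lock:

    #include <pthread.h>

    struct exch {
        pthread_mutex_t lock;
        int aborting;
    };

    /* _locked variant: caller already holds ex->lock */
    static int exch_abort_locked(struct exch *ex)
    {
        if (ex->aborting)               /* already complete or aborting */
            return -1;
        ex->aborting = 1;               /* ... send ABTS, arm the timer ... */
        return 0;
    }

    /* public wrapper: take the lock, call the helper, drop the lock */
    static int exch_abort(struct exch *ex)
    {
        int err;

        pthread_mutex_lock(&ex->lock);
        err = exch_abort_locked(ex);
        pthread_mutex_unlock(&ex->lock);
        return err;
    }

    /* a reset path that already holds the lock reuses the helper directly */
    static void exch_reset(struct exch *ex)
    {
        pthread_mutex_lock(&ex->lock);
        exch_abort_locked(ex);
        /* ... rest of the reset under the same lock ... */
        pthread_mutex_unlock(&ex->lock);
    }

    int main(void)
    {
        struct exch ex = { PTHREAD_MUTEX_INITIALIZER, 0 };

        exch_abort(&ex);
        exch_reset(&ex);
        return 0;
    }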
index afb63c8..4c41ee8 100644 (file)
@@ -2019,6 +2019,11 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
        struct fc_fcp_internal *si;
        int rc = FAILED;
        unsigned long flags;
+       int rval;
+
+       rval = fc_block_scsi_eh(sc_cmd);
+       if (rval)
+               return rval;
 
        lport = shost_priv(sc_cmd->device->host);
        if (lport->state != LPORT_ST_READY)
@@ -2068,9 +2073,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
        int rc = FAILED;
        int rval;
 
-       rval = fc_remote_port_chkready(rport);
+       rval = fc_block_scsi_eh(sc_cmd);
        if (rval)
-               goto out;
+               return rval;
 
        lport = shost_priv(sc_cmd->device->host);
 
@@ -2116,6 +2121,8 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
 
        FC_SCSI_DBG(lport, "Resetting host\n");
 
+       fc_block_scsi_eh(sc_cmd);
+
        lport->tt.lport_reset(lport);
        wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
        while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
index e55ed9c..628f347 100644 (file)
@@ -88,6 +88,7 @@
  */
 
 #include <linux/timer.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 
@@ -1029,8 +1030,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
                           FCH_EVT_LIPRESET, 0);
        fc_vports_linkchange(lport);
        fc_lport_reset_locked(lport);
-       if (lport->link_up)
+       if (lport->link_up) {
+               /*
+                * Wait upto resource allocation time out before
+                * Wait up to the resource allocation timeout before
+                * retrying the login, since incomplete FIP exchanges
+                * from the last session may collide with exchanges
+                * in the new session.
+               msleep(lport->r_a_tov);
                fc_lport_enter_flogi(lport);
+       }
 }
 
 /**
index 7836eb0..a31e05f 100644 (file)
@@ -1786,13 +1786,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
                        fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
        }
 
-       if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
+       if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                if (ha->fw_attributes & BIT_4) {
+                       int prot = 0;
                        vha->flags.difdix_supported = 1;
                        ql_dbg(ql_dbg_user, vha, 0x7082,
                            "Registered for DIF/DIX type 1 and 3 protection.\n");
+                       if (ql2xenabledif == 1)
+                               prot = SHOST_DIX_TYPE0_PROTECTION;
                        scsi_host_set_prot(vha->host,
-                           SHOST_DIF_TYPE1_PROTECTION
+                           prot | SHOST_DIF_TYPE1_PROTECTION
                            | SHOST_DIF_TYPE2_PROTECTION
                            | SHOST_DIF_TYPE3_PROTECTION
                            | SHOST_DIX_TYPE1_PROTECTION
index 2155071..d79cd8a 100644 (file)
@@ -8,24 +8,24 @@
 /*
  * Table for showing the current message id in use for particular level
  * Change this table for addition of log/debug messages.
- * -----------------------------------------------------
- * |             Level            |   Last Value Used  |
- * -----------------------------------------------------
- * | Module Init and Probe        |       0x0116       |
- * | Mailbox commands             |       0x111e       |
- * | Device Discovery             |       0x2083       |
- * | Queue Command and IO tracing |       0x302e       |
- * | DPC Thread                   |       0x401c       |
- * | Async Events                 |       0x5059       |
- * | Timer Routines               |       0x600d       |
- * | User Space Interactions      |       0x709c       |
- * | Task Management              |       0x8043       |
- * | AER/EEH                      |       0x900f       |
- * | Virtual Port                 |       0xa007       |
- * | ISP82XX Specific             |       0xb027       |
- * | MultiQ                       |       0xc00b       |
- * | Misc                         |       0xd00b       |
- * -----------------------------------------------------
+ * ----------------------------------------------------------------------
+ * |             Level            |   Last Value Used  |     Holes     |
+ * ----------------------------------------------------------------------
+ * | Module Init and Probe        |       0x0116       |               |
+ * | Mailbox commands             |       0x1126       |               |
+ * | Device Discovery             |       0x2083       |               |
+ * | Queue Command and IO tracing |       0x302e       |    0x3008     |
+ * | DPC Thread                   |       0x401c       |               |
+ * | Async Events                 |       0x5059       |               |
+ * | Timer Routines               |       0x600d       |               |
+ * | User Space Interactions      |       0x709d       |               |
+ * | Task Management              |       0x8041       |               |
+ * | AER/EEH                      |       0x900f       |               |
+ * | Virtual Port                 |       0xa007       |               |
+ * | ISP82XX Specific             |       0xb04f       |               |
+ * | MultiQ                       |       0xc00b       |               |
+ * | Misc                         |       0xd00b       |               |
+ * ----------------------------------------------------------------------
  */
 
 #include "qla_def.h"
index cc5a792..a03eaf4 100644 (file)
@@ -2529,6 +2529,7 @@ struct qla_hw_data {
 #define DT_ISP8021                     BIT_14
 #define DT_ISP_LAST                    (DT_ISP8021 << 1)
 
+#define DT_T10_PI                       BIT_25
 #define DT_IIDMA                        BIT_26
 #define DT_FWI2                         BIT_27
 #define DT_ZIO_SUPPORTED                BIT_28
@@ -2572,6 +2573,7 @@ struct qla_hw_data {
 #define IS_NOCACHE_VPD_TYPE(ha)        (IS_QLA81XX(ha))
 #define IS_ALOGIO_CAPABLE(ha)  (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
 
+#define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI)
 #define IS_IIDMA_CAPABLE(ha)    ((ha)->device_type & DT_IIDMA)
 #define IS_FWI2_CAPABLE(ha)     ((ha)->device_type & DT_FWI2)
 #define IS_ZIO_SUPPORTED(ha)    ((ha)->device_type & DT_ZIO_SUPPORTED)
index 691783a..aa69486 100644 (file)
@@ -537,6 +537,11 @@ struct sts_entry_24xx {
        /*
         * If DIF Error is set in comp_status, these additional fields are
         * defined:
+        *
+        * !!! NOTE: Firmware sends expected/actual DIF data in big endian
+        * format; but all of the "data" field gets swab32-d in the beginning
+        * of qla2x00_status_entry().
+        *
         * &data[10] : uint8_t report_runt_bg[2];       - computed guard
         * &data[12] : uint8_t actual_dif[8];           - DIF Data received
         * &data[20] : uint8_t expected_dif[8];         - DIF Data computed
index def6942..37da04d 100644 (file)
@@ -3838,15 +3838,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
                req = vha->req;
        rsp = req->rsp;
 
-       atomic_set(&vha->loop_state, LOOP_UPDATE);
        clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
        if (vha->flags.online) {
                if (!(rval = qla2x00_fw_ready(vha))) {
                        /* Wait at most MAX_TARGET RSCNs for a stable link. */
                        wait_time = 256;
                        do {
-                               atomic_set(&vha->loop_state, LOOP_UPDATE);
-
                                /* Issue a marker after FW becomes ready. */
                                qla2x00_marker(vha, req, rsp, 0, 0,
                                        MK_SYNC_ALL);
index d2e904b..9902834 100644 (file)
@@ -102,3 +102,32 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
                    fcport->d_id.b.al_pa);
        }
 }
+
+static inline int
+qla2x00_hba_err_chk_enabled(srb_t *sp)
+{
+       /*
+        * Uncomment when corresponding SCSI changes are done.
+        *
+       if (!sp->cmd->prot_chk)
+               return 0;
+        *
+        */
+
+       switch (scsi_get_prot_op(sp->cmd)) {
+       case SCSI_PROT_READ_STRIP:
+       case SCSI_PROT_WRITE_INSERT:
+               if (ql2xenablehba_err_chk >= 1)
+                       return 1;
+               break;
+       case SCSI_PROT_READ_PASS:
+       case SCSI_PROT_WRITE_PASS:
+               if (ql2xenablehba_err_chk >= 2)
+                       return 1;
+               break;
+       case SCSI_PROT_READ_INSERT:
+       case SCSI_PROT_WRITE_STRIP:
+               return 1;
+       }
+       return 0;
+}
index 49d6906..dbec896 100644 (file)
@@ -709,20 +709,28 @@ struct fw_dif_context {
  *
  */
 static inline void
-qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
+qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
     unsigned int protcnt)
 {
-       struct sd_dif_tuple *spt;
+       struct scsi_cmnd *cmd = sp->cmd;
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
-       unsigned char op = scsi_get_prot_op(cmd);
 
        switch (scsi_get_prot_type(cmd)) {
-       /* For TYPE 0 protection: no checking */
        case SCSI_PROT_DIF_TYPE0:
-               pkt->ref_tag_mask[0] = 0x00;
-               pkt->ref_tag_mask[1] = 0x00;
-               pkt->ref_tag_mask[2] = 0x00;
-               pkt->ref_tag_mask[3] = 0x00;
+               /*
+                * No check for ql2xenablehba_err_chk, as it would be an
+                * I/O error if hba tag generation is not done.
+                */
+               pkt->ref_tag = cpu_to_le32((uint32_t)
+                   (0xffffffff & scsi_get_lba(cmd)));
+
+               if (!qla2x00_hba_err_chk_enabled(sp))
+                       break;
+
+               pkt->ref_tag_mask[0] = 0xff;
+               pkt->ref_tag_mask[1] = 0xff;
+               pkt->ref_tag_mask[2] = 0xff;
+               pkt->ref_tag_mask[3] = 0xff;
                break;
 
        /*
@@ -730,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
-               if (!ql2xenablehba_err_chk)
-                       break;
-
-               if (scsi_prot_sg_count(cmd)) {
-                       spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
-                           scsi_prot_sglist(cmd)[0].offset;
-                       pkt->app_tag = swab32(spt->app_tag);
-                       pkt->app_tag_mask[0] =  0xff;
-                       pkt->app_tag_mask[1] =  0xff;
-               }
+               pkt->app_tag = __constant_cpu_to_le16(0);
+               pkt->app_tag_mask[0] = 0x0;
+               pkt->app_tag_mask[1] = 0x0;
 
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
 
+               if (!qla2x00_hba_err_chk_enabled(sp))
+                       break;
+
                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
@@ -763,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
-               if (!ql2xenablehba_err_chk)
+               pkt->ref_tag = cpu_to_le32((uint32_t)
+                   (0xffffffff & scsi_get_lba(cmd)));
+               pkt->app_tag = __constant_cpu_to_le16(0);
+               pkt->app_tag_mask[0] = 0x0;
+               pkt->app_tag_mask[1] = 0x0;
+
+               if (!qla2x00_hba_err_chk_enabled(sp))
                        break;
 
-               if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
-                   op == SCSI_PROT_WRITE_PASS)) {
-                       spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
-                           scsi_prot_sglist(cmd)[0].offset;
-                       ql_dbg(ql_dbg_io, vha, 0x3008,
-                           "LBA from user %p, lba = 0x%x for cmd=%p.\n",
-                           spt, (int)spt->ref_tag, cmd);
-                       pkt->ref_tag = swab32(spt->ref_tag);
-                       pkt->app_tag_mask[0] = 0x0;
-                       pkt->app_tag_mask[1] = 0x0;
-               } else {
-                       pkt->ref_tag = cpu_to_le32((uint32_t)
-                           (0xffffffff & scsi_get_lba(cmd)));
-                       pkt->app_tag = __constant_cpu_to_le16(0);
-                       pkt->app_tag_mask[0] = 0x0;
-                       pkt->app_tag_mask[1] = 0x0;
-               }
                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
@@ -798,7 +791,161 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
            scsi_get_prot_type(cmd), cmd);
 }
 
+struct qla2_sgx {
+       dma_addr_t              dma_addr;       /* OUT */
+       uint32_t                dma_len;        /* OUT */
+
+       uint32_t                tot_bytes;      /* IN */
+       struct scatterlist      *cur_sg;        /* IN */
+
+       /* for bookkeeping, bzero on initial invocation */
+       uint32_t                bytes_consumed;
+       uint32_t                num_bytes;
+       uint32_t                tot_partial;
+
+       /* for debugging */
+       uint32_t                num_sg;
+       srb_t                   *sp;
+};
 
+static int
+qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
+       uint32_t *partial)
+{
+       struct scatterlist *sg;
+       uint32_t cumulative_partial, sg_len;
+       dma_addr_t sg_dma_addr;
+
+       if (sgx->num_bytes == sgx->tot_bytes)
+               return 0;
+
+       sg = sgx->cur_sg;
+       cumulative_partial = sgx->tot_partial;
+
+       sg_dma_addr = sg_dma_address(sg);
+       sg_len = sg_dma_len(sg);
+
+       sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
+
+       if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
+               sgx->dma_len = (blk_sz - cumulative_partial);
+               sgx->tot_partial = 0;
+               sgx->num_bytes += blk_sz;
+               *partial = 0;
+       } else {
+               sgx->dma_len = sg_len - sgx->bytes_consumed;
+               sgx->tot_partial += sgx->dma_len;
+               *partial = 1;
+       }
+
+       sgx->bytes_consumed += sgx->dma_len;
+
+       if (sg_len == sgx->bytes_consumed) {
+               sg = sg_next(sg);
+               sgx->num_sg++;
+               sgx->cur_sg = sg;
+               sgx->bytes_consumed = 0;
+       }
+
+       return 1;
+}
+
+static int
+qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
+       uint32_t *dsd, uint16_t tot_dsds)
+{
+       void *next_dsd;
+       uint8_t avail_dsds = 0;
+       uint32_t dsd_list_len;
+       struct dsd_dma *dsd_ptr;
+       struct scatterlist *sg_prot;
+       uint32_t *cur_dsd = dsd;
+       uint16_t        used_dsds = tot_dsds;
+
+       uint32_t        prot_int;
+       uint32_t        partial;
+       struct qla2_sgx sgx;
+       dma_addr_t      sle_dma;
+       uint32_t        sle_dma_len, tot_prot_dma_len = 0;
+       struct scsi_cmnd *cmd = sp->cmd;
+
+       prot_int = cmd->device->sector_size;
+
+       memset(&sgx, 0, sizeof(struct qla2_sgx));
+       sgx.tot_bytes = scsi_bufflen(sp->cmd);
+       sgx.cur_sg = scsi_sglist(sp->cmd);
+       sgx.sp = sp;
+
+       sg_prot = scsi_prot_sglist(sp->cmd);
+
+       while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
+
+               sle_dma = sgx.dma_addr;
+               sle_dma_len = sgx.dma_len;
+alloc_and_fill:
+               /* Allocate additional continuation packets? */
+               if (avail_dsds == 0) {
+                       avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+                                       QLA_DSDS_PER_IOCB : used_dsds;
+                       dsd_list_len = (avail_dsds + 1) * 12;
+                       used_dsds -= avail_dsds;
+
+                       /* allocate tracking DS */
+                       dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+                       if (!dsd_ptr)
+                               return 1;
+
+                       /* allocate new list */
+                       dsd_ptr->dsd_addr = next_dsd =
+                           dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+                               &dsd_ptr->dsd_list_dma);
+
+                       if (!next_dsd) {
+                               /*
+                                * Need to cleanup only this dsd_ptr, rest
+                                * will be done by sp_free_dma()
+                                */
+                               kfree(dsd_ptr);
+                               return 1;
+                       }
+
+                       list_add_tail(&dsd_ptr->list,
+                           &((struct crc_context *)sp->ctx)->dsd_list);
+
+                       sp->flags |= SRB_CRC_CTX_DSD_VALID;
+
+                       /* add new list to cmd iocb or last list */
+                       *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+                       *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+                       *cur_dsd++ = dsd_list_len;
+                       cur_dsd = (uint32_t *)next_dsd;
+               }
+               *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+               *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+               *cur_dsd++ = cpu_to_le32(sle_dma_len);
+               avail_dsds--;
+
+               if (partial == 0) {
+                       /* Got a full protection interval */
+                       sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
+                       sle_dma_len = 8;
+
+                       tot_prot_dma_len += sle_dma_len;
+                       if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
+                               tot_prot_dma_len = 0;
+                               sg_prot = sg_next(sg_prot);
+                       }
+
+                       partial = 1; /* So as to not re-enter this block */
+                       goto alloc_and_fill;
+               }
+       }
+       /* Null termination */
+       *cur_dsd++ = 0;
+       *cur_dsd++ = 0;
+       *cur_dsd++ = 0;
+       return 0;
+}
 static int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
@@ -981,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        struct scsi_cmnd        *cmd;
        struct scatterlist      *cur_seg;
        int                     sgc;
-       uint32_t                total_bytes;
+       uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
@@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
                    __constant_cpu_to_le16(CF_READ_DATA);
        }
 
-       tot_prot_dsds = scsi_prot_sg_count(cmd);
-       if (!tot_prot_dsds)
+       if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
+           (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
+           (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
+           (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;
 
        /* Allocate CRC context from global pool */
@@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 
        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
 
-       qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
+       qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);
 
        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
@@ -1076,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
                fcp_cmnd->additional_cdb_len |= 2;
 
        int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
-       host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1107,15 +1255,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
 
        /* Compute dif len and adjust data len to include protection */
-       total_bytes = data_bytes;
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
-       if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
-               dif_bytes = (data_bytes / blk_size) * 8;
-               total_bytes += dif_bytes;
+       dif_bytes = (data_bytes / blk_size) * 8;
+
+       switch (scsi_get_prot_op(sp->cmd)) {
+       case SCSI_PROT_READ_INSERT:
+       case SCSI_PROT_WRITE_STRIP:
+           total_bytes = data_bytes;
+           data_bytes += dif_bytes;
+           break;
+
+       case SCSI_PROT_READ_STRIP:
+       case SCSI_PROT_WRITE_INSERT:
+       case SCSI_PROT_READ_PASS:
+       case SCSI_PROT_WRITE_PASS:
+           total_bytes = data_bytes + dif_bytes;
+           break;
+       default:
+           BUG();
        }
 
-       if (!ql2xenablehba_err_chk)
+       if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
 
        if (!bundling) {
@@ -1151,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
-       if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
+
+       if (!bundling && tot_prot_dsds) {
+               if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
+                   cur_dsd, tot_dsds))
+                       goto crc_queuing_error;
+       } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;
 
@@ -1414,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp)
                        goto queuing_error;
                else
                        sp->flags |= SRB_DMA_VALID;
+
+               if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+                   (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+                       struct qla2_sgx sgx;
+                       uint32_t        partial;
+
+                       memset(&sgx, 0, sizeof(struct qla2_sgx));
+                       sgx.tot_bytes = scsi_bufflen(cmd);
+                       sgx.cur_sg = scsi_sglist(cmd);
+                       sgx.sp = sp;
+
+                       nseg = 0;
+                       while (qla24xx_get_one_block_sg(
+                           cmd->device->sector_size, &sgx, &partial))
+                               nseg++;
+               }
        } else
                nseg = 0;
 
@@ -1428,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
                        goto queuing_error;
                else
                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+               if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+                   (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+                       nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+               }
        } else {
                nseg = 0;
        }
@@ -1454,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
        /* Build header part of command packet (excluding the OPCODE). */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
+       sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;
 
index b16b772..646fc52 100644 (file)
@@ -719,7 +719,6 @@ skip_rio:
                        vha->flags.rscn_queue_overflow = 1;
                }
 
-               atomic_set(&vha->loop_state, LOOP_UPDATE);
                atomic_set(&vha->loop_down_timer, 0);
                vha->flags.management_server_logged_in = 0;
 
@@ -1435,25 +1434,27 @@ struct scsi_dif_tuple {
  * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
  * to indicate to the kernel that the HBA detected error.
  */
-static inline void
+static inline int
 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
 {
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct scsi_cmnd *cmd = sp->cmd;
-       struct scsi_dif_tuple   *ep =
-                       (struct scsi_dif_tuple *)&sts24->data[20];
-       struct scsi_dif_tuple   *ap =
-                       (struct scsi_dif_tuple *)&sts24->data[12];
+       uint8_t         *ap = &sts24->data[12];
+       uint8_t         *ep = &sts24->data[20];
        uint32_t        e_ref_tag, a_ref_tag;
        uint16_t        e_app_tag, a_app_tag;
        uint16_t        e_guard, a_guard;
 
-       e_ref_tag = be32_to_cpu(ep->ref_tag);
-       a_ref_tag = be32_to_cpu(ap->ref_tag);
-       e_app_tag = be16_to_cpu(ep->app_tag);
-       a_app_tag = be16_to_cpu(ap->app_tag);
-       e_guard = be16_to_cpu(ep->guard);
-       a_guard = be16_to_cpu(ap->guard);
+       /*
+        * swab32 of the "data" field in the beginning of qla2x00_status_entry()
+        * would make guard field appear at offset 2
+        */
+       a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
+       a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
+       a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
+       e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
+       e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
+       e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
 
        ql_dbg(ql_dbg_io, vha, 0x3023,
            "iocb(s) %p Returned STATUS.\n", sts24);
@@ -1465,6 +1466,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
            cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
            a_app_tag, e_app_tag, a_guard, e_guard);
 
+       /*
+        * Ignore sector if:
+        * For type     3: ref & app tag is all 'f's
+        * For type 0,1,2: app tag is all 'f's
+        */
+       if ((a_app_tag == 0xffff) &&
+           ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
+            (a_ref_tag == 0xffffffff))) {
+               uint32_t blocks_done, resid;
+               sector_t lba_s = scsi_get_lba(cmd);
+
+               /* 2TB boundary case covered automatically with this */
+               blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
+
+               resid = scsi_bufflen(cmd) - (blocks_done *
+                   cmd->device->sector_size);
+
+               scsi_set_resid(cmd, resid);
+               cmd->result = DID_OK << 16;
+
+               /* Update protection tag */
+               if (scsi_prot_sg_count(cmd)) {
+                       uint32_t i, j = 0, k = 0, num_ent;
+                       struct scatterlist *sg;
+                       struct sd_dif_tuple *spt;
+
+                       /* Patch the corresponding protection tags */
+                       scsi_for_each_prot_sg(cmd, sg,
+                           scsi_prot_sg_count(cmd), i) {
+                               num_ent = sg_dma_len(sg) / 8;
+                               if (k + num_ent < blocks_done) {
+                                       k += num_ent;
+                                       continue;
+                               }
+                               j = blocks_done - k - 1;
+                               k = blocks_done;
+                               break;
+                       }
+
+                       if (k != blocks_done) {
+                               qla_printk(KERN_WARNING, sp->fcport->vha->hw,
+                                   "unexpected tag values tag:lba=%x:%lx)\n",
+                                   e_ref_tag, lba_s);
+                               return 1;
+                       }
+
+                       spt = page_address(sg_page(sg)) + sg->offset;
+                       spt += j;
+
+                       spt->app_tag = 0xffff;
+                       if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
+                               spt->ref_tag = 0xffffffff;
+               }
+
+               return 0;
+       }
+
        /* check guard */
        if (e_guard != a_guard) {
                scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
@@ -1472,28 +1530,30 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
                set_driver_byte(cmd, DRIVER_SENSE);
                set_host_byte(cmd, DID_ABORT);
                cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
-               return;
+               return 1;
        }
 
-       /* check appl tag */
-       if (e_app_tag != a_app_tag) {
+       /* check ref tag */
+       if (e_ref_tag != a_ref_tag) {
                scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
-                   0x10, 0x2);
+                   0x10, 0x3);
                set_driver_byte(cmd, DRIVER_SENSE);
                set_host_byte(cmd, DID_ABORT);
                cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
-               return;
+               return 1;
        }
 
-       /* check ref tag */
-       if (e_ref_tag != a_ref_tag) {
+       /* check appl tag */
+       if (e_app_tag != a_app_tag) {
                scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
-                   0x10, 0x3);
+                   0x10, 0x2);
                set_driver_byte(cmd, DRIVER_SENSE);
                set_host_byte(cmd, DID_ABORT);
                cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
-               return;
+               return 1;
        }
+
+       return 1;
 }
 
 /**
@@ -1767,7 +1827,7 @@ check_scsi_status:
                break;
 
        case CS_DIF_ERROR:
-               qla2x00_handle_dif_error(sp, sts24);
+               logit = qla2x00_handle_dif_error(sp, sts24);
                break;
        default:
                cp->result = DID_ERROR << 16;
@@ -2468,11 +2528,10 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
                goto skip_msi;
        }
 
-       if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
-               !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
+       if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
                ql_log(ql_log_warn, vha, 0x0035,
                    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
-                   ha->pdev->revision, ha->fw_attributes);
+                   ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
                goto skip_msix;
        }
 
index c706ed3..f488cc6 100644 (file)
@@ -472,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
        host->can_queue = base_vha->req->length + 128;
        host->this_id = 255;
        host->cmd_per_lun = 3;
-       if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
+       if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
index 5cbf33a..049807c 100644 (file)
@@ -2208,6 +2208,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
        struct qla_hw_data *ha;
        struct rsp_que *rsp;
        struct device_reg_82xx __iomem *reg;
+       unsigned long flags;
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -2218,11 +2219,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
 
        ha = rsp->hw;
        reg = &ha->iobase->isp82;
-       spin_lock_irq(&ha->hardware_lock);
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
        WRT_REG_DWORD(&reg->host_int, 0);
-       spin_unlock_irq(&ha->hardware_lock);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return IRQ_HANDLED;
 }
 
@@ -2838,6 +2839,16 @@ sufficient_dsds:
                int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
 
+               /* build FCP_CMND IU */
+               memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+               int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+               ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+               if (cmd->sc_data_direction == DMA_TO_DEVICE)
+                       ctx->fcp_cmnd->additional_cdb_len |= 1;
+               else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+                       ctx->fcp_cmnd->additional_cdb_len |= 2;
+
                /*
                 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
                 */
@@ -2854,16 +2865,6 @@ sufficient_dsds:
                        }
                }
 
-               /* build FCP_CMND IU */
-               memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
-               int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
-               ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
-               if (cmd->sc_data_direction == DMA_TO_DEVICE)
-                       ctx->fcp_cmnd->additional_cdb_len |= 1;
-               else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
-                       ctx->fcp_cmnd->additional_cdb_len |= 2;
-
                memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
 
                fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
index e02df27..4cace3f 100644 (file)
@@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
                "Maximum queue depth to report for target devices.");
 
 /* Do not change the value of this after module load */
-int ql2xenabledif = 1;
+int ql2xenabledif = 0;
 module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xenabledif,
                " Enable T10-CRC-DIF "
-               " Default is 0 - No DIF Support. 1 - Enable it");
+               " Default is 0 - No DIF Support. 1 - Enable it"
+               ", 2 - Enable DIF for all types, except Type 0.");
 
-int ql2xenablehba_err_chk;
+int ql2xenablehba_err_chk = 2;
 module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xenablehba_err_chk,
-               " Enable T10-CRC-DIF Error isolation by HBA"
-               " Default is 0 - Error isolation disabled, 1 - Enable it");
+               " Enable T10-CRC-DIF Error isolation by HBA:\n"
+               " Default is 2.\n"
+               "  0 -- Error isolation disabled\n"
+               "  1 -- Error isolation enabled only for DIX Type 0\n"
+               "  2 -- Error isolation enabled for all Types\n");
 
 int ql2xiidmaenable=1;
 module_param(ql2xiidmaenable, int, S_IRUGO);
@@ -909,7 +913,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
                    "Abort command mbx success.\n");
                wait = 1;
        }
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        qla2x00_sp_compl(ha, sp);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       /* Did the command return during mailbox execution? */
+       if (ret == FAILED && !CMD_SP(cmd))
+               ret = SUCCESS;
 
        /* Wait for the command to be returned. */
        if (wait) {
@@ -2251,7 +2262,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        host->this_id = 255;
        host->cmd_per_lun = 3;
        host->unique_id = host->host_no;
-       if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
+       if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
@@ -2378,13 +2389,16 @@ skip_dpc:
            "Detected hba at address=%p.\n",
            ha);
 
-       if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
+       if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                if (ha->fw_attributes & BIT_4) {
+                       int prot = 0;
                        base_vha->flags.difdix_supported = 1;
                        ql_dbg(ql_dbg_init, base_vha, 0x00f1,
                            "Registering for DIF/DIX type 1 and 3 protection.\n");
+                       if (ql2xenabledif == 1)
+                               prot = SHOST_DIX_TYPE0_PROTECTION;
                        scsi_host_set_prot(host,
-                           SHOST_DIF_TYPE1_PROTECTION
+                           prot | SHOST_DIF_TYPE1_PROTECTION
                            | SHOST_DIF_TYPE2_PROTECTION
                            | SHOST_DIF_TYPE3_PROTECTION
                            | SHOST_DIX_TYPE1_PROTECTION
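The probe hunk above (shown truncated here) adds DIX Type 0 to the registered protection mask only when ql2xenabledif == 1, on top of the DIF/DIX Type 1-3 capabilities. For context, a low-level driver advertises T10 PI support to the SCSI midlayer roughly as follows; this is a sketch, and the guard-type call assumes the HBA verifies a T10 CRC guard tag:

        /* Sketch: advertise protection capabilities for a scsi_host */
        unsigned int prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE3_PROTECTION |
                            SHOST_DIX_TYPE1_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

        scsi_host_set_prot(host, prot);
        scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);  /* guard tag is a T10 CRC */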
index 062c97b..13b6357 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.07.03-k"
+#define QLA2XXX_VERSION      "8.03.07.07-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   3
index 2c33ce6..0f5599e 100644 (file)
@@ -1,6 +1,6 @@
 config SCSI_QLA_ISCSI
        tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
-       depends on PCI && SCSI
+       depends on PCI && SCSI && NET
        select SCSI_ISCSI_ATTRS
        ---help---
        This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
index 6859af0..7611def 100644 (file)
@@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,
                                   struct comedi_insn *insn,
                                   unsigned int *data);
 static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd);
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_ISA_DMA_API
 static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd);
+#endif
+#ifdef CONFIG_COMEDI_PCI
 static int labpc_find_device(struct comedi_device *dev, int bus, int slot);
 #endif
 static int labpc_dio_mem_callback(int dir, int port, int data,
index 497b2e7..5b77316 100644 (file)
@@ -1430,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(
        u8 DataSequenceInOrder = 0;
        u8 ErrorRecoveryLevel = 0, SessionType = 0;
        u8 IFMarker = 0, OFMarker = 0;
-       u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
+       u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
        u32 FirstBurstLength = 0, MaxBurstLength = 0;
        struct iscsi_param *param = NULL;
 
index a0d23bc..f00137f 100644 (file)
@@ -874,40 +874,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
        spin_unlock_bh(&sess->session_usage_lock);
 }
 
-/*
- *     Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
- *     array counts needed for sync and steering.
- */
-static int iscsit_determine_sync_and_steering_counts(
-       struct iscsi_conn *conn,
-       struct iscsi_data_count *count)
-{
-       u32 length = count->data_length;
-       u32 marker, markint;
-
-       count->sync_and_steering = 1;
-
-       marker = (count->type == ISCSI_RX_DATA) ?
-                       conn->of_marker : conn->if_marker;
-       markint = (count->type == ISCSI_RX_DATA) ?
-                       (conn->conn_ops->OFMarkInt * 4) :
-                       (conn->conn_ops->IFMarkInt * 4);
-       count->ss_iov_count = count->iov_count;
-
-       while (length > 0) {
-               if (length >= marker) {
-                       count->ss_iov_count += 3;
-                       count->ss_marker_count += 2;
-
-                       length -= marker;
-                       marker = markint;
-               } else
-                       length = 0;
-       }
-
-       return 0;
-}
-
 /*
  *     Setup conn->if_marker and conn->of_marker values based upon
  *     the initial marker-less interval. (see iSCSI v19 A.2)
@@ -1290,7 +1256,7 @@ int iscsit_fe_sendpage_sg(
        struct kvec iov;
        u32 tx_hdr_size, data_len;
        u32 offset = cmd->first_data_sg_off;
-       int tx_sent;
+       int tx_sent, iov_off;
 
 send_hdr:
        tx_hdr_size = ISCSI_HDR_LEN;
@@ -1310,9 +1276,19 @@ send_hdr:
        }
 
        data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
-       if (conn->conn_ops->DataDigest)
+       /*
+        * Set iov_off, used by the padding and data-digest tx_data() calls below,
+        * to determine the proper offset into cmd->iov_data[]
+        */
+       if (conn->conn_ops->DataDigest) {
                data_len -= ISCSI_CRC_LEN;
-
+               if (cmd->padding)
+                       iov_off = (cmd->iov_data_count - 2);
+               else
+                       iov_off = (cmd->iov_data_count - 1);
+       } else {
+               iov_off = (cmd->iov_data_count - 1);
+       }
        /*
         * Perform sendpage() for each page in the scatterlist
         */
@@ -1341,8 +1317,7 @@ send_pg:
 
 send_padding:
        if (cmd->padding) {
-               struct kvec *iov_p =
-                       &cmd->iov_data[cmd->iov_data_count-1];
+               struct kvec *iov_p = &cmd->iov_data[iov_off++];
 
                tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
                if (cmd->padding != tx_sent) {
@@ -1356,8 +1331,7 @@ send_padding:
 
 send_datacrc:
        if (conn->conn_ops->DataDigest) {
-               struct kvec *iov_d =
-                       &cmd->iov_data[cmd->iov_data_count];
+               struct kvec *iov_d = &cmd->iov_data[iov_off];
 
                tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
                if (ISCSI_CRC_LEN != tx_sent) {
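The send-path hunks above replace the fixed end-of-array indexing for the padding and data-digest vectors with a precomputed iov_off; previously the digest path indexed cmd->iov_data[cmd->iov_data_count], i.e. one slot past the last counted entry. The assumed layout, with both padding and DataDigest enabled, is sketched below:

        /*
         * Assumed layout of cmd->iov_data[] when padding and DataDigest are on
         * (iov_data_count == n):
         *
         *   [0 .. n-3]  data segments
         *   [n-2]       padding bytes          <- iov_off starts here
         *   [n-1]       data digest (CRC32C)
         *
         * Without padding the digest is the last entry and iov_off == n - 1.
         */
        iov_off = cmd->padding ? (cmd->iov_data_count - 2) : (cmd->iov_data_count - 1);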
@@ -1431,8 +1405,7 @@ static int iscsit_do_rx_data(
        struct iscsi_data_count *count)
 {
        int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
-       u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
-       struct kvec iov[count->ss_iov_count], *iov_p;
+       struct kvec *iov_p;
        struct msghdr msg;
 
        if (!conn || !conn->sock || !conn->conn_ops)
@@ -1440,93 +1413,8 @@ static int iscsit_do_rx_data(
 
        memset(&msg, 0, sizeof(struct msghdr));
 
-       if (count->sync_and_steering) {
-               int size = 0;
-               u32 i, orig_iov_count = 0;
-               u32 orig_iov_len = 0, orig_iov_loc = 0;
-               u32 iov_count = 0, per_iov_bytes = 0;
-               u32 *rx_marker, old_rx_marker = 0;
-               struct kvec *iov_record;
-
-               memset(&rx_marker_val, 0,
-                               count->ss_marker_count * sizeof(u32));
-               memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
-
-               iov_record = count->iov;
-               orig_iov_count = count->iov_count;
-               rx_marker = &conn->of_marker;
-
-               i = 0;
-               size = data;
-               orig_iov_len = iov_record[orig_iov_loc].iov_len;
-               while (size > 0) {
-                       pr_debug("rx_data: #1 orig_iov_len %u,"
-                       " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
-                       pr_debug("rx_data: #2 rx_marker %u, size"
-                               " %u\n", *rx_marker, size);
-
-                       if (orig_iov_len >= *rx_marker) {
-                               iov[iov_count].iov_len = *rx_marker;
-                               iov[iov_count++].iov_base =
-                                       (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &rx_marker_val[rx_marker_iov++];
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &rx_marker_val[rx_marker_iov++];
-                               old_rx_marker = *rx_marker;
-
-                               /*
-                                * OFMarkInt is in 32-bit words.
-                                */
-                               *rx_marker = (conn->conn_ops->OFMarkInt * 4);
-                               size -= old_rx_marker;
-                               orig_iov_len -= old_rx_marker;
-                               per_iov_bytes += old_rx_marker;
-
-                               pr_debug("rx_data: #3 new_rx_marker"
-                                       " %u, size %u\n", *rx_marker, size);
-                       } else {
-                               iov[iov_count].iov_len = orig_iov_len;
-                               iov[iov_count++].iov_base =
-                                       (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               per_iov_bytes = 0;
-                               *rx_marker -= orig_iov_len;
-                               size -= orig_iov_len;
-
-                               if (size)
-                                       orig_iov_len =
-                                       iov_record[++orig_iov_loc].iov_len;
-
-                               pr_debug("rx_data: #4 new_rx_marker"
-                                       " %u, size %u\n", *rx_marker, size);
-                       }
-               }
-               data += (rx_marker_iov * (MARKER_SIZE / 2));
-
-               iov_p   = &iov[0];
-               iov_len = iov_count;
-
-               if (iov_count > count->ss_iov_count) {
-                       pr_err("iov_count: %d, count->ss_iov_count:"
-                               " %d\n", iov_count, count->ss_iov_count);
-                       return -1;
-               }
-               if (rx_marker_iov > count->ss_marker_count) {
-                       pr_err("rx_marker_iov: %d, count->ss_marker"
-                               "_count: %d\n", rx_marker_iov,
-                               count->ss_marker_count);
-                       return -1;
-               }
-       } else {
-               iov_p = count->iov;
-               iov_len = count->iov_count;
-       }
+       iov_p = count->iov;
+       iov_len = count->iov_count;
 
        while (total_rx < data) {
                rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1541,16 +1429,6 @@ static int iscsit_do_rx_data(
                                rx_loop, total_rx, data);
        }
 
-       if (count->sync_and_steering) {
-               int j;
-               for (j = 0; j < rx_marker_iov; j++) {
-                       pr_debug("rx_data: #5 j: %d, offset: %d\n",
-                               j, rx_marker_val[j]);
-                       conn->of_marker_offset = rx_marker_val[j];
-               }
-               total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
-       }
-
        return total_rx;
 }
 
@@ -1559,8 +1437,7 @@ static int iscsit_do_tx_data(
        struct iscsi_data_count *count)
 {
        int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
-       u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
-       struct kvec iov[count->ss_iov_count], *iov_p;
+       struct kvec *iov_p;
        struct msghdr msg;
 
        if (!conn || !conn->sock || !conn->conn_ops)
@@ -1573,98 +1450,8 @@ static int iscsit_do_tx_data(
 
        memset(&msg, 0, sizeof(struct msghdr));
 
-       if (count->sync_and_steering) {
-               int size = 0;
-               u32 i, orig_iov_count = 0;
-               u32 orig_iov_len = 0, orig_iov_loc = 0;
-               u32 iov_count = 0, per_iov_bytes = 0;
-               u32 *tx_marker, old_tx_marker = 0;
-               struct kvec *iov_record;
-
-               memset(&tx_marker_val, 0,
-                       count->ss_marker_count * sizeof(u32));
-               memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
-
-               iov_record = count->iov;
-               orig_iov_count = count->iov_count;
-               tx_marker = &conn->if_marker;
-
-               i = 0;
-               size = data;
-               orig_iov_len = iov_record[orig_iov_loc].iov_len;
-               while (size > 0) {
-                       pr_debug("tx_data: #1 orig_iov_len %u,"
-                       " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
-                       pr_debug("tx_data: #2 tx_marker %u, size"
-                               " %u\n", *tx_marker, size);
-
-                       if (orig_iov_len >= *tx_marker) {
-                               iov[iov_count].iov_len = *tx_marker;
-                               iov[iov_count++].iov_base =
-                                       (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               tx_marker_val[tx_marker_iov] =
-                                               (size - *tx_marker);
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &tx_marker_val[tx_marker_iov++];
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &tx_marker_val[tx_marker_iov++];
-                               old_tx_marker = *tx_marker;
-
-                               /*
-                                * IFMarkInt is in 32-bit words.
-                                */
-                               *tx_marker = (conn->conn_ops->IFMarkInt * 4);
-                               size -= old_tx_marker;
-                               orig_iov_len -= old_tx_marker;
-                               per_iov_bytes += old_tx_marker;
-
-                               pr_debug("tx_data: #3 new_tx_marker"
-                                       " %u, size %u\n", *tx_marker, size);
-                               pr_debug("tx_data: #4 offset %u\n",
-                                       tx_marker_val[tx_marker_iov-1]);
-                       } else {
-                               iov[iov_count].iov_len = orig_iov_len;
-                               iov[iov_count++].iov_base
-                                       = (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               per_iov_bytes = 0;
-                               *tx_marker -= orig_iov_len;
-                               size -= orig_iov_len;
-
-                               if (size)
-                                       orig_iov_len =
-                                       iov_record[++orig_iov_loc].iov_len;
-
-                               pr_debug("tx_data: #5 new_tx_marker"
-                                       " %u, size %u\n", *tx_marker, size);
-                       }
-               }
-
-               data += (tx_marker_iov * (MARKER_SIZE / 2));
-
-               iov_p = &iov[0];
-               iov_len = iov_count;
-
-               if (iov_count > count->ss_iov_count) {
-                       pr_err("iov_count: %d, count->ss_iov_count:"
-                               " %d\n", iov_count, count->ss_iov_count);
-                       return -1;
-               }
-               if (tx_marker_iov > count->ss_marker_count) {
-                       pr_err("tx_marker_iov: %d, count->ss_marker"
-                               "_count: %d\n", tx_marker_iov,
-                               count->ss_marker_count);
-                       return -1;
-               }
-       } else {
-               iov_p = count->iov;
-               iov_len = count->iov_count;
-       }
+       iov_p = count->iov;
+       iov_len = count->iov_count;
 
        while (total_tx < data) {
                tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1679,9 +1466,6 @@ static int iscsit_do_tx_data(
                                        tx_loop, total_tx, data);
        }
 
-       if (count->sync_and_steering)
-               total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
-
        return total_tx;
 }
 
@@ -1702,12 +1486,6 @@ int rx_data(
        c.data_length = data;
        c.type = ISCSI_RX_DATA;
 
-       if (conn->conn_ops->OFMarker &&
-          (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
-               if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
-                       return -1;
-       }
-
        return iscsit_do_rx_data(conn, &c);
 }
 
@@ -1728,12 +1506,6 @@ int tx_data(
        c.data_length = data;
        c.type = ISCSI_TX_DATA;
 
-       if (conn->conn_ops->IFMarker &&
-          (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
-               if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
-                       return -1;
-       }
-
        return iscsit_do_tx_data(conn, &c);
 }
 
index 89ae923..f04d4ef 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/ctype.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 
@@ -154,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
        return 0;
 }
 
+static void
+target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
+{
+       unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+       unsigned char *buf = buf_off;
+       int cnt = 0, next = 1;
+       /*
+        * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
+        * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
+        * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
+        * to complete the payload.  These are based on the VPD=0x80 PRODUCT SERIAL
+        * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
+        * per-device uniqueness.
+        */
+       while (*p != '\0') {
+               if (cnt >= 13)
+                       break;
+               if (!isxdigit(*p)) {
+                       p++;
+                       continue;
+               }
+               if (next != 0) {
+                       buf[cnt++] |= hex_to_bin(*p++);
+                       next = 0;
+               } else {
+                       buf[cnt] = hex_to_bin(*p++) << 4;
+                       next = 1;
+               }
+       }
+}
+
 /*
  * Device identification VPD, for a complete list of
  * DESIGNATOR TYPEs see spc4r17 Table 459.
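target_parse_naa_6h_vendor_specific(), added above, packs hexadecimal digits from the configured unit serial into nibbles while skipping non-hex characters (e.g. the dashes of a UUID-style serial); the first digit is OR-ed into the low nibble of a byte whose high nibble already carries part of the NAA designator, which is why the first store uses |=. A small worked example under that reading:

        /*
         * Worked example (illustrative serial "9f1c-03..."):
         *   '9' -> buf[0] |= 0x9      low nibble of the shared NAA byte, cnt = 1
         *   'f' -> buf[1]  = 0xf0     high nibble first
         *   '1' -> buf[1] |= 0x01     buf[1] == 0xf1, cnt = 2
         *   'c' -> buf[2]  = 0xc0
         *   '-' -> skipped by isxdigit()
         *   '0' -> buf[2] |= 0x00     buf[2] == 0xc0, cnt = 3
         *   ... and so on until cnt reaches 13 (buf[0]..buf[12]).
         */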
@@ -219,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
         * VENDOR_SPECIFIC_IDENTIFIER and
         * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
         */
-       buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
-       hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
+       target_parse_naa_6h_vendor_specific(dev, &buf[off]);
 
        len = 20;
        off = (len + 4);
index 8d0c58e..a4b0a8d 100644 (file)
@@ -977,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work)
 {
        struct se_device *dev = container_of(work, struct se_device,
                                        qf_work_queue);
+       LIST_HEAD(qf_cmd_list);
        struct se_cmd *cmd, *cmd_tmp;
 
        spin_lock_irq(&dev->qf_cmd_lock);
-       list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
+       list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
+       spin_unlock_irq(&dev->qf_cmd_lock);
 
+       list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
                list_del(&cmd->se_qf_node);
                atomic_dec(&dev->dev_qf_count);
                smp_mb__after_atomic_dec();
-               spin_unlock_irq(&dev->qf_cmd_lock);
 
                pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
                        " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -997,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work)
                 * has been added to head of queue
                 */
                transport_add_cmd_to_queue(cmd, cmd->t_state);
-
-               spin_lock_irq(&dev->qf_cmd_lock);
        }
-       spin_unlock_irq(&dev->qf_cmd_lock);
 }
 
 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
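The target_qf_do_work() rework above uses the common pattern for draining a spinlock-protected list: splice the whole list onto a local head while holding the lock once, then walk the private copy with the lock dropped (the per-entry work may sleep). A generic sketch using the names from the hunk:

        LIST_HEAD(local_list);
        struct se_cmd *cmd, *tmp;

        spin_lock_irq(&dev->qf_cmd_lock);
        list_splice_init(&dev->qf_cmd_list, &local_list);  /* steal entries, re-init source */
        spin_unlock_irq(&dev->qf_cmd_lock);

        list_for_each_entry_safe(cmd, tmp, &local_list, se_qf_node) {
                list_del(&cmd->se_qf_node);
                /* requeue/process cmd without holding qf_cmd_lock */
        }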
index bd4fe21..3749d8b 100644 (file)
@@ -98,8 +98,7 @@ struct ft_tpg {
        struct list_head list;          /* linkage in ft_lport_acl tpg_list */
        struct list_head lun_list;      /* head of LUNs */
        struct se_portal_group se_tpg;
-       struct task_struct *thread;     /* processing thread */
-       struct se_queue_obj qobj;       /* queue for processing thread */
+       struct workqueue_struct *workqueue;
 };
 
 struct ft_lport_acl {
@@ -110,16 +109,10 @@ struct ft_lport_acl {
        struct se_wwn fc_lport_wwn;
 };
 
-enum ft_cmd_state {
-       FC_CMD_ST_NEW = 0,
-       FC_CMD_ST_REJ
-};
-
 /*
  * Commands
  */
 struct ft_cmd {
-       enum ft_cmd_state state;
        u32 lun;                        /* LUN from request */
        struct ft_sess *sess;           /* session held for cmd */
        struct fc_seq *seq;             /* sequence in exchange mgr */
@@ -127,7 +120,7 @@ struct ft_cmd {
        struct fc_frame *req_frame;
        unsigned char *cdb;             /* pointer to CDB inside frame */
        u32 write_data_len;             /* data received on writes */
-       struct se_queue_req se_req;
+       struct work_struct work;
        /* Local sense buffer */
        unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
        u32 was_ddp_setup:1;            /* Set only if ddp is setup */
@@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);
 /*
  * other internal functions.
  */
-int ft_thread(void *);
 void ft_recv_req(struct ft_sess *, struct fc_frame *);
 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
 struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
index 5654dc2..80fbcde 100644 (file)
@@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
        int count;
 
        se_cmd = &cmd->se_cmd;
-       pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
-               caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
+       pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
+               caller, cmd, cmd->sess, cmd->seq, se_cmd);
        pr_debug("%s: cmd %p cdb %p\n",
                caller, cmd, cmd->cdb);
        pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
@@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
                16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
 }
 
-static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
-{
-       struct ft_tpg *tpg = sess->tport->tpg;
-       struct se_queue_obj *qobj = &tpg->qobj;
-       unsigned long flags;
-
-       qobj = &sess->tport->tpg->qobj;
-       spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
-       atomic_inc(&qobj->queue_cnt);
-       spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-       wake_up_process(tpg->thread);
-}
-
-static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
-{
-       unsigned long flags;
-       struct se_queue_req *qr;
-
-       spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       if (list_empty(&qobj->qobj_list)) {
-               spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-               return NULL;
-       }
-       qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
-       list_del(&qr->qr_list);
-       atomic_dec(&qobj->queue_cnt);
-       spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-       return container_of(qr, struct ft_cmd, se_req);
-}
-
 static void ft_free_cmd(struct ft_cmd *cmd)
 {
        struct fc_frame *fp;
@@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
 
 int ft_get_cmd_state(struct se_cmd *se_cmd)
 {
-       struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
-       return cmd->state;
+       return 0;
 }
 
 int ft_is_state_remove(struct se_cmd *se_cmd)
@@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
        return 0;
 }
 
+static void ft_send_work(struct work_struct *work);
+
 /*
  * Handle incoming FCP command.
  */
@@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
                goto busy;
        }
        cmd->req_frame = fp;            /* hold frame during cmd */
-       ft_queue_cmd(sess, cmd);
+
+       INIT_WORK(&cmd->work, ft_send_work);
+       queue_work(sess->tport->tpg->workqueue, &cmd->work);
        return;
 
 busy:
@@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
 /*
  * Send new command to target.
  */
-static void ft_send_cmd(struct ft_cmd *cmd)
+static void ft_send_work(struct work_struct *work)
 {
+       struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
        struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
        struct se_cmd *se_cmd;
        struct fcp_cmnd *fcp;
-       int data_dir;
+       int data_dir = 0;
        u32 data_len;
        int task_attr;
        int ret;
@@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd)
 err:
        ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
 }
-
-/*
- * Handle request in the command thread.
- */
-static void ft_exec_req(struct ft_cmd *cmd)
-{
-       pr_debug("cmd state %x\n", cmd->state);
-       switch (cmd->state) {
-       case FC_CMD_ST_NEW:
-               ft_send_cmd(cmd);
-               break;
-       default:
-               break;
-       }
-}
-
-/*
- * Processing thread.
- * Currently one thread per tpg.
- */
-int ft_thread(void *arg)
-{
-       struct ft_tpg *tpg = arg;
-       struct se_queue_obj *qobj = &tpg->qobj;
-       struct ft_cmd *cmd;
-
-       while (!kthread_should_stop()) {
-               schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
-               if (kthread_should_stop())
-                       goto out;
-
-               cmd = ft_dequeue_cmd(qobj);
-               if (cmd)
-                       ft_exec_req(cmd);
-       }
-
-out:
-       return 0;
-}
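The tcm_fc hunks in this and the following file drop the per-tpg dispatch thread and se_queue_obj in favour of a workqueue: each incoming command embeds a work_struct, is queued with queue_work(), and ft_send_work() runs in process context. The essential shape of the conversion, as a sketch (the queue itself comes from the alloc_workqueue("tcm_fc", 0, 1) call in the tpg setup hunk further down):

        struct ft_cmd {
                struct work_struct work;
                /* ... per-command state ... */
        };

        static void ft_send_work(struct work_struct *work)
        {
                struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
                /* handle the command in workqueue (process) context */
        }

        /* receive path */
        INIT_WORK(&cmd->work, ft_send_work);
        queue_work(sess->tport->tpg->workqueue, &cmd->work);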
index b15879d..8fa39b7 100644 (file)
@@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg(
        tpg->index = index;
        tpg->lport_acl = lacl;
        INIT_LIST_HEAD(&tpg->lun_list);
-       transport_init_queue_obj(&tpg->qobj);
 
        ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
                                tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg(
                return NULL;
        }
 
-       tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
-       if (IS_ERR(tpg->thread)) {
+       tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
+       if (!tpg->workqueue) {
                kfree(tpg);
                return NULL;
        }
@@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
        pr_debug("del tpg %s\n",
                    config_item_name(&tpg->se_tpg.tpg_group.cg_item));
 
-       kthread_stop(tpg->thread);
+       destroy_workqueue(tpg->workqueue);
 
        /* Wait for sessions to be freed thru RCU, for BUG_ON below */
        synchronize_rcu();
index c37f4cd..d35ea5a 100644 (file)
@@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
        if (cmd->was_ddp_setup) {
                BUG_ON(!ep);
                BUG_ON(!lport);
-       }
-
-       /*
-        * Doesn't expect payload if DDP is setup. Payload
-        * is expected to be copied directly to user buffers
-        * due to DDP (Large Rx offload),
-        */
-       buf = fc_frame_payload_get(fp, 1);
-       if (buf)
-               pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+               /*
+                * Since DDP (Large Rx offload) was setup for this request,
+                * Since DDP (Large Rx offload) was set up for this request,
+                * the payload is expected to be copied directly to user buffers.
+               buf = fc_frame_payload_get(fp, 1);
+               if (buf)
+                       pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
                                "cmd->sg_cnt 0x%x. DDP was setup"
                                " hence not expected to receive frame with "
-                               "payload, Frame will be dropped if "
-                               "'Sequence Initiative' bit in f_ctl is "
+                               "payload, Frame will be dropped if "
+                               "'Sequence Initiative' bit in f_ctl is "
                                "not set\n", __func__, ep->xid, f_ctl,
                                cmd->sg, cmd->sg_cnt);
-       /*
-        * Invalidate HW DDP context if it was setup for respective
-        * command. Invalidation of HW DDP context is requited in both
-        * situation (success and error). 
-        */
-       ft_invl_hw_context(cmd);
+               /*
+                * Invalidate the HW DDP context if it was set up for the respective
+                * command. Invalidation of the HW DDP context is required in both
+                * situations (success and error).
+                */
+               ft_invl_hw_context(cmd);
 
-       /*
-        * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
-        * write data frame is received successfully where payload is
-        * posted directly to user buffer and only the last frame's
-        * header is posted in receive queue.
-        *
-        * If "Sequence Initiative (TSI)" bit is not set, means error
-        * condition w.r.t. DDP, hence drop the packet and let explict
-        * ABORTS from other end of exchange timer trigger the recovery.
-        */
-       if (f_ctl & FC_FC_SEQ_INIT)
-               goto last_frame;
-       else
-               goto drop;
+               /*
+                * If the "Sequence Initiative (TSI)" bit is set in f_ctl, the last
+                * write data frame was received successfully: the payload was
+                * posted directly to the user buffer and only the last frame's
+                * header is posted in the receive queue.
+                *
+                * If the "Sequence Initiative (TSI)" bit is not set, it indicates an
+                * error condition w.r.t. DDP, hence drop the packet and let explicit
+                * ABORTS from the other end of the exchange timer trigger the recovery.
+                */
+               if (f_ctl & FC_FC_SEQ_INIT)
+                       goto last_frame;
+               else
+                       goto drop;
+       }
 
        rel_off = ntohl(fh->fh_parm_offset);
        frame_len = fr_len(fp);
index 225123b..58be715 100644 (file)
@@ -4450,7 +4450,7 @@ static int __init rs_init(void)
 
 #if defined(CONFIG_ETRAX_RS485)
 #if defined(CONFIG_ETRAX_RS485_ON_PA)
-       if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit,
+       if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
                        rs485_pa_bit)) {
                printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
                        "RS485 pin\n");
@@ -4459,7 +4459,7 @@ static int __init rs_init(void)
        }
 #endif
 #if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
-       if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit,
+       if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
                        rs485_port_g_bit)) {
                printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
                        "RS485 pin\n");
index 1e96d1f..723f823 100644 (file)
@@ -761,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
        memset(buf, 0, retval);
        status = 0;
 
-       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC;
+       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
 
        spin_lock_irqsave(&xhci->lock, flags);
        /* For each port, did anything change?  If so, set that bit in buf. */
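The one-line change above adds the warm-reset-change bit to the set of PORTSC change flags considered by the root-hub status poll, so a completed USB3 warm reset is reported as a port status change. Roughly, the bits involved are:

        /* PORTSC change bits checked when building the hub status bitmap (illustrative) */
        u32 mask = PORT_CSC    /* connect status change        */
                 | PORT_PEC    /* port enabled/disabled change */
                 | PORT_OCC    /* over-current change          */
                 | PORT_PLC    /* port link state change       */
                 | PORT_WRC;   /* warm port reset change (USB3), newly included */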
index 54139a2..952e2de 100644 (file)
@@ -1934,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
        int status = -EINPROGRESS;
        struct urb_priv *urb_priv;
        struct xhci_ep_ctx *ep_ctx;
+       struct list_head *tmp;
        u32 trb_comp_code;
        int ret = 0;
+       int td_num = 0;
 
        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        xdev = xhci->devs[slot_id];
@@ -1957,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                return -ENODEV;
        }
 
+       /* Count the number of TDs currently on the ring if ep->skip is set */
+       if (ep->skip) {
+               list_for_each(tmp, &ep_ring->td_list)
+                       td_num++;
+       }
+
        event_dma = le64_to_cpu(event->buffer);
        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
        /* Look for common error cases */
@@ -2068,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                        goto cleanup;
                }
 
+               /* If ep->skip is set and td_num is 0, all TDs on the ep ring were skipped */
+               if (ep->skip && td_num == 0) {
+                       ep->skip = false;
+                       xhci_dbg(xhci, "All tds on the ep_ring skipped. "
+                                               "Clear skip flag.\n");
+                       ret = 0;
+                       goto cleanup;
+               }
+
                td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+               if (ep->skip)
+                       td_num--;
 
                /* Is this a TRB in the currently executing TD? */
                event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
index 80d292f..7363c1b 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/backlight.h>
 #endif
 
-static const char const *backlight_types[] = {
+static const char *const backlight_types[] = {
        [BACKLIGHT_RAW] = "raw",
        [BACKLIGHT_PLATFORM] = "platform",
        [BACKLIGHT_FIRMWARE] = "firmware",
index da70f5c..7523719 100644 (file)
@@ -54,7 +54,7 @@
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.
  */
-static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static DEFINE_MUTEX(irq_mapping_update_lock);
 
 static LIST_HEAD(xen_irq_list_head);
 
@@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
        int irq = -1;
        struct physdev_irq irq_op;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        irq = find_irq_by_gsi(gsi);
        if (irq != -1) {
@@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                                handle_edge_irq, name);
 
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 
        return irq;
 }
@@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 {
        int irq, ret;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        irq = xen_allocate_irq_dynamic();
        if (irq == -1)
@@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
        if (ret < 0)
                goto error_irq;
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
        return irq;
 error_irq:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
        xen_free_irq(irq);
        return -1;
 }
@@ -740,7 +740,7 @@ int xen_destroy_irq(int irq)
        struct irq_info *info = info_for_irq(irq);
        int rc = -ENOENT;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        desc = irq_to_desc(irq);
        if (!desc)
@@ -766,7 +766,7 @@ int xen_destroy_irq(int irq)
        xen_free_irq(irq);
 
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
        return rc;
 }
 
@@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq)
 
        struct irq_info *info;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info == NULL || info->type != IRQT_PIRQ)
@@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq)
        }
        irq = -1;
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 
        return irq;
 }
@@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 {
        int irq;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        irq = evtchn_to_irq[evtchn];
 
@@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
        }
 
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 
        return irq;
 }
@@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        irq = per_cpu(ipi_to_irq, cpu)[ipi];
 
@@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
        }
 
  out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
        return irq;
 }
 
@@ -878,7 +878,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        irq = per_cpu(virq_to_irq, cpu)[virq];
 
@@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
        }
 
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 
        return irq;
 }
@@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq)
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
@@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq)
 
        xen_free_irq(irq);
 
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 }
 
 int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
           will also be masked. */
        disable_irq(irq);
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
@@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 
        xen_irq_info_evtchn_init(irq, evtchn);
 
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 
        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));
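Converting irq_mapping_update_lock from a spinlock to a mutex suggests the sections it protects can sleep (for example irq-descriptor allocation or other blocking helpers), which is not permitted under a spinlock. The mutex form of the same critical section, as a sketch:

        static DEFINE_MUTEX(irq_mapping_update_lock);

        mutex_lock(&irq_mapping_update_lock);
        /* ... mapping updates that may block, e.g. GFP_KERNEL allocations ... */
        mutex_unlock(&irq_mapping_update_lock);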
index 46ce357..410ffd6 100644 (file)
@@ -54,9 +54,9 @@ extern struct kmem_cache *v9fs_inode_cache;
 
 struct inode *v9fs_alloc_inode(struct super_block *sb);
 void v9fs_destroy_inode(struct inode *inode);
-struct inode *v9fs_get_inode(struct super_block *sb, int mode);
+struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t);
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
-                   struct inode *inode, int mode);
+                   struct inode *inode, int mode, dev_t);
 void v9fs_evict_inode(struct inode *inode);
 ino_t v9fs_qid2ino(struct p9_qid *qid);
 void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
@@ -83,4 +83,6 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
        v9inode->cache_validity |= V9FS_INO_INVALID_ATTR;
        return;
 }
+
+int v9fs_open_to_dotl_flags(int flags);
 #endif
index 3c173fc..62857a8 100644 (file)
@@ -65,7 +65,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
        v9inode = V9FS_I(inode);
        v9ses = v9fs_inode2v9ses(inode);
        if (v9fs_proto_dotl(v9ses))
-               omode = file->f_flags;
+               omode = v9fs_open_to_dotl_flags(file->f_flags);
        else
                omode = v9fs_uflags2omode(file->f_flags,
                                        v9fs_proto_dotu(v9ses));
@@ -169,7 +169,18 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
 
        /* convert posix lock to p9 tlock args */
        memset(&flock, 0, sizeof(flock));
-       flock.type = fl->fl_type;
+       /* map the lock type */
+       switch (fl->fl_type) {
+       case F_RDLCK:
+               flock.type = P9_LOCK_TYPE_RDLCK;
+               break;
+       case F_WRLCK:
+               flock.type = P9_LOCK_TYPE_WRLCK;
+               break;
+       case F_UNLCK:
+               flock.type = P9_LOCK_TYPE_UNLCK;
+               break;
+       }
        flock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                flock.length = 0;
@@ -245,7 +256,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
 
        /* convert posix lock to p9 tgetlock args */
        memset(&glock, 0, sizeof(glock));
-       glock.type = fl->fl_type;
+       glock.type  = P9_LOCK_TYPE_UNLCK;
        glock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                glock.length = 0;
@@ -257,17 +268,26 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
        res = p9_client_getlock_dotl(fid, &glock);
        if (res < 0)
                return res;
-       if (glock.type != F_UNLCK) {
-               fl->fl_type = glock.type;
+       /* map 9p lock type to os lock type */
+       switch (glock.type) {
+       case P9_LOCK_TYPE_RDLCK:
+               fl->fl_type = F_RDLCK;
+               break;
+       case P9_LOCK_TYPE_WRLCK:
+               fl->fl_type = F_WRLCK;
+               break;
+       case P9_LOCK_TYPE_UNLCK:
+               fl->fl_type = F_UNLCK;
+               break;
+       }
+       if (glock.type != P9_LOCK_TYPE_UNLCK) {
                fl->fl_start = glock.start;
                if (glock.length == 0)
                        fl->fl_end = OFFSET_MAX;
                else
                        fl->fl_end = glock.start + glock.length - 1;
                fl->fl_pid = glock.proc_id;
-       } else
-               fl->fl_type = F_UNLCK;
-
+       }
        return res;
 }
 
index 8bb5507..e3c03db 100644 (file)
@@ -95,15 +95,18 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
 /**
  * p9mode2unixmode- convert plan9 mode bits to unix mode bits
  * @v9ses: v9fs session information
- * @mode: mode to convert
+ * @stat: p9_wstat from which mode need to be derived
+ * @rdev: major number, minor number in case of device files.
  *
  */
-
-static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
+static int p9mode2unixmode(struct v9fs_session_info *v9ses,
+                          struct p9_wstat *stat, dev_t *rdev)
 {
        int res;
+       int mode = stat->mode;
 
-       res = mode & 0777;
+       res = mode & S_IALLUGO;
+       *rdev = 0;
 
        if ((mode & P9_DMDIR) == P9_DMDIR)
                res |= S_IFDIR;
@@ -116,9 +119,26 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
                 && (v9ses->nodev == 0))
                res |= S_IFIFO;
        else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses))
-                && (v9ses->nodev == 0))
-               res |= S_IFBLK;
-       else
+                && (v9ses->nodev == 0)) {
+               char type = 0, ext[32];
+               int major = -1, minor = -1;
+
+               strncpy(ext, stat->extension, sizeof(ext));
+               sscanf(ext, "%c %u %u", &type, &major, &minor);
+               switch (type) {
+               case 'c':
+                       res |= S_IFCHR;
+                       break;
+               case 'b':
+                       res |= S_IFBLK;
+                       break;
+               default:
+                       P9_DPRINTK(P9_DEBUG_ERROR,
+                               "Unknown special type %c %s\n", type,
+                               stat->extension);
+               };
+               *rdev = MKDEV(major, minor);
+       } else
                res |= S_IFREG;
 
        if (v9fs_proto_dotu(v9ses)) {
@@ -131,7 +151,6 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
                if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
                        res |= S_ISVTX;
        }
-
        return res;
 }
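p9mode2unixmode() now also derives the device number for 9P2000.u special files from the stat extension string, which encodes the type character and major/minor numbers as text. A worked example under that convention:

        /*
         * 9P2000.u device extensions as parsed by the sscanf() above:
         *   stat->extension = "c 4 64"  ->  res |= S_IFCHR, *rdev = MKDEV(4, 64)
         *   stat->extension = "b 8 0"   ->  res |= S_IFBLK, *rdev = MKDEV(8, 0)
         * An unrecognized type character is reported via P9_DPRINTK().
         */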
 
@@ -242,13 +261,13 @@ void v9fs_destroy_inode(struct inode *inode)
 }
 
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
-                   struct inode *inode, int mode)
+                   struct inode *inode, int mode, dev_t rdev)
 {
        int err = 0;
 
        inode_init_owner(inode, NULL, mode);
        inode->i_blocks = 0;
-       inode->i_rdev = 0;
+       inode->i_rdev = rdev;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_mapping->a_ops = &v9fs_addr_operations;
 
@@ -335,7 +354,7 @@ error:
  *
  */
 
-struct inode *v9fs_get_inode(struct super_block *sb, int mode)
+struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev)
 {
        int err;
        struct inode *inode;
@@ -348,7 +367,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
                P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
                return ERR_PTR(-ENOMEM);
        }
-       err = v9fs_init_inode(v9ses, inode, mode);
+       err = v9fs_init_inode(v9ses, inode, mode, rdev);
        if (err) {
                iput(inode);
                return ERR_PTR(err);
@@ -435,11 +454,12 @@ void v9fs_evict_inode(struct inode *inode)
 static int v9fs_test_inode(struct inode *inode, void *data)
 {
        int umode;
+       dev_t rdev;
        struct v9fs_inode *v9inode = V9FS_I(inode);
        struct p9_wstat *st = (struct p9_wstat *)data;
        struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
 
-       umode = p9mode2unixmode(v9ses, st->mode);
+       umode = p9mode2unixmode(v9ses, st, &rdev);
        /* don't match inode of different type */
        if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
                return 0;
@@ -473,6 +493,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
                                   struct p9_wstat *st,
                                   int new)
 {
+       dev_t rdev;
        int retval, umode;
        unsigned long i_ino;
        struct inode *inode;
@@ -496,8 +517,8 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
         * later.
         */
        inode->i_ino = i_ino;
-       umode = p9mode2unixmode(v9ses, st->mode);
-       retval = v9fs_init_inode(v9ses, inode, umode);
+       umode = p9mode2unixmode(v9ses, st, &rdev);
+       retval = v9fs_init_inode(v9ses, inode, umode, rdev);
        if (retval)
                goto error;
 
@@ -531,6 +552,19 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
        return inode;
 }
 
+/**
+ * v9fs_at_to_dotl_flags - convert Linux-specific AT flags to
+ * Plan 9 AT flags.
+ * @flags: flags to convert
+ */
+static int v9fs_at_to_dotl_flags(int flags)
+{
+       int rflags = 0;
+       if (flags & AT_REMOVEDIR)
+               rflags |= P9_DOTL_AT_REMOVEDIR;
+       return rflags;
+}
+
 /**
  * v9fs_remove - helper function to remove files and directories
  * @dir: directory inode that is being deleted
@@ -558,7 +592,8 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
                return retval;
        }
        if (v9fs_proto_dotl(v9ses))
-               retval = p9_client_unlinkat(dfid, dentry->d_name.name, flags);
+               retval = p9_client_unlinkat(dfid, dentry->d_name.name,
+                                           v9fs_at_to_dotl_flags(flags));
        if (retval == -EOPNOTSUPP) {
                /* Try the one based on path */
                v9fid = v9fs_fid_clone(dentry);
@@ -645,13 +680,11 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
                P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       d_instantiate(dentry, inode);
        err = v9fs_fid_add(dentry, fid);
        if (err < 0)
                goto error;
-
+       d_instantiate(dentry, inode);
        return ofid;
-
 error:
        if (ofid)
                p9_client_clunk(ofid);
@@ -792,6 +825,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
                                      struct nameidata *nameidata)
 {
+       struct dentry *res;
        struct super_block *sb;
        struct v9fs_session_info *v9ses;
        struct p9_fid *dfid, *fid;
@@ -823,22 +857,35 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 
                return ERR_PTR(result);
        }
-
-       inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+       /*
+        * Make sure we don't use the wrong inode due to a parallel
+        * unlink. In cached mode, create calls request a new inode,
+        * but with cache disabled, lookup should do this.
+        */
+       if (v9ses->cache)
+               inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+       else
+               inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
        if (IS_ERR(inode)) {
                result = PTR_ERR(inode);
                inode = NULL;
                goto error;
        }
-
        result = v9fs_fid_add(dentry, fid);
        if (result < 0)
                goto error_iput;
-
 inst_out:
-       d_add(dentry, inode);
-       return NULL;
-
+       /*
+        * If we had a rename on the server and a parallel lookup
+        * for the new name, then make sure we instantiate with
+        * the new name. I.e. we looked up a/b while, on the server,
+        * somebody moved b under k and the client did a parallel
+        * lookup for k/b.
+        */
+       res = d_materialise_unique(dentry, inode);
+       if (!IS_ERR(res))
+               return res;
+       result = PTR_ERR(res);
 error_iput:
        iput(inode);
 error:
@@ -1002,7 +1049,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
                return PTR_ERR(st);
 
        v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
-               generic_fillattr(dentry->d_inode, stat);
+       generic_fillattr(dentry->d_inode, stat);
 
        p9stat_free(st);
        kfree(st);
@@ -1086,6 +1133,7 @@ void
 v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
        struct super_block *sb)
 {
+       mode_t mode;
        char ext[32];
        char tag_name[14];
        unsigned int i_nlink;
@@ -1121,31 +1169,9 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
                                inode->i_nlink = i_nlink;
                }
        }
-       inode->i_mode = p9mode2unixmode(v9ses, stat->mode);
-       if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) {
-               char type = 0;
-               int major = -1;
-               int minor = -1;
-
-               strncpy(ext, stat->extension, sizeof(ext));
-               sscanf(ext, "%c %u %u", &type, &major, &minor);
-               switch (type) {
-               case 'c':
-                       inode->i_mode &= ~S_IFBLK;
-                       inode->i_mode |= S_IFCHR;
-                       break;
-               case 'b':
-                       break;
-               default:
-                       P9_DPRINTK(P9_DEBUG_ERROR,
-                               "Unknown special type %c %s\n", type,
-                               stat->extension);
-               };
-               inode->i_rdev = MKDEV(major, minor);
-               init_special_inode(inode, inode->i_mode, inode->i_rdev);
-       } else
-               inode->i_rdev = 0;
-
+       mode = stat->mode & S_IALLUGO;
+       mode |= inode->i_mode & ~S_IALLUGO;
+       inode->i_mode = mode;
        i_size_write(inode, stat->length);
 
        /* not real number of blocks, but 512 byte ones ... */
@@ -1411,6 +1437,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
 
 int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
 {
+       int umode;
+       dev_t rdev;
        loff_t i_size;
        struct p9_wstat *st;
        struct v9fs_session_info *v9ses;
@@ -1419,6 +1447,12 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
        st = p9_client_stat(fid);
        if (IS_ERR(st))
                return PTR_ERR(st);
+       /*
+        * Don't update inode if the file type is different
+        */
+       umode = p9mode2unixmode(v9ses, st, &rdev);
+       if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
+               goto out;
 
        spin_lock(&inode->i_lock);
        /*
@@ -1430,6 +1464,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
        if (v9ses->cache)
                inode->i_size = i_size;
        spin_unlock(&inode->i_lock);
+out:
        p9stat_free(st);
        kfree(st);
        return 0;
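For illustration, the refresh path above now touches only the permission bits and leaves the file type (and any device numbers set at inode-creation time) alone; v9fs_refresh_inode() additionally skips the update entirely when the reported type differs from the cached one. A minimal userspace sketch of the bit arithmetic, assuming the kernel's S_IALLUGO mask of 07777 (illustrative only, not part of the patch):

#include <stdio.h>
#include <sys/stat.h>

#define S_IALLUGO 07777   /* assumed to match the kernel mask: suid/sgid/sticky + rwxrwxrwx */

int main(void)
{
        mode_t cached   = S_IFCHR | 0600;  /* what the inode already holds */
        mode_t reported = 0644;            /* permission bits reported by the server */
        mode_t next     = (reported & S_IALLUGO) | (cached & ~S_IALLUGO);

        printf("%o\n", next);              /* prints 20644: type preserved, permissions refreshed */
        return 0;
}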
index b6c8ed2..aded79f 100644
@@ -153,7 +153,8 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
         * later.
         */
        inode->i_ino = i_ino;
-       retval = v9fs_init_inode(v9ses, inode, st->st_mode);
+       retval = v9fs_init_inode(v9ses, inode,
+                                st->st_mode, new_decode_dev(st->st_rdev));
        if (retval)
                goto error;
 
@@ -190,6 +191,58 @@ v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
        return inode;
 }
 
+struct dotl_openflag_map {
+       int open_flag;
+       int dotl_flag;
+};
+
+static int v9fs_mapped_dotl_flags(int flags)
+{
+       int i;
+       int rflags = 0;
+       struct dotl_openflag_map dotl_oflag_map[] = {
+               { O_CREAT,      P9_DOTL_CREATE },
+               { O_EXCL,       P9_DOTL_EXCL },
+               { O_NOCTTY,     P9_DOTL_NOCTTY },
+               { O_TRUNC,      P9_DOTL_TRUNC },
+               { O_APPEND,     P9_DOTL_APPEND },
+               { O_NONBLOCK,   P9_DOTL_NONBLOCK },
+               { O_DSYNC,      P9_DOTL_DSYNC },
+               { FASYNC,       P9_DOTL_FASYNC },
+               { O_DIRECT,     P9_DOTL_DIRECT },
+               { O_LARGEFILE,  P9_DOTL_LARGEFILE },
+               { O_DIRECTORY,  P9_DOTL_DIRECTORY },
+               { O_NOFOLLOW,   P9_DOTL_NOFOLLOW },
+               { O_NOATIME,    P9_DOTL_NOATIME },
+               { O_CLOEXEC,    P9_DOTL_CLOEXEC },
+               { O_SYNC,       P9_DOTL_SYNC},
+       };
+       for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
+               if (flags & dotl_oflag_map[i].open_flag)
+                       rflags |= dotl_oflag_map[i].dotl_flag;
+       }
+       return rflags;
+}
+
+/**
+ * v9fs_open_to_dotl_flags - convert Linux specific open flags to
+ * 9P2000.L open flags.
+ * @flags: flags to convert
+ */
+int v9fs_open_to_dotl_flags(int flags)
+{
+       int rflags = 0;
+
+       /*
+        * We have the same bits for P9_DOTL_RDONLY, P9_DOTL_WRONLY,
+        * P9_DOTL_RDWR and P9_DOTL_NOACCESS
+        */
+       rflags |= flags & O_ACCMODE;
+       rflags |= v9fs_mapped_dotl_flags(flags);
+
+       return rflags;
+}
+
 /**
  * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
  * @dir: directory inode that is being created
@@ -258,7 +311,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
                           "Failed to get acl values in creat %d\n", err);
                goto error;
        }
-       err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
+       err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
+                                   mode, gid, &qid);
        if (err < 0) {
                P9_DPRINTK(P9_DEBUG_VFS,
                                "p9_client_open_dotl failed in creat %d\n",
@@ -281,10 +335,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
                P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       d_instantiate(dentry, inode);
        err = v9fs_fid_add(dentry, fid);
        if (err < 0)
                goto error;
+       d_instantiate(dentry, inode);
 
        /* Now set the ACL based on the default value */
        v9fs_set_create_acl(dentry, &dacl, &pacl);
@@ -403,10 +457,10 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                                err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /*
@@ -414,7 +468,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                 * inode with stat. We need to get an inode
                 * so that we can set the acl with dentry
                 */
-               inode = v9fs_get_inode(dir->i_sb, mode);
+               inode = v9fs_get_inode(dir->i_sb, mode, 0);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -540,6 +594,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
 void
 v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
 {
+       mode_t mode;
        struct v9fs_inode *v9inode = V9FS_I(inode);
 
        if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
@@ -552,11 +607,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
                inode->i_uid = stat->st_uid;
                inode->i_gid = stat->st_gid;
                inode->i_nlink = stat->st_nlink;
-               inode->i_mode = stat->st_mode;
-               inode->i_rdev = new_decode_dev(stat->st_rdev);
 
-               if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
-                       init_special_inode(inode, inode->i_mode, inode->i_rdev);
+               mode = stat->st_mode & S_IALLUGO;
+               mode |= inode->i_mode & ~S_IALLUGO;
+               inode->i_mode = mode;
 
                i_size_write(inode, stat->st_size);
                inode->i_blocks = stat->st_blocks;
@@ -657,14 +711,14 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
                                        err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /* Not in cached mode. No need to populate inode with stat */
-               inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
+               inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -810,17 +864,17 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
                                err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /*
                 * Not in cached mode. No need to populate inode with stat.
                 * socket syscall returns a fd, so we need to instantiate
                 */
-               inode = v9fs_get_inode(dir->i_sb, mode);
+               inode = v9fs_get_inode(dir->i_sb, mode, rdev);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -886,6 +940,11 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
        st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
        if (IS_ERR(st))
                return PTR_ERR(st);
+       /*
+        * Don't update inode if the file type is different
+        */
+       if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
+               goto out;
 
        spin_lock(&inode->i_lock);
        /*
@@ -897,6 +956,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
        if (v9ses->cache)
                inode->i_size = i_size;
        spin_unlock(&inode->i_lock);
+out:
        kfree(st);
        return 0;
 }
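The v9fs_open_to_dotl_flags() helper added above translates Linux open flags into the protocol-defined P9_DOTL_* values (declared in the include/net/9p/9p.h hunk further down) instead of sending the raw O_* bits to the server; the access mode passes straight through because the low two bits coincide. A small standalone sketch of the intended result, with the three constants copied from that hunk (illustrative only, not part of the patch):

#include <fcntl.h>
#include <stdio.h>

#define P9_DOTL_RDWR   00000002
#define P9_DOTL_CREATE 00000100
#define P9_DOTL_TRUNC  00001000

int main(void)
{
        /* what v9fs_open_to_dotl_flags(O_RDWR | O_CREAT | O_TRUNC) is meant to produce */
        int dotl = (O_RDWR & O_ACCMODE) | P9_DOTL_CREATE | P9_DOTL_TRUNC;

        printf("%o\n", dotl);   /* 1102: protocol bits, independent of the client's O_* encoding */
        return 0;
}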
index feef6cd..c70251d 100644
@@ -149,7 +149,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
        else
                sb->s_d_op = &v9fs_dentry_operations;
 
-       inode = v9fs_get_inode(sb, S_IFDIR | mode);
+       inode = v9fs_get_inode(sb, S_IFDIR | mode, 0);
        if (IS_ERR(inode)) {
                retval = PTR_ERR(inode);
                goto release_sb;
index ff77262..95f786e 100644
@@ -1429,6 +1429,11 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                WARN_ON_ONCE(bdev->bd_holders);
                sync_blockdev(bdev);
                kill_bdev(bdev);
+               /* ->release can cause the old bdi to disappear,
+                * so must switch it out first
+                */
+               bdev_inode_switch_bdi(bdev->bd_inode,
+                                       &default_backing_dev_info);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
@@ -1442,8 +1447,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                disk_put_part(bdev->bd_part);
                bdev->bd_part = NULL;
                bdev->bd_disk = NULL;
-               bdev_inode_switch_bdi(bdev->bd_inode,
-                                       &default_backing_dev_info);
                if (bdev != bdev->bd_contains)
                        victim = bdev->bd_contains;
                bdev->bd_contains = NULL;
index 502b9e9..d9f99a1 100644
@@ -176,7 +176,11 @@ static inline u64 btrfs_ino(struct inode *inode)
 {
        u64 ino = BTRFS_I(inode)->location.objectid;
 
-       if (ino <= BTRFS_FIRST_FREE_OBJECTID)
+       /*
+        * !ino: btree_inode
+        * type == BTRFS_ROOT_ITEM_KEY: subvol dir
+        */
+       if (!ino || BTRFS_I(inode)->location.type == BTRFS_ROOT_ITEM_KEY)
                ino = inode->i_ino;
        return ino;
 }
index b910694..a1cb782 100644
@@ -183,8 +183,10 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
         * read from the commit root and sidestep a nasty deadlock
         * between reading the free space cache and updating the csum tree.
         */
-       if (btrfs_is_free_space_inode(root, inode))
+       if (btrfs_is_free_space_inode(root, inode)) {
                path->search_commit_root = 1;
+               path->skip_locking = 1;
+       }
 
        disk_bytenr = (u64)bio->bi_sector << 9;
        if (dio)
index e7872e4..a381cd2 100644
@@ -1075,12 +1075,6 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
 
-       if (start_pos > inode->i_size) {
-               err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
-               if (err)
-                       return err;
-       }
-
 again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
@@ -1338,6 +1332,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        loff_t *ppos = &iocb->ki_pos;
+       u64 start_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
        size_t count, ocount;
@@ -1386,6 +1381,15 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        file_update_time(file);
        BTRFS_I(inode)->sequence++;
 
+       start_pos = round_down(pos, root->sectorsize);
+       if (start_pos > i_size_read(inode)) {
+               err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
+               if (err) {
+                       mutex_unlock(&inode->i_mutex);
+                       goto out;
+               }
+       }
+
        if (unlikely(file->f_flags & O_DIRECT)) {
                num_written = __btrfs_direct_write(iocb, iov, nr_segs,
                                                   pos, ppos, count, ocount);
@@ -1813,6 +1817,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
                goto out;
        case SEEK_DATA:
        case SEEK_HOLE:
+               if (offset >= i_size_read(inode)) {
+                       mutex_unlock(&inode->i_mutex);
+                       return -ENXIO;
+               }
+
                ret = find_desired_extent(inode, &offset, origin);
                if (ret) {
                        mutex_unlock(&inode->i_mutex);
@@ -1821,11 +1830,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
        }
 
        if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
-               ret = -EINVAL;
+               offset = -EINVAL;
                goto out;
        }
        if (offset > inode->i_sb->s_maxbytes) {
-               ret = -EINVAL;
+               offset = -EINVAL;
                goto out;
        }
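The llseek hunk above changes two things: SEEK_DATA/SEEK_HOLE at or beyond i_size now fails with -ENXIO, and the range checks return the error through offset (the value the function actually returns) rather than the unused ret. Seen from userspace that behaves roughly like this sketch (hypothetical descriptor and size, not part of the patch):

#define _GNU_SOURCE             /* SEEK_DATA on newer glibc */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#ifndef SEEK_DATA
#define SEEK_DATA 3             /* matches the kernel's definition */
#endif

/* Hypothetical illustration: fd names an open btrfs file of length file_size. */
static void probe_past_eof(int fd, off_t file_size)
{
        off_t where = lseek(fd, file_size, SEEK_DATA);
        if (where == (off_t)-1 && errno == ENXIO)
                printf("no data at or after offset %lld\n", (long long)file_size);
}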
 
index 6a265b9..41ac927 100644
@@ -190,9 +190,11 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct inode *inode)
 {
+       struct btrfs_block_rsv *rsv;
        loff_t oldsize;
        int ret = 0;
 
+       rsv = trans->block_rsv;
        trans->block_rsv = root->orphan_block_rsv;
        ret = btrfs_block_rsv_check(trans, root,
                                    root->orphan_block_rsv,
@@ -210,6 +212,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
         */
        ret = btrfs_truncate_inode_items(trans, root, inode,
                                         0, BTRFS_EXTENT_DATA_KEY);
+
+       trans->block_rsv = rsv;
        if (ret) {
                WARN_ON(1);
                return ret;
index 0ccc743..b2d004a 100644
@@ -1786,7 +1786,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                          &ordered_extent->list);
 
        ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
-       if (!ret) {
+       if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
                ret = btrfs_update_inode(trans, root, inode);
                BUG_ON(ret);
        }
@@ -3510,15 +3510,19 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        err = btrfs_drop_extents(trans, inode, cur_offset,
                                                 cur_offset + hole_size,
                                                 &hint_byte, 1);
-                       if (err)
+                       if (err) {
+                               btrfs_end_transaction(trans, root);
                                break;
+                       }
 
                        err = btrfs_insert_file_extent(trans, root,
                                        btrfs_ino(inode), cur_offset, 0,
                                        0, hole_size, 0, hole_size,
                                        0, 0, 0);
-                       if (err)
+                       if (err) {
+                               btrfs_end_transaction(trans, root);
                                break;
+                       }
 
                        btrfs_drop_extent_cache(inode, hole_start,
                                        last_byte - 1, 0);
@@ -3952,7 +3956,6 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                         struct btrfs_root *root, int *new)
 {
        struct inode *inode;
-       int bad_inode = 0;
 
        inode = btrfs_iget_locked(s, location->objectid, root);
        if (!inode)
@@ -3968,15 +3971,12 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                        if (new)
                                *new = 1;
                } else {
-                       bad_inode = 1;
+                       unlock_new_inode(inode);
+                       iput(inode);
+                       inode = ERR_PTR(-ESTALE);
                }
        }
 
-       if (bad_inode) {
-               iput(inode);
-               inode = ERR_PTR(-ESTALE);
-       }
-
        return inode;
 }
 
@@ -4018,7 +4018,8 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
                memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
                kfree(dentry->d_fsdata);
                dentry->d_fsdata = NULL;
-               d_clear_need_lookup(dentry);
+               /* This thing is hashed, drop it for now */
+               d_drop(dentry);
        } else {
                ret = btrfs_inode_by_name(dir, dentry, &location);
        }
@@ -4085,7 +4086,15 @@ static void btrfs_dentry_release(struct dentry *dentry)
 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
                                   struct nameidata *nd)
 {
-       return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
+       struct dentry *ret;
+
+       ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
+       if (unlikely(d_need_lookup(dentry))) {
+               spin_lock(&dentry->d_lock);
+               dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+               spin_unlock(&dentry->d_lock);
+       }
+       return ret;
 }
 
 unsigned char btrfs_filetype_table[] = {
@@ -4125,7 +4134,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
 
        /* special case for "." */
        if (filp->f_pos == 0) {
-               over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
+               over = filldir(dirent, ".", 1,
+                              filp->f_pos, btrfs_ino(inode), DT_DIR);
                if (over)
                        return 0;
                filp->f_pos = 1;
@@ -4134,7 +4144,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        if (filp->f_pos == 1) {
                u64 pino = parent_ino(filp->f_path.dentry);
                over = filldir(dirent, "..", 2,
-                              2, pino, DT_DIR);
+                              filp->f_pos, pino, DT_DIR);
                if (over)
                        return 0;
                filp->f_pos = 2;
@@ -5823,7 +5833,7 @@ again:
 
        add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
        ret = btrfs_ordered_update_i_size(inode, 0, ordered);
-       if (!ret)
+       if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
                btrfs_update_inode(trans, root, inode);
        ret = 0;
 out_unlock:
index 970977a..d11fd28 100644
@@ -2177,6 +2177,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
        if (!(src_file->f_mode & FMODE_READ))
                goto out_fput;
 
+       /* don't make the dst file partly checksummed */
+       if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
+           (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
+               goto out_fput;
+
        ret = -EISDIR;
        if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
                goto out_fput;
@@ -2220,6 +2225,16 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
            !IS_ALIGNED(destoff, bs))
                goto out_unlock;
 
+       if (destoff > inode->i_size) {
+               ret = btrfs_cont_expand(inode, inode->i_size, destoff);
+               if (ret)
+                       goto out_unlock;
+       }
+
+       /* truncate page cache pages from target inode range */
+       truncate_inode_pages_range(&inode->i_data, destoff,
+                                  PAGE_CACHE_ALIGN(destoff + len) - 1);
+
        /* do any pending delalloc/csum calc on src, one way or
           another, and lock file content */
        while (1) {
@@ -2236,10 +2251,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                btrfs_wait_ordered_range(src, off, len);
        }
 
-       /* truncate page cache pages from target inode range */
-       truncate_inode_pages_range(&inode->i_data, off,
-                                  ALIGN(off + len, PAGE_CACHE_SIZE) - 1);
-
        /* clone data */
        key.objectid = btrfs_ino(src);
        key.type = BTRFS_EXTENT_DATA_KEY;
@@ -2325,14 +2336,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
                        if (type == BTRFS_FILE_EXTENT_REG ||
                            type == BTRFS_FILE_EXTENT_PREALLOC) {
+                               /*
+                                *    a  | --- range to clone ---|  b
+                                * | ------------- extent ------------- |
+                                */
+
+                               /* subtract range b */
+                               if (key.offset + datal > off + len)
+                                       datal = off + len - key.offset;
+
+                               /* subtract range a */
                                if (off > key.offset) {
                                        datao += off - key.offset;
                                        datal -= off - key.offset;
                                }
 
-                               if (key.offset + datal > off + len)
-                                       datal = off + len - key.offset;
-
                                ret = btrfs_drop_extents(trans, inode,
                                                         new_key.offset,
                                                         new_key.offset + datal,
@@ -2429,7 +2447,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        if (endoff > inode->i_size)
                                btrfs_i_size_write(inode, endoff);
 
-                       BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
                        ret = btrfs_update_inode(trans, root, inode);
                        BUG_ON(ret);
                        btrfs_end_transaction(trans, root);
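In the clone hunk above, clamping the extent length against the end of the requested range ("range b") now happens before the adjustment for the start of the range ("range a"); in the old order the clamp datal = off + len - key.offset could overwrite the reduction already applied for range a and leave datal too large. A worked example with made-up numbers (illustrative only, not part of the patch):

/*
 * extent:      key.offset = 0,  datal = 16384
 * clone range: off = 4096,      len  = 4096      (off + len = 8192)
 *
 * new order:  clip b:  16384 > 8192         ->  datal = 8192 - 0     = 8192
 *             clip a:  datao += 4096,           datal = 8192 - 4096  = 4096   (correct)
 *
 * old order:  clip a:  datao += 4096,           datal = 16384 - 4096 = 12288
 *             clip b:  0 + 12288 > 8192     ->  datal = 8192 - 0     = 8192   (4096 too long)
 */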
index 7dc36fa..e24b796 100644
@@ -884,6 +884,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
+       struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct dentry *parent;
        struct dentry *dentry;
@@ -895,6 +896,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        u64 objectid;
        u64 root_flags;
 
+       rsv = trans->block_rsv;
+
        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                pending->error = -ENOMEM;
@@ -1002,6 +1005,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        btrfs_orphan_post_snapshot(trans, pending);
 fail:
        kfree(new_root_item);
+       trans->block_rsv = rsv;
        btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
        return 0;
 }
index d733b9c..69565e5 100644
@@ -116,6 +116,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
                if (ret)
                        goto out;
                btrfs_release_path(path);
+
+               /*
+                * remove the attribute
+                */
+               if (!value)
+                       goto out;
        }
 
 again:
@@ -158,6 +164,9 @@ out:
        return ret;
 }
 
+/*
+ * @value: "" sets the attribute to an empty value, NULL removes it
+ */
 int __btrfs_setxattr(struct btrfs_trans_handle *trans,
                     struct inode *inode, const char *name,
                     const void *value, size_t size, int flags)
index fee028b..86c59e1 100644
@@ -1595,7 +1595,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
                dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
                     *ppath);
-       } else if (rpath) {
+       } else if (rpath || rino) {
                *ino = rino;
                *ppath = rpath;
                *pathlen = strlen(rpath);
index d47c5ec..88bacaf 100644
@@ -813,8 +813,8 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        fsc = create_fs_client(fsopt, opt);
        if (IS_ERR(fsc)) {
                res = ERR_CAST(fsc);
-               kfree(fsopt);
-               kfree(opt);
+               destroy_mount_options(fsopt);
+               ceph_destroy_options(opt);
                goto out_final;
        }
 
index e717dfd..b7d7bd0 100644
@@ -175,6 +175,7 @@ struct mpage_da_data {
  */
 #define        EXT4_IO_END_UNWRITTEN   0x0001
 #define EXT4_IO_END_ERROR      0x0002
+#define EXT4_IO_END_QUEUED     0x0004
 
 struct ext4_io_page {
        struct page     *p_page;
index c4da98a..18d2558 100644
@@ -121,9 +121,6 @@ void ext4_evict_inode(struct inode *inode)
 
        trace_ext4_evict_inode(inode);
 
-       mutex_lock(&inode->i_mutex);
-       ext4_flush_completed_IO(inode);
-       mutex_unlock(&inode->i_mutex);
        ext4_ioend_wait(inode);
 
        if (inode->i_nlink) {
index 78839af..92f38ee 100644
@@ -142,7 +142,23 @@ static void ext4_end_io_work(struct work_struct *work)
        unsigned long           flags;
        int                     ret;
 
-       mutex_lock(&inode->i_mutex);
+       if (!mutex_trylock(&inode->i_mutex)) {
+               /*
+                * Requeue the work instead of waiting so that the work
+                * items queued after this can be processed.
+                */
+               queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
+               /*
+                * To prevent the ext4-dio-unwritten thread from repeatedly
+                * requeueing end_io requests and occupying the cpu for too long,
+                * yield the cpu if it sees an end_io request that has already
+                * been requeued.
+                */
+               if (io->flag & EXT4_IO_END_QUEUED)
+                       yield();
+               io->flag |= EXT4_IO_END_QUEUED;
+               return;
+       }
        ret = ext4_end_io_nolock(io);
        if (ret < 0) {
                mutex_unlock(&inode->i_mutex);
index 168a80f..5cb8614 100644
@@ -258,10 +258,14 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
        forget->forget_one.nlookup = nlookup;
 
        spin_lock(&fc->lock);
-       fc->forget_list_tail->next = forget;
-       fc->forget_list_tail = forget;
-       wake_up(&fc->waitq);
-       kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+       if (fc->connected) {
+               fc->forget_list_tail->next = forget;
+               fc->forget_list_tail = forget;
+               wake_up(&fc->waitq);
+               kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+       } else {
+               kfree(forget);
+       }
        spin_unlock(&fc->lock);
 }
 
index 12b5029..add96f6 100644
@@ -812,6 +812,9 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                        if (arg->minor >= 17) {
                                if (!(arg->flags & FUSE_FLOCK_LOCKS))
                                        fc->no_flock = 1;
+                       } else {
+                               if (!(arg->flags & FUSE_POSIX_LOCKS))
+                                       fc->no_flock = 1;
                        }
                        if (arg->flags & FUSE_ATOMIC_O_TRUNC)
                                fc->atomic_o_trunc = 1;
index c106ca2..d24a9b6 100644
@@ -344,6 +344,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        struct inode *root, *inode;
        struct qstr str;
        struct nls_table *nls = NULL;
+       u64 last_fs_block, last_fs_page;
        int err;
 
        err = -EINVAL;
@@ -399,9 +400,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        if (!sbi->rsrc_clump_blocks)
                sbi->rsrc_clump_blocks = 1;
 
-       err = generic_check_addressable(sbi->alloc_blksz_shift,
-                                       sbi->total_blocks);
-       if (err) {
+       err = -EFBIG;
+       last_fs_block = sbi->total_blocks - 1;
+       last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
+                       PAGE_CACHE_SHIFT;
+
+       if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
+           (last_fs_page > (pgoff_t)(~0ULL))) {
                printk(KERN_ERR "hfs: filesystem size too large.\n");
                goto out_free_vhdr;
        }
@@ -525,8 +530,8 @@ out_close_cat_tree:
 out_close_ext_tree:
        hfs_btree_close(sbi->ext_tree);
 out_free_vhdr:
-       kfree(sbi->s_vhdr);
-       kfree(sbi->s_backup_vhdr);
+       kfree(sbi->s_vhdr_buf);
+       kfree(sbi->s_backup_vhdr_buf);
 out_unload_nls:
        unload_nls(sbi->nls);
        unload_nls(nls);
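The hfsplus hunk above replaces generic_check_addressable() with explicit -EFBIG checks on the last filesystem block, expressed in 512-byte sectors (hence the alloc_blksz_shift - 9), and on the last page-cache index. A small sketch of the arithmetic under assumed parameters, 4 KiB allocation blocks and a 32-bit sector_t (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned alloc_blksz_shift = 12;            /* assumed: 4 KiB allocation blocks */
        uint64_t sector_t_max      = 0xFFFFFFFFULL; /* assumed: 32-bit sector_t */

        uint64_t max_last_block = sector_t_max >> (alloc_blksz_shift - 9);
        uint64_t max_bytes      = (max_last_block + 1) << alloc_blksz_shift;

        /* 536870911 blocks, i.e. roughly a 2 TiB volume before the mount fails */
        printf("%llu blocks, %llu bytes\n",
               (unsigned long long)max_last_block,
               (unsigned long long)max_bytes);
        return 0;
}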
index 10e515a..7daf4b8 100644
@@ -272,9 +272,9 @@ reread:
        return 0;
 
 out_free_backup_vhdr:
-       kfree(sbi->s_backup_vhdr);
+       kfree(sbi->s_backup_vhdr_buf);
 out_free_vhdr:
-       kfree(sbi->s_vhdr);
+       kfree(sbi->s_vhdr_buf);
 out:
        return error;
 }
index 2826db3..f478836 100644
@@ -727,25 +727,22 @@ static int follow_automount(struct path *path, unsigned flags,
        if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
                return -EISDIR; /* we actually want to stop here */
 
-       /*
-        * We don't want to mount if someone's just doing a stat and they've
-        * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and
-        * appended a '/' to the name.
+       /* We don't want to mount if someone's just doing a stat -
+        * unless they're stat'ing a directory and appended a '/' to
+        * the name.
+        *
+        * We do, however, want to mount if someone wants to open or
+        * create a file of any type under the mountpoint, wants to
+        * traverse through the mountpoint or wants to open the
+        * mounted directory.  Also, autofs may mark negative dentries
+        * as being automount points.  These will need the attentions
+        * of the daemon to instantiate them before they can be used.
         */
-       if (!(flags & LOOKUP_FOLLOW)) {
-               /* We do, however, want to mount if someone wants to open or
-                * create a file of any type under the mountpoint, wants to
-                * traverse through the mountpoint or wants to open the mounted
-                * directory.
-                * Also, autofs may mark negative dentries as being automount
-                * points.  These will need the attentions of the daemon to
-                * instantiate them before they can be used.
-                */
-               if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-                            LOOKUP_OPEN | LOOKUP_CREATE)) &&
-                   path->dentry->d_inode)
-                       return -EISDIR;
-       }
+       if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
+                    LOOKUP_OPEN | LOOKUP_CREATE)) &&
+           path->dentry->d_inode)
+               return -EISDIR;
+
        current->total_link_count++;
        if (current->total_link_count >= 40)
                return -ELOOP;
@@ -2619,6 +2616,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
        if (!dir->i_op->rmdir)
                return -EPERM;
 
+       dget(dentry);
        mutex_lock(&dentry->d_inode->i_mutex);
 
        error = -EBUSY;
@@ -2639,6 +2637,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 
 out:
        mutex_unlock(&dentry->d_inode->i_mutex);
+       dput(dentry);
        if (!error)
                d_delete(dentry);
        return error;
@@ -3028,6 +3027,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
        if (error)
                return error;
 
+       dget(new_dentry);
        if (target)
                mutex_lock(&target->i_mutex);
 
@@ -3048,6 +3048,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
 out:
        if (target)
                mutex_unlock(&target->i_mutex);
+       dput(new_dentry);
        if (!error)
                if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
                        d_move(old_dentry,new_dentry);
index 1ec1a85..3e93e9a 100644
@@ -56,6 +56,9 @@ enum nfs4_session_state {
        NFS4_SESSION_DRAINING,
 };
 
+#define NFS4_RENEW_TIMEOUT             0x01
+#define NFS4_RENEW_DELEGATION_CB       0x02
+
 struct nfs4_minor_version_ops {
        u32     minor_version;
 
@@ -225,7 +228,7 @@ struct nfs4_state_recovery_ops {
 };
 
 struct nfs4_state_maintenance_ops {
-       int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *);
+       int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned);
        struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
        int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
 };
@@ -237,8 +240,6 @@ extern const struct inode_operations nfs4_dir_inode_operations;
 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
 extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
-extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
@@ -349,6 +350,7 @@ extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
 extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
 extern void nfs4_schedule_lease_recovery(struct nfs_client *);
 extern void nfs4_schedule_state_manager(struct nfs_client *);
+extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
 extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
index 8c77039..4700fae 100644
@@ -3374,9 +3374,13 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
 
        if (task->tk_status < 0) {
                /* Unless we're shutting down, schedule state recovery! */
-               if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
+               if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
+                       return;
+               if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
                        nfs4_schedule_lease_recovery(clp);
-               return;
+                       return;
+               }
+               nfs4_schedule_path_down_recovery(clp);
        }
        do_renew_lease(clp, timestamp);
 }
@@ -3386,7 +3390,7 @@ static const struct rpc_call_ops nfs4_renew_ops = {
        .rpc_release = nfs4_renew_release,
 };
 
-int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
 {
        struct rpc_message msg = {
                .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
@@ -3395,9 +3399,11 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
        };
        struct nfs4_renewdata *data;
 
+       if (renew_flags == 0)
+               return 0;
        if (!atomic_inc_not_zero(&clp->cl_count))
                return -EIO;
-       data = kmalloc(sizeof(*data), GFP_KERNEL);
+       data = kmalloc(sizeof(*data), GFP_NOFS);
        if (data == NULL)
                return -ENOMEM;
        data->client = clp;
@@ -3406,7 +3412,7 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
                        &nfs4_renew_ops, data);
 }
 
-int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
 {
        struct rpc_message msg = {
                .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
@@ -5504,11 +5510,13 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_
        return rpc_run_task(&task_setup_data);
 }
 
-static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
 {
        struct rpc_task *task;
        int ret = 0;
 
+       if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+               return 0;
        task = _nfs41_proc_sequence(clp, cred);
        if (IS_ERR(task))
                ret = PTR_ERR(task);
index df8e7f3..dc484c0 100644
@@ -60,6 +60,7 @@ nfs4_renew_state(struct work_struct *work)
        struct rpc_cred *cred;
        long lease;
        unsigned long last, now;
+       unsigned renew_flags = 0;
 
        ops = clp->cl_mvops->state_renewal_ops;
        dprintk("%s: start\n", __func__);
@@ -72,18 +73,23 @@ nfs4_renew_state(struct work_struct *work)
        last = clp->cl_last_renewal;
        now = jiffies;
        /* Are we close to a lease timeout? */
-       if (time_after(now, last + lease/3)) {
+       if (time_after(now, last + lease/3))
+               renew_flags |= NFS4_RENEW_TIMEOUT;
+       if (nfs_delegations_present(clp))
+               renew_flags |= NFS4_RENEW_DELEGATION_CB;
+
+       if (renew_flags != 0) {
                cred = ops->get_state_renewal_cred_locked(clp);
                spin_unlock(&clp->cl_lock);
                if (cred == NULL) {
-                       if (!nfs_delegations_present(clp)) {
+                       if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) {
                                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                                goto out;
                        }
                        nfs_expire_all_delegations(clp);
                } else {
                        /* Queue an asynchronous RENEW. */
-                       ops->sched_state_renewal(clp, cred);
+                       ops->sched_state_renewal(clp, cred, renew_flags);
                        put_rpccred(cred);
                        goto out_exp;
                }
index 72ab97e..39914be 100644
@@ -1038,6 +1038,12 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
        nfs4_schedule_state_manager(clp);
 }
 
+void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
+{
+       nfs_handle_cb_pathdown(clp);
+       nfs4_schedule_state_manager(clp);
+}
+
 static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
 {
 
index b961cea..9b7dd70 100644
@@ -2035,9 +2035,6 @@ static inline void nfs_initialise_sb(struct super_block *sb)
                sb->s_blocksize = nfs_block_bits(server->wsize,
                                                 &sb->s_blocksize_bits);
 
-       if (server->flags & NFS_MOUNT_NOAC)
-               sb->s_flags |= MS_SYNCHRONOUS;
-
        sb->s_bdi = &server->backing_dev_info;
 
        nfs_super_set_maxbytes(sb, server->maxfilesize);
@@ -2249,6 +2246,10 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
        if (server->flags & NFS_MOUNT_UNSHARED)
                compare_super = NULL;
 
+       /* -o noac implies -o sync */
+       if (server->flags & NFS_MOUNT_NOAC)
+               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
        if (IS_ERR(s)) {
@@ -2361,6 +2362,10 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags,
        if (server->flags & NFS_MOUNT_UNSHARED)
                compare_super = NULL;
 
+       /* -o noac implies -o sync */
+       if (server->flags & NFS_MOUNT_NOAC)
+               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
        if (IS_ERR(s)) {
@@ -2628,6 +2633,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
        if (server->flags & NFS4_MOUNT_UNSHARED)
                compare_super = NULL;
 
+       /* -o noac implies -o sync */
+       if (server->flags & NFS_MOUNT_NOAC)
+               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
        if (IS_ERR(s)) {
@@ -2916,6 +2925,10 @@ nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
        if (server->flags & NFS4_MOUNT_UNSHARED)
                compare_super = NULL;
 
+       /* -o noac implies -o sync */
+       if (server->flags & NFS_MOUNT_NOAC)
+               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
        if (IS_ERR(s)) {
@@ -3003,6 +3016,10 @@ nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
        if (server->flags & NFS4_MOUNT_UNSHARED)
                compare_super = NULL;
 
+       /* -o noac implies -o sync */
+       if (server->flags & NFS_MOUNT_NOAC)
+               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
        if (IS_ERR(s)) {
index b39b37f..c9bd2a6 100644
@@ -958,7 +958,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head
                if (!data)
                        goto out_bad;
                data->pagevec[0] = page;
-               nfs_write_rpcsetup(req, data, wsize, offset, desc->pg_ioflags);
+               nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
                list_add(&data->list, res);
                requests++;
                nbytes -= len;
index 45174b5..feb361e 100644
@@ -335,9 +335,9 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
 #define DBGKEY(key)  ((char *)(key))
 #define DBGKEY1(key) ((char *)(key))
 
-#define ubifs_dbg_msg(fmt, ...) do {               \
-       if (0)                                     \
-               pr_debug(fmt "\n", ##__VA_ARGS__); \
+#define ubifs_dbg_msg(fmt, ...) do {                        \
+       if (0)                                              \
+               printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
 } while (0)
 
 #define dbg_dump_stack()
index 63e971e..8c37dde 100644
@@ -1300,6 +1300,7 @@ xfs_end_io_direct_write(
        bool                    is_async)
 {
        struct xfs_ioend        *ioend = iocb->private;
+       struct inode            *inode = ioend->io_inode;
 
        /*
         * blockdev_direct_IO can return an error even after the I/O
@@ -1331,7 +1332,7 @@ xfs_end_io_direct_write(
        }
 
        /* XXX: probably should move into the real I/O completion handler */
-       inode_dio_done(ioend->io_inode);
+       inode_dio_done(inode);
 }
 
 STATIC ssize_t
index b9c172b..673704f 100644
@@ -70,9 +70,8 @@ xfs_synchronize_times(
 }
 
 /*
- * If the linux inode is valid, mark it dirty.
- * Used when committing a dirty inode into a transaction so that
- * the inode will get written back by the linux code
+ * If the linux inode is valid, mark it dirty, else mark the dirty state
+ * in the XFS inode to make sure we pick it up when reclaiming the inode.
  */
 void
 xfs_mark_inode_dirty_sync(
@@ -82,6 +81,10 @@ xfs_mark_inode_dirty_sync(
 
        if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
                mark_inode_dirty_sync(inode);
+       else {
+               barrier();
+               ip->i_update_core = 1;
+       }
 }
 
 void
@@ -92,6 +95,11 @@ xfs_mark_inode_dirty(
 
        if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
                mark_inode_dirty(inode);
+       else {
+               barrier();
+               ip->i_update_core = 1;
+       }
+
 }
 
 /*
index 9a72dda..2366c54 100644
@@ -356,6 +356,8 @@ xfs_parseargs(
                        mp->m_flags |= XFS_MOUNT_DELAYLOG;
                } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
                        mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
+                       xfs_warn(mp,
+       "nodelaylog is deprecated and will be removed in Linux 3.3");
                } else if (!strcmp(this_char, MNTOPT_DISCARD)) {
                        mp->m_flags |= XFS_MOUNT_DISCARD;
                } else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
@@ -877,33 +879,17 @@ xfs_log_inode(
        struct xfs_trans        *tp;
        int                     error;
 
-       xfs_iunlock(ip, XFS_ILOCK_SHARED);
        tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
        error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
-
        if (error) {
                xfs_trans_cancel(tp, 0);
-               /* we need to return with the lock hold shared */
-               xfs_ilock(ip, XFS_ILOCK_SHARED);
                return error;
        }
 
        xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-       /*
-        * Note - it's possible that we might have pushed ourselves out of the
-        * way during trans_reserve which would flush the inode.  But there's
-        * no guarantee that the inode buffer has actually gone out yet (it's
-        * delwri).  Plus the buffer could be pinned anyway if it's part of
-        * an inode in another recent transaction.  So we play it safe and
-        * fire off the transaction anyway.
-        */
-       xfs_trans_ijoin(tp, ip);
+       xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       error = xfs_trans_commit(tp, 0);
-       xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
-
-       return error;
+       return xfs_trans_commit(tp, 0);
 }
 
 STATIC int
@@ -918,7 +904,9 @@ xfs_fs_write_inode(
        trace_xfs_write_inode(ip);
 
        if (XFS_FORCED_SHUTDOWN(mp))
-               return XFS_ERROR(EIO);
+               return -XFS_ERROR(EIO);
+       if (!ip->i_update_core)
+               return 0;
 
        if (wbc->sync_mode == WB_SYNC_ALL) {
                /*
@@ -929,12 +917,10 @@ xfs_fs_write_inode(
                 * of synchronous log forces dramatically.
                 */
                xfs_ioend_wait(ip);
-               xfs_ilock(ip, XFS_ILOCK_SHARED);
-               if (ip->i_update_core) {
-                       error = xfs_log_inode(ip);
-                       if (error)
-                               goto out_unlock;
-               }
+               error = xfs_log_inode(ip);
+               if (error)
+                       goto out;
+               return 0;
        } else {
                /*
                 * We make this non-blocking if the inode is contended, return
index 98999cf..feb9121 100644
@@ -63,15 +63,10 @@ static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
        return container_of(gc, struct bgpio_chip, gc);
 }
 
-int __devexit bgpio_remove(struct bgpio_chip *bgc);
-int __devinit bgpio_init(struct bgpio_chip *bgc,
-                        struct device *dev,
-                        unsigned long sz,
-                        void __iomem *dat,
-                        void __iomem *set,
-                        void __iomem *clr,
-                        void __iomem *dirout,
-                        void __iomem *dirin,
-                        bool big_endian);
+int bgpio_remove(struct bgpio_chip *bgc);
+int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
+              unsigned long sz, void __iomem *dat, void __iomem *set,
+              void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
+              bool big_endian);
 
 #endif /* __BASIC_MMIO_GPIO_H */
index 3b535db..343bd76 100644
@@ -39,16 +39,6 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct mem_cgroup *mem_cont,
                                        int active, int file);
 
-struct memcg_scanrecord {
-       struct mem_cgroup *mem; /* scanend memory cgroup */
-       struct mem_cgroup *root; /* scan target hierarchy root */
-       int context;            /* scanning context (see memcontrol.c) */
-       unsigned long nr_scanned[2]; /* the number of scanned pages */
-       unsigned long nr_rotated[2]; /* the number of rotated pages */
-       unsigned long nr_freed[2]; /* the number of freed pages */
-       unsigned long elapsed; /* nsec of time elapsed while scanning */
-};
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -127,15 +117,6 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                        struct task_struct *p);
 
-extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
-                                                 gfp_t gfp_mask, bool noswap,
-                                                 struct memcg_scanrecord *rec);
-extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
-                                               gfp_t gfp_mask, bool noswap,
-                                               struct zone *zone,
-                                               struct memcg_scanrecord *rec,
-                                               unsigned long *nr_scanned);
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
index d12f8d6..97cf4f2 100644
@@ -26,7 +26,7 @@ struct wm8994_ldo_pdata {
        struct regulator_init_data *init_data;
 };
 
-#define WM8994_CONFIGURE_GPIO 0x8000
+#define WM8994_CONFIGURE_GPIO 0x10000
 
 #define WM8994_DRC_REGS 5
 #define WM8994_EQ_REGS  20
index 245bafd..c816075 100644
@@ -944,8 +944,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched_in(struct task_struct *task);
-extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *prev,
+                                      struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *prev,
+                                       struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1059,17 +1061,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct jump_label_key perf_sched_events;
 
-static inline void perf_event_task_sched_in(struct task_struct *task)
+static inline void perf_event_task_sched_in(struct task_struct *prev,
+                                           struct task_struct *task)
 {
        if (static_branch(&perf_sched_events))
-               __perf_event_task_sched_in(task);
+               __perf_event_task_sched_in(prev, task);
 }
 
-static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+static inline void perf_event_task_sched_out(struct task_struct *prev,
+                                            struct task_struct *next)
 {
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-       __perf_event_task_sched_out(task, next);
+       if (static_branch(&perf_sched_events))
+               __perf_event_task_sched_out(prev, next);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1139,10 +1144,11 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task)                     { }
+perf_event_task_sched_in(struct task_struct *prev,
+                        struct task_struct *task)                      { }
 static inline void
-perf_event_task_sched_out(struct task_struct *task,
-                           struct task_struct *next)                   { }
+perf_event_task_sched_out(struct task_struct *prev,
+                         struct task_struct *next)                     { }
 static inline int perf_event_init_task(struct task_struct *child)      { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)     { }
 static inline void perf_event_free_task(struct task_struct *task)      { }
index 26f6ea4..b47771a 100644
@@ -123,7 +123,7 @@ struct regulator_bulk_data {
        const char *supply;
        struct regulator *consumer;
 
-       /* Internal use */
+       /* private: Internal use */
        int ret;
 };
 
index 14d6249..c71f84b 100644
@@ -252,6 +252,12 @@ static inline void lru_cache_add_file(struct page *page)
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
 extern int __isolate_lru_page(struct page *page, int mode, int file);
+extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
+                                                 gfp_t gfp_mask, bool noswap);
+extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
+                                               gfp_t gfp_mask, bool noswap,
+                                               struct zone *zone,
+                                               unsigned long *nr_scanned);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
index 342dcf1..a6326ef 100644
@@ -288,6 +288,35 @@ enum p9_perm_t {
        P9_DMSETVTX = 0x00010000,
 };
 
+/* 9p2000.L open flags */
+#define P9_DOTL_RDONLY        00000000
+#define P9_DOTL_WRONLY        00000001
+#define P9_DOTL_RDWR          00000002
+#define P9_DOTL_NOACCESS      00000003
+#define P9_DOTL_CREATE        00000100
+#define P9_DOTL_EXCL          00000200
+#define P9_DOTL_NOCTTY        00000400
+#define P9_DOTL_TRUNC         00001000
+#define P9_DOTL_APPEND        00002000
+#define P9_DOTL_NONBLOCK      00004000
+#define P9_DOTL_DSYNC         00010000
+#define P9_DOTL_FASYNC        00020000
+#define P9_DOTL_DIRECT        00040000
+#define P9_DOTL_LARGEFILE     00100000
+#define P9_DOTL_DIRECTORY     00200000
+#define P9_DOTL_NOFOLLOW      00400000
+#define P9_DOTL_NOATIME       01000000
+#define P9_DOTL_CLOEXEC       02000000
+#define P9_DOTL_SYNC          04000000
+
+/* 9p2000.L at flags */
+#define P9_DOTL_AT_REMOVEDIR           0x200
+
+/* 9p2000.L lock type */
+#define P9_LOCK_TYPE_RDLCK 0
+#define P9_LOCK_TYPE_WRLCK 1
+#define P9_LOCK_TYPE_UNLCK 2
+
 /**
  * enum p9_qid_t - QID types
  * @P9_QTDIR: directory
index 408ae48..401d73b 100644
@@ -1744,6 +1744,8 @@ struct wiphy_wowlan_support {
  *     by default for perm_addr. In this case, the mask should be set to
  *     all-zeroes. In this case it is assumed that the device can handle
  *     the same number of arbitrary MAC addresses.
+ * @registered: protects ->resume and ->suspend sysfs callbacks against
+ *     the hardware being unregistered
  * @debugfsdir: debugfs directory used for this wiphy, will be renamed
  *     automatically on wiphy renames
  * @dev: (virtual) struct device for this wiphy
index e9b48b0..acc620a 100644
@@ -431,17 +431,34 @@ extern int tcp_disconnect(struct sock *sk, int flags);
 extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 
                                    struct ip_options *opt);
+#ifdef CONFIG_SYN_COOKIES
 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 
                                     __u16 *mss);
+#else
+static inline __u32 cookie_v4_init_sequence(struct sock *sk,
+                                           struct sk_buff *skb,
+                                           __u16 *mss)
+{
+       return 0;
+}
+#endif
 
 extern __u32 cookie_init_timestamp(struct request_sock *req);
 extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
 
 /* From net/ipv6/syncookies.c */
 extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
+#ifdef CONFIG_SYN_COOKIES
 extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
                                     __u16 *mss);
-
+#else
+static inline __u32 cookie_v6_init_sequence(struct sock *sk,
+                                           struct sk_buff *skb,
+                                           __u16 *mss)
+{
+       return 0;
+}
+#endif
 /* tcp_output.c */
 
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
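The tcp.h hunk above wraps the syncookie sequence generators in CONFIG_SYN_COOKIES and supplies inline stubs that return 0 when the option is off, so callers never need their own #ifdef. A minimal userspace sketch of the same compile-out idiom, using made-up names (FEATURE_X, feature_x_sequence) rather than the kernel's:

/* Illustrative only: "real function when configured, inline stub returning 0
 * otherwise", so call sites stay unconditional. */
#include <stdio.h>

#define FEATURE_X 1                     /* comment out to build the stub path */

#ifdef FEATURE_X
static unsigned int feature_x_sequence(unsigned int seed)
{
        return seed * 2654435761u;      /* stand-in for the real work */
}
#else
static inline unsigned int feature_x_sequence(unsigned int seed)
{
        (void)seed;
        return 0;                       /* callers see "no cookie" without #ifdefs */
}
#endif

int main(void)
{
        printf("%u\n", feature_x_sequence(42));
        return 0;
}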
index b8785e2..0f85778 100644 (file)
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
        local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                        struct task_struct *next)
 {
-       perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+       struct perf_cgroup *cgrp1;
+       struct perf_cgroup *cgrp2 = NULL;
+
+       /*
+        * we come here when we know perf_cgroup_events > 0
+        */
+       cgrp1 = perf_cgroup_from_task(task);
+
+       /*
+        * next is NULL when called from perf_event_enable_on_exec()
+        * that will systematically cause a cgroup_switch()
+        */
+       if (next)
+               cgrp2 = perf_cgroup_from_task(next);
+
+       /*
+        * only schedule out current cgroup events if we know
+        * that we are switching to a different cgroup. Otherwise,
+        * do not touch the cgroup events.
+        */
+       if (cgrp1 != cgrp2)
+               perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                       struct task_struct *task)
 {
-       perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+       struct perf_cgroup *cgrp1;
+       struct perf_cgroup *cgrp2 = NULL;
+
+       /*
+        * we come here when we know perf_cgroup_events > 0
+        */
+       cgrp1 = perf_cgroup_from_task(task);
+
+       /* prev can never be NULL */
+       cgrp2 = perf_cgroup_from_task(prev);
+
+       /*
+        * only need to schedule in cgroup events if we are changing
+        * cgroup during ctxsw. Cgroup events were not scheduled
+        * out during ctxsw if that was not the case.
+        */
+       if (cgrp1 != cgrp2)
+               perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                        struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                       struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
         * cgroup event are system-wide mode only
         */
        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-               perf_cgroup_sched_out(task);
+               perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+                               struct task_struct *task)
 {
        struct perf_event_context *ctx;
        int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
         * cgroup event are system-wide mode only
         */
        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-               perf_cgroup_sched_in(task);
+               perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
         * ctxswin cgroup events which are already scheduled
         * in.
         */
-       perf_cgroup_sched_out(current);
+       perf_cgroup_sched_out(current, NULL);
 
        raw_spin_lock(&ctx->lock);
        task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-                               u64 *running,
-                               u64 *enabled)
+                               u64 *enabled,
+                               u64 *running)
 {
        u64 now, ctx_time;
 
index d5a3009..dc5114b 100644 (file)
@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
        desc->depth = 1;
        if (desc->irq_data.chip->irq_shutdown)
                desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-       if (desc->irq_data.chip->irq_disable)
+       else if (desc->irq_data.chip->irq_disable)
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_mask(&desc->irq_data);
index ccacdbd..ec5f472 100644 (file)
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-       perf_event_task_sched_in(current);
+       perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ need_resched:
                                if (to_wakeup)
                                        try_to_wake_up_local(to_wakeup);
                        }
-
-                       /*
-                        * If we are going to sleep and we have plugged IO
-                        * queued, make sure to submit it to avoid deadlocks.
-                        */
-                       if (blk_needs_flush_plug(prev)) {
-                               raw_spin_unlock(&rq->lock);
-                               blk_schedule_flush_plug(prev);
-                               raw_spin_lock(&rq->lock);
-                       }
                }
                switch_count = &prev->nvcsw;
        }
@@ -4369,6 +4359,26 @@ need_resched:
        if (need_resched())
                goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+       if (!tsk->state)
+               return;
+       /*
+        * If we are going to sleep and we have plugged IO queued,
+        * make sure to submit it to avoid deadlocks.
+        */
+       if (blk_needs_flush_plug(tsk))
+               blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void schedule(void)
+{
+       struct task_struct *tsk = current;
+
+       sched_submit_work(tsk);
+       __schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
        do {
                add_preempt_count_notrace(PREEMPT_ACTIVE);
-               schedule();
+               __schedule();
                sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
                /*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                local_irq_enable();
-               schedule();
+               __schedule();
                local_irq_disable();
                sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
        add_preempt_count(PREEMPT_ACTIVE);
-       schedule();
+       __schedule();
        sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
                        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
                        if (sd && (sd->flags & SD_OVERLAP))
                                free_sched_groups(sd->groups, 0);
+                       kfree(*per_cpu_ptr(sdd->sd, j));
                        kfree(*per_cpu_ptr(sdd->sg, j));
                        kfree(*per_cpu_ptr(sdd->sgp, j));
                }
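The sched.c hunks above split schedule() into a thin public wrapper and a static __schedule() core: the wrapper flushes plugged block IO via sched_submit_work() before sleeping, while the preemption paths (preempt_schedule, preempt_schedule_irq, __cond_resched) call the core directly and never touch the plug. A minimal userspace sketch of that wrapper split, with illustrative names only:

/* Public entry point submits deferred work first; internal callers go
 * straight to the core routine. */
#include <stdio.h>
#include <stdbool.h>

static bool have_pending_io;            /* stand-in for a plugged block queue */

static void flush_pending_io(void)
{
        if (have_pending_io) {
                printf("flushing plugged IO before sleeping\n");
                have_pending_io = false;
        }
}

static void core_schedule(void)         /* plays the role of __schedule() */
{
        printf("pick next task and context-switch\n");
}

static void public_schedule(bool going_to_sleep)   /* plays the role of schedule() */
{
        if (going_to_sleep)
                flush_pending_io();     /* avoid deadlocking on our own IO */
        core_schedule();
}

int main(void)
{
        have_pending_io = true;
        public_schedule(true);          /* blocking caller: IO flushed first */
        core_schedule();                /* preemption path: core only */
        return 0;
}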
index e19ce14..e660464 100644 (file)
@@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = {
        .cmd            = TASKSTATS_CMD_GET,
        .doit           = taskstats_user_cmd,
        .policy         = taskstats_cmd_get_policy,
+       .flags          = GENL_ADMIN_PERM,
 };
 
 static struct genl_ops cgroupstats_ops = {
index 59f369f..ea5e1a9 100644 (file)
@@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 static void alarm_timer_get(struct k_itimer *timr,
                                struct itimerspec *cur_setting)
 {
+       memset(cur_setting, 0, sizeof(struct itimerspec));
+
        cur_setting->it_interval =
                        ktime_to_timespec(timr->it.alarmtimer.period);
        cur_setting->it_value =
@@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
        if (!rtcdev)
                return -ENOTSUPP;
 
-       /* Save old values */
-       old_setting->it_interval =
-                       ktime_to_timespec(timr->it.alarmtimer.period);
-       old_setting->it_value =
-                       ktime_to_timespec(timr->it.alarmtimer.node.expires);
+       /*
+        * XXX HACK! Currently we can DOS a system if the interval
+        * period on alarmtimers is too small. Cap the interval here
+        * to 100us and solve this properly in a future patch! -jstultz
+        */
+       if ((new_setting->it_interval.tv_sec == 0) &&
+                       (new_setting->it_interval.tv_nsec < 100000))
+               new_setting->it_interval.tv_nsec = 100000;
+
+       if (old_setting)
+               alarm_timer_get(timr, old_setting);
 
        /* If the timer was already set, cancel it */
        alarm_cancel(&timr->it.alarmtimer);
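The alarmtimer hunk above caps very small repeat intervals at 100us (the self-described temporary hack) so a tight periodic alarm cannot flood the system. The clamp in isolation, with arbitrary example values:

/* Bump sub-100us intervals up to the 100us floor used above. */
#include <stdio.h>
#include <time.h>

#define MIN_INTERVAL_NS 100000L         /* 100 us */

static void clamp_interval(struct timespec *it)
{
        if (it->tv_sec == 0 && it->tv_nsec < MIN_INTERVAL_NS)
                it->tv_nsec = MIN_INTERVAL_NS;
}

int main(void)
{
        struct timespec a = { 0, 10 };          /* 10 ns: would flood the system */
        struct timespec b = { 1, 0 };           /* 1 s: left alone */

        clamp_interval(&a);
        clamp_interval(&b);
        printf("a = %ld.%09ld  b = %ld.%09ld\n",
               (long)a.tv_sec, a.tv_nsec, (long)b.tv_sec, b.tv_nsec);
        return 0;
}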
index 24dc60d..5bbfac8 100644 (file)
@@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 
 #define KB 1024
 #define MB (1024*KB)
+#define KB_MASK (~(KB-1))
 /*
  * fill in extended accounting fields
  */
@@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
                stats->hiwater_vm    = get_mm_hiwater_vm(mm)  * PAGE_SIZE / KB;
                mmput(mm);
        }
-       stats->read_char        = p->ioac.rchar;
-       stats->write_char       = p->ioac.wchar;
-       stats->read_syscalls    = p->ioac.syscr;
-       stats->write_syscalls   = p->ioac.syscw;
+       stats->read_char        = p->ioac.rchar & KB_MASK;
+       stats->write_char       = p->ioac.wchar & KB_MASK;
+       stats->read_syscalls    = p->ioac.syscr & KB_MASK;
+       stats->write_syscalls   = p->ioac.syscw & KB_MASK;
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-       stats->read_bytes       = p->ioac.read_bytes;
-       stats->write_bytes      = p->ioac.write_bytes;
-       stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes;
+       stats->read_bytes       = p->ioac.read_bytes & KB_MASK;
+       stats->write_bytes      = p->ioac.write_bytes & KB_MASK;
+       stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
 #else
        stats->read_bytes       = 0;
        stats->write_bytes      = 0;
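KB_MASK above is ~(KB-1), so ANDing a counter with it clears the low 10 bits and rounds the value down to a whole multiple of 1024. A tiny worked example of that arithmetic (values arbitrary):

/* x & ~(1024 - 1) rounds x down to a 1 KiB boundary. */
#include <stdio.h>

#define KB      1024ULL
#define KB_MASK (~(KB - 1))

int main(void)
{
        unsigned long long vals[] = { 1023, 1024, 5555, 123456789 };

        for (int i = 0; i < 4; i++)
                printf("%llu -> %llu\n", vals[i], vals[i] & KB_MASK);
        /* prints: 1023 -> 0, 1024 -> 1024, 5555 -> 5120, 123456789 -> 123456512 */
        return 0;
}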
index 25fb1b0..1783aab 100644 (file)
@@ -2412,8 +2412,13 @@ reflush:
 
        for_each_cwq_cpu(cpu, wq) {
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+               bool drained;
 
-               if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+               spin_lock_irq(&cwq->gcwq->lock);
+               drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+               spin_unlock_irq(&cwq->gcwq->lock);
+
+               if (drained)
                        continue;
 
                if (++flush_cnt == 10 ||
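The workqueue hunk above takes gcwq->lock around the nr_active/delayed_works check so the "drained" decision is made on one consistent snapshot instead of two racy loads. A userspace analogue of the pattern using a pthread mutex (names and fields are stand-ins):

/* Read the two related fields under the same lock before deciding. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
        pthread_mutex_t lock;
        int             nr_active;      /* work items currently running */
        int             nr_delayed;     /* work items parked for later */
};

static bool queue_drained(struct queue *q)
{
        bool drained;

        pthread_mutex_lock(&q->lock);
        drained = (q->nr_active == 0 && q->nr_delayed == 0);
        pthread_mutex_unlock(&q->lock);

        return drained;
}

int main(void)
{
        struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER,
                           .nr_active = 0, .nr_delayed = 1 };

        printf("drained: %s\n", queue_drained(&q) ? "yes" : "no");
        return 0;
}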
index d5d175c..3f5bc6d 100644 (file)
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         idr.o int_sqrt.o extable.o prio_tree.o \
         sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
         proportions.o prio_heap.o ratelimit.o show_mem.o \
-        is_single_threaded.o plist.o decompress.o find_next_bit.o
+        is_single_threaded.o plist.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
         string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
-        bsearch.o find_last_bit.o
+        bsearch.o find_last_bit.o find_next_bit.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
index f33271d..1de509a 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
+#include <linux/cryptohash.h>
 #include <asm/unaligned.h>
 
 /*
index 645a080..7771871 100644 (file)
@@ -827,13 +827,14 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 {
        unsigned int i;
        unsigned int ret;
-       unsigned int nr_found;
+       unsigned int nr_found, nr_skip;
 
        rcu_read_lock();
 restart:
        nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
                                (void ***)pages, NULL, start, nr_pages);
        ret = 0;
+       nr_skip = 0;
        for (i = 0; i < nr_found; i++) {
                struct page *page;
 repeat:
@@ -856,6 +857,7 @@ repeat:
                         * here as an exceptional entry: so skip over it -
                         * we only reach this from invalidate_mapping_pages().
                         */
+                       nr_skip++;
                        continue;
                }
 
@@ -876,7 +878,7 @@ repeat:
         * If all entries were removed before we could secure them,
         * try again, because callers stop trying once 0 is returned.
         */
-       if (unlikely(!ret && nr_found))
+       if (unlikely(!ret && nr_found > nr_skip))
                goto restart;
        rcu_read_unlock();
        return ret;
index ebd1e86..3508777 100644 (file)
@@ -204,50 +204,6 @@ struct mem_cgroup_eventfd_list {
 static void mem_cgroup_threshold(struct mem_cgroup *mem);
 static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
 
-enum {
-       SCAN_BY_LIMIT,
-       SCAN_BY_SYSTEM,
-       NR_SCAN_CONTEXT,
-       SCAN_BY_SHRINK, /* not recorded now */
-};
-
-enum {
-       SCAN,
-       SCAN_ANON,
-       SCAN_FILE,
-       ROTATE,
-       ROTATE_ANON,
-       ROTATE_FILE,
-       FREED,
-       FREED_ANON,
-       FREED_FILE,
-       ELAPSED,
-       NR_SCANSTATS,
-};
-
-struct scanstat {
-       spinlock_t      lock;
-       unsigned long   stats[NR_SCAN_CONTEXT][NR_SCANSTATS];
-       unsigned long   rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS];
-};
-
-const char *scanstat_string[NR_SCANSTATS] = {
-       "scanned_pages",
-       "scanned_anon_pages",
-       "scanned_file_pages",
-       "rotated_pages",
-       "rotated_anon_pages",
-       "rotated_file_pages",
-       "freed_pages",
-       "freed_anon_pages",
-       "freed_file_pages",
-       "elapsed_ns",
-};
-#define SCANSTAT_WORD_LIMIT    "_by_limit"
-#define SCANSTAT_WORD_SYSTEM   "_by_system"
-#define SCANSTAT_WORD_HIERARCHY        "_under_hierarchy"
-
-
 /*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
@@ -313,8 +269,7 @@ struct mem_cgroup {
 
        /* For oom notifier event fd */
        struct list_head oom_notify;
-       /* For recording LRU-scan statistics */
-       struct scanstat scanstat;
+
        /*
         * Should we move charges of a task when a task is moved into this
         * mem_cgroup ? And what type of charges should we move ?
@@ -1678,44 +1633,6 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
 }
 #endif
 
-static void __mem_cgroup_record_scanstat(unsigned long *stats,
-                          struct memcg_scanrecord *rec)
-{
-
-       stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1];
-       stats[SCAN_ANON] += rec->nr_scanned[0];
-       stats[SCAN_FILE] += rec->nr_scanned[1];
-
-       stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1];
-       stats[ROTATE_ANON] += rec->nr_rotated[0];
-       stats[ROTATE_FILE] += rec->nr_rotated[1];
-
-       stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1];
-       stats[FREED_ANON] += rec->nr_freed[0];
-       stats[FREED_FILE] += rec->nr_freed[1];
-
-       stats[ELAPSED] += rec->elapsed;
-}
-
-static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec)
-{
-       struct mem_cgroup *mem;
-       int context = rec->context;
-
-       if (context >= NR_SCAN_CONTEXT)
-               return;
-
-       mem = rec->mem;
-       spin_lock(&mem->scanstat.lock);
-       __mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec);
-       spin_unlock(&mem->scanstat.lock);
-
-       mem = rec->root;
-       spin_lock(&mem->scanstat.lock);
-       __mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec);
-       spin_unlock(&mem->scanstat.lock);
-}
-
 /*
  * Scan the hierarchy if needed to reclaim memory. We remember the last child
  * we reclaimed from, so that we don't end up penalizing one child extensively
@@ -1740,9 +1657,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
        bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
        bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
-       struct memcg_scanrecord rec;
        unsigned long excess;
-       unsigned long scanned;
+       unsigned long nr_scanned;
 
        excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
 
@@ -1750,15 +1666,6 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        if (!check_soft && !shrink && root_mem->memsw_is_minimum)
                noswap = true;
 
-       if (shrink)
-               rec.context = SCAN_BY_SHRINK;
-       else if (check_soft)
-               rec.context = SCAN_BY_SYSTEM;
-       else
-               rec.context = SCAN_BY_LIMIT;
-
-       rec.root = root_mem;
-
        while (1) {
                victim = mem_cgroup_select_victim(root_mem);
                if (victim == root_mem) {
@@ -1799,23 +1706,14 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                        css_put(&victim->css);
                        continue;
                }
-               rec.mem = victim;
-               rec.nr_scanned[0] = 0;
-               rec.nr_scanned[1] = 0;
-               rec.nr_rotated[0] = 0;
-               rec.nr_rotated[1] = 0;
-               rec.nr_freed[0] = 0;
-               rec.nr_freed[1] = 0;
-               rec.elapsed = 0;
                /* we use swappiness of local cgroup */
                if (check_soft) {
                        ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
-                               noswap, zone, &rec, &scanned);
-                       *total_scanned += scanned;
+                               noswap, zone, &nr_scanned);
+                       *total_scanned += nr_scanned;
                } else
                        ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
-                                               noswap, &rec);
-               mem_cgroup_record_scanstat(&rec);
+                                               noswap);
                css_put(&victim->css);
                /*
                 * At shrinking usage, we can't check we should stop here or
@@ -3854,18 +3752,14 @@ try_to_free:
        /* try to free all pages in this cgroup */
        shrink = 1;
        while (nr_retries && mem->res.usage > 0) {
-               struct memcg_scanrecord rec;
                int progress;
 
                if (signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }
-               rec.context = SCAN_BY_SHRINK;
-               rec.mem = mem;
-               rec.root = mem;
                progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
-                                               false, &rec);
+                                               false);
                if (!progress) {
                        nr_retries--;
                        /* maybe some writeback is necessary */
@@ -4709,54 +4603,6 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
 }
 #endif /* CONFIG_NUMA */
 
-static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp,
-                               struct cftype *cft,
-                               struct cgroup_map_cb *cb)
-{
-       struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
-       char string[64];
-       int i;
-
-       for (i = 0; i < NR_SCANSTATS; i++) {
-               strcpy(string, scanstat_string[i]);
-               strcat(string, SCANSTAT_WORD_LIMIT);
-               cb->fill(cb, string,  mem->scanstat.stats[SCAN_BY_LIMIT][i]);
-       }
-
-       for (i = 0; i < NR_SCANSTATS; i++) {
-               strcpy(string, scanstat_string[i]);
-               strcat(string, SCANSTAT_WORD_SYSTEM);
-               cb->fill(cb, string,  mem->scanstat.stats[SCAN_BY_SYSTEM][i]);
-       }
-
-       for (i = 0; i < NR_SCANSTATS; i++) {
-               strcpy(string, scanstat_string[i]);
-               strcat(string, SCANSTAT_WORD_LIMIT);
-               strcat(string, SCANSTAT_WORD_HIERARCHY);
-               cb->fill(cb, string,  mem->scanstat.rootstats[SCAN_BY_LIMIT][i]);
-       }
-       for (i = 0; i < NR_SCANSTATS; i++) {
-               strcpy(string, scanstat_string[i]);
-               strcat(string, SCANSTAT_WORD_SYSTEM);
-               strcat(string, SCANSTAT_WORD_HIERARCHY);
-               cb->fill(cb, string,  mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]);
-       }
-       return 0;
-}
-
-static int mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp,
-                               unsigned int event)
-{
-       struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
-
-       spin_lock(&mem->scanstat.lock);
-       memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats));
-       memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats));
-       spin_unlock(&mem->scanstat.lock);
-       return 0;
-}
-
-
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
@@ -4827,11 +4673,6 @@ static struct cftype mem_cgroup_files[] = {
                .mode = S_IRUGO,
        },
 #endif
-       {
-               .name = "vmscan_stat",
-               .read_map = mem_cgroup_vmscan_stat_read,
-               .trigger = mem_cgroup_reset_vmscan_stat,
-       },
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -5095,7 +4936,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        atomic_set(&mem->refcnt, 1);
        mem->move_charge_at_immigrate = 0;
        mutex_init(&mem->thresholds_lock);
-       spin_lock_init(&mem->scanstat.lock);
        return &mem->css;
 free_out:
        __mem_cgroup_free(mem);
index 8b57173..9c51f9f 100644 (file)
@@ -636,7 +636,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
-       pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;
 
@@ -649,9 +648,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                vmstart = max(start, vma->vm_start);
                vmend   = min(end, vma->vm_end);
 
-               pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-                                 vma->anon_vma, vma->vm_file, pgoff, new_pol);
+                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+                                 new_pol);
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
@@ -1412,7 +1411,9 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy,
        err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
 
        if (!err && nmask) {
-               err = copy_from_user(bm, nm, alloc_size);
+               unsigned long copy_size;
+               copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
+               err = copy_from_user(bm, nm, copy_size);
                /* ensure entire bitmap is zeroed */
                err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
                err |= compat_put_bitmap(nmask, bm, nr_bits);
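The mempolicy hunk above clamps the copy length to min(sizeof(bm), alloc_size) so copy_from_user() can never write past the fixed-size on-stack bitmap when the caller asks for more. The same clamp in plain userspace terms (buffer sizes and names are arbitrary):

/* Never copy more into a fixed-size buffer than it can hold. */
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned long bm[8];                    /* fixed-size destination */
        unsigned long src[64] = { 0 };          /* possibly larger source */
        size_t requested = sizeof(src);         /* what the caller asked for */
        size_t copy_size = MIN(sizeof(bm), requested);

        memcpy(bm, src, copy_size);             /* bounded by the destination */
        printf("requested %zu, copied %zu\n", requested, copy_size);
        return 0;
}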
index 9f662d7..7c54fe8 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 */
                if (unlikely(!prior)) {
                        remove_full(s, page);
-                       add_partial(n, page, 0);
+                       add_partial(n, page, 1);
                        stat(s, FREE_ADD_PARTIAL);
                }
        }
index 7ef0903..5016f19 100644 (file)
@@ -2140,6 +2140,14 @@ struct vm_struct *alloc_vm_area(size_t size)
                return NULL;
        }
 
+       /*
+        * If the allocated address space is passed to a hypercall
+        * before being used then we cannot rely on a page fault to
+        * trigger an update of the page tables.  So sync all the page
+        * tables here.
+        */
+       vmalloc_sync_all();
+
        return area;
 }
 EXPORT_SYMBOL_GPL(alloc_vm_area);
index b7719ec..b55699c 100644 (file)
@@ -105,7 +105,6 @@ struct scan_control {
 
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
-       struct memcg_scanrecord *memcg_record;
 
        /*
         * Nodemask of nodes allowed by the caller. If NULL, all nodes
@@ -1349,8 +1348,6 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
                        int file = is_file_lru(lru);
                        int numpages = hpage_nr_pages(page);
                        reclaim_stat->recent_rotated[file] += numpages;
-                       if (!scanning_global_lru(sc))
-                               sc->memcg_record->nr_rotated[file] += numpages;
                }
                if (!pagevec_add(&pvec, page)) {
                        spin_unlock_irq(&zone->lru_lock);
@@ -1394,10 +1391,6 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
 
        reclaim_stat->recent_scanned[0] += *nr_anon;
        reclaim_stat->recent_scanned[1] += *nr_file;
-       if (!scanning_global_lru(sc)) {
-               sc->memcg_record->nr_scanned[0] += *nr_anon;
-               sc->memcg_record->nr_scanned[1] += *nr_file;
-       }
 }
 
 /*
@@ -1511,9 +1504,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                nr_reclaimed += shrink_page_list(&page_list, zone, sc);
        }
 
-       if (!scanning_global_lru(sc))
-               sc->memcg_record->nr_freed[file] += nr_reclaimed;
-
        local_irq_disable();
        if (current_is_kswapd())
                __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
@@ -1613,8 +1603,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        }
 
        reclaim_stat->recent_scanned[file] += nr_taken;
-       if (!scanning_global_lru(sc))
-               sc->memcg_record->nr_scanned[file] += nr_taken;
 
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        if (file)
@@ -1666,8 +1654,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * get_scan_ratio.
         */
        reclaim_stat->recent_rotated[file] += nr_rotated;
-       if (!scanning_global_lru(sc))
-               sc->memcg_record->nr_rotated[file] += nr_rotated;
 
        move_active_pages_to_lru(zone, &l_active,
                                                LRU_ACTIVE + file * LRU_FILE);
@@ -1808,23 +1794,15 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        u64 fraction[2], denominator;
        enum lru_list l;
        int noswap = 0;
-       int force_scan = 0;
+       bool force_scan = false;
        unsigned long nr_force_scan[2];
 
-
-       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
-       if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
-               /* kswapd does zone balancing and need to scan this zone */
-               if (scanning_global_lru(sc) && current_is_kswapd())
-                       force_scan = 1;
-               /* memcg may have small limit and need to avoid priority drop */
-               if (!scanning_global_lru(sc))
-                       force_scan = 1;
-       }
+       /* kswapd does zone balancing and needs to scan this zone */
+       if (scanning_global_lru(sc) && current_is_kswapd())
+               force_scan = true;
+       /* memcg may have small limit and need to avoid priority drop */
+       if (!scanning_global_lru(sc))
+               force_scan = true;
 
        /* If we have no swap space, do not bother scanning anon pages. */
        if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1837,6 +1815,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                goto out;
        }
 
+       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
        if (scanning_global_lru(sc)) {
                free  = zone_page_state(zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
@@ -2268,10 +2251,9 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
-                                       gfp_t gfp_mask, bool noswap,
-                                       struct zone *zone,
-                                       struct memcg_scanrecord *rec,
-                                       unsigned long *scanned)
+                                               gfp_t gfp_mask, bool noswap,
+                                               struct zone *zone,
+                                               unsigned long *nr_scanned)
 {
        struct scan_control sc = {
                .nr_scanned = 0,
@@ -2281,9 +2263,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                .may_swap = !noswap,
                .order = 0,
                .mem_cgroup = mem,
-               .memcg_record = rec,
        };
-       ktime_t start, end;
 
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2292,7 +2272,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                      sc.may_writepage,
                                                      sc.gfp_mask);
 
-       start = ktime_get();
        /*
         * NOTE: Although we can get the priority field, using it
         * here is not a good idea, since it limits the pages we can scan.
@@ -2301,25 +2280,19 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
         * the priority and make it zero.
         */
        shrink_zone(0, zone, &sc);
-       end = ktime_get();
-
-       if (rec)
-               rec->elapsed += ktime_to_ns(ktime_sub(end, start));
-       *scanned = sc.nr_scanned;
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
+       *nr_scanned = sc.nr_scanned;
        return sc.nr_reclaimed;
 }
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                           gfp_t gfp_mask,
-                                          bool noswap,
-                                          struct memcg_scanrecord *rec)
+                                          bool noswap)
 {
        struct zonelist *zonelist;
        unsigned long nr_reclaimed;
-       ktime_t start, end;
        int nid;
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
@@ -2328,7 +2301,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .order = 0,
                .mem_cgroup = mem_cont,
-               .memcg_record = rec,
                .nodemask = NULL, /* we don't care the placement */
                .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                                (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
@@ -2337,7 +2309,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .gfp_mask = sc.gfp_mask,
        };
 
-       start = ktime_get();
        /*
         * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
         * take care of from where we get pages. So the node where we start the
@@ -2352,9 +2323,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                            sc.gfp_mask);
 
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
-       end = ktime_get();
-       if (rec)
-               rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
index 20c18b7..d52b13d 100644 (file)
@@ -659,7 +659,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 }
 #endif
 
-#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS)
+#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
 #ifdef CONFIG_ZONE_DMA
 #define TEXT_FOR_DMA(xx) xx "_dma",
 #else
@@ -788,7 +788,7 @@ const char * const vmstat_text[] = {
 
 #endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
-#endif /* CONFIG_PROC_FS || CONFIG_SYSFS */
+#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
 
 
 #ifdef CONFIG_PROC_FS
index 175b513..e317583 100644 (file)
@@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 {
        int in, out, inp, outp;
        struct virtio_chan *chan = client->trans;
-       char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
        unsigned long flags;
        size_t pdata_off = 0;
        struct trans_rpage_info *rpinfo = NULL;
@@ -346,7 +345,8 @@ req_retry_pinned:
                 * Arrange in such a way that server places header in the
                 * alloced memory and payload onto the user buffer.
                 */
-               inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
+               inp = pack_sg_list(chan->sg, out,
+                                  VIRTQUEUE_NUM, req->rc->sdata, 11);
                /*
                 * Running executables in the filesystem may result in
                 * a read request with kernel buffer as opposed to user buffer.
@@ -366,8 +366,8 @@ req_retry_pinned:
                }
                in += inp;
        } else {
-               in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
-                               req->rc->capacity);
+               in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
+                                 req->rc->sdata, req->rc->capacity);
        }
 
        err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
        .close = p9_virtio_close,
        .request = p9_virtio_request,
        .cancel = p9_virtio_cancel,
-       .maxsize = PAGE_SIZE*VIRTQUEUE_NUM,
+
+       /*
+        * We leave one entry for input and one entry for response
+        * headers. We also skip one more entry to accommodate addresses
+        * that are not at a page boundary, which can result in an extra
+        * page in zero copy.
+        */
+       .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
        .pref = P9_TRANS_PREF_PAYLOAD_SEP,
        .def = 0,
        .owner = THIS_MODULE,
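The new .maxsize above budgets the virtqueue scatter-gather entries: one is reserved for the request header, one for the response header, and one for a payload that does not start on a page boundary, leaving PAGE_SIZE * (VIRTQUEUE_NUM - 3) bytes for data. The concrete numbers below assume 4 KiB pages and a 128-entry virtqueue, which is an assumption stated here rather than something visible in this hunk:

/* Budget arithmetic behind the new maxsize. */
#include <stdio.h>

#define PAGE_SIZE_BYTES 4096UL
#define VIRTQUEUE_NUM   128UL
#define RESERVED_SLOTS  3UL     /* in-header + out-header + unaligned spill */

int main(void)
{
        unsigned long maxsize = PAGE_SIZE_BYTES * (VIRTQUEUE_NUM - RESERVED_SLOTS);

        printf("maxsize = %lu bytes (%lu KiB)\n", maxsize, maxsize / 1024);
        /* 4096 * 125 = 512000 bytes = 500 KiB */
        return 0;
}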
index a40170e..7ef4eb4 100644 (file)
@@ -58,8 +58,8 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
        if (status)
                return;
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                               test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+                       test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 0);
 
        hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -76,8 +76,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
        if (status)
                return;
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                               test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+                               test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 0);
 
        hci_conn_check_pending(hdev);
@@ -959,9 +959,8 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
                return;
        }
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                                       !test_and_set_bit(HCI_INQUIRY,
-                                                       &hdev->flags))
+       if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
+                               test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 1);
 }
 
@@ -1340,8 +1339,8 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
 
        BT_DBG("%s status %d", hdev->name, status);
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                               test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+                               test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 0);
 
        hci_req_complete(hdev, HCI_OP_INQUIRY, status);
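The hci_event hunks above swap the operand order so the side-effecting test_and_clear_bit(HCI_INQUIRY) runs first: with the old order, && short-circuited when HCI_MGMT was unset and the inquiry bit was never cleared. A small demo of why the ordering matters, with plain ints standing in for the atomic flag bits:

/* && short-circuits, so a side effect in the second operand can be skipped. */
#include <stdio.h>
#include <stdbool.h>

static int inquiry_flag = 1;    /* pretend HCI_INQUIRY is set */

static bool test_and_clear(int *flag)
{
        bool was_set = (*flag != 0);
        *flag = 0;
        return was_set;
}

int main(void)
{
        bool mgmt_enabled = false;      /* HCI_MGMT not set */

        /* old order: test_and_clear() never runs, the flag stays stuck */
        if (mgmt_enabled && test_and_clear(&inquiry_flag))
                printf("notify (old order)\n");
        printf("after old order, flag = %d\n", inquiry_flag);   /* still 1 */

        /* new order: the flag is always cleared; the notification still
         * depends on both conditions */
        if (test_and_clear(&inquiry_flag) && mgmt_enabled)
                printf("notify (new order)\n");
        printf("after new order, flag = %d\n", inquiry_flag);   /* now 0 */
        return 0;
}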
index d5f2d97..1f4cb30 100644 (file)
@@ -7,27 +7,37 @@
 
 #include <linux/ceph/msgpool.h>
 
-static void *alloc_fn(gfp_t gfp_mask, void *arg)
+static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
 {
        struct ceph_msgpool *pool = arg;
-       void *p;
+       struct ceph_msg *msg;
 
-       p = ceph_msg_new(0, pool->front_len, gfp_mask);
-       if (!p)
-               pr_err("msgpool %s alloc failed\n", pool->name);
-       return p;
+       msg = ceph_msg_new(0, pool->front_len, gfp_mask);
+       if (!msg) {
+               dout("msgpool_alloc %s failed\n", pool->name);
+       } else {
+               dout("msgpool_alloc %s %p\n", pool->name, msg);
+               msg->pool = pool;
+       }
+       return msg;
 }
 
-static void free_fn(void *element, void *arg)
+static void msgpool_free(void *element, void *arg)
 {
-       ceph_msg_put(element);
+       struct ceph_msgpool *pool = arg;
+       struct ceph_msg *msg = element;
+
+       dout("msgpool_release %s %p\n", pool->name, msg);
+       msg->pool = NULL;
+       ceph_msg_put(msg);
 }
 
 int ceph_msgpool_init(struct ceph_msgpool *pool,
                      int front_len, int size, bool blocking, const char *name)
 {
+       dout("msgpool %s init\n", name);
        pool->front_len = front_len;
-       pool->pool = mempool_create(size, alloc_fn, free_fn, pool);
+       pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
        if (!pool->pool)
                return -ENOMEM;
        pool->name = name;
@@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool,
 
 void ceph_msgpool_destroy(struct ceph_msgpool *pool)
 {
+       dout("msgpool %s destroy\n", pool->name);
        mempool_destroy(pool->pool);
 }
 
 struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
                                  int front_len)
 {
+       struct ceph_msg *msg;
+
        if (front_len > pool->front_len) {
-               pr_err("msgpool_get pool %s need front %d, pool size is %d\n",
+               dout("msgpool_get %s need front %d, pool size is %d\n",
                       pool->name, front_len, pool->front_len);
                WARN_ON(1);
 
@@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
                return ceph_msg_new(0, front_len, GFP_NOFS);
        }
 
-       return mempool_alloc(pool->pool, GFP_NOFS);
+       msg = mempool_alloc(pool->pool, GFP_NOFS);
+       dout("msgpool_get %s %p\n", pool->name, msg);
+       return msg;
 }
 
 void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
 {
+       dout("msgpool_put %s %p\n", pool->name, msg);
+
        /* reset msg front_len; user may have changed it */
        msg->front.iov_len = pool->front_len;
        msg->hdr.front_len = cpu_to_le32(pool->front_len);
 
        kref_init(&msg->kref);  /* retake single ref */
+       mempool_free(msg, pool->pool);
 }
index ce310ee..16836a7 100644 (file)
@@ -685,6 +685,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
        put_osd(osd);
 }
 
+static void remove_all_osds(struct ceph_osd_client *osdc)
+{
+       dout("__remove_old_osds %p\n", osdc);
+       mutex_lock(&osdc->request_mutex);
+       while (!RB_EMPTY_ROOT(&osdc->osds)) {
+               struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+                                               struct ceph_osd, o_node);
+               __remove_osd(osdc, osd);
+       }
+       mutex_unlock(&osdc->request_mutex);
+}
+
 static void __move_osd_to_lru(struct ceph_osd_client *osdc,
                              struct ceph_osd *osd)
 {
@@ -701,14 +713,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd)
                list_del_init(&osd->o_osd_lru);
 }
 
-static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
+static void remove_old_osds(struct ceph_osd_client *osdc)
 {
        struct ceph_osd *osd, *nosd;
 
        dout("__remove_old_osds %p\n", osdc);
        mutex_lock(&osdc->request_mutex);
        list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
-               if (!remove_all && time_before(jiffies, osd->lru_ttl))
+               if (time_before(jiffies, osd->lru_ttl))
                        break;
                __remove_osd(osdc, osd);
        }
@@ -751,6 +763,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
        struct rb_node *parent = NULL;
        struct ceph_osd *osd = NULL;
 
+       dout("__insert_osd %p osd%d\n", new, new->o_osd);
        while (*p) {
                parent = *p;
                osd = rb_entry(parent, struct ceph_osd, o_node);
@@ -1144,7 +1157,7 @@ static void handle_osds_timeout(struct work_struct *work)
 
        dout("osds timeout\n");
        down_read(&osdc->map_sem);
-       remove_old_osds(osdc, 0);
+       remove_old_osds(osdc);
        up_read(&osdc->map_sem);
 
        schedule_delayed_work(&osdc->osds_timeout_work,
@@ -1862,8 +1875,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
                ceph_osdmap_destroy(osdc->osdmap);
                osdc->osdmap = NULL;
        }
-       remove_old_osds(osdc, 1);
-       WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
+       remove_all_osds(osdc);
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
index ea0d218..21fab3e 100644 (file)
@@ -1124,7 +1124,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
                return 0;
 
        /* ...Then it's D-SACK, and must reside below snd_una completely */
-       if (!after(end_seq, tp->snd_una))
+       if (after(end_seq, tp->snd_una))
                return 0;
 
        if (!before(start_seq, tp->undo_marker))
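The tcp_input hunk above drops the stray negation: a D-SACK block is only acceptable when it lies entirely at or below snd_una, so the function must bail out when end_seq is after snd_una, not the other way round. A standalone sketch of the wrap-safe sequence comparison and the corrected predicate (values arbitrary):

/* Wraparound-safe TCP-style sequence comparison plus the fixed check. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static bool before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;      /* wrap-safe "<" */
}
#define after(seq2, seq1) before(seq1, seq2)

static bool dsack_block_ok(uint32_t end_seq, uint32_t snd_una)
{
        if (after(end_seq, snd_una))    /* ends beyond snd_una: invalid */
                return false;
        return true;
}

int main(void)
{
        uint32_t snd_una = 1000;

        printf("%d\n", dsack_block_ok(900, snd_una));           /* 1: valid   */
        printf("%d\n", dsack_block_ok(1000, snd_una));          /* 1: valid   */
        printf("%d\n", dsack_block_ok(1100, snd_una));          /* 0: invalid */
        printf("%d\n", dsack_block_ok(0xFFFFFFF0u, snd_una));   /* 1: wrapped, still below */
        return 0;
}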
index 02751db..68a471b 100644 (file)
@@ -852,6 +852,7 @@ static void handle_channel(struct wiphy *wiphy,
                return;
        }
 
+       chan->beacon_found = false;
        chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
        chan->max_antenna_gain = min(chan->orig_mag,
                (int) MBI_TO_DBI(power_rule->max_antenna_gain));
index b7b6ff8..dec0fa2 100644 (file)
@@ -118,6 +118,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
                             i++, j++)
                                request->channels[i] =
                                        &wdev->wiphy->bands[band]->channels[j];
+                       request->rates[band] =
+                               (1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
                }
        }
        request->n_channels = n_channels;
index 86d0caf..62e90b8 100644 (file)
@@ -1761,6 +1761,10 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
        snd_pcm_uframes_t avail = 0;
        long wait_time, tout;
 
+       init_waitqueue_entry(&wait, current);
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&runtime->tsleep, &wait);
+
        if (runtime->no_period_wakeup)
                wait_time = MAX_SCHEDULE_TIMEOUT;
        else {
@@ -1771,16 +1775,32 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
                }
                wait_time = msecs_to_jiffies(wait_time * 1000);
        }
-       init_waitqueue_entry(&wait, current);
-       add_wait_queue(&runtime->tsleep, &wait);
+
        for (;;) {
                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        break;
                }
+
+               /*
+                * We need to check if space became available already
+                * (and thus the wakeup happened already) first to close
+                * the race of space already having become available.
+                * This check must happen after been added to the waitqueue
+                * and having current state be INTERRUPTIBLE.
+                */
+               if (is_playback)
+                       avail = snd_pcm_playback_avail(runtime);
+               else
+                       avail = snd_pcm_capture_avail(runtime);
+               if (avail >= runtime->twake)
+                       break;
                snd_pcm_stream_unlock_irq(substream);
-               tout = schedule_timeout_interruptible(wait_time);
+
+               tout = schedule_timeout(wait_time);
+
                snd_pcm_stream_lock_irq(substream);
+               set_current_state(TASK_INTERRUPTIBLE);
                switch (runtime->status->state) {
                case SNDRV_PCM_STATE_SUSPENDED:
                        err = -ESTRPIPE;
@@ -1806,14 +1826,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
                        err = -EIO;
                        break;
                }
-               if (is_playback)
-                       avail = snd_pcm_playback_avail(runtime);
-               else
-                       avail = snd_pcm_capture_avail(runtime);
-               if (avail >= runtime->twake)
-                       break;
        }
  _endloop:
+       set_current_state(TASK_RUNNING);
        remove_wait_queue(&runtime->tsleep, &wait);
        *availp = avail;
        return err;
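The wait_for_avail() hunk above moves add_wait_queue() and the INTERRUPTIBLE state change ahead of the availability check, so a wakeup that races with the check cannot be lost and the loop never sleeps on space that is already there. The userspace analogue of the same ordering rule, expressed with a pthread condition variable (check the predicate under the mutex before and after waiting):

/* Register interest and re-check the predicate before sleeping. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int             avail;           /* the predicate, protected by lock */

static void *producer(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        avail = 1;                       /* space became available */
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, producer, NULL);

        pthread_mutex_lock(&lock);
        while (!avail)                   /* check before (and after) sleeping */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);

        printf("woke up with avail = %d\n", avail);
        return 0;
}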
index 3e7850c..f3aefef 100644 (file)
@@ -579,9 +579,13 @@ int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
                return -1;
        }
        recursive++;
-       for (i = 0; i < nums; i++)
+       for (i = 0; i < nums; i++) {
+               unsigned int type = get_wcaps_type(get_wcaps(codec, conn[i]));
+               if (type == AC_WID_PIN || type == AC_WID_AUD_OUT)
+                       continue;
                if (snd_hda_get_conn_index(codec, conn[i], nid, recursive) >= 0)
                        return i;
+       }
        return -1;
 }
 EXPORT_SYMBOL_HDA(snd_hda_get_conn_index);
index d6c93d9..c45f3e6 100644 (file)
@@ -535,7 +535,7 @@ static int add_volume(struct hda_codec *codec, const char *name,
                      int index, unsigned int pval, int dir,
                      struct snd_kcontrol **kctlp)
 {
-       char tmp[32];
+       char tmp[44];
        struct snd_kcontrol_new knew =
                HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT);
        knew.private_value = pval;
index a118a0f..5956584 100644 (file)
@@ -103,7 +103,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
                .cpu_dai_name = "bfin-tdm.0",
                .codec_dai_name ="ad193x-hifi",
                .platform_name = "bfin-tdm-pcm-audio",
-               .codec_name = "ad193x.5",
+               .codec_name = "spi0.5",
                .ops = &bf5xx_ad193x_ops,
        },
        {
@@ -112,7 +112,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
                .cpu_dai_name = "bfin-tdm.1",
                .codec_dai_name ="ad193x-hifi",
                .platform_name = "bfin-tdm-pcm-audio",
-               .codec_name = "ad193x.5",
+               .codec_name = "spi0.5",
                .ops = &bf5xx_ad193x_ops,
        },
 };
index fd0dc46..5c6c245 100644 (file)
@@ -369,7 +369,7 @@ static struct snd_soc_platform_driver mpc5200_audio_dma_platform = {
        .pcm_free       = &psc_dma_free,
 };
 
-static int mpc5200_hpcd_probe(struct of_device *op)
+static int mpc5200_hpcd_probe(struct platform_device *op)
 {
        phys_addr_t fifo;
        struct psc_dma *psc_dma;
@@ -487,7 +487,7 @@ out_unmap:
        return ret;
 }
 
-static int mpc5200_hpcd_remove(struct of_device *op)
+static int mpc5200_hpcd_remove(struct platform_device *op)
 {
        struct psc_dma *psc_dma = dev_get_drvdata(&op->dev);
 
@@ -519,7 +519,7 @@ MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match);
 static struct platform_driver mpc5200_hpcd_of_driver = {
        .probe          = mpc5200_hpcd_probe,
        .remove         = mpc5200_hpcd_remove,
-       .dev = {
+       .driver = {
                .owner          = THIS_MODULE,
                .name           = "mpc5200-pcm-audio",
                .of_match_table    = mpc5200_hpcd_match,
index 309c59e..7945625 100644 (file)
@@ -240,7 +240,6 @@ static int ssi_irq = 0;
 
 static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
 {
-       struct snd_card *card = rtd->card->snd_card;
        struct snd_soc_dai *dai = rtd->cpu_dai;
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
index 8f16cd3..d0bcf3f 100644 (file)
@@ -424,7 +424,7 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)
        if (!priv->mem) {
                dev_err(&pdev->dev, "request_mem_region failed\n");
                err = -EBUSY;
-               goto error_alloc;
+               goto err_alloc;
        }
 
        priv->io = ioremap(priv->mem->start, SZ_16K);
index d9f8ade..20b7f3b 100644 (file)
@@ -203,14 +203,14 @@ static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
                rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
                for (i = 0; i < rbnode->blklen; ++i) {
                        regtmp = rbnode->base_reg + i;
-                       WARN_ON(codec->writable_register &&
-                               codec->writable_register(codec, regtmp));
                        val = snd_soc_rbtree_get_register(rbnode, i);
                        def = snd_soc_get_cache_val(codec->reg_def_copy, i,
                                                    rbnode->word_size);
                        if (val == def)
                                continue;
 
+                       WARN_ON(!snd_soc_codec_writable_register(codec, regtmp));
+
                        codec->cache_bypass = 1;
                        ret = snd_soc_write(codec, regtmp, val);
                        codec->cache_bypass = 0;
@@ -563,8 +563,7 @@ static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
 
        lzo_blocks = codec->reg_cache;
        for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
-               WARN_ON(codec->writable_register &&
-                       codec->writable_register(codec, i));
+               WARN_ON(!snd_soc_codec_writable_register(codec, i));
                ret = snd_soc_cache_read(codec, i, &val);
                if (ret)
                        return ret;
@@ -823,8 +822,6 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
 
        codec_drv = codec->driver;
        for (i = 0; i < codec_drv->reg_cache_size; ++i) {
-               WARN_ON(codec->writable_register &&
-                       codec->writable_register(codec, i));
                ret = snd_soc_cache_read(codec, i, &val);
                if (ret)
                        return ret;
@@ -832,6 +829,9 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
                        if (snd_soc_get_cache_val(codec->reg_def_copy,
                                                  i, codec_drv->reg_word_size) == val)
                                continue;
+
+               WARN_ON(!snd_soc_codec_writable_register(codec, i));
+
                ret = snd_soc_write(codec, i, val);
                if (ret)
                        return ret;
index b085d8e..d2ef014 100644 (file)
@@ -1633,7 +1633,7 @@ int snd_soc_codec_readable_register(struct snd_soc_codec *codec,
        if (codec->readable_register)
                return codec->readable_register(codec, reg);
        else
-               return 0;
+               return 1;
 }
 EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register);
 
@@ -1651,7 +1651,7 @@ int snd_soc_codec_writable_register(struct snd_soc_codec *codec,
        if (codec->writable_register)
                return codec->writable_register(codec, reg);
        else
-               return 0;
+               return 1;
 }
 EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register);
 
index 7e15914..d67c637 100644 (file)
@@ -2763,7 +2763,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
 
 /**
  * snd_soc_dapm_free - free dapm resources
- * @card: SoC device
+ * @dapm: DAPM context
  *
  * Free all dapm widgets and resources.
  */
index 38b0013..fa31d9c 100644 (file)
@@ -105,7 +105,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
 
        snd_soc_dapm_sync(dapm);
 
-       snd_jack_report(jack->jack, status);
+       snd_jack_report(jack->jack, jack->status);
 
 out:
        mutex_unlock(&codec->mutex);
index fff6450..e8d5c55 100644 (file)
@@ -8,7 +8,10 @@
  * published by the Free Software Foundation.
  */
 
+#include <stdlib.h>
+#ifndef __UCLIBC__
 #include <libio.h>
+#endif
 #include <dwarf-regs.h>
 
 struct pt_regs_dwarfnum {