Merge branches 'acrux', 'logitech', 'multitouch', 'roccat' and 'wiimote' into for...
author Jiri Kosina <jkosina@suse.cz>
Tue, 25 Oct 2011 07:54:16 +0000 (09:54 +0200)
committer Jiri Kosina <jkosina@suse.cz>
Tue, 25 Oct 2011 07:54:16 +0000 (09:54 +0200)
552 files changed:
Documentation/ABI/testing/sysfs-class-scsi_host [new file with mode: 0644]
Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff [new file with mode: 0644]
Documentation/DocBook/media/v4l/controls.xml
Documentation/cgroups/memory.txt
Documentation/hwmon/coretemp
Documentation/hwmon/max16065
Documentation/ioctl/ioctl-number.txt
Documentation/kernel-parameters.txt
Documentation/networking/dmfe.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/scaling.txt
Documentation/vm/transhuge.txt
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/arm/Kconfig
arch/arm/boot/dts/tegra-harmony.dts
arch/arm/boot/dts/tegra-seaboard.dts
arch/arm/include/asm/futex.h
arch/arm/include/asm/hardware/cache-l2x0.h
arch/arm/include/asm/unistd.h
arch/arm/kernel/smp_scu.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/mach-cns3xxx/include/mach/entry-macro.S
arch/arm/mach-cns3xxx/include/mach/system.h
arch/arm/mach-cns3xxx/include/mach/uncompress.h
arch/arm/mach-cns3xxx/pcie.c
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/include/mach/psc.h
arch/arm/mach-davinci/sleep.S
arch/arm/mach-dove/common.c
arch/arm/mach-exynos4/clock.c
arch/arm/mach-exynos4/mct.c
arch/arm/mach-exynos4/platsmp.c
arch/arm/mach-exynos4/setup-keypad.c
arch/arm/mach-integrator/integrator_ap.c
arch/arm/mach-integrator/pci_v3.c
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/clock44xx_data.c
arch/arm/mach-omap2/clockdomain.c
arch/arm/mach-omap2/omap_hwmod_2430_data.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/powerdomain.c
arch/arm/mach-prima2/clock.c
arch/arm/mach-prima2/irq.c
arch/arm/mach-prima2/rstc.c
arch/arm/mach-prima2/timer.c
arch/arm/mach-s3c2443/clock.c
arch/arm/mach-s3c64xx/mach-smdk6410.c
arch/arm/mach-s5pv210/clock.c
arch/arm/mm/abort-macro.S
arch/arm/mm/cache-l2x0.c
arch/arm/mm/cache-v7.S
arch/arm/mm/dma-mapping.c
arch/arm/mm/init.c
arch/arm/plat-omap/omap_device.c
arch/arm/plat-s5p/irq-gpioint.c
arch/arm/plat-samsung/clock.c
arch/arm/plat-samsung/include/plat/clock.h
arch/arm/plat-samsung/include/plat/watchdog-reset.h
arch/openrisc/include/asm/dma-mapping.h
arch/openrisc/include/asm/sigcontext.h
arch/openrisc/kernel/dma.c
arch/openrisc/kernel/signal.c
arch/powerpc/platforms/powermac/pci.c
arch/s390/include/asm/elf.h
arch/s390/include/asm/pgtable.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry64.S
arch/s390/kvm/kvm-s390.c
arch/s390/mm/pgtable.c
arch/sparc/include/asm/spitfire.h
arch/sparc/include/asm/xor_64.h
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/head_64.S
arch/sparc/kernel/process_32.c
arch/sparc/kernel/process_64.c
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/setup_64.c
arch/sparc/mm/init_64.c
arch/um/Kconfig.x86
arch/um/Makefile
arch/um/drivers/line.c
arch/um/drivers/xterm.c
arch/um/include/asm/ptrace-generic.h
arch/um/include/shared/line.h
arch/um/include/shared/registers.h
arch/um/kernel/process.c
arch/um/kernel/ptrace.c
arch/um/os-Linux/registers.c
arch/um/os-Linux/skas/mem.c
arch/um/os-Linux/skas/process.c
arch/um/sys-i386/asm/ptrace.h
arch/um/sys-i386/ptrace.c
arch/um/sys-i386/shared/sysdep/ptrace.h
arch/um/sys-x86_64/ptrace.c
arch/um/sys-x86_64/shared/sysdep/ptrace.h
arch/x86/include/asm/alternative-asm.h
arch/x86/include/asm/alternative.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/pvclock.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/rtc.c
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
arch/x86/pci/acpi.c
arch/x86/platform/mrst/vrtc.c
arch/x86/xen/mmu.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
arch/x86/xen/time.c
arch/x86/xen/xen-asm_32.S
block/blk-cgroup.c
block/blk-core.c
block/blk-softirq.c
block/blk-sysfs.c
block/cfq-iosched.c
drivers/acpi/acpica/acconfig.h
drivers/acpi/apei/Kconfig
drivers/acpi/apei/apei-base.c
drivers/base/power/clock_ops.c
drivers/base/regmap/regmap.c
drivers/block/floppy.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c
drivers/bluetooth/btusb.c
drivers/bluetooth/btwilink.c
drivers/char/tpm/Kconfig
drivers/char/tpm/tpm.c
drivers/char/tpm/tpm_nsc.c
drivers/cpufreq/pcc-cpufreq.c
drivers/dma/ste_dma40.c
drivers/firewire/ohci.c
drivers/gpio/gpio-generic.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nv04_crtc.c
drivers/gpu/drm/nouveau/nv50_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/hid/Kconfig
drivers/hid/Makefile
drivers/hid/hid-axff.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-lg.c
drivers/hid/hid-lg.h
drivers/hid/hid-lg4ff.c
drivers/hid/hid-lgff.c
drivers/hid/hid-logitech-dj.c [new file with mode: 0644]
drivers/hid/hid-logitech-dj.h [new file with mode: 0644]
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-wacom.c
drivers/hid/hid-wiimote.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/coretemp.c
drivers/hwmon/ds620.c
drivers/hwmon/max16065.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwmon/pmbus/ucd9000.c
drivers/hwmon/pmbus/ucd9200.c
drivers/hwmon/w83791d.c
drivers/i2c/busses/i2c-pxa-pci.c
drivers/i2c/busses/i2c-tegra.c
drivers/ide/ide-disk.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/input/keyboard/adp5588-keys.c
drivers/input/misc/cm109.c
drivers/input/mouse/bcm5974.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/wacom_w8001.c
drivers/iommu/amd_iommu.c
drivers/iommu/dmar.c
drivers/leds/ledtrig-timer.c
drivers/md/dm-crypt.c
drivers/md/dm-flakey.c
drivers/md/dm-raid.c
drivers/md/dm-table.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb/dvb-usb/vp7045.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/nuvoton-cir.h
drivers/media/video/gspca/ov519.c
drivers/media/video/gspca/sonixj.c
drivers/media/video/omap/omap_vout.c
drivers/media/video/omap3isp/ispccdc.c
drivers/media/video/pwc/pwc-v4l.c
drivers/media/video/uvc/uvc_driver.c
drivers/media/video/uvc/uvc_entity.c
drivers/media/video/uvc/uvc_video.c
drivers/media/video/uvc/uvcvideo.h
drivers/media/video/v4l2-dev.c
drivers/media/video/v4l2-device.c
drivers/media/video/via-camera.c
drivers/mfd/jz4740-adc.c
drivers/mfd/max8997.c
drivers/mfd/omap-usb-host.c
drivers/mfd/tps65910-irq.c
drivers/mfd/twl4030-madc.c
drivers/mfd/wm8350-gpio.c
drivers/misc/lis3lv02d/lis3lv02d.c
drivers/misc/pti.c
drivers/mmc/card/block.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/host.h
drivers/mmc/core/sd.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sh_mobile_sdhi.c
drivers/mtd/ubi/debug.h
drivers/net/Kconfig
drivers/net/arm/am79c961a.c
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_dcb.c
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/bnx2x/bnx2x_link.c
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bnx2x/bnx2x_reg.h
drivers/net/bnx2x/bnx2x_stats.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/can/ti_hecc.c
drivers/net/cxgb3/cxgb3_offload.c
drivers/net/cxgb3/l2t.c
drivers/net/cxgb3/l2t.h
drivers/net/cxgb4/cxgb4_main.c
drivers/net/e1000/e1000_hw.c
drivers/net/gianfar_ethtool.c
drivers/net/greth.c
drivers/net/greth.h
drivers/net/ibmveth.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/macvlan.c
drivers/net/netconsole.c
drivers/net/pch_gbe/pch_gbe.h
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/phy/dp83640.c
drivers/net/ppp_generic.c
drivers/net/pxa168_eth.c
drivers/net/r8169.c
drivers/net/sfc/efx.c
drivers/net/sfc/io.h
drivers/net/sfc/mcdi.c
drivers/net/sfc/nic.c
drivers/net/sfc/nic.h
drivers/net/sfc/siena.c
drivers/net/sfc/workarounds.h
drivers/net/tg3.c
drivers/net/usb/ipheth.c
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlegacy/iwl-3945-rs.c
drivers/net/wireless/iwlegacy/iwl-core.c
drivers/net/wireless/iwlegacy/iwl-hcmd.c
drivers/net/wireless/iwlegacy/iwl-tx.c
drivers/net/wireless/iwlegacy/iwl3945-base.c
drivers/net/wireless/iwlegacy/iwl4965-base.c
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-scan.c
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/xen-netback/interface.c
drivers/pci/hotplug/pcihp_slot.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/rtc/rtc-ep93xx.c
drivers/rtc/rtc-imxdi.c
drivers/rtc/rtc-lib.c
drivers/rtc/rtc-s3c.c
drivers/rtc/rtc-twl.c
drivers/s390/cio/cio.c
drivers/scsi/3w-9xxx.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/aacraid/commsup.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/hpsa.c
drivers/scsi/isci/host.c
drivers/scsi/isci/host.h
drivers/scsi/isci/init.c
drivers/scsi/isci/phy.c
drivers/scsi/isci/registers.h
drivers/scsi/isci/request.c
drivers/scsi/isci/unsolicited_frame_control.c
drivers/scsi/isci/unsolicited_frame_control.h
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/Kconfig
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-imx.c
drivers/spi/spi-topcliff-pch.c
drivers/staging/comedi/drivers/ni_labpc.c
drivers/staging/zcache/zcache-main.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_cdb.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tcm_fc.h
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/target/tcm_fc/tfc_io.c
drivers/tty/serial/crisv10.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-ring.c
drivers/video/backlight/backlight.c
drivers/watchdog/hpwdt.c
drivers/watchdog/lantiq_wdt.c
drivers/watchdog/sbc_epx_c3.c
drivers/watchdog/watchdog_dev.c
drivers/xen/events.c
drivers/zorro/zorro.c
fs/9p/v9fs_vfs.h
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/block_dev.c
fs/btrfs/btrfs_inode.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/transaction.c
fs/btrfs/xattr.c
fs/ceph/mds_client.c
fs/ceph/super.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/ext3/inode.c
fs/ext3/namei.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/fuse/dev.c
fs/fuse/inode.c
fs/gfs2/log.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/gfs2/quota.c
fs/hfsplus/super.c
fs/hfsplus/wrapper.c
fs/namei.c
fs/namespace.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4renewd.c
fs/nfs/nfs4state.c
fs/nfs/super.c
fs/nfs/write.c
fs/proc/task_mmu.c
fs/quota/quota.c
fs/stat.c
fs/ubifs/debug.h
fs/xfs/xfs_aops.c
include/linux/basic_mmio_gpio.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/device-mapper.h
include/linux/fs.h
include/linux/hid.h
include/linux/irqdomain.h
include/linux/kvm.h
include/linux/memcontrol.h
include/linux/mfd/wm8994/pdata.h
include/linux/namei.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/ptp_classify.h
include/linux/regulator/consumer.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/snmp.h
include/linux/swap.h
include/net/9p/9p.h
include/net/cfg80211.h
include/net/flow.h
include/net/request_sock.h
include/net/sctp/command.h
include/net/tcp.h
include/net/transp_v6.h
include/trace/events/writeback.h
init/main.c
kernel/events/core.c
kernel/irq/chip.c
kernel/irq/irqdomain.c
kernel/posix-cpu-timers.c
kernel/ptrace.c
kernel/resource.c
kernel/sched.c
kernel/sched_rt.c
kernel/taskstats.c
kernel/time/alarmtimer.c
kernel/tsacct.c
kernel/workqueue.c
lib/sha1.c
lib/xz/xz_dec_bcj.c
mm/backing-dev.c
mm/filemap.c
mm/memcontrol.c
mm/mempolicy.c
mm/slub.c
mm/vmalloc.c
mm/vmscan.c
mm/vmstat.c
net/9p/trans_virtio.c
net/batman-adv/soft-interface.c
net/bluetooth/hci_event.c
net/bridge/br_device.c
net/bridge/netfilter/Kconfig
net/caif/caif_dev.c
net/can/af_can.c
net/can/bcm.c
net/ceph/ceph_common.c
net/ceph/messenger.c
net/ceph/msgpool.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/core/dev.c
net/core/fib_rules.c
net/core/flow.c
net/core/skbuff.c
net/ethernet/eth.c
net/ipv4/af_inet.c
net/ipv4/fib_semantics.c
net/ipv4/netfilter/ip_queue.c
net/ipv4/proc.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/ip6_queue.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/irsysctl.c
net/irda/qos.c
net/mac80211/sta_info.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_pptp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nfnetlink_queue.c
net/netfilter/xt_rateest.c
net/packet/af_packet.c
net/rds/iw_rdma.c
net/sched/cls_rsvp.h
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/sme.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
sound/core/pcm_lib.c
sound/pci/fm801.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/blackfin/bf5xx-ad193x.c
sound/soc/blackfin/bf5xx-ad73311.c
sound/soc/codecs/ssm2602.c
sound/soc/codecs/wm8753.c
sound/soc/codecs/wm8962.c
sound/soc/fsl/mpc5200_dma.c
sound/soc/imx/imx-pcm-fiq.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/omap/mcpdm.c
sound/soc/omap/mcpdm.h
sound/soc/omap/omap-mcbsp.c
sound/soc/pxa/zylonite.c
sound/soc/soc-cache.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-jack.c
sound/usb/card.c
tools/perf/Makefile
tools/perf/builtin-record.c
tools/perf/builtin-test.c
tools/perf/builtin-top.c
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/probe-finder.c
tools/perf/util/python.c
tools/perf/util/session.h
tools/perf/util/sort.c
tools/perf/util/symbol.c

diff --git a/Documentation/ABI/testing/sysfs-class-scsi_host b/Documentation/ABI/testing/sysfs-class-scsi_host
new file mode 100644 (file)
index 0000000..29a4f89
--- /dev/null
@@ -0,0 +1,13 @@
+What:          /sys/class/scsi_host/hostX/isci_id
+Date:          June 2011
+Contact:       Dave Jiang <dave.jiang@intel.com>
+Description:
+               This file contains the enumerated host ID for the Intel
+               SCU controller. The Intel(R) C600 Series Chipset SATA/SAS
+               Storage Control Unit embeds up to two 4-port controllers in
+               a single PCI device.  The controllers are enumerated in order
+               which usually means the lowest number scsi_host corresponds
+               with the first controller, but this association is not
+               guaranteed.  The 'isci_id' attribute unambiguously identifies
+               the controller index: '0' for the first controller,
+               '1' for the second.
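
A minimal userspace sketch of using the attribute documented above to map a scsi_host to its SCU controller index. The host0 path is an assumption for illustration; substitute the host number reported on your system.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_host/host0/isci_id", "r");
	int id;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &id) == 1)
		printf("host0 is SCU controller %d\n", id);	/* '0' or '1' */
	fclose(f);
	return 0;
}
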
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
new file mode 100644 (file)
index 0000000..9aec8ef
--- /dev/null
@@ -0,0 +1,7 @@
+What:          /sys/module/hid_logitech/drivers/hid:logitech/<dev>/range.
+Date:          July 2011
+KernelVersion: 3.2
+Contact:       Michal Malý <madcatxster@gmail.com>
+Description:   Display minimum, maximum and current range of the steering
+               wheel. Writing a value within min and max boundaries sets the
+               range of the wheel.
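
A comparable sketch for writing the range attribute described above. The <dev> path component and the value 540 are placeholders; use the real device name under hid:logitech and a value within the reported min/max.

#include <stdio.h>

int main(void)
{
	/* replace <dev> with the actual HID device name */
	FILE *f = fopen("/sys/module/hid_logitech/drivers/hid:logitech/<dev>/range", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 540);	/* degrees, must lie between min and max */
	fclose(f);
	return 0;
}
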
index 8516401..23fdf79 100644 (file)
@@ -1455,7 +1455,7 @@ Applicable to the H264 encoder.</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-h264-vui-sar-idc">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC</constant>&nbsp;</entry>
                <entry>enum&nbsp;v4l2_mpeg_video_h264_vui_sar_idc</entry>
              </row>
@@ -1561,7 +1561,7 @@ Applicable to the H264 encoder.</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-h264-level">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LEVEL</constant>&nbsp;</entry>
                <entry>enum&nbsp;v4l2_mpeg_video_h264_level</entry>
              </row>
@@ -1641,7 +1641,7 @@ Possible values are:</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-mpeg4-level">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL</constant>&nbsp;</entry>
                <entry>enum&nbsp;v4l2_mpeg_video_mpeg4_level</entry>
              </row>
@@ -1689,9 +1689,9 @@ Possible values are:</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-h264-profile">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_PROFILE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_h264_profile</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_h264_profile</entry>
              </row>
              <row><entry spanname="descr">The profile information for H264.
 Applicable to the H264 encoder.
@@ -1774,9 +1774,9 @@ Possible values are:</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-mpeg4-profile">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_mpeg4_profile</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_mpeg4_profile</entry>
              </row>
              <row><entry spanname="descr">The profile information for MPEG4.
 Applicable to the MPEG4 encoder.
@@ -1820,9 +1820,9 @@ Applicable to the encoder.
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-multi-slice-mode">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_multi_slice_mode</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_multi_slice_mode</entry>
              </row>
              <row><entry spanname="descr">Determines how the encoder should handle division of frame into slices.
 Applicable to the encoder.
@@ -1868,9 +1868,9 @@ Applicable to the encoder.</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-h264-loop-filter-mode">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_h264_loop_filter_mode</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_h264_loop_filter_mode</entry>
              </row>
              <row><entry spanname="descr">Loop filter mode for H264 encoder.
 Possible values are:</entry>
@@ -1913,9 +1913,9 @@ Applicable to the H264 encoder.</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-h264-entropy-mode">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_h264_symbol_mode</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_h264_entropy_mode</entry>
              </row>
              <row><entry spanname="descr">Entropy coding mode for H264 - CABAC/CAVALC.
 Applicable to the H264 encoder.
@@ -2140,9 +2140,9 @@ previous frames. Applicable to the H264 encoder.</entry>
              </row>
 
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-video-header-mode">
                <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_HEADER_MODE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_header_mode</entry>
+               <entry>enum&nbsp;v4l2_mpeg_video_header_mode</entry>
              </row>
              <row><entry spanname="descr">Determines whether the header is returned as the first buffer or is
 it returned together with the first frame. Applicable to encoders.
@@ -2320,9 +2320,9 @@ Valid only when H.264 and macroblock level RC is enabled (<constant>V4L2_CID_MPE
 Applicable to the H264 encoder.</entry>
              </row>
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-mfc51-video-frame-skip-mode">
                <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_mfc51_frame_skip_mode</entry>
+               <entry>enum&nbsp;v4l2_mpeg_mfc51_video_frame_skip_mode</entry>
              </row>
              <row><entry spanname="descr">
 Indicates in what conditions the encoder should skip frames. If encoding a frame would cause the encoded stream to be larger then
@@ -2361,9 +2361,9 @@ the stream will meet tight bandwidth contraints. Applicable to encoders.
 </entry>
              </row>
              <row><entry></entry></row>
-             <row>
+             <row id="v4l2-mpeg-mfc51-video-force-frame-type">
                <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE</constant>&nbsp;</entry>
-               <entry>enum&nbsp;v4l2_mpeg_mfc51_force_frame_type</entry>
+               <entry>enum&nbsp;v4l2_mpeg_mfc51_video_force_frame_type</entry>
              </row>
              <row><entry spanname="descr">Force a frame type for the next queued buffer. Applicable to encoders.
 Possible values are:</entry>
index 6f3c598..06eb6d9 100644 (file)
@@ -380,7 +380,7 @@ will be charged as a new owner of it.
 
 5.2 stat file
 
-5.2.1 memory.stat file includes following statistics
+memory.stat file includes following statistics
 
 # per-memory cgroup local status
 cache          - # of bytes of page cache memory.
@@ -438,89 +438,6 @@ Note:
         file_mapped is accounted only when the memory cgroup is owner of page
         cache.)
 
-5.2.2 memory.vmscan_stat
-
-memory.vmscan_stat includes statistics information for memory scanning and
-freeing, reclaiming. The statistics shows memory scanning information since
-memory cgroup creation and can be reset to 0 by writing 0 as
-
- #echo 0 > ../memory.vmscan_stat
-
-This file contains following statistics.
-
-[param]_[file_or_anon]_pages_by_[reason]_[under_heararchy]
-[param]_elapsed_ns_by_[reason]_[under_hierarchy]
-
-For example,
-
-  scanned_file_pages_by_limit indicates the number of scanned
-  file pages at vmscan.
-
-Now, 3 parameters are supported
-
-  scanned - the number of pages scanned by vmscan
-  rotated - the number of pages activated at vmscan
-  freed   - the number of pages freed by vmscan
-
-If "rotated" is high against scanned/freed, the memcg seems busy.
-
-Now, 2 reason are supported
-
-  limit - the memory cgroup's limit
-  system - global memory pressure + softlimit
-           (global memory pressure not under softlimit is not handled now)
-
-When under_hierarchy is added in the tail, the number indicates the
-total memcg scan of its children and itself.
-
-elapsed_ns is a elapsed time in nanosecond. This may include sleep time
-and not indicates CPU usage. So, please take this as just showing
-latency.
-
-Here is an example.
-
-# cat /cgroup/memory/A/memory.vmscan_stat
-scanned_pages_by_limit 9471864
-scanned_anon_pages_by_limit 6640629
-scanned_file_pages_by_limit 2831235
-rotated_pages_by_limit 4243974
-rotated_anon_pages_by_limit 3971968
-rotated_file_pages_by_limit 272006
-freed_pages_by_limit 2318492
-freed_anon_pages_by_limit 962052
-freed_file_pages_by_limit 1356440
-elapsed_ns_by_limit 351386416101
-scanned_pages_by_system 0
-scanned_anon_pages_by_system 0
-scanned_file_pages_by_system 0
-rotated_pages_by_system 0
-rotated_anon_pages_by_system 0
-rotated_file_pages_by_system 0
-freed_pages_by_system 0
-freed_anon_pages_by_system 0
-freed_file_pages_by_system 0
-elapsed_ns_by_system 0
-scanned_pages_by_limit_under_hierarchy 9471864
-scanned_anon_pages_by_limit_under_hierarchy 6640629
-scanned_file_pages_by_limit_under_hierarchy 2831235
-rotated_pages_by_limit_under_hierarchy 4243974
-rotated_anon_pages_by_limit_under_hierarchy 3971968
-rotated_file_pages_by_limit_under_hierarchy 272006
-freed_pages_by_limit_under_hierarchy 2318492
-freed_anon_pages_by_limit_under_hierarchy 962052
-freed_file_pages_by_limit_under_hierarchy 1356440
-elapsed_ns_by_limit_under_hierarchy 351386416101
-scanned_pages_by_system_under_hierarchy 0
-scanned_anon_pages_by_system_under_hierarchy 0
-scanned_file_pages_by_system_under_hierarchy 0
-rotated_pages_by_system_under_hierarchy 0
-rotated_anon_pages_by_system_under_hierarchy 0
-rotated_file_pages_by_system_under_hierarchy 0
-freed_pages_by_system_under_hierarchy 0
-freed_anon_pages_by_system_under_hierarchy 0
-freed_file_pages_by_system_under_hierarchy 0
-elapsed_ns_by_system_under_hierarchy 0
-
 5.3 swappiness
 
 Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
index fa8776a..84d46c0 100644 (file)
@@ -35,13 +35,6 @@ the Out-Of-Spec bit. Following table summarizes the exported sysfs files:
 All Sysfs entries are named with their core_id (represented here by 'X').
 tempX_input     - Core temperature (in millidegrees Celsius).
 tempX_max       - All cooling devices should be turned on (on Core2).
-                  Initialized with IA32_THERM_INTERRUPT. When the CPU
-                  temperature reaches this temperature, an interrupt is
-                  generated and tempX_max_alarm is set.
-tempX_max_hyst   - If the CPU temperature falls below than temperature,
-                  an interrupt is generated and tempX_max_alarm is reset.
-tempX_max_alarm  - Set if the temperature reaches or exceeds tempX_max.
-                  Reset if the temperature drops to or below tempX_max_hyst.
 tempX_crit      - Maximum junction temperature (in millidegrees Celsius).
 tempX_crit_alarm - Set when Out-of-spec bit is set, never clears.
                   Correct CPU operation is no longer guaranteed.
@@ -49,9 +42,10 @@ tempX_label   - Contains string "Core X", where X is processor
                   number. For Package temp, this will be "Physical id Y",
                   where Y is the package number.
 
-The TjMax temperature is set to 85 degrees C if undocumented model specific
-register (UMSR) 0xee has bit 30 set. If not the TjMax is 100 degrees C as
-(sometimes) documented in processor datasheet.
+On CPU models which support it, TjMax is read from a model-specific register.
+On other models, it is set to an arbitrary value based on weak heuristics.
+If these heuristics don't work for you, you can pass the correct TjMax value
+as a module parameter (tjmax).
 
 Appendix A. Known TjMax lists (TBD):
 Some information comes from ark.intel.com
index 44b4f61..c11f64a 100644 (file)
@@ -62,6 +62,13 @@ can be safely used to identify the chip. You will have to instantiate
 the devices explicitly. Please see Documentation/i2c/instantiating-devices for
 details.
 
+WARNING: Do not access chip registers using the i2cdump command, and do not use
+any of the i2ctools commands on a command register (0xa5 to 0xac). The chips
+supported by this driver interpret any access to a command register (including
+read commands) as request to execute the command in question. This may result in
+power loss, board resets, and/or Flash corruption. Worst case, your board may
+turn into a brick.
+
 
 Sysfs entries
 -------------
index 845a191..54078ed 100644 (file)
@@ -319,4 +319,6 @@ Code  Seq#(hex)     Include File            Comments
                                        <mailto:thomas@winischhofer.net>
 0xF4   00-1F   video/mbxfb.h           mbxfb
                                        <mailto:raph@8d.com>
+0xF6   all     LTTng                   Linux Trace Toolkit Next Generation
+                                       <mailto:mathieu.desnoyers@efficios.com>
 0xFD   all     linux/dm-ioctl.h
index 614d038..854ed5c 100644 (file)
@@ -2086,9 +2086,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Override pmtimer IOPort with a hex value.
                        e.g. pmtmr=0x508
 
-       pnp.debug       [PNP]
-                       Enable PNP debug messages.  This depends on the
-                       CONFIG_PNP_DEBUG_MESSAGES option.
+       pnp.debug=1     [PNP]
+                       Enable PNP debug messages (depends on the
+                       CONFIG_PNP_DEBUG_MESSAGES option).  Change at run-time
+                       via /sys/module/pnp/parameters/debug.  We always show
+                       current resource usage; turning this on also shows
+                       possible settings and some assignment information.
 
        pnpacpi=        [ACPI]
                        { off }
index 8006c22..25320bf 100644 (file)
@@ -1,3 +1,5 @@
+Note: This driver doesn't have a maintainer.
+
 Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux.
 
 This program is free software; you can redistribute it and/or
@@ -55,7 +57,6 @@ Test and make sure PCI latency is now correct for all cases.
 Authors:
 
 Sten Wang <sten_wang@davicom.com.tw >   : Original Author
-Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer
 
 Contributors:
 
index 8154699..ca5cdcd 100644 (file)
@@ -1042,7 +1042,7 @@ conf/interface/*:
        The functional behaviour for certain settings is different
        depending on whether local forwarding is enabled or not.
 
-accept_ra - BOOLEAN
+accept_ra - INTEGER
        Accept Router Advertisements; autoconfigure using them.
 
        Possible values are:
@@ -1106,7 +1106,7 @@ dad_transmits - INTEGER
        The amount of Duplicate Address Detection probes to send.
        Default: 1
 
-forwarding - BOOLEAN
+forwarding - INTEGER
        Configure interface-specific Host/Router behaviour.
 
        Note: It is recommended to have the same setting on all
index 58fd741..fe67b5c 100644 (file)
@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
 of logical flows. Packets for each flow are steered to a separate receive
 queue, which in turn can be processed by separate CPUs. This mechanism is
 generally known as “Receive-side Scaling” (RSS). The goal of RSS and
-the other scaling techniques to increase performance uniformly.
+the other scaling techniques is to increase performance uniformly.
 Multi-queue distribution can also be used for traffic prioritization, but
 that is not the focus of these techniques.
 
@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
 same CPU. Indeed, with many flows and few CPUs, it is very likely that
 a single application thread handles flows with many different flow hashes.
 
-rps_sock_table is a global flow table that contains the *desired* CPU for
-flows: the CPU that is currently processing the flow in userspace. Each
-table value is a CPU index that is updated during calls to recvmsg and
-sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
+rps_sock_flow_table is a global flow table that contains the *desired* CPU
+for flows: the CPU that is currently processing the flow in userspace.
+Each table value is a CPU index that is updated during calls to recvmsg
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
 and tcp_splice_read()).
 
 When the scheduler moves a thread to a new CPU while it has outstanding
@@ -243,7 +243,7 @@ configured. The number of entries in the global flow table is set through:
 
 The number of entries in the per-queue flow table are set through:
 
- /sys/class/net/<dev>/queues/tx-<n>/rps_flow_cnt
+ /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt
 
 == Suggested Configuration
 
index 0924aac..29bdf62 100644 (file)
@@ -123,10 +123,11 @@ be automatically shutdown if it's set to "never".
 khugepaged runs usually at low frequency so while one may not want to
 invoke defrag algorithms synchronously during the page faults, it
 should be worth invoking defrag at least in khugepaged. However it's
-also possible to disable defrag in khugepaged:
+also possible to disable defrag in khugepaged by writing 0 or enable
+defrag in khugepaged by writing 1:
 
-echo yes >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
-echo no >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
+echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
+echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
 
 You can also control how many pages khugepaged should scan at each
 pass:
index 28f65c2..cf556fc 100644 (file)
@@ -1278,7 +1278,6 @@ F:        drivers/input/misc/ati_remote2.c
 ATLX ETHERNET DRIVERS
 M:     Jay Cliburn <jcliburn@gmail.com>
 M:     Chris Snook <chris.snook@gmail.com>
-M:     Jie Yang <jie.yang@atheros.com>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/atl1
 W:     http://atl1.sourceforge.net
@@ -1574,7 +1573,6 @@ F:        drivers/scsi/bfa/
 
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
 M:     Rasesh Mody <rmody@brocade.com>
-M:     Debashis Dutt <ddutt@brocade.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/bna/
@@ -1758,7 +1756,6 @@ F:        Documentation/zh_CN/
 
 CISCO VIC ETHERNET NIC DRIVER
 M:     Christian Benvenuti <benve@cisco.com>
-M:     Vasanthy Kolluri <vkolluri@cisco.com>
 M:     Roopa Prabhu <roprabhu@cisco.com>
 M:     David Wang <dwang2@cisco.com>
 S:     Supported
@@ -3262,6 +3259,17 @@ F:       Documentation/input/multi-touch-protocol.txt
 F:     drivers/input/input-mt.c
 K:     \b(ABS|SYN)_MT_
 
+INTEL C600 SERIES SAS CONTROLLER DRIVER
+M:     Intel SCU Linux support <intel-linux-scu@intel.com>
+M:     Dan Williams <dan.j.williams@intel.com>
+M:     Dave Jiang <dave.jiang@intel.com>
+M:     Ed Nadolski <edmund.nadolski@intel.com>
+L:     linux-scsi@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git
+S:     Maintained
+F:     drivers/scsi/isci/
+F:     firmware/isci/
+
 INTEL IDLE DRIVER
 M:     Len Brown <lenb@kernel.org>
 L:     linux-pm@lists.linux-foundation.org
@@ -4404,7 +4412,8 @@ L:        netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
 S:     Supported
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
@@ -4774,7 +4783,7 @@ F:        drivers/net/wireless/orinoco/
 
 OSD LIBRARY and FILESYSTEM
 M:     Boaz Harrosh <bharrosh@panasas.com>
-M:     Benny Halevy <bhalevy@panasas.com>
+M:     Benny Halevy <bhalevy@tonian.com>
 L:     osd-dev@open-osd.org
 W:     http://open-osd.org
 T:     git git://git.open-osd.org/open-osd.git
@@ -6365,7 +6374,6 @@ S:        Supported
 F:     arch/arm/mach-tegra
 
 TEHUTI ETHERNET DRIVER
-M:     Alexander Indenbaum <baum@tehutinetworks.net>
 M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -7134,6 +7142,12 @@ L:       linux-scsi@vger.kernel.org
 S:     Maintained
 F:     drivers/scsi/wd7000.c
 
+WIIMOTE HID DRIVER
+M:     David Herrmann <dh.herrmann@googlemail.com>
+L:     linux-input@vger.kernel.org
+S:     Maintained
+F:     drivers/hid/hid-wiimote*
+
 WINBOND CIR DRIVER
 M:     David Härdeman <david@hardeman.nu>
 S:     Maintained
@@ -7200,6 +7214,9 @@ W:        http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
 S:     Supported
 F:     Documentation/hwmon/wm83??
 F:     drivers/leds/leds-wm83*.c
+F:     drivers/input/misc/wm831x-on.c
+F:     drivers/input/touchscreen/wm831x-ts.c
+F:     drivers/input/touchscreen/wm97*.c
 F:     drivers/mfd/wm8*.c
 F:     drivers/power/wm83*.c
 F:     drivers/rtc/rtc-wm83*.c
@@ -7209,6 +7226,7 @@ F:        drivers/watchdog/wm83*_wdt.c
 F:     include/linux/mfd/wm831x/
 F:     include/linux/mfd/wm8350/
 F:     include/linux/mfd/wm8400*
+F:     include/linux/wm97xx.h
 F:     include/sound/wm????.h
 F:     sound/soc/codecs/wm*
 
index 03d97aa..31f967c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc9
 NAME = "Divemaster Edition"
 
 # *DOCUMENTATION*
index 60cde53..8bb9362 100644 (file)
@@ -51,7 +51,7 @@ config GENERIC_CMOS_UPDATE
         def_bool y
 
 config GENERIC_GPIO
-       def_bool y
+       bool
 
 config ZONE_DMA
        bool
index 3269576..3146ed3 100644 (file)
@@ -1283,6 +1283,20 @@ config ARM_ERRATA_364296
          processor into full low interrupt latency mode. ARM11MPCore
          is not affected.
 
+config ARM_ERRATA_764369
+       bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for erratum 764369
+         affecting Cortex-A9 MPCore with two or more processors (all
+         current revisions). Under certain timing circumstances, a data
+         cache line maintenance operation by MVA targeting an Inner
+         Shareable memory region may fail to proceed up to either the
+         Point of Coherency or to the Point of Unification of the
+         system. This workaround adds a DSB instruction before the
+         relevant cache maintenance functions and sets a specific bit
+         in the diagnostic control register of the SCU.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
index 4c05334..e581866 100644 (file)
        };
 
        sdhci@c8000200 {
-               gpios = <&gpio 69 0>, /* cd, gpio PI5 */
-                       <&gpio 57 0>, /* wp, gpio PH1 */
-                       <&gpio 155 0>; /* power, gpio PT3 */
+               cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+               wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+               power-gpios = <&gpio 155 0>; /* gpio PT3 */
        };
 
        sdhci@c8000600 {
-               gpios = <&gpio 58 0>, /* cd, gpio PH2 */
-                       <&gpio 59 0>, /* wp, gpio PH3 */
-                       <&gpio 70 0>; /* power, gpio PI6 */
+               cd-gpios = <&gpio 58 0>; /* gpio PH2 */
+               wp-gpios = <&gpio 59 0>; /* gpio PH3 */
+               power-gpios = <&gpio 70 0>; /* gpio PI6 */
        };
 };
index 1940cae..64cedca 100644 (file)
@@ -21,8 +21,8 @@
        };
 
        sdhci@c8000400 {
-               gpios = <&gpio 69 0>, /* cd, gpio PI5 */
-                       <&gpio 57 0>, /* wp, gpio PH1 */
-                       <&gpio 70 0>; /* power, gpio PI6 */
+               cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+               wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+               power-gpios = <&gpio 70 0>; /* gpio PI6 */
        };
 };
index 8c73900..253cc86 100644 (file)
 
 #ifdef CONFIG_SMP
 
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)     \
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)        \
        smp_mb();                                               \
        __asm__ __volatile__(                                   \
-       "1:     ldrex   %1, [%2]\n"                             \
+       "1:     ldrex   %1, [%3]\n"                             \
        "       " insn "\n"                                     \
-       "2:     strex   %1, %0, [%2]\n"                         \
-       "       teq     %1, #0\n"                               \
+       "2:     strex   %2, %0, [%3]\n"                         \
+       "       teq     %2, #0\n"                               \
        "       bne     1b\n"                                   \
        "       mov     %0, #0\n"                               \
-       __futex_atomic_ex_table("%4")                           \
-       : "=&r" (ret), "=&r" (oldval)                           \
+       __futex_atomic_ex_table("%5")                           \
+       : "=&r" (ret), "=&r" (oldval), "=&r" (tmp)              \
        : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)              \
        : "cc", "memory")
 
@@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 #include <linux/preempt.h>
 #include <asm/domain.h>
 
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)     \
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)        \
        __asm__ __volatile__(                                   \
-       "1:     " T(ldr) "      %1, [%2]\n"                     \
+       "1:     " T(ldr) "      %1, [%3]\n"                     \
        "       " insn "\n"                                     \
-       "2:     " T(str) "      %0, [%2]\n"                     \
+       "2:     " T(str) "      %0, [%3]\n"                     \
        "       mov     %0, #0\n"                               \
-       __futex_atomic_ex_table("%4")                           \
-       : "=&r" (ret), "=&r" (oldval)                           \
+       __futex_atomic_ex_table("%5")                           \
+       : "=&r" (ret), "=&r" (oldval), "=&r" (tmp)              \
        : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)              \
        : "cc", "memory")
 
@@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
        int cmp = (encoded_op >> 24) & 15;
        int oparg = (encoded_op << 8) >> 20;
        int cmparg = (encoded_op << 20) >> 20;
-       int oldval = 0, ret;
+       int oldval = 0, ret, tmp;
 
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
@@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 
        switch (op) {
        case FUTEX_OP_SET:
-               __futex_atomic_op("mov  %0, %3", ret, oldval, uaddr, oparg);
+               __futex_atomic_op("mov  %0, %4", ret, oldval, tmp, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
-               __futex_atomic_op("add  %0, %1, %3", ret, oldval, uaddr, oparg);
+               __futex_atomic_op("add  %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
-               __futex_atomic_op("orr  %0, %1, %3", ret, oldval, uaddr, oparg);
+               __futex_atomic_op("orr  %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
-               __futex_atomic_op("and  %0, %1, %3", ret, oldval, uaddr, ~oparg);
+               __futex_atomic_op("and  %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
                break;
        case FUTEX_OP_XOR:
-               __futex_atomic_op("eor  %0, %1, %3", ret, oldval, uaddr, oparg);
+               __futex_atomic_op("eor  %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
index bfa706f..99a6ed7 100644 (file)
 #define L2X0_CLEAN_INV_LINE_PA         0x7F0
 #define L2X0_CLEAN_INV_LINE_IDX                0x7F8
 #define L2X0_CLEAN_INV_WAY             0x7FC
-#define L2X0_LOCKDOWN_WAY_D            0x900
-#define L2X0_LOCKDOWN_WAY_I            0x904
+/*
+ * The lockdown registers repeat 8 times for L310, the L210 has only one
+ * D and one I lockdown register at 0x0900 and 0x0904.
+ */
+#define L2X0_LOCKDOWN_WAY_D_BASE       0x900
+#define L2X0_LOCKDOWN_WAY_I_BASE       0x904
+#define L2X0_LOCKDOWN_STRIDE           0x08
 #define L2X0_TEST_OPERATION            0xF00
 #define L2X0_LINE_DATA                 0xF10
 #define L2X0_LINE_TAG                  0xF30
index 2c04ed5..c60a294 100644 (file)
 /*
  * Unimplemented (or alternatively implemented) syscalls
  */
-#define __IGNORE_fadvise64_64          1
-#define __IGNORE_migrate_pages         1
+#define __IGNORE_fadvise64_64
+#define __IGNORE_migrate_pages
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_UNISTD_H */
index 79ed5e7..7fcddb7 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <asm/smp_scu.h>
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
 
 #define SCU_CTRL               0x00
 #define SCU_CONFIG             0x04
@@ -37,6 +38,15 @@ void __init scu_enable(void __iomem *scu_base)
 {
        u32 scu_ctrl;
 
+#ifdef CONFIG_ARM_ERRATA_764369
+       /* Cortex-A9 only */
+       if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+               scu_ctrl = __raw_readl(scu_base + 0x30);
+               if (!(scu_ctrl & 1))
+                       __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+       }
+#endif
+
        scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
        /* already enabled? */
        if (scu_ctrl & 1)
index bf977f8..4e66f62 100644 (file)
 
 #if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
 #define ARM_EXIT_KEEP(x)       x
+#define ARM_EXIT_DISCARD(x)
 #else
 #define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x)    x
 #endif
 
 OUTPUT_ARCH(arm)
@@ -39,6 +41,11 @@ jiffies = jiffies_64 + 4;
 SECTIONS
 {
        /*
+        * XXX: The linker does not define how output sections are
+        * assigned to input sections when there are multiple statements
+        * matching the same input section name.  There is no documented
+        * order of matching.
+        *
         * unwind exit sections must be discarded before the rest of the
         * unwind sections get included.
         */
@@ -47,6 +54,9 @@ SECTIONS
                *(.ARM.extab.exit.text)
                ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
                ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
+               ARM_EXIT_DISCARD(EXIT_TEXT)
+               ARM_EXIT_DISCARD(EXIT_DATA)
+               EXIT_CALL
 #ifndef CONFIG_HOTPLUG
                *(.ARM.exidx.devexit.text)
                *(.ARM.extab.devexit.text)
@@ -58,6 +68,8 @@ SECTIONS
 #ifndef CONFIG_SMP_ON_UP
                *(.alt.smp.init)
 #endif
+               *(.discard)
+               *(.discard.*)
        }
 
 #ifdef CONFIG_XIP_KERNEL
@@ -279,9 +291,6 @@ SECTIONS
 
        STABS_DEBUG
        .comment 0 : { *(.comment) }
-
-       /* Default discards */
-       DISCARDS
 }
 
 /*
index 6bd83ed..d87bfc3 100644 (file)
@@ -8,7 +8,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <mach/hardware.h>
 #include <asm/hardware/entry-macro-gic.S>
 
                .macro  disable_fiq
index 58bb03a..4f16c9b 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <linux/io.h>
 #include <asm/proc-fns.h>
-#include <mach/hardware.h>
 
 static inline void arch_idle(void)
 {
index de8ead9..a91b605 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <mach/cns3xxx.h>
 
 #define AMBA_UART_DR(base)     (*(volatile unsigned char *)((base) + 0x00))
index 06fd25d..0f8fca4 100644 (file)
@@ -49,7 +49,7 @@ static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
        return &cns3xxx_pcie[root->domain];
 }
 
-static struct cns3xxx_pcie *pdev_to_cnspci(struct pci_dev *dev)
+static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
 {
        return sysdata_to_cnspci(dev->sysdata);
 }
index bd53945..008d514 100644 (file)
@@ -115,6 +115,32 @@ static struct spi_board_info da850evm_spi_info[] = {
        },
 };
 
+#ifdef CONFIG_MTD
+static void da850_evm_m25p80_notify_add(struct mtd_info *mtd)
+{
+       char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
+       size_t retlen;
+
+       if (!strcmp(mtd->name, "MAC-Address")) {
+               mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
+               if (retlen == ETH_ALEN)
+                       pr_info("Read MAC addr from SPI Flash: %pM\n",
+                               mac_addr);
+       }
+}
+
+static struct mtd_notifier da850evm_spi_notifier = {
+       .add    = da850_evm_m25p80_notify_add,
+};
+
+static void da850_evm_setup_mac_addr(void)
+{
+       register_mtd_user(&da850evm_spi_notifier);
+}
+#else
+static void da850_evm_setup_mac_addr(void) { }
+#endif
+
 static struct mtd_partition da850_evm_norflash_partition[] = {
        {
                .name           = "bootloaders + env",
@@ -1244,6 +1270,8 @@ static __init void da850_evm_init(void)
        if (ret)
                pr_warning("da850_evm_init: sata registration failed: %d\n",
                                ret);
+
+       da850_evm_setup_mac_addr();
 }
 
 #ifdef CONFIG_SERIAL_8250_CONSOLE
index 47fd0bc..fa59c09 100644 (file)
 #define PSC_STATE_DISABLE      2
 #define PSC_STATE_ENABLE       3
 
-#define MDSTAT_STATE_MASK      0x1f
+#define MDSTAT_STATE_MASK      0x3f
 #define MDCTL_FORCE            BIT(31)
 
 #ifndef __ASSEMBLER__
index fb5e72b..5f1e045 100644 (file)
@@ -217,7 +217,11 @@ ddr2clk_stop_done:
 ENDPROC(davinci_ddr_psc_config)
 
 CACHE_FLUSH:
-       .word   arm926_flush_kern_cache_all
+#ifdef CONFIG_CPU_V6
+       .word   v6_flush_kern_cache_all
+#else
+       .word   arm926_flush_kern_cache_all
+#endif
 
 ENTRY(davinci_cpu_suspend_sz)
        .word   . - davinci_cpu_suspend
index 83dce85..a9e0dae 100644 (file)
@@ -158,7 +158,7 @@ void __init dove_spi0_init(void)
 
 void __init dove_spi1_init(void)
 {
-       orion_spi_init(DOVE_SPI1_PHYS_BASE, get_tclk());
+       orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());
 }
 
 /*****************************************************************************
index 1561b03..86964d2 100644 (file)
@@ -899,8 +899,7 @@ static struct clksrc_clk clksrcs[] = {
                .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 28, .size = 4 },
        }, {
                .clk            = {
-                       .name           = "sclk_cam",
-                       .devname        = "exynos4-fimc.0",
+                       .name           = "sclk_cam0",
                        .enable         = exynos4_clksrc_mask_cam_ctrl,
                        .ctrlbit        = (1 << 16),
                },
@@ -909,8 +908,7 @@ static struct clksrc_clk clksrcs[] = {
                .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 16, .size = 4 },
        }, {
                .clk            = {
-                       .name           = "sclk_cam",
-                       .devname        = "exynos4-fimc.1",
+                       .name           = "sclk_cam1",
                        .enable         = exynos4_clksrc_mask_cam_ctrl,
                        .ctrlbit        = (1 << 20),
                },
@@ -1160,7 +1158,7 @@ void __init_or_cpufreq exynos4_setup_clocks(void)
 
        vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
        vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
-                               __raw_readl(S5P_VPLL_CON1), pll_4650);
+                               __raw_readl(S5P_VPLL_CON1), pll_4650c);
 
        clk_fout_apll.ops = &exynos4_fout_apll_ops;
        clk_fout_mpll.rate = mpll;
index 1ae059b..ddd8686 100644 (file)
@@ -132,12 +132,18 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
        return ((cycle_t)hi << 32) | lo;
 }
 
+static void exynos4_frc_resume(struct clocksource *cs)
+{
+       exynos4_mct_frc_start(0, 0);
+}
+
 struct clocksource mct_frc = {
        .name           = "mct-frc",
        .rating         = 400,
        .read           = exynos4_frc_read,
        .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+       .resume         = exynos4_frc_resume,
 };
 
 static void __init exynos4_clocksource_init(void)
@@ -389,9 +395,11 @@ static void exynos4_mct_tick_init(struct clock_event_device *evt)
 }
 
 /* Setup the local clock events for a CPU */
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
 {
        exynos4_mct_tick_init(evt);
+
+       return 0;
 }
 
 int local_timer_ack(void)
index 7c2282c..df6ef1b 100644 (file)
@@ -106,6 +106,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
         */
        spin_lock(&boot_lock);
        spin_unlock(&boot_lock);
+
+       set_cpu_online(cpu, true);
 }
 
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
index 1ee0ebf..7862bfb 100644 (file)
@@ -19,15 +19,16 @@ void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
 
        if (rows > 8) {
                /* Set all the necessary GPX2 pins: KP_ROW[0~7] */
-               s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3));
+               s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3),
+                                       S3C_GPIO_PULL_UP);
 
                /* Set all the necessary GPX3 pins: KP_ROW[8~] */
-               s3c_gpio_cfgrange_nopull(EXYNOS4_GPX3(0), (rows - 8),
-                                        S3C_GPIO_SFN(3));
+               s3c_gpio_cfgall_range(EXYNOS4_GPX3(0), (rows - 8),
+                                        S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
        } else {
                /* Set all the necessary GPX2 pins: KP_ROW[x] */
-               s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), rows,
-                                        S3C_GPIO_SFN(3));
+               s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), rows, S3C_GPIO_SFN(3),
+                                       S3C_GPIO_PULL_UP);
        }
 
        /* Set all the necessary GPX1 pins to special-function 3: KP_COL[x] */
index 2fbbdd5..8cdc730 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/mtd/physmap.h>
+#include <video/vga.h>
 
 #include <mach/hardware.h>
 #include <mach/platform.h>
@@ -154,6 +155,7 @@ static struct map_desc ap_io_desc[] __initdata = {
 static void __init ap_map_io(void)
 {
        iotable_init(ap_io_desc, ARRAY_SIZE(ap_io_desc));
+       vga_base = PCI_MEMORY_VADDR;
 }
 
 #define INTEGRATOR_SC_VALID_INT        0x003fffff
@@ -337,15 +339,15 @@ static unsigned long timer_reload;
 static void integrator_clocksource_init(u32 khz)
 {
        void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
-       u32 ctrl = TIMER_CTRL_ENABLE;
+       u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
 
        if (khz >= 1500) {
                khz /= 16;
-               ctrl = TIMER_CTRL_DIV16;
+               ctrl |= TIMER_CTRL_DIV16;
        }
 
-       writel(ctrl, base + TIMER_CTRL);
        writel(0xffff, base + TIMER_LOAD);
+       writel(ctrl, base + TIMER_CTRL);
 
        clocksource_mmio_init(base + TIMER_VALUE, "timer2",
                khz * 1000, 200, 16, clocksource_mmio_readl_down);
index dd56bfb..11b86e5 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/io.h>
-#include <video/vga.h>
 
 #include <mach/hardware.h>
 #include <mach/platform.h>
@@ -505,7 +504,6 @@ void __init pci_v3_preinit(void)
 
        pcibios_min_io = 0x6000;
        pcibios_min_mem = 0x00100000;
-       vga_base = PCI_MEMORY_VADDR;
 
        /*
         * Hook in our fault handler for PCI errors
index ffd55b1..b9b8446 100644 (file)
@@ -3078,6 +3078,7 @@ static struct clk gpt12_fck = {
        .name           = "gpt12_fck",
        .ops            = &clkops_null,
        .parent         = &secure_32k_fck,
+       .clkdm_name     = "wkup_clkdm",
        .recalc         = &followparent_recalc,
 };
 
@@ -3085,6 +3086,7 @@ static struct clk wdt1_fck = {
        .name           = "wdt1_fck",
        .ops            = &clkops_null,
        .parent         = &secure_32k_fck,
+       .clkdm_name     = "wkup_clkdm",
        .recalc         = &followparent_recalc,
 };
 
index 2af0e3f..c0b6fbd 100644 (file)
@@ -3376,10 +3376,18 @@ int __init omap4xxx_clk_init(void)
        } else if (cpu_is_omap446x()) {
                cpu_mask = RATE_IN_4460;
                cpu_clkflg = CK_446X;
+       } else {
+               return 0;
        }
 
        clk_init(&omap2_clk_functions);
-       omap2_clk_disable_clkdm_control();
+
+       /*
+        * Must stay commented until all OMAP SoC drivers are
+        * converted to runtime PM, or drivers may start crashing
+        *
+        * omap2_clk_disable_clkdm_control();
+        */
 
        for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks);
                                                                          c++)
index ab7db08..8f08906 100644 (file)
@@ -747,6 +747,7 @@ int clkdm_wakeup(struct clockdomain *clkdm)
        spin_lock_irqsave(&clkdm->lock, flags);
        clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
        ret = arch_clkdm->clkdm_wakeup(clkdm);
+       ret |= pwrdm_state_switch(clkdm->pwrdm.ptr);
        spin_unlock_irqrestore(&clkdm->lock, flags);
        return ret;
 }
@@ -818,6 +819,7 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
        spin_lock_irqsave(&clkdm->lock, flags);
        clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
        arch_clkdm->clkdm_deny_idle(clkdm);
+       pwrdm_state_switch(clkdm->pwrdm.ptr);
        spin_unlock_irqrestore(&clkdm->lock, flags);
 }
 
index 16743c7..408193d 100644 (file)
@@ -192,6 +192,7 @@ static struct omap_hwmod_addr_space omap2430_usbhsotg_addrs[] = {
                .pa_end         = OMAP243X_HS_BASE + SZ_4K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /*  l4_core ->usbhsotg  interface */
index 3feb359..472bf22 100644 (file)
@@ -130,7 +130,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
                } else {
                        hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]);
                        clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
-                       pwrdm_wait_transition(pwrdm);
                        sleep_switch = FORCEWAKEUP_SWITCH;
                }
        }
@@ -156,7 +155,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
                return ret;
        }
 
-       pwrdm_wait_transition(pwrdm);
        pwrdm_state_switch(pwrdm);
 err:
        return ret;
index 9af0847..ef71fdd 100644 (file)
@@ -195,28 +195,35 @@ static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused)
 
 /**
  * pwrdm_init - set up the powerdomain layer
- * @pwrdm_list: array of struct powerdomain pointers to register
+ * @pwrdms: array of struct powerdomain pointers to register
  * @custom_funcs: func pointers for arch specific implementations
  *
- * Loop through the array of powerdomains @pwrdm_list, registering all
- * that are available on the current CPU. If pwrdm_list is supplied
- * and not null, all of the referenced powerdomains will be
- * registered.  No return value.  XXX pwrdm_list is not really a
- * "list"; it is an array.  Rename appropriately.
+ * Loop through the array of powerdomains @pwrdms, registering all
+ * that are available on the current CPU.  Also, program all
+ * powerdomain target state as ON; this is to prevent domains from
+ * hitting low power states (if bootloader has target states set to
+ * something other than ON) and potentially even losing context while
+ * PM is not fully initialized.  The PM late init code can then program
+ * the desired target state for all the power domains.  No return
+ * value.
  */
-void pwrdm_init(struct powerdomain **pwrdm_list, struct pwrdm_ops *custom_funcs)
+void pwrdm_init(struct powerdomain **pwrdms, struct pwrdm_ops *custom_funcs)
 {
        struct powerdomain **p = NULL;
+       struct powerdomain *temp_p;
 
        if (!custom_funcs)
                WARN(1, "powerdomain: No custom pwrdm functions registered\n");
        else
                arch_pwrdm = custom_funcs;
 
-       if (pwrdm_list) {
-               for (p = pwrdm_list; *p; p++)
+       if (pwrdms) {
+               for (p = pwrdms; *p; p++)
                        _pwrdm_register(*p);
        }
+
+       list_for_each_entry(temp_p, &pwrdm_list, node)
+               pwrdm_set_next_pwrst(temp_p, PWRDM_POWER_ON);
 }
 
 /**
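
The reworked pwrdm_init() documentation above describes two things: registration walks a sentinel-terminated array of powerdomain pointers, and every registered domain is then forced to a target state of ON until the late PM code programs the real targets. A hedged sketch of how a platform would feed it; the domain and ops names here are placeholders for illustration, not taken from the OMAP data files:

    /* hypothetical platform data; the real tables live in mach-omap2/*_data.c */
    static struct powerdomain *board_powerdomains[] = {
            &mpu_pwrdm,
            &core_pwrdm,
            NULL,                   /* the array is sentinel-terminated, not a list */
    };

    static void __init board_pm_init(void)
    {
            pwrdm_init(board_powerdomains, &board_pwrdm_operations);
            /* every registered domain now has PWRDM_POWER_ON as its next state */
    }
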
index f9a2aaf..615a4e7 100644 (file)
@@ -481,6 +481,7 @@ static void __init sirfsoc_clk_init(void)
 
 static struct of_device_id clkc_ids[] = {
        { .compatible = "sirf,prima2-clkc" },
+       {},
 };
 
 void __init sirfsoc_of_clk_init(void)
index c3404cb..7af254d 100644 (file)
@@ -51,6 +51,7 @@ static __init void sirfsoc_irq_init(void)
 
 static struct of_device_id intc_ids[]  = {
        { .compatible = "sirf,prima2-intc" },
+       {},
 };
 
 void __init sirfsoc_of_irq_init(void)
index d074786..492cfa8 100644 (file)
@@ -19,6 +19,7 @@ static DEFINE_MUTEX(rstc_lock);
 
 static struct of_device_id rstc_ids[]  = {
        { .compatible = "sirf,prima2-rstc" },
+       {},
 };
 
 static int __init sirfsoc_of_rstc_init(void)
index 44027f3..ed7ec48 100644 (file)
@@ -190,6 +190,7 @@ static void __init sirfsoc_timer_init(void)
 
 static struct of_device_id timer_ids[] = {
        { .compatible = "sirf,prima2-tick" },
+       {},
 };
 
 static void __init sirfsoc_of_timer_map(void)
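
The four one-line additions to the prima2 clkc/intc/rstc/timer tables above all fix the same omission: of_device_id match tables are walked entry by entry until an all-zero sentinel, so a table without a trailing {} lets the matcher run off the end of the array. The general shape, with a made-up compatible string:

    static const struct of_device_id foo_match[] = {
            { .compatible = "vendor,foo-block" },
            { /* sentinel terminates the table */ },
    };

    /* of_find_matching_node(NULL, foo_match) stops at the empty entry */
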
index a1a7176..38058af 100644 (file)
@@ -128,7 +128,7 @@ static int s3c2443_armclk_setrate(struct clk *clk, unsigned long rate)
                unsigned long clkcon0;
 
                clkcon0 = __raw_readl(S3C2443_CLKDIV0);
-               clkcon0 &= S3C2443_CLKDIV0_ARMDIV_MASK;
+               clkcon0 &= ~S3C2443_CLKDIV0_ARMDIV_MASK;
                clkcon0 |= val << S3C2443_CLKDIV0_ARMDIV_SHIFT;
                __raw_writel(clkcon0, S3C2443_CLKDIV0);
        }
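
The one-character change above (adding the ~) is the usual read-modify-write idiom for updating a register field: AND with the inverted mask to clear the old divider bits, then OR in the new value. In isolation, with placeholder field constants:

    #include <stdint.h>

    #define ARMDIV_SHIFT  9                          /* placeholder field layout */
    #define ARMDIV_MASK   (0xfu << ARMDIV_SHIFT)

    static uint32_t set_armdiv(uint32_t clkdiv0, uint32_t val)
    {
            clkdiv0 &= ~ARMDIV_MASK;                 /* clear the old field */
            clkdiv0 |= val << ARMDIV_SHIFT;          /* insert the new value */
            return clkdiv0;
    }
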
index ecbea92..a9f3183 100644 (file)
@@ -262,45 +262,6 @@ static struct samsung_keypad_platdata smdk6410_keypad_data __initdata = {
        .cols           = 8,
 };
 
-static int smdk6410_backlight_init(struct device *dev)
-{
-       int ret;
-
-       ret = gpio_request(S3C64XX_GPF(15), "Backlight");
-       if (ret) {
-               printk(KERN_ERR "failed to request GPF for PWM-OUT1\n");
-               return ret;
-       }
-
-       /* Configure GPIO pin with S3C64XX_GPF15_PWM_TOUT1 */
-       s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2));
-
-       return 0;
-}
-
-static void smdk6410_backlight_exit(struct device *dev)
-{
-       s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_OUTPUT);
-       gpio_free(S3C64XX_GPF(15));
-}
-
-static struct platform_pwm_backlight_data smdk6410_backlight_data = {
-       .pwm_id         = 1,
-       .max_brightness = 255,
-       .dft_brightness = 255,
-       .pwm_period_ns  = 78770,
-       .init           = smdk6410_backlight_init,
-       .exit           = smdk6410_backlight_exit,
-};
-
-static struct platform_device smdk6410_backlight_device = {
-       .name           = "pwm-backlight",
-       .dev            = {
-               .parent         = &s3c_device_timer[1].dev,
-               .platform_data  = &smdk6410_backlight_data,
-       },
-};
-
 static struct map_desc smdk6410_iodesc[] = {};
 
 static struct platform_device *smdk6410_devices[] __initdata = {
index 52a8e60..f5f8fa8 100644 (file)
@@ -815,8 +815,7 @@ static struct clksrc_clk clksrcs[] = {
                .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 },
        }, {
                .clk            = {
-                       .name           = "sclk_cam",
-                       .devname        = "s5pv210-fimc.0",
+                       .name           = "sclk_cam0",
                        .enable         = s5pv210_clk_mask0_ctrl,
                        .ctrlbit        = (1 << 3),
                },
@@ -825,8 +824,7 @@ static struct clksrc_clk clksrcs[] = {
                .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 },
        }, {
                .clk            = {
-                       .name           = "sclk_cam",
-                       .devname        = "s5pv210-fimc.1",
+                       .name           = "sclk_cam1",
                        .enable         = s5pv210_clk_mask0_ctrl,
                        .ctrlbit        = (1 << 4),
                },
index 52162d5..2cbf68e 100644 (file)
@@ -17,7 +17,7 @@
        cmp     \tmp, # 0x5600                  @ Is it ldrsb?
        orreq   \tmp, \tmp, #1 << 11            @ Set L-bit if yes
        tst     \tmp, #1 << 11                  @ L = 0 -> write
-       orreq   \psr, \psr, #1 << 11            @ yes.
+       orreq   \fsr, \fsr, #1 << 11            @ yes.
        b       do_DataAbort
 not_thumb:
        .endm
index 44c0867..9ecfdb5 100644 (file)
@@ -277,6 +277,25 @@ static void l2x0_disable(void)
        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void __init l2x0_unlock(__u32 cache_id)
+{
+       int lockregs;
+       int i;
+
+       if (cache_id == L2X0_CACHE_ID_PART_L310)
+               lockregs = 8;
+       else
+               /* L210 and unknown types */
+               lockregs = 1;
+
+       for (i = 0; i < lockregs; i++) {
+               writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+                              i * L2X0_LOCKDOWN_STRIDE);
+               writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+                              i * L2X0_LOCKDOWN_STRIDE);
+       }
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
        __u32 aux;
@@ -328,6 +347,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
         * accessing the below registers will fault.
         */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+               /* Make sure that I&D is not locked down when starting */
+               l2x0_unlock(cache_id);
 
                /* l2x0 controller is disabled */
                writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
index 3b24bfa..07c4bc8 100644 (file)
@@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+       ALT_SMP(W(dsb))
+       ALT_UP(W(nop))
+#endif
 1:
  USER( mcr     p15, 0, r12, c7, c11, 1 )       @ clean D line to the point of unification
        add     r12, r12, r2
@@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area)
        add     r1, r0, r1
        sub     r3, r2, #1
        bic     r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+       ALT_SMP(W(dsb))
+       ALT_UP(W(nop))
+#endif
 1:
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line / unified line
        add     r0, r0, r2
@@ -247,6 +255,10 @@ v7_dma_inv_range:
        sub     r3, r2, #1
        tst     r0, r3
        bic     r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+       ALT_SMP(W(dsb))
+       ALT_UP(W(nop))
+#endif
        mcrne   p15, 0, r0, c7, c14, 1          @ clean & invalidate D / U line
 
        tst     r1, r3
@@ -270,6 +282,10 @@ v7_dma_clean_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+       ALT_SMP(W(dsb))
+       ALT_UP(W(nop))
+#endif
 1:
        mcr     p15, 0, r0, c7, c10, 1          @ clean D / U line
        add     r0, r0, r2
@@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+       ALT_SMP(W(dsb))
+       ALT_UP(W(nop))
+#endif
 1:
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D / U line
        add     r0, r0, r2
index 0a0a1e7..c3ff82f 100644 (file)
@@ -324,6 +324,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 
        if (addr)
                *handle = pfn_to_dma(dev, page_to_pfn(page));
+       else
+               __dma_free_buffer(page, size);
 
        return addr;
 }
index 91bca35..cc7e2d8 100644 (file)
@@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-       return memblock_is_memory(pfn << PAGE_SHIFT);
+       return memblock_is_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
index 9a6a538..02609ee 100644 (file)
@@ -615,6 +615,9 @@ static int _od_resume_noirq(struct device *dev)
 
        return pm_generic_resume_noirq(dev);
 }
+#else
+#define _od_suspend_noirq NULL
+#define _od_resume_noirq NULL
 #endif
 
 static struct dev_pm_domain omap_device_pm_domain = {
index f71078e..f88216d 100644 (file)
@@ -114,17 +114,18 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
 {
        static int used_gpioint_groups = 0;
        int group = chip->group;
-       struct s5p_gpioint_bank *bank = NULL;
+       struct s5p_gpioint_bank *b, *bank = NULL;
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
 
        if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT)
                return -ENOMEM;
 
-       list_for_each_entry(bank, &banks, list) {
-               if (group >= bank->start &&
-                   group < bank->start + bank->nr_groups)
+       list_for_each_entry(b, &banks, list) {
+               if (group >= b->start && group < b->start + b->nr_groups) {
+                       bank = b;
                        break;
+               }
        }
        if (!bank)
                return -EINVAL;
index 302c426..3b44519 100644 (file)
@@ -64,6 +64,17 @@ static LIST_HEAD(clocks);
  */
 DEFINE_SPINLOCK(clocks_lock);
 
+/* Global watchdog clock used by arch_wdt_reset() callback */
+struct clk *s3c2410_wdtclk;
+static int __init s3c_wdt_reset_init(void)
+{
+       s3c2410_wdtclk = clk_get(NULL, "watchdog");
+       if (IS_ERR(s3c2410_wdtclk))
+               printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__);
+       return 0;
+}
+arch_initcall(s3c_wdt_reset_init);
+
 /* enable and disable calls for use with the clk struct */
 
 static int clk_null_enable(struct clk *clk, int enable)
index 87d5b38..73c66d4 100644 (file)
@@ -9,6 +9,9 @@
  * published by the Free Software Foundation.
 */
 
+#ifndef __ASM_PLAT_CLOCK_H
+#define __ASM_PLAT_CLOCK_H __FILE__
+
 #include <linux/spinlock.h>
 #include <linux/clkdev.h>
 
@@ -121,3 +124,8 @@ extern int s3c64xx_sclk_ctrl(struct clk *clk, int enable);
 
 extern void s3c_pwmclk_init(void);
 
+/* Global watchdog clock used by arch_wdt_reset() callback */
+
+extern struct clk *s3c2410_wdtclk;
+
+#endif /* __ASM_PLAT_CLOCK_H */
index 54b762a..40dbb2b 100644 (file)
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
 */
 
+#include <plat/clock.h>
 #include <plat/regs-watchdog.h>
 #include <mach/map.h>
 
 
 static inline void arch_wdt_reset(void)
 {
-       struct clk *wdtclk;
-
        printk("arch_reset: attempting watchdog reset\n");
 
        __raw_writel(0, S3C2410_WTCON);   /* disable watchdog, to be safe  */
 
-       wdtclk = clk_get(NULL, "watchdog");
-       if (!IS_ERR(wdtclk)) {
-               clk_enable(wdtclk);
-       } else
-               printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__);
+       if (s3c2410_wdtclk)
+               clk_enable(s3c2410_wdtclk);
 
        /* put initial values into count and data */
        __raw_writel(0x80, S3C2410_WTCNT);
index 052f877..60b4722 100644 (file)
@@ -31,7 +31,6 @@
 
 #define DMA_ERROR_CODE         (~(dma_addr_t)0x0)
 
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
@@ -47,6 +46,12 @@ dma_addr_t or1k_map_page(struct device *dev, struct page *page,
 void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
                     size_t size, enum dma_data_direction dir,
                     struct dma_attrs *attrs);
+int or1k_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir,
+               struct dma_attrs *attrs);
+void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
+                  int nents, enum dma_data_direction dir,
+                  struct dma_attrs *attrs);
 void or1k_sync_single_for_cpu(struct device *dev,
                              dma_addr_t dma_handle, size_t size,
                              enum dma_data_direction dir);
@@ -98,6 +103,51 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
        debug_dma_unmap_page(dev, addr, size, dir, true);
 }
 
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+                                  int nents, enum dma_data_direction dir)
+{
+       int i, ents;
+       struct scatterlist *s;
+
+       for_each_sg(sg, s, nents, i)
+               kmemcheck_mark_initialized(sg_virt(s), s->length);
+       BUG_ON(!valid_dma_direction(dir));
+       ents = or1k_map_sg(dev, sg, nents, dir, NULL);
+       debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+       return ents;
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+                                     int nents, enum dma_data_direction dir)
+{
+       BUG_ON(!valid_dma_direction(dir));
+       debug_dma_unmap_sg(dev, sg, nents, dir);
+       or1k_unmap_sg(dev, sg, nents, dir, NULL);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+                                     size_t offset, size_t size,
+                                     enum dma_data_direction dir)
+{
+       dma_addr_t addr;
+
+       kmemcheck_mark_initialized(page_address(page) + offset, size);
+       BUG_ON(!valid_dma_direction(dir));
+       addr = or1k_map_page(dev, page, offset, size, dir, NULL);
+       debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+       return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+                                 size_t size, enum dma_data_direction dir)
+{
+       BUG_ON(!valid_dma_direction(dir));
+       or1k_unmap_page(dev, addr, size, dir, NULL);
+       debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
@@ -119,7 +169,12 @@ static inline void dma_sync_single_for_device(struct device *dev,
 static inline int dma_supported(struct device *dev, u64 dma_mask)
 {
        /* Support 32 bit DMA mask exclusively */
-       return dma_mask == 0xffffffffULL;
+       return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return 0;
 }
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
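
The new inline wrappers above give OpenRISC the standard shape of the streaming DMA API: mark the buffer for kmemcheck, assert a valid direction, hand off to the arch-specific or1k_map_*() helper, and record the mapping with the DMA-debug hooks. From a driver's side nothing is architecture-specific; a rough usage sketch, where the device pointer and buffer are assumed to come from the caller:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    static int example_dma_roundtrip(struct device *dev, void *buf, size_t len)
    {
            struct scatterlist sg;

            sg_init_one(&sg, buf, len);

            if (!dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE))
                    return -ENOMEM;

            /* program the device with sg_dma_address(&sg) / sg_dma_len(&sg) */

            dma_unmap_sg(dev, &sg, 1, DMA_FROM_DEVICE);
            return 0;
    }
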
index 54a5c50..b79c2b1 100644 (file)
 
 /* This struct is saved by setup_frame in signal.c, to keep the current
    context while a signal handler is executed. It's restored by sys_sigreturn.
-
-   To keep things simple, we use pt_regs here even though normally you just
-   specify the list of regs to save. Then we can use copy_from_user on the
-   entire regs instead of a bunch of get_user's as well...
 */
 
 struct sigcontext {
-       struct pt_regs regs;  /* needs to be first */
+       struct user_regs_struct regs;  /* needs to be first */
        unsigned long oldmask;
-       unsigned long usp;    /* usp before stacking this gunk on it */
 };
 
 #endif /* __ASM_OPENRISC_SIGCONTEXT_H */
index 968d3ee..f1c8ee2 100644 (file)
@@ -154,6 +154,33 @@ void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
        /* Nothing special to do here... */
 }
 
+int or1k_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
+                                              s->length, dir, NULL);
+       }
+
+       return nents;
+}
+
+void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
+                  int nents, enum dma_data_direction dir,
+                  struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
+       }
+}
+
 void or1k_sync_single_for_cpu(struct device *dev,
                              dma_addr_t dma_handle, size_t size,
                              enum dma_data_direction dir)
@@ -187,5 +214,4 @@ static int __init dma_init(void)
 
        return 0;
 }
-
 fs_initcall(dma_init);
index 5f759c7..95207ab 100644 (file)
@@ -52,31 +52,25 @@ struct rt_sigframe {
 static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
 {
        unsigned int err = 0;
-       unsigned long old_usp;
 
 	/* Always make any pending restarted system call return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-       /* restore the regs from &sc->regs (same as sc, since regs is first)
+       /*
+        * Restore the regs from &sc->regs.
         * (sc is already checked for VERIFY_READ since the sigframe was
         *  checked in sys_sigreturn previously)
         */
-
-       if (__copy_from_user(regs, sc, sizeof(struct pt_regs)))
+       if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
+               goto badframe;
+       if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
+               goto badframe;
+       if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
                goto badframe;
 
        /* make sure the SM-bit is cleared so user-mode cannot fool us */
        regs->sr &= ~SPR_SR_SM;
 
-       /* restore the old USP as it was before we stacked the sc etc.
-        * (we cannot just pop the sigcontext since we aligned the sp and
-        *  stuff after pushing it)
-        */
-
-       err |= __get_user(old_usp, &sc->usp);
-
-       regs->sp = old_usp;
-
        /* TODO: the other ports use regs->orig_XX to disable syscall checks
         * after this completes, but we don't use that mechanism. maybe we can
         * use it now ?
@@ -137,18 +131,17 @@ static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
                            unsigned long mask)
 {
        int err = 0;
-       unsigned long usp = regs->sp;
 
-       /* copy the regs. they are first in sc so we can use sc directly */
+       /* copy the regs */
 
-       err |= __copy_to_user(sc, regs, sizeof(struct pt_regs));
+       err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
+       err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
+       err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
 
        /* then some other stuff */
 
        err |= __put_user(mask, &sc->oldmask);
 
-       err |= __put_user(usp, &sc->usp);
-
        return err;
 }
 
index 5cc8385..31a7d3a 100644 (file)
@@ -561,6 +561,20 @@ static struct pci_ops u4_pcie_pci_ops =
        .write = u4_pcie_write_config,
 };
 
+static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
+{
+       /* Apple's device-tree "hides" the root complex virtual P2P bridge
+        * on U4. However, Linux sees it, causing the PCI <-> OF matching
+        * code to fail to properly match devices below it. This works around
+        * it by setting the node of the bridge to point to the PHB node,
+        * which is not entirely correct but fixes the matching code and
+        * doesn't break anything else. It's also the simplest possible fix.
+        */
+       if (dev->dev.of_node == NULL)
+               dev->dev.of_node = pcibios_get_phb_of_node(dev->bus);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node);
+
 #endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_PPC32
index 64b61bf..547f1a6 100644 (file)
@@ -188,7 +188,8 @@ extern char elf_platform[];
 #define SET_PERSONALITY(ex)                                    \
 do {                                                           \
        if (personality(current->personality) != PER_LINUX32)   \
-               set_personality(PER_LINUX);                     \
+               set_personality(PER_LINUX |                     \
+                       (current->personality & ~PER_MASK));    \
        if ((ex).e_ident[EI_CLASS] == ELFCLASS32)               \
                set_thread_flag(TIF_31BIT);                     \
        else                                                    \
index 519eb5f..c0cb794 100644 (file)
@@ -658,12 +658,14 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
  * struct gmap_struct - guest address space
  * @mm: pointer to the parent mm_struct
  * @table: pointer to the page directory
+ * @asce: address space control element for gmap page table
  * @crst_list: list of all crst tables used in the guest address space
  */
 struct gmap {
        struct list_head list;
        struct mm_struct *mm;
        unsigned long *table;
+       unsigned long asce;
        struct list_head crst_list;
 };
 
index 532fd43..2b45591 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/sched.h>
 #include <asm/vdso.h>
 #include <asm/sigp.h>
+#include <asm/pgtable.h>
 
 /*
  * Make sure that the compiler is new enough. We want a compiler that
@@ -126,6 +127,7 @@ int main(void)
        DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
        DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
        DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
+       DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
        DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
        DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
        DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
@@ -151,6 +153,7 @@ int main(void)
        DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
        DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
        DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
+       DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 #endif /* CONFIG_32BIT */
        return 0;
 }
index 5f729d6..713da07 100644 (file)
@@ -1076,6 +1076,11 @@ sie_loop:
        lg      %r14,__LC_THREAD_INFO           # pointer thread_info struct
        tm      __TI_flags+7(%r14),_TIF_EXIT_SIE
        jnz     sie_exit
+       lg      %r14,__LC_GMAP                  # get gmap pointer
+       ltgr    %r14,%r14
+       jz      sie_gmap
+       lctlg   %c1,%c1,__GMAP_ASCE(%r14)       # load primary asce
+sie_gmap:
        lg      %r14,__SF_EMPTY(%r15)           # get control block pointer
        SPP     __SF_EMPTY(%r15)                # set guest id
        sie     0(%r14)
@@ -1083,6 +1088,7 @@ sie_done:
        SPP     __LC_CMF_HPP                    # set host id
        lg      %r14,__LC_THREAD_INFO           # pointer thread_info struct
 sie_exit:
+       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
        ni      __TI_flags+6(%r14),255-(_TIF_SIE>>8)
        lg      %r14,__SF_EMPTY+8(%r15)         # load guest register save area
        stmg    %r0,%r13,0(%r14)                # save guest gprs 0-13
index f17296e..dc2b580 100644 (file)
@@ -123,6 +123,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 
        switch (ext) {
        case KVM_CAP_S390_PSW:
+       case KVM_CAP_S390_GMAP:
                r = 1;
                break;
        default:
@@ -263,10 +264,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
+       gmap_enable(vcpu->arch.gmap);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
@@ -461,7 +464,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
-       gmap_enable(vcpu->arch.gmap);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -470,7 +472,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
-       gmap_disable(vcpu->arch.gmap);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();
index 4d1f2bc..5d56c2b 100644 (file)
@@ -160,6 +160,8 @@ struct gmap *gmap_alloc(struct mm_struct *mm)
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
        gmap->table = table;
+       gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+                    _ASCE_USER_BITS | __pa(table);
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;
 
@@ -240,10 +242,6 @@ EXPORT_SYMBOL_GPL(gmap_free);
  */
 void gmap_enable(struct gmap *gmap)
 {
-       /* Load primary space page table origin. */
-       S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
-                                _ASCE_USER_BITS | __pa(gmap->table);
-       asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
        S390_lowcore.gmap = (unsigned long) gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_enable);
@@ -254,10 +252,6 @@ EXPORT_SYMBOL_GPL(gmap_enable);
  */
 void gmap_disable(struct gmap *gmap)
 {
-       /* Load primary space page table origin. */
-       S390_lowcore.user_asce =
-               gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
-       asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
        S390_lowcore.gmap = 0UL;
 }
 EXPORT_SYMBOL_GPL(gmap_disable);
@@ -309,15 +303,15 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
                /* Walk the guest addr space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
-                       return 0;
+                       goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
-                       return 0;
+                       goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
-                       return 0;
+                       goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);
 
@@ -325,6 +319,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV;
        }
+out:
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
index 55a17c6..d06a266 100644 (file)
@@ -43,6 +43,8 @@
 #define SUN4V_CHIP_NIAGARA1    0x01
 #define SUN4V_CHIP_NIAGARA2    0x02
 #define SUN4V_CHIP_NIAGARA3    0x03
+#define SUN4V_CHIP_NIAGARA4    0x04
+#define SUN4V_CHIP_NIAGARA5    0x05
 #define SUN4V_CHIP_UNKNOWN     0xff
 
 #ifndef __ASSEMBLY__
index 9ed6ff6..ee8edc6 100644 (file)
@@ -66,6 +66,8 @@ static struct xor_block_template xor_block_niagara = {
        ((tlb_type == hypervisor && \
          (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \
           sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \
-          sun4v_chip_type == SUN4V_CHIP_NIAGARA3)) ? \
+          sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || \
+          sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || \
+          sun4v_chip_type == SUN4V_CHIP_NIAGARA5)) ? \
         &xor_block_niagara : \
         &xor_block_VIS)
index 9810fd8..ba9b1ce 100644 (file)
@@ -481,6 +481,18 @@ static void __init sun4v_cpu_probe(void)
                sparc_pmu_type = "niagara3";
                break;
 
+       case SUN4V_CHIP_NIAGARA4:
+               sparc_cpu_type = "UltraSparc T4 (Niagara4)";
+               sparc_fpu_type = "UltraSparc T4 integrated FPU";
+               sparc_pmu_type = "niagara4";
+               break;
+
+       case SUN4V_CHIP_NIAGARA5:
+               sparc_cpu_type = "UltraSparc T5 (Niagara5)";
+               sparc_fpu_type = "UltraSparc T5 integrated FPU";
+               sparc_pmu_type = "niagara5";
+               break;
+
        default:
                printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
                       prom_cpu_compatible);
index 4197e8d..9323eaf 100644 (file)
@@ -325,6 +325,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
        case SUN4V_CHIP_NIAGARA1:
        case SUN4V_CHIP_NIAGARA2:
        case SUN4V_CHIP_NIAGARA3:
+       case SUN4V_CHIP_NIAGARA4:
+       case SUN4V_CHIP_NIAGARA5:
                rover_inc_table = niagara_iterate_method;
                break;
        default:
index 0eac1b2..0d810c2 100644 (file)
@@ -133,7 +133,7 @@ prom_sun4v_name:
 prom_niagara_prefix:
        .asciz  "SUNW,UltraSPARC-T"
 prom_sparc_prefix:
-       .asciz  "SPARC-T"
+       .asciz  "SPARC-"
        .align  4
 prom_root_compatible:
        .skip   64
@@ -396,7 +396,7 @@ sun4v_chip_type:
        or      %g1, %lo(prom_cpu_compatible), %g1
        sethi   %hi(prom_sparc_prefix), %g7
        or      %g7, %lo(prom_sparc_prefix), %g7
-       mov     7, %g3
+       mov     6, %g3
 90:    ldub    [%g7], %g2
        ldub    [%g1], %g4
        cmp     %g2, %g4
@@ -408,10 +408,23 @@ sun4v_chip_type:
 
        sethi   %hi(prom_cpu_compatible), %g1
        or      %g1, %lo(prom_cpu_compatible), %g1
-       ldub    [%g1 + 7], %g2
+       ldub    [%g1 + 6], %g2
+       cmp     %g2, 'T'
+       be,pt   %xcc, 70f
+        cmp    %g2, 'M'
+       bne,pn  %xcc, 4f
+        nop
+
+70:    ldub    [%g1 + 7], %g2
        cmp     %g2, '3'
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA3, %g4
+       cmp     %g2, '4'
+       be,pt   %xcc, 5f
+        mov    SUN4V_CHIP_NIAGARA4, %g4
+       cmp     %g2, '5'
+       be,pt   %xcc, 5f
+        mov    SUN4V_CHIP_NIAGARA5, %g4
        ba,pt   %xcc, 4f
         nop
 
@@ -543,6 +556,12 @@ niagara_tlb_fixup:
        be,pt   %xcc, niagara2_patch
         nop
        cmp     %g1, SUN4V_CHIP_NIAGARA3
+       be,pt   %xcc, niagara2_patch
+        nop
+       cmp     %g1, SUN4V_CHIP_NIAGARA4
+       be,pt   %xcc, niagara2_patch
+        nop
+       cmp     %g1, SUN4V_CHIP_NIAGARA5
        be,pt   %xcc, niagara2_patch
         nop
 
index c8cc461..f793742 100644 (file)
@@ -380,8 +380,7 @@ void flush_thread(void)
 #endif
        }
 
-       /* Now, this task is no longer a kernel thread. */
-       current->thread.current_ds = USER_DS;
+       /* This task is no longer a kernel thread. */
        if (current->thread.flags & SPARC_FLAG_KTHREAD) {
                current->thread.flags &= ~SPARC_FLAG_KTHREAD;
 
index c158a95..d959cd0 100644 (file)
@@ -368,9 +368,6 @@ void flush_thread(void)
 
        /* Clear FPU register state. */
        t->fpsaved[0] = 0;
-       
-       if (get_thread_current_ds() != ASI_AIUS)
-               set_fs(USER_DS);
 }
 
 /* It's a bit more tricky when 64-bit tasks are involved... */
index d26e1f6..3e3e291 100644 (file)
@@ -137,7 +137,7 @@ static void __init process_switch(char c)
                prom_halt();
                break;
        case 'p':
-               /* Just ignore, this behavior is now the default.  */
+               prom_early_console.flags &= ~CON_BOOT;
                break;
        default:
                printk("Unknown boot switch (-%c)\n", c);
index 3c5bb78..c965595 100644 (file)
@@ -106,7 +106,7 @@ static void __init process_switch(char c)
                prom_halt();
                break;
        case 'p':
-               /* Just ignore, this behavior is now the default.  */
+               prom_early_console.flags &= ~CON_BOOT;
                break;
        case 'P':
                /* Force UltraSPARC-III P-Cache on. */
@@ -425,10 +425,14 @@ static void __init init_sparc64_elf_hwcap(void)
        else if (tlb_type == hypervisor) {
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
-                   sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+                   sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+                   sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+                   sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
                        cap |= HWCAP_SPARC_BLKINIT;
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
-                   sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+                   sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+                   sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+                   sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
                        cap |= HWCAP_SPARC_N2;
        }
 
@@ -452,11 +456,15 @@ static void __init init_sparc64_elf_hwcap(void)
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
                                cap |= AV_SPARC_ASI_BLK_INIT;
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
-                           sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+                           sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+                           sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+                           sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
                                        AV_SPARC_ASI_BLK_INIT |
                                        AV_SPARC_POPC);
-                       if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+                       if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+                           sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+                           sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
                                        AV_SPARC_FMAF);
                }
index 581531d..8e073d8 100644 (file)
@@ -511,6 +511,11 @@ static void __init read_obp_translations(void)
                for (i = 0; i < prom_trans_ents; i++)
                        prom_trans[i].data &= ~0x0003fe0000000000UL;
        }
+
+       /* Force execute bit on.  */
+       for (i = 0; i < prom_trans_ents; i++)
+               prom_trans[i].data |= (tlb_type == hypervisor ?
+                                      _PAGE_EXEC_4V : _PAGE_EXEC_4U);
 }
 
 static void __init hypervisor_tlb_lock(unsigned long vaddr,
index d31ecf3..21bebe6 100644 (file)
@@ -10,6 +10,10 @@ config CMPXCHG_LOCAL
        bool
        default n
 
+config CMPXCHG_DOUBLE
+       bool
+       default n
+
 source "arch/x86/Kconfig.cpu"
 
 endmenu
index fab8121..c0f712c 100644 (file)
@@ -41,7 +41,7 @@ KBUILD_CPPFLAGS += -I$(srctree)/$(ARCH_DIR)/sys-$(SUBARCH)
 KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \
        $(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap      \
        -Din6addr_loopback=kernel_in6addr_loopback \
-       -Din6addr_any=kernel_in6addr_any
+       -Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr
 
 KBUILD_AFLAGS += $(ARCH_INCLUDE)
 
index d51c404..364c8a1 100644 (file)
@@ -399,8 +399,8 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
  * is done under a spinlock.  Checking whether the device is in use is
  * line->tty->count > 1, also under the spinlock.
  *
- * tty->count serves to decide whether the device should be enabled or
- * disabled on the host.  If it's equal to 1, then we are doing the
+ * line->count serves to decide whether the device should be enabled or
+ * disabled on the host.  If it's equal to 0, then we are doing the
  * first open or last close.  Otherwise, open and close just return.
  */
 
@@ -414,16 +414,16 @@ int line_open(struct line *lines, struct tty_struct *tty)
                goto out_unlock;
 
        err = 0;
-       if (tty->count > 1)
+       if (line->count++)
                goto out_unlock;
 
-       spin_unlock(&line->count_lock);
-
+       BUG_ON(tty->driver_data);
        tty->driver_data = line;
        line->tty = tty;
 
+       spin_unlock(&line->count_lock);
        err = enable_chan(line);
-       if (err)
+       if (err) /* line_close() will be called by our caller */
                return err;
 
        INIT_DELAYED_WORK(&line->task, line_timer_cb);
@@ -436,7 +436,7 @@ int line_open(struct line *lines, struct tty_struct *tty)
        chan_window_size(&line->chan_list, &tty->winsize.ws_row,
                         &tty->winsize.ws_col);
 
-       return err;
+       return 0;
 
 out_unlock:
        spin_unlock(&line->count_lock);
@@ -460,17 +460,16 @@ void line_close(struct tty_struct *tty, struct file * filp)
        flush_buffer(line);
 
        spin_lock(&line->count_lock);
-       if (!line->valid)
-               goto out_unlock;
+       BUG_ON(!line->valid);
 
-       if (tty->count > 1)
+       if (--line->count)
                goto out_unlock;
 
-       spin_unlock(&line->count_lock);
-
        line->tty = NULL;
        tty->driver_data = NULL;
 
+       spin_unlock(&line->count_lock);
+
        if (line->sigio) {
                unregister_winch(tty);
                line->sigio = 0;
@@ -498,7 +497,7 @@ static int setup_one_line(struct line *lines, int n, char *init, int init_prio,
 
        spin_lock(&line->count_lock);
 
-       if (line->tty != NULL) {
+       if (line->count) {
                *error_out = "Device is already open";
                goto out;
        }
@@ -722,41 +721,53 @@ struct winch {
        int pid;
        struct tty_struct *tty;
        unsigned long stack;
+       struct work_struct work;
 };
 
-static void free_winch(struct winch *winch, int free_irq_ok)
+static void __free_winch(struct work_struct *work)
 {
-       if (free_irq_ok)
-               free_irq(WINCH_IRQ, winch);
-
-       list_del(&winch->list);
+       struct winch *winch = container_of(work, struct winch, work);
+       free_irq(WINCH_IRQ, winch);
 
        if (winch->pid != -1)
                os_kill_process(winch->pid, 1);
-       if (winch->fd != -1)
-               os_close_file(winch->fd);
        if (winch->stack != 0)
                free_stack(winch->stack, 0);
        kfree(winch);
 }
 
+static void free_winch(struct winch *winch)
+{
+       int fd = winch->fd;
+       winch->fd = -1;
+       if (fd != -1)
+               os_close_file(fd);
+       list_del(&winch->list);
+       __free_winch(&winch->work);
+}
+
 static irqreturn_t winch_interrupt(int irq, void *data)
 {
        struct winch *winch = data;
        struct tty_struct *tty;
        struct line *line;
+       int fd = winch->fd;
        int err;
        char c;
 
-       if (winch->fd != -1) {
-               err = generic_read(winch->fd, &c, NULL);
+       if (fd != -1) {
+               err = generic_read(fd, &c, NULL);
                if (err < 0) {
                        if (err != -EAGAIN) {
+                               winch->fd = -1;
+                               list_del(&winch->list);
+                               os_close_file(fd);
                                printk(KERN_ERR "winch_interrupt : "
                                       "read failed, errno = %d\n", -err);
                                printk(KERN_ERR "fd %d is losing SIGWINCH "
                                       "support\n", winch->tty_fd);
-                               free_winch(winch, 0);
+                               INIT_WORK(&winch->work, __free_winch);
+                               schedule_work(&winch->work);
                                return IRQ_HANDLED;
                        }
                        goto out;
@@ -828,7 +839,7 @@ static void unregister_winch(struct tty_struct *tty)
        list_for_each_safe(ele, next, &winch_handlers) {
                winch = list_entry(ele, struct winch, list);
                if (winch->tty == tty) {
-                       free_winch(winch, 1);
+                       free_winch(winch);
                        break;
                }
        }
@@ -844,7 +855,7 @@ static void winch_cleanup(void)
 
        list_for_each_safe(ele, next, &winch_handlers) {
                winch = list_entry(ele, struct winch, list);
-               free_winch(winch, 1);
+               free_winch(winch);
        }
 
        spin_unlock(&winch_handler_lock);
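
The winch rework above splits teardown into two paths: unregister_winch() and winch_cleanup() can close the fd and free everything directly, but when the failure is detected inside winch_interrupt() the free_irq() call is deferred to a workqueue through __free_winch(), because a handler must not free the IRQ it is currently running on. The same defer-to-workqueue skeleton, stripped of the UML specifics (the names and the error condition are placeholders):

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct foo {
            int irq;
            struct work_struct work;
    };

    static void foo_release(struct work_struct *work)
    {
            struct foo *f = container_of(work, struct foo, work);

            free_irq(f->irq, f);    /* safe here: process context */
            kfree(f);
    }

    static irqreturn_t foo_interrupt(int irq, void *data)
    {
            struct foo *f = data;
            bool fatal = false;     /* set when the underlying source dies */

            if (fatal) {
                    /* cannot call free_irq() from our own handler */
                    INIT_WORK(&f->work, foo_release);
                    schedule_work(&f->work);
            }
            return IRQ_HANDLED;
    }
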
index 8ac7146..2e1de57 100644 (file)
@@ -123,6 +123,7 @@ static int xterm_open(int input, int output, int primary, void *d,
                err = -errno;
                printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n",
                       errno);
+               close(fd);
                return err;
        }
        close(fd);
index ae084ad..1a7d275 100644 (file)
@@ -42,10 +42,6 @@ extern long subarch_ptrace(struct task_struct *child, long request,
        unsigned long addr, unsigned long data);
 extern unsigned long getreg(struct task_struct *child, int regno);
 extern int putreg(struct task_struct *child, int regno, unsigned long value);
-extern int get_fpregs(struct user_i387_struct __user *buf,
-                     struct task_struct *child);
-extern int set_fpregs(struct user_i387_struct __user *buf,
-                     struct task_struct *child);
 
 extern int arch_copy_tls(struct task_struct *new);
 extern void clear_flushed_tls(struct task_struct *task);
index 72f4f25..63df3ca 100644 (file)
@@ -33,6 +33,7 @@ struct line_driver {
 struct line {
        struct tty_struct *tty;
        spinlock_t count_lock;
+       unsigned long count;
        int valid;
 
        char *init_str;
index b0b4589..f1e0aa5 100644 (file)
@@ -16,7 +16,7 @@ extern int restore_fpx_registers(int pid, unsigned long *fp_regs);
 extern int save_registers(int pid, struct uml_pt_regs *regs);
 extern int restore_registers(int pid, struct uml_pt_regs *regs);
 extern int init_registers(int pid);
-extern void get_safe_registers(unsigned long *regs);
+extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs);
 extern unsigned long get_thread_reg(int reg, jmp_buf *buf);
 extern int get_fp_registers(int pid, unsigned long *regs);
 extern int put_fp_registers(int pid, unsigned long *regs);
index fab4371..21c1ae7 100644 (file)
@@ -202,7 +202,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        }
        else {
-               get_safe_registers(p->thread.regs.regs.gp);
+               get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
                p->thread.request.u.thread = current->thread.request.u.thread;
                handler = new_thread_handler;
        }
index 701b672..c9da32b 100644 (file)
@@ -50,23 +50,11 @@ long arch_ptrace(struct task_struct *child, long request,
        void __user *vp = p;
 
        switch (request) {
-       /* read word at location addr. */
-       case PTRACE_PEEKTEXT:
-       case PTRACE_PEEKDATA:
-               ret = generic_ptrace_peekdata(child, addr, data);
-               break;
-
        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR:
                ret = peek_user(child, addr, data);
                break;
 
-       /* write the word at location addr. */
-       case PTRACE_POKETEXT:
-       case PTRACE_POKEDATA:
-               ret = generic_ptrace_pokedata(child, addr, data);
-               break;
-
        /* write the word at location addr in the USER area */
        case PTRACE_POKEUSR:
                ret = poke_user(child, addr, data);
@@ -106,16 +94,6 @@ long arch_ptrace(struct task_struct *child, long request,
                ret = 0;
                break;
        }
-#endif
-#ifdef PTRACE_GETFPREGS
-       case PTRACE_GETFPREGS: /* Get the child FPU state. */
-               ret = get_fpregs(vp, child);
-               break;
-#endif
-#ifdef PTRACE_SETFPREGS
-       case PTRACE_SETFPREGS: /* Set the child FPU state. */
-               ret = set_fpregs(vp, child);
-               break;
 #endif
        case PTRACE_GET_THREAD_AREA:
                ret = ptrace_get_thread_area(child, addr, vp);
@@ -153,12 +131,6 @@ long arch_ptrace(struct task_struct *child, long request,
                ret = -EIO;
                break;
        }
-#endif
-#ifdef PTRACE_ARCH_PRCTL
-       case PTRACE_ARCH_PRCTL:
-               /* XXX Calls ptrace on the host - needs some SMP thinking */
-               ret = arch_prctl(child, data, (void __user *) addr);
-               break;
 #endif
        default:
                ret = ptrace_request(child, request, addr, data);
index 830fe6a..b866b9e 100644 (file)
@@ -8,6 +8,8 @@
 #include <string.h>
 #include <sys/ptrace.h>
 #include "sysdep/ptrace.h"
+#include "sysdep/ptrace_user.h"
+#include "registers.h"
 
 int save_registers(int pid, struct uml_pt_regs *regs)
 {
@@ -32,6 +34,7 @@ int restore_registers(int pid, struct uml_pt_regs *regs)
 /* This is set once at boot time and not changed thereafter */
 
 static unsigned long exec_regs[MAX_REG_NR];
+static unsigned long exec_fp_regs[FP_SIZE];
 
 int init_registers(int pid)
 {
@@ -42,10 +45,14 @@ int init_registers(int pid)
                return -errno;
 
        arch_init_registers(pid);
+       get_fp_registers(pid, exec_fp_regs);
        return 0;
 }
 
-void get_safe_registers(unsigned long *regs)
+void get_safe_registers(unsigned long *regs, unsigned long *fp_regs)
 {
        memcpy(regs, exec_regs, sizeof(exec_regs));
+
+       if (fp_regs)
+               memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs));
 }
index d261f17..e771398 100644 (file)
@@ -39,7 +39,7 @@ static unsigned long syscall_regs[MAX_REG_NR];
 
 static int __init init_syscall_regs(void)
 {
-       get_safe_registers(syscall_regs);
+       get_safe_registers(syscall_regs, NULL);
        syscall_regs[REGS_IP_INDEX] = STUB_CODE +
                ((unsigned long) &batch_syscall_stub -
                 (unsigned long) &__syscall_stub_start);
index d6e0a22..dee0e8c 100644 (file)
@@ -373,6 +373,9 @@ void userspace(struct uml_pt_regs *regs)
                if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
                        fatal_sigsegv();
 
+               if (put_fp_registers(pid, regs->fp))
+                       fatal_sigsegv();
+
                /* Now we set local_using_sysemu to be used for one loop */
                local_using_sysemu = get_using_sysemu();
 
@@ -399,6 +402,12 @@ void userspace(struct uml_pt_regs *regs)
                        fatal_sigsegv();
                }
 
+               if (get_fp_registers(pid, regs->fp)) {
+                       printk(UM_KERN_ERR "userspace -  get_fp_registers failed, "
+                              "errno = %d\n", errno);
+                       fatal_sigsegv();
+               }
+
                UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */
 
                if (WIFSTOPPED(status)) {
@@ -457,10 +466,11 @@ void userspace(struct uml_pt_regs *regs)
 }
 
 static unsigned long thread_regs[MAX_REG_NR];
+static unsigned long thread_fp_regs[FP_SIZE];
 
 static int __init init_thread_regs(void)
 {
-       get_safe_registers(thread_regs);
+       get_safe_registers(thread_regs, thread_fp_regs);
        /* Set parent's instruction pointer to start of clone-stub */
        thread_regs[REGS_IP_INDEX] = STUB_CODE +
                                (unsigned long) stub_clone_handler -
@@ -503,6 +513,13 @@ int copy_context_skas0(unsigned long new_stack, int pid)
                return err;
        }
 
+       err = put_fp_registers(pid, thread_fp_regs);
+       if (err < 0) {
+               printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers "
+                      "failed, pid = %d, err = %d\n", pid, err);
+               return err;
+       }
+
        /* set a well known return code for detection of child write failure */
        child_data->err = 12345678;
 
index 0273e4d..5d2a591 100644 (file)
  */
 struct user_desc;
 
-extern int get_fpxregs(struct user_fxsr_struct __user *buf,
-                      struct task_struct *child);
-extern int set_fpxregs(struct user_fxsr_struct __user *buf,
-                      struct task_struct *tsk);
-
 extern int ptrace_get_thread_area(struct task_struct *child, int idx,
                                   struct user_desc __user *user_desc);
 
index d23b2d3..3375c27 100644 (file)
@@ -145,7 +145,7 @@ int peek_user(struct task_struct *child, long addr, long data)
        return put_user(tmp, (unsigned long __user *) data);
 }
 
-int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
        int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
        struct user_i387_struct fpregs;
@@ -161,7 +161,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
        return n;
 }
 
-int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
        int n, cpu = ((struct thread_info *) child->stack)->cpu;
        struct user_i387_struct fpregs;
@@ -174,7 +174,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
                                    (unsigned long *) &fpregs);
 }
 
-int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
+static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
 {
        int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
        struct user_fxsr_struct fpregs;
@@ -190,7 +190,7 @@ int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
        return n;
 }
 
-int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
+static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
 {
        int n, cpu = ((struct thread_info *) child->stack)->cpu;
        struct user_fxsr_struct fpregs;
@@ -206,5 +206,23 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
 long subarch_ptrace(struct task_struct *child, long request,
                    unsigned long addr, unsigned long data)
 {
-       return -EIO;
+       int ret = -EIO;
+       void __user *datap = (void __user *) data;
+       switch (request) {
+       case PTRACE_GETFPREGS: /* Get the child FPU state. */
+               ret = get_fpregs(datap, child);
+               break;
+       case PTRACE_SETFPREGS: /* Set the child FPU state. */
+               ret = set_fpregs(datap, child);
+               break;
+       case PTRACE_GETFPXREGS: /* Get the child FPU state. */
+               ret = get_fpxregs(datap, child);
+               break;
+       case PTRACE_SETFPXREGS: /* Set the child FPU state. */
+               ret = set_fpxregs(datap, child);
+               break;
+       default:
+               ret = -EIO;
+       }
+       return ret;
 }
index d50e62e..c398a50 100644 (file)
@@ -53,6 +53,7 @@ extern int sysemu_supported;
 
 struct uml_pt_regs {
        unsigned long gp[MAX_REG_NR];
+       unsigned long fp[HOST_FPX_SIZE];
        struct faultinfo faultinfo;
        long syscall;
        int is_user;
index f436136..4005506 100644 (file)
@@ -145,7 +145,7 @@ int is_syscall(unsigned long addr)
        return instr == 0x050f;
 }
 
-int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
        int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
        long fpregs[HOST_FP_SIZE];
@@ -162,7 +162,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
        return n;
 }
 
-int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
        int n, cpu = ((struct thread_info *) child->stack)->cpu;
        long fpregs[HOST_FP_SIZE];
@@ -182,12 +182,16 @@ long subarch_ptrace(struct task_struct *child, long request,
        void __user *datap = (void __user *) data;
 
        switch (request) {
-       case PTRACE_GETFPXREGS: /* Get the child FPU state. */
+       case PTRACE_GETFPREGS: /* Get the child FPU state. */
                ret = get_fpregs(datap, child);
                break;
-       case PTRACE_SETFPXREGS: /* Set the child FPU state. */
+       case PTRACE_SETFPREGS: /* Set the child FPU state. */
                ret = set_fpregs(datap, child);
                break;
+       case PTRACE_ARCH_PRCTL:
+               /* XXX Calls ptrace on the host - needs some SMP thinking */
+               ret = arch_prctl(child, data, (void __user *) addr);
+               break;
        }
 
        return ret;
index fdba545..8ee8f8e 100644 (file)
@@ -85,6 +85,7 @@
 
 struct uml_pt_regs {
        unsigned long gp[MAX_REG_NR];
+       unsigned long fp[HOST_FP_SIZE];
        struct faultinfo faultinfo;
        long syscall;
        int is_user;
index 4554cc6..091508b 100644 (file)
@@ -16,7 +16,6 @@
 #endif
 
 .macro altinstruction_entry orig alt feature orig_len alt_len
-       .align 8
        .long \orig - .
        .long \alt - .
        .word \feature
index 23fb6d7..37ad100 100644 (file)
@@ -48,9 +48,6 @@ struct alt_instr {
        u16 cpuid;              /* cpuid bit set for replacement */
        u8  instrlen;           /* length of original instruction */
        u8  replacementlen;     /* length of new instruction, <= instrlen */
-#ifdef CONFIG_X86_64
-       u32 pad2;
-#endif
 };
 
 extern void alternative_instructions(void);
@@ -83,7 +80,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
                                                                        \
       "661:\n\t" oldinstr "\n662:\n"                                   \
       ".section .altinstructions,\"a\"\n"                              \
-      _ASM_ALIGN "\n"                                                  \
       "         .long 661b - .\n"                      /* label           */   \
       "         .long 663f - .\n"                      /* new instruction */   \
       "         .word " __stringify(feature) "\n"      /* feature bit     */   \
index 4258aac..88b23a4 100644 (file)
@@ -332,7 +332,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                asm goto("1: jmp %l[t_no]\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
-                        _ASM_ALIGN "\n"
                         " .long 1b - .\n"
                         " .long 0\n"           /* no replacement */
                         " .word %P0\n"         /* feature bit */
@@ -350,7 +349,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                asm volatile("1: movb $0,%0\n"
                             "2:\n"
                             ".section .altinstructions,\"a\"\n"
-                            _ASM_ALIGN "\n"
                             " .long 1b - .\n"
                             " .long 3f - .\n"
                             " .word %P1\n"             /* feature bit */
index a518c0a..c59cc97 100644 (file)
@@ -44,7 +44,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
                : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
        __asm__ (
-               "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+               "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
                : [lo]"=a"(product),
                  [hi]"=d"(tmp)
                : "0"(delta),
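
The one-character mulq fix above matters because the scaling pvclock performs is a full 64x32 multiply followed by a 32-bit right shift; presumably, since mul_frac is a 32-bit value, the suffix-less mul can be assembled as the 32-bit multiply and lose the high half of the product. A small, hedged illustration of the same arithmetic in portable C, assuming a compiler with __int128; this shows only the math, not the kernel routine.

/* Illustrative only: the scaling computed is (delta * mul_frac) >> 32. */
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac)
{
	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
	/* mul_frac = 2^32 / 3 approximates dividing the TSC delta by 3 */
	uint32_t mul_frac = 0x55555555;

	printf("%llu\n", (unsigned long long)scale_delta(3000000000ULL, mul_frac));
	return 0;
}
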
index 4ee3abf..cfa62ec 100644 (file)
@@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
        perf_callchain_store(entry, regs->ip);
 
+       if (!current->mm)
+               return;
+
        if (perf_callchain_user32(regs, entry))
                return;
 
index 3f2ad26..ccdbc16 100644 (file)
@@ -42,8 +42,11 @@ int mach_set_rtc_mmss(unsigned long nowtime)
 {
        int real_seconds, real_minutes, cmos_minutes;
        unsigned char save_control, save_freq_select;
+       unsigned long flags;
        int retval = 0;
 
+       spin_lock_irqsave(&rtc_lock, flags);
+
         /* tell the clock it's being set */
        save_control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
@@ -93,12 +96,17 @@ int mach_set_rtc_mmss(unsigned long nowtime)
        CMOS_WRITE(save_control, RTC_CONTROL);
        CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
 
+       spin_unlock_irqrestore(&rtc_lock, flags);
+
        return retval;
 }
 
 unsigned long mach_get_cmos_time(void)
 {
        unsigned int status, year, mon, day, hour, min, sec, century = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rtc_lock, flags);
 
        /*
         * If UIP is clear, then we have >= 244 microseconds before
@@ -125,6 +133,8 @@ unsigned long mach_get_cmos_time(void)
        status = CMOS_READ(RTC_CONTROL);
        WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
 
+       spin_unlock_irqrestore(&rtc_lock, flags);
+
        if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
                sec = bcd2bin(sec);
                min = bcd2bin(min);
@@ -169,24 +179,15 @@ EXPORT_SYMBOL(rtc_cmos_write);
 
 int update_persistent_clock(struct timespec now)
 {
-       unsigned long flags;
-       int retval;
-
-       spin_lock_irqsave(&rtc_lock, flags);
-       retval = x86_platform.set_wallclock(now.tv_sec);
-       spin_unlock_irqrestore(&rtc_lock, flags);
-
-       return retval;
+       return x86_platform.set_wallclock(now.tv_sec);
 }
 
 /* not static: needed by APM */
 void read_persistent_clock(struct timespec *ts)
 {
-       unsigned long retval, flags;
+       unsigned long retval;
 
-       spin_lock_irqsave(&rtc_lock, flags);
        retval = x86_platform.get_wallclock();
-       spin_unlock_irqrestore(&rtc_lock, flags);
 
        ts->tv_sec = retval;
        ts->tv_nsec = 0;
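
The hunks above push rtc_lock down into the wallclock accessors, so every path that reads or writes the CMOS registers is serialized rather than only the callers that remembered to take the lock; the Moorestown vrtc hunks below apply the same idea. A hedged userspace analogue of the refactor, with a pthread mutex standing in for rtc_lock and a fake register; the names are illustrative only.

/* Sketch: take the lock inside the accessor instead of in every caller. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long cmos_seconds = 1319529256;	/* pretend hardware register */

/* after the change: the accessor serializes access itself */
static unsigned long get_wallclock(void)
{
	unsigned long now;

	pthread_mutex_lock(&rtc_lock);
	now = cmos_seconds;
	pthread_mutex_unlock(&rtc_lock);
	return now;
}

/* callers no longer need to know about rtc_lock at all */
int main(void)
{
	printf("%lu\n", get_wallclock());
	return 0;
}
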
index 6f08bc9..8b4cc5f 100644 (file)
@@ -3603,7 +3603,7 @@ done_prefixes:
                break;
        case Src2CL:
                ctxt->src2.bytes = 1;
-               ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0x8;
+               ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
                break;
        case Src2ImmByte:
                rc = decode_imm(ctxt, &ctxt->src2, 1, true);
index 1c5b693..8e8da79 100644 (file)
@@ -400,7 +400,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
 
        /* xchg acts as a barrier before the setting of the high bits */
        orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
-       orig.spte_high = ssptep->spte_high = sspte.spte_high;
+       orig.spte_high = ssptep->spte_high;
+       ssptep->spte_high = sspte.spte_high;
        count_spte_clear(sptep, spte);
 
        return orig.spte;
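
The split assignment above exists because a chained a = b = c stores the new value into both places, so the old contents of spte_high are never observed. A tiny sketch of the difference:

/* Sketch: a chained assignment captures the *new* value, not the old one. */
#include <assert.h>

struct slot { unsigned long hi; };

int main(void)
{
	struct slot s = { .hi = 0xaaaa };
	unsigned long orig_wrong, orig_right, newval = 0xbbbb;

	/* wrong: orig_wrong observes the value that was just written */
	orig_wrong = s.hi = newval;
	assert(orig_wrong == 0xbbbb);

	s.hi = 0xaaaa;			/* reset */

	/* right: read the old value first, then store the new one */
	orig_right = s.hi;
	s.hi = newval;
	assert(orig_right == 0xaaaa);

	return 0;
}
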
index c953302..404f21a 100644 (file)
@@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
                 },
         },
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
+       /* 2006 AMD HT/VIA system with two host bridges */
+       {
+               .callback = set_use_crs,
+               .ident = "ASUS M2V-MX SE",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
+                       DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+               },
+       },
        {}
 };
 
@@ -365,8 +376,13 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
         */
        if (bus) {
                struct pci_bus *child;
-               list_for_each_entry(child, &bus->children, node)
-                       pcie_bus_configure_settings(child, child->self->pcie_mpss);
+               list_for_each_entry(child, &bus->children, node) {
+                       struct pci_dev *self = child->self;
+                       if (!self)
+                               continue;
+
+                       pcie_bus_configure_settings(child, self->pcie_mpss);
+               }
        }
 
        if (!bus)
index 73d70d6..6d5dbcd 100644 (file)
@@ -58,8 +58,11 @@ EXPORT_SYMBOL_GPL(vrtc_cmos_write);
 unsigned long vrtc_get_time(void)
 {
        u8 sec, min, hour, mday, mon;
+       unsigned long flags;
        u32 year;
 
+       spin_lock_irqsave(&rtc_lock, flags);
+
        while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
                cpu_relax();
 
@@ -70,6 +73,8 @@ unsigned long vrtc_get_time(void)
        mon = vrtc_cmos_read(RTC_MONTH);
        year = vrtc_cmos_read(RTC_YEAR);
 
+       spin_unlock_irqrestore(&rtc_lock, flags);
+
        /* vRTC YEAR reg contains the offset to 1960 */
        year += 1960;
 
@@ -83,8 +88,10 @@ unsigned long vrtc_get_time(void)
 int vrtc_set_mmss(unsigned long nowtime)
 {
        int real_sec, real_min;
+       unsigned long flags;
        int vrtc_min;
 
+       spin_lock_irqsave(&rtc_lock, flags);
        vrtc_min = vrtc_cmos_read(RTC_MINUTES);
 
        real_sec = nowtime % 60;
@@ -95,6 +102,8 @@ int vrtc_set_mmss(unsigned long nowtime)
 
        vrtc_cmos_write(real_sec, RTC_SECONDS);
        vrtc_cmos_write(real_min, RTC_MINUTES);
+       spin_unlock_irqrestore(&rtc_lock, flags);
+
        return 0;
 }
 
index 20a6142..3dd53f9 100644 (file)
@@ -1721,10 +1721,8 @@ void __init xen_setup_machphys_mapping(void)
                machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
        }
 #ifdef CONFIG_X86_32
-       if ((machine_to_phys_mapping + machine_to_phys_nr)
-           < machine_to_phys_mapping)
-               machine_to_phys_nr = (unsigned long *)NULL
-                                    - machine_to_phys_mapping;
+       WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
+               < machine_to_phys_mapping);
 #endif
 }
 
index df118a8..46d6d21 100644 (file)
@@ -184,6 +184,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
                                        PFN_UP(start_pci), PFN_DOWN(last));
        return identity;
 }
+
+static unsigned long __init xen_get_max_pages(void)
+{
+       unsigned long max_pages = MAX_DOMAIN_PAGES;
+       domid_t domid = DOMID_SELF;
+       int ret;
+
+       ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+       if (ret > 0)
+               max_pages = ret;
+       return min(max_pages, MAX_DOMAIN_PAGES);
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -292,6 +305,14 @@ char * __init xen_memory_setup(void)
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
+       extra_limit = xen_get_max_pages();
+       if (max_pfn + extra_pages > extra_limit) {
+               if (extra_limit > max_pfn)
+                       extra_pages = extra_limit - max_pfn;
+               else
+                       extra_pages = 0;
+       }
+
        extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
 
        /*
index e79dbb9..041d4fe 100644 (file)
@@ -32,6 +32,7 @@
 #include <xen/page.h>
 #include <xen/events.h>
 
+#include <xen/hvc-console.h>
 #include "xen-ops.h"
 #include "mmu.h"
 
@@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
        unsigned cpu;
        unsigned int i;
 
+       if (skip_ioapic_setup) {
+               char *m = (max_cpus == 0) ?
+                       "The nosmp parameter is incompatible with Xen; " \
+                       "use Xen dom0_max_vcpus=1 parameter" :
+                       "The noapic parameter is incompatible with Xen";
+
+               xen_raw_printk(m);
+               panic(m);
+       }
        xen_init_lock_cpu(0);
 
        smp_store_cpu_info(0);
@@ -522,7 +532,6 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
        WARN_ON(xen_smp_intr_init(0));
 
        xen_init_lock_cpu(0);
-       xen_init_spinlocks();
 }
 
 static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
index 5158c50..163b467 100644 (file)
@@ -168,9 +168,10 @@ cycle_t xen_clocksource_read(void)
         struct pvclock_vcpu_time_info *src;
        cycle_t ret;
 
-       src = &get_cpu_var(xen_vcpu)->time;
+       preempt_disable_notrace();
+       src = &__get_cpu_var(xen_vcpu)->time;
        ret = pvclock_clocksource_read(src);
-       put_cpu_var(xen_vcpu);
+       preempt_enable_notrace();
        return ret;
 }
 
index 22a2093..b040b0e 100644 (file)
@@ -113,11 +113,13 @@ xen_iret_start_crit:
 
        /*
         * If there's something pending, mask events again so we can
-        * jump back into xen_hypervisor_callback
+        * jump back into xen_hypervisor_callback. Otherwise do not
+        * touch XEN_vcpu_info_mask.
         */
-       sete XEN_vcpu_info_mask(%eax)
+       jne 1f
+       movb $1, XEN_vcpu_info_mask(%eax)
 
-       popl %eax
+1:     popl %eax
 
        /*
         * From this point on the registers are restored and the stack
index bcaf16e..b596e54 100644 (file)
@@ -785,10 +785,10 @@ static int blkio_policy_parse_and_set(char *buf,
 {
        char *s[4], *p, *major_s = NULL, *minor_s = NULL;
        int ret;
-       unsigned long major, minor, temp;
+       unsigned long major, minor;
        int i = 0;
        dev_t dev;
-       u64 bps, iops;
+       u64 temp;
 
        memset(s, 0, sizeof(s));
 
@@ -826,20 +826,23 @@ static int blkio_policy_parse_and_set(char *buf,
 
        dev = MKDEV(major, minor);
 
-       ret = blkio_check_dev_num(dev);
+       ret = strict_strtoull(s[1], 10, &temp);
        if (ret)
-               return ret;
+               return -EINVAL;
 
-       newpn->dev = dev;
+       /* For rule removal, do not check for device presence. */
+       if (temp) {
+               ret = blkio_check_dev_num(dev);
+               if (ret)
+                       return ret;
+       }
 
-       if (s[1] == NULL)
-               return -EINVAL;
+       newpn->dev = dev;
 
        switch (plid) {
        case BLKIO_POLICY_PROP:
-               ret = strict_strtoul(s[1], 10, &temp);
-               if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
-                       temp > BLKIO_WEIGHT_MAX)
+               if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
+                    temp > BLKIO_WEIGHT_MAX)
                        return -EINVAL;
 
                newpn->plid = plid;
@@ -850,26 +853,18 @@ static int blkio_policy_parse_and_set(char *buf,
                switch(fileid) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
-                       ret = strict_strtoull(s[1], 10, &bps);
-                       if (ret)
-                               return -EINVAL;
-
                        newpn->plid = plid;
                        newpn->fileid = fileid;
-                       newpn->val.bps = bps;
+                       newpn->val.bps = temp;
                        break;
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
-                       ret = strict_strtoull(s[1], 10, &iops);
-                       if (ret)
-                               return -EINVAL;
-
-                       if (iops > THROTL_IOPS_MAX)
+                       if (temp > THROTL_IOPS_MAX)
                                return -EINVAL;
 
                        newpn->plid = plid;
                        newpn->fileid = fileid;
-                       newpn->val.iops = (unsigned int)iops;
+                       newpn->val.iops = (unsigned int)temp;
                        break;
                }
                break;
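
The rework above parses the value field once with strict_strtoull() into a single u64 and only afterwards applies the per-policy range checks, skipping the device-existence check when the value is zero (rule removal). A hedged userspace sketch of the same parse-then-validate shape using strtoull(); the limit constant and helper names are stand-ins, not the block-layer symbols.

/* Sketch: parse the value once, then range-check it per policy. */
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THROTL_IOPS_MAX  UINT_MAX	/* illustrative limit */

static int parse_u64_strict(const char *s, uint64_t *out)
{
	char *end;

	errno = 0;
	*out = strtoull(s, &end, 10);
	if (errno || end == s || *end != '\0')
		return -EINVAL;		/* reject junk and overflow */
	return 0;
}

static int set_iops_rule(const char *val)
{
	uint64_t temp;

	if (parse_u64_strict(val, &temp))
		return -EINVAL;
	if (temp > THROTL_IOPS_MAX)	/* per-policy range check */
		return -EINVAL;
	/* temp == 0 would mean "remove this rule" in the blk-cgroup code,
	 * which is why the device-existence check is skipped for it */
	return 0;
}

int main(void)
{
	printf("%d %d\n", set_iops_rule("1000"), set_iops_rule("bogus"));
	return 0;
}
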
index 90e1ffd..d34433a 100644 (file)
@@ -348,9 +348,10 @@ void blk_put_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_put_queue);
 
 /*
- * Note: If a driver supplied the queue lock, it should not zap that lock
- * unexpectedly as some queue cleanup components like elevator_exit() and
- * blk_throtl_exit() need queue lock.
+ * Note: If a driver supplied the queue lock, it is disconnected
+ * by this function. The actual state of the lock doesn't matter
+ * here as the request_queue isn't accessible after this point
+ * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
@@ -367,10 +368,8 @@ void blk_cleanup_queue(struct request_queue *q)
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
        mutex_unlock(&q->sysfs_lock);
 
-       if (q->elevator)
-               elevator_exit(q->elevator);
-
-       blk_throtl_exit(q);
+       if (q->queue_lock != &q->__queue_lock)
+               q->queue_lock = &q->__queue_lock;
 
        blk_put_queue(q);
 }
@@ -1167,7 +1166,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
  * true if merge was successful, otherwise false.
  */
 static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
-                              struct bio *bio)
+                              struct bio *bio, unsigned int *request_count)
 {
        struct blk_plug *plug;
        struct request *rq;
@@ -1176,10 +1175,13 @@ static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
        plug = tsk->plug;
        if (!plug)
                goto out;
+       *request_count = 0;
 
        list_for_each_entry_reverse(rq, &plug->list, queuelist) {
                int el_ret;
 
+               (*request_count)++;
+
                if (rq->q != q)
                        continue;
 
@@ -1219,6 +1221,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        struct blk_plug *plug;
        int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
        struct request *req;
+       unsigned int request_count = 0;
 
        /*
         * low level driver can indicate that it wants pages above a
@@ -1237,7 +1240,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
-       if (attempt_plug_merge(current, q, bio))
+       if (attempt_plug_merge(current, q, bio, &request_count))
                goto out;
 
        spin_lock_irq(q->queue_lock);
@@ -1302,11 +1305,10 @@ get_rq:
                        if (__rq->q != q)
                                plug->should_sort = 1;
                }
+               if (request_count >= BLK_MAX_REQUEST_COUNT)
+                       blk_flush_plug_list(plug, false);
                list_add_tail(&req->queuelist, &plug->list);
-               plug->count++;
                drive_stat_acct(req, 1);
-               if (plug->count >= BLK_MAX_REQUEST_COUNT)
-                       blk_flush_plug_list(plug, false);
        } else {
                spin_lock_irq(q->queue_lock);
                add_acct_request(q, req, where);
@@ -2634,7 +2636,6 @@ void blk_start_plug(struct blk_plug *plug)
        INIT_LIST_HEAD(&plug->list);
        INIT_LIST_HEAD(&plug->cb_list);
        plug->should_sort = 0;
-       plug->count = 0;
 
        /*
         * If this is a nested plug, don't actually assign it. It will be
@@ -2718,7 +2719,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                return;
 
        list_splice_init(&plug->list, &list);
-       plug->count = 0;
 
        if (plug->should_sort) {
                list_sort(NULL, &list, plug_rq_cmp);
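
The change above drops the plug->count field and instead counts the plugged requests during the merge scan, flushing the plug before a new request is added once the count reaches BLK_MAX_REQUEST_COUNT. A rough sketch of that count-while-scanning, flush-when-full shape on a toy list; the limit, types and dispatch step are made up for illustration.

/* Sketch: count queued entries while walking the batch and flush it once
 * it reaches a limit, before appending the new entry. */
#include <stddef.h>
#include <stdio.h>

#define MAX_BATCH 16

struct req {
	struct req *next;
	int id;
};

static struct req *batch;		/* head of the plugged list */

static void flush_batch(void)
{
	for (struct req *r = batch; r; r = r->next)
		printf("dispatch %d\n", r->id);
	batch = NULL;
}

static void queue_req(struct req *req_new)
{
	unsigned int count = 0;

	for (struct req *r = batch; r; r = r->next)
		count++;		/* the merge scan doubles as the counter */

	if (count >= MAX_BATCH)
		flush_batch();

	req_new->next = batch;		/* prepend; ordering does not matter here */
	batch = req_new;
}

int main(void)
{
	static struct req pool[40];

	for (int i = 0; i < 40; i++) {
		pool[i].id = i;
		queue_req(&pool[i]);
	}
	flush_batch();
	return 0;
}
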
index 58340d0..1366a89 100644 (file)
@@ -115,7 +115,7 @@ void __blk_complete_request(struct request *req)
        /*
         * Select completion CPU
         */
-       if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) {
+       if (req->cpu != -1) {
                ccpu = req->cpu;
                if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
                        ccpu = blk_cpu_to_group(ccpu);
index 0ee17b5..60fda88 100644 (file)
@@ -258,11 +258,13 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 
        ret = queue_var_store(&val, page, count);
        spin_lock_irq(q->queue_lock);
-       if (val) {
+       if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-               if (val == 2)
-                       queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
-       } else {
+               queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+       } else if (val == 1) {
+               queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+               queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+       } else if (val == 0) {
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
@@ -477,6 +479,11 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_sync_queue(q);
 
+       if (q->elevator)
+               elevator_exit(q->elevator);
+
+       blk_throtl_exit(q);
+
        if (rl->rq_pool)
                mempool_destroy(rl->rq_pool);
 
index a33bd43..16ace89 100644 (file)
@@ -130,8 +130,8 @@ struct cfq_queue {
        unsigned long slice_end;
        long slice_resid;
 
-       /* pending metadata requests */
-       int meta_pending;
+       /* pending priority requests */
+       int prio_pending;
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;
 
@@ -684,8 +684,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
        if (rq_is_sync(rq1) != rq_is_sync(rq2))
                return rq_is_sync(rq1) ? rq1 : rq2;
 
-       if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
-               return rq1->cmd_flags & REQ_META ? rq1 : rq2;
+       if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
+               return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
 
        s1 = blk_rq_pos(rq1);
        s2 = blk_rq_pos(rq2);
@@ -1612,9 +1612,9 @@ static void cfq_remove_request(struct request *rq)
        cfqq->cfqd->rq_queued--;
        cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
                                        rq_data_dir(rq), rq_is_sync(rq));
-       if (rq->cmd_flags & REQ_META) {
-               WARN_ON(!cfqq->meta_pending);
-               cfqq->meta_pending--;
+       if (rq->cmd_flags & REQ_PRIO) {
+               WARN_ON(!cfqq->prio_pending);
+               cfqq->prio_pending--;
        }
 }
 
@@ -3372,7 +3372,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
         */
-       if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
+       if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
                return true;
 
        /*
@@ -3439,8 +3439,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        struct cfq_io_context *cic = RQ_CIC(rq);
 
        cfqd->rq_queued++;
-       if (rq->cmd_flags & REQ_META)
-               cfqq->meta_pending++;
+       if (rq->cmd_flags & REQ_PRIO)
+               cfqq->prio_pending++;
 
        cfq_update_io_thinktime(cfqd, cfqq, cic);
        cfq_update_io_seektime(cfqd, cfqq, rq);
index bc533dd..f895a24 100644 (file)
 
 /* Maximum sleep allowed via Sleep() operator */
 
-#define ACPI_MAX_SLEEP                  20000  /* Two seconds */
+#define ACPI_MAX_SLEEP                  2000   /* Two seconds */
 
 /******************************************************************************
  *
index c34aa51..e3f4787 100644 (file)
@@ -13,6 +13,7 @@ config ACPI_APEI_GHES
        bool "APEI Generic Hardware Error Source"
        depends on ACPI_APEI && X86
        select ACPI_HED
+       select IRQ_WORK
        select LLIST
        select GENERIC_ALLOCATOR
        help
index 8041248..6154036 100644 (file)
@@ -618,7 +618,7 @@ int apei_osc_setup(void)
        };
 
        capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-       capbuf[OSC_SUPPORT_TYPE] = 0;
+       capbuf[OSC_SUPPORT_TYPE] = 1;
        capbuf[OSC_CONTROL_TYPE] = 0;
 
        if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
index 2c18d58..b97294e 100644 (file)
@@ -41,6 +41,22 @@ static struct pm_clk_data *__to_pcd(struct device *dev)
        return dev ? dev->power.subsys_data : NULL;
 }
 
+/**
+ * pm_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
+{
+       ce->clk = clk_get(dev, ce->con_id);
+       if (IS_ERR(ce->clk)) {
+               ce->status = PCE_STATUS_ERROR;
+       } else {
+               ce->status = PCE_STATUS_ACQUIRED;
+               dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+       }
+}
+
 /**
  * pm_clk_add - Start using a device clock for power management.
  * @dev: Device whose clock is going to be used for power management.
@@ -73,6 +89,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
                }
        }
 
+       pm_clk_acquire(dev, ce);
+
        spin_lock_irq(&pcd->lock);
        list_add_tail(&ce->node, &pcd->clock_list);
        spin_unlock_irq(&pcd->lock);
@@ -82,17 +100,12 @@ int pm_clk_add(struct device *dev, const char *con_id)
 /**
  * __pm_clk_remove - Destroy PM clock entry.
  * @ce: PM clock entry to destroy.
- *
- * This routine must be called under the spinlock protecting the PM list of
- * clocks corresponding the the @ce's device.
  */
 static void __pm_clk_remove(struct pm_clock_entry *ce)
 {
        if (!ce)
                return;
 
-       list_del(&ce->node);
-
        if (ce->status < PCE_STATUS_ERROR) {
                if (ce->status == PCE_STATUS_ENABLED)
                        clk_disable(ce->clk);
@@ -126,18 +139,22 @@ void pm_clk_remove(struct device *dev, const char *con_id)
        spin_lock_irq(&pcd->lock);
 
        list_for_each_entry(ce, &pcd->clock_list, node) {
-               if (!con_id && !ce->con_id) {
-                       __pm_clk_remove(ce);
-                       break;
-               } else if (!con_id || !ce->con_id) {
+               if (!con_id && !ce->con_id)
+                       goto remove;
+               else if (!con_id || !ce->con_id)
                        continue;
-               } else if (!strcmp(con_id, ce->con_id)) {
-                       __pm_clk_remove(ce);
-                       break;
-               }
+               else if (!strcmp(con_id, ce->con_id))
+                       goto remove;
        }
 
        spin_unlock_irq(&pcd->lock);
+       return;
+
+ remove:
+       list_del(&ce->node);
+       spin_unlock_irq(&pcd->lock);
+
+       __pm_clk_remove(ce);
 }
 
 /**
@@ -175,43 +192,33 @@ void pm_clk_destroy(struct device *dev)
 {
        struct pm_clk_data *pcd = __to_pcd(dev);
        struct pm_clock_entry *ce, *c;
+       struct list_head list;
 
        if (!pcd)
                return;
 
        dev->power.subsys_data = NULL;
+       INIT_LIST_HEAD(&list);
 
        spin_lock_irq(&pcd->lock);
 
        list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
-               __pm_clk_remove(ce);
+               list_move(&ce->node, &list);
 
        spin_unlock_irq(&pcd->lock);
 
        kfree(pcd);
+
+       list_for_each_entry_safe_reverse(ce, c, &list, node) {
+               list_del(&ce->node);
+               __pm_clk_remove(ce);
+       }
 }
 
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
-/**
- * pm_clk_acquire - Acquire a device clock.
- * @dev: Device whose clock is to be acquired.
- * @con_id: Connection ID of the clock.
- */
-static void pm_clk_acquire(struct device *dev,
-                                   struct pm_clock_entry *ce)
-{
-       ce->clk = clk_get(dev, ce->con_id);
-       if (IS_ERR(ce->clk)) {
-               ce->status = PCE_STATUS_ERROR;
-       } else {
-               ce->status = PCE_STATUS_ACQUIRED;
-               dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
-       }
-}
-
 /**
  * pm_clk_suspend - Disable clocks in a device's PM clock list.
  * @dev: Device to disable the clocks for.
@@ -230,9 +237,6 @@ int pm_clk_suspend(struct device *dev)
        spin_lock_irqsave(&pcd->lock, flags);
 
        list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
-               if (ce->status == PCE_STATUS_NONE)
-                       pm_clk_acquire(dev, ce);
-
                if (ce->status < PCE_STATUS_ERROR) {
                        clk_disable(ce->clk);
                        ce->status = PCE_STATUS_ACQUIRED;
@@ -262,9 +266,6 @@ int pm_clk_resume(struct device *dev)
        spin_lock_irqsave(&pcd->lock, flags);
 
        list_for_each_entry(ce, &pcd->clock_list, node) {
-               if (ce->status == PCE_STATUS_NONE)
-                       pm_clk_acquire(dev, ce);
-
                if (ce->status < PCE_STATUS_ERROR) {
                        clk_enable(ce->clk);
                        ce->status = PCE_STATUS_ENABLED;
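
pm_clk_destroy() above now moves the clock entries onto a local list while holding the spinlock and only calls __pm_clk_remove() after dropping it, so the teardown (clk_put() may sleep) runs without the lock held; pm_clk_remove() gets the same treatment via the new remove label. A hedged pthread sketch of the detach-under-the-lock, release-outside-it pattern; the entry type and resource are illustrative.

/* Sketch: unlink entries onto a private list under the lock, then do the
 * potentially sleeping teardown outside it. */
#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	void *resource;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *entries;

static void release_resource(void *res)	/* may block; must not hold the lock */
{
	free(res);
}

static void destroy_all(void)
{
	struct entry *local, *e;

	pthread_mutex_lock(&lock);
	local = entries;		/* detach the whole list ... */
	entries = NULL;
	pthread_mutex_unlock(&lock);	/* ... and drop the lock early */

	while ((e = local) != NULL) {	/* teardown happens unlocked */
		local = e->next;
		release_resource(e->resource);
		free(e);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));

		e->resource = malloc(32);
		e->next = entries;
		entries = e;
	}
	destroy_all();
	return 0;
}
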
index 0eef4da..20663f8 100644 (file)
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
        map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
        if (map->work_buf == NULL) {
                ret = -ENOMEM;
-               goto err_bus;
+               goto err_map;
        }
 
        return map;
 
-err_bus:
-       module_put(map->bus->owner);
 err_map:
        kfree(map);
 err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
 void regmap_exit(struct regmap *map)
 {
        kfree(map->work_buf);
-       module_put(map->bus->owner);
        kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
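
The regmap hunk retargets the work_buf failure path from err_bus to err_map because the module_put() that err_bus undid is gone; nothing bus-related needs unwinding any more. The general pattern, as a small hedged sketch with made-up fields: each goto label unwinds exactly the setup steps that had succeeded when the jump is taken.

/* Sketch of goto-based unwinding: each label frees only what was already
 * set up at the point of failure. */
#include <stdlib.h>

struct map {
	void *cache;
	void *work_buf;
};

struct map *map_init(size_t cache_sz, size_t buf_sz)
{
	struct map *map = malloc(sizeof(*map));

	if (!map)
		goto err;

	map->cache = malloc(cache_sz);
	if (!map->cache)
		goto err_map;		/* only the map itself exists */

	map->work_buf = malloc(buf_sz);
	if (!map->work_buf)
		goto err_cache;		/* map and cache exist */

	return map;

err_cache:
	free(map->cache);
err_map:
	free(map);
err:
	return NULL;
}

int main(void)
{
	struct map *m = map_init(64, 256);

	if (m) {
		free(m->work_buf);
		free(m->cache);
		free(m);
	}
	return 0;
}
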
index 98de8f4..9955a53 100644 (file)
@@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
        use_virtual_dma = can_use_virtual_dma & 1;
        fdc_state[0].address = FDC1;
        if (fdc_state[0].address == -1) {
-               del_timer(&fd_timeout);
+               del_timer_sync(&fd_timeout);
                err = -ENODEV;
                goto out_unreg_region;
        }
@@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
        fdc = 0;                /* reset fdc in case of unexpected interrupt */
        err = floppy_grab_irq_and_dma();
        if (err) {
-               del_timer(&fd_timeout);
+               del_timer_sync(&fd_timeout);
                err = -EBUSY;
                goto out_unreg_region;
        }
@@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
                user_reset_fdc(-1, FD_RESET_ALWAYS, false);
        }
        fdc = 0;
-       del_timer(&fd_timeout);
+       del_timer_sync(&fd_timeout);
        current_drive = 0;
        initialized = true;
        if (have_no_fdc) {
@@ -4368,7 +4368,7 @@ out_unreg_blkdev:
        unregister_blkdev(FLOPPY_MAJOR, "fd");
 out_put_disk:
        while (dr--) {
-               del_timer(&motor_off_timer[dr]);
+               del_timer_sync(&motor_off_timer[dr]);
                if (disks[dr]->queue)
                        blk_cleanup_queue(disks[dr]->queue);
                put_disk(disks[dr]);
index 9e40b28..00c57c9 100644 (file)
@@ -46,7 +46,7 @@
 
 #define DRV_PFX "xen-blkback:"
 #define DPRINTK(fmt, args...)                          \
-       pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",  \
+       pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",          \
                 __func__, __LINE__, ##args)
 
 
index 3f129b4..5fd2010 100644 (file)
@@ -590,7 +590,7 @@ static void frontend_changed(struct xenbus_device *dev,
 
                /*
                 * Enforce precondition before potential leak point.
-                * blkif_disconnect() is idempotent.
+                * xen_blkif_disconnect() is idempotent.
                 */
                xen_blkif_disconnect(be->blkif);
 
@@ -601,17 +601,17 @@ static void frontend_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateClosing:
-               xen_blkif_disconnect(be->blkif);
                xenbus_switch_state(dev, XenbusStateClosing);
                break;
 
        case XenbusStateClosed:
+               xen_blkif_disconnect(be->blkif);
                xenbus_switch_state(dev, XenbusStateClosed);
                if (xenbus_dev_is_online(dev))
                        break;
                /* fall through if not online */
        case XenbusStateUnknown:
-               /* implies blkif_disconnect() via blkback_remove() */
+               /* implies xen_blkif_disconnect() via xen_blkbk_remove() */
                device_unregister(&dev->dev);
                break;
 
index 3ef4760..9cbac6b 100644 (file)
@@ -72,9 +72,15 @@ static struct usb_device_id btusb_table[] = {
        /* Apple MacBookAir3,1, MacBookAir3,2 */
        { USB_DEVICE(0x05ac, 0x821b) },
 
+       /* Apple MacBookAir4,1 */
+       { USB_DEVICE(0x05ac, 0x821f) },
+
        /* Apple MacBookPro8,2 */
        { USB_DEVICE(0x05ac, 0x821a) },
 
+       /* Apple MacMini5,1 */
+       { USB_DEVICE(0x05ac, 0x8281) },
+
        /* AVM BlueFRITZ! USB v2.0 */
        { USB_DEVICE(0x057c, 0x3800) },
 
index 65d27af..04d353f 100644 (file)
@@ -124,6 +124,13 @@ static long st_receive(void *priv_data, struct sk_buff *skb)
 /* ------- Interfaces to HCI layer ------ */
 /* protocol structure registered with shared transport */
 static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
+       {
+               .chnl_id = HCI_EVENT_PKT, /* HCI Events */
+               .hdr_len = sizeof(struct hci_event_hdr),
+               .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
+               .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
+               .reserve = 8,
+       },
        {
                .chnl_id = HCI_ACLDATA_PKT, /* ACL */
                .hdr_len = sizeof(struct hci_acl_hdr),
@@ -138,13 +145,6 @@ static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
                .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
                .reserve = 8,
        },
-       {
-               .chnl_id = HCI_EVENT_PKT, /* HCI Events */
-               .hdr_len = sizeof(struct hci_event_hdr),
-               .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
-               .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
-               .reserve = 8,
-       },
 };
 
 /* Called from HCI core to initialize the device */
@@ -240,7 +240,7 @@ static int ti_st_close(struct hci_dev *hdev)
        if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
                return 0;
 
-       for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+       for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
                err = st_unregister(&ti_st_proto[i]);
                if (err)
                        BT_ERR("st_unregister(%d) failed with error %d",
index f6595ab..fa567f1 100644 (file)
@@ -43,6 +43,7 @@ config TCG_NSC
 
 config TCG_ATMEL
        tristate "Atmel TPM Interface"
+       depends on PPC64 || HAS_IOPORT
        ---help---
          If you have a TPM security chip from Atmel say Yes and it 
          will be accessible from within Linux.  To compile this driver 
index caf8012..9ca5c02 100644 (file)
@@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
        u32 count, ordinal;
        unsigned long stop;
 
+       if (bufsiz > TPM_BUFSIZE)
+               bufsiz = TPM_BUFSIZE;
+
        count = be32_to_cpu(*((__be32 *) (buf + 2)));
        ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
        if (count == 0)
@@ -1102,6 +1105,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
 {
        struct tpm_chip *chip = file->private_data;
        ssize_t ret_size;
+       int rc;
 
        del_singleshot_timer_sync(&chip->user_read_timer);
        flush_work_sync(&chip->work);
@@ -1112,8 +1116,11 @@ ssize_t tpm_read(struct file *file, char __user *buf,
                        ret_size = size;
 
                mutex_lock(&chip->buffer_mutex);
-               if (copy_to_user(buf, chip->data_buffer, ret_size))
+               rc = copy_to_user(buf, chip->data_buffer, ret_size);
+               memset(chip->data_buffer, 0, ret_size);
+               if (rc)
                        ret_size = -EFAULT;
+
                mutex_unlock(&chip->buffer_mutex);
        }
 
index 82facc9..4d24648 100644 (file)
@@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void)
        if (pdev) {
                tpm_nsc_remove(&pdev->dev);
                platform_device_unregister(pdev);
-               kfree(pdev);
-               pdev = NULL;
        }
 
        platform_driver_unregister(&nsc_drv);
index 7b0603e..cdc02ac 100644 (file)
@@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu)
        pr = per_cpu(processors, cpu);
        pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
 
+       if (!pr)
+               return -ENODEV;
+
        status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return -ENODEV;
index cd3a7c7..467e4dc 100644 (file)
@@ -174,8 +174,10 @@ struct d40_base;
  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
  * transfer and call client callback.
  * @client: Cliented owned descriptor list.
+ * @pending_queue: Submitted jobs, to be issued by issue_pending()
  * @active: Active descriptor.
  * @queue: Queued jobs.
+ * @prepare_queue: Prepared jobs.
  * @dma_cfg: The client configuration of this dma channel.
  * @configured: whether the dma_cfg configuration is valid
  * @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
        struct list_head                 pending_queue;
        struct list_head                 active;
        struct list_head                 queue;
+       struct list_head                 prepare_queue;
        struct stedma40_chan_cfg         dma_cfg;
        bool                             configured;
        struct d40_base                 *base;
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 
                list_for_each_entry_safe(d, _d, &d40c->client, node)
                        if (async_tx_test_ack(&d->txd)) {
-                               d40_pool_lli_free(d40c, d);
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
        return d;
 }
 
+/* remove desc from current queue and add it to the pending_queue */
 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
 {
+       d40_desc_remove(desc);
+       desc->is_in_client_list = false;
        list_add_tail(&desc->node, &d40c->pending_queue);
 }
 
@@ -803,6 +808,7 @@ done:
 static void d40_term_all(struct d40_chan *d40c)
 {
        struct d40_desc *d40d;
+       struct d40_desc *_d;
 
        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
                d40_desc_free(d40c, d40d);
        }
 
+       /* Release client owned descriptors */
+       if (!list_empty(&d40c->client))
+               list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
+                       d40_desc_remove(d40d);
+                       d40_desc_free(d40c, d40d);
+               }
+
+       /* Release descriptors in prepare queue */
+       if (!list_empty(&d40c->prepare_queue))
+               list_for_each_entry_safe(d40d, _d,
+                                        &d40c->prepare_queue, node) {
+                       d40_desc_remove(d40d);
+                       d40_desc_free(d40c, d40d);
+               }
+
        d40c->pending_tx = 0;
        d40c->busy = false;
 }
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
 
        if (!d40d->cyclic) {
                if (async_tx_test_ack(&d40d->txd)) {
-                       d40_pool_lli_free(d40c, d40d);
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                } else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
        u32 event;
        struct d40_phy_res *phy = d40c->phy_chan;
        bool is_src;
-       struct d40_desc *d;
-       struct d40_desc *_d;
-
 
        /* Terminate all queued and active transfers */
        d40_term_all(d40c);
 
-       /* Release client owned descriptors */
-       if (!list_empty(&d40c->client))
-               list_for_each_entry_safe(d, _d, &d40c->client, node) {
-                       d40_pool_lli_free(d40c, d);
-                       d40_desc_remove(d);
-                       d40_desc_free(d40c, d);
-               }
-
        if (phy == NULL) {
                chan_err(d40c, "phy == null\n");
                return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
                goto err;
        }
 
+       /*
+        * add descriptor to the prepare queue in order to be able
+        * to free them later in terminate_all
+        */
+       list_add_tail(&desc->node, &chan->prepare_queue);
+
        spin_unlock_irqrestore(&chan->lock, flags);
 
        return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
                INIT_LIST_HEAD(&d40c->queue);
                INIT_LIST_HEAD(&d40c->pending_queue);
                INIT_LIST_HEAD(&d40c->client);
+               INIT_LIST_HEAD(&d40c->prepare_queue);
 
                tasklet_init(&d40c->tasklet, dma_tasklet,
                             (unsigned long) d40c);
index 57cd3a4..fd7170a 100644 (file)
@@ -290,6 +290,9 @@ static const struct {
        {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER},
 
+       {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
+               QUIRK_NO_MSI},
+
        {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER},
 
index 231714d..4e24436 100644 (file)
@@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,
        return 0;
 }
 
-int __devexit bgpio_remove(struct bgpio_chip *bgc)
+int bgpio_remove(struct bgpio_chip *bgc)
 {
        int err = gpiochip_remove(&bgc->gc);
 
@@ -361,15 +361,10 @@ int __devexit bgpio_remove(struct bgpio_chip *bgc)
 }
 EXPORT_SYMBOL_GPL(bgpio_remove);
 
-int __devinit bgpio_init(struct bgpio_chip *bgc,
-                        struct device *dev,
-                        unsigned long sz,
-                        void __iomem *dat,
-                        void __iomem *set,
-                        void __iomem *clr,
-                        void __iomem *dirout,
-                        void __iomem *dirin,
-                        bool big_endian)
+int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
+              unsigned long sz, void __iomem *dat, void __iomem *set,
+              void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
+              bool big_endian)
 {
        int ret;
 
index 802b61a..f7c6854 100644 (file)
@@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
 {
        printk(KERN_ERR "panic occurred, switching back to text console\n");
        return drm_fb_helper_force_kernel_mode();
-       return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_panic);
 
index ce045a8..f07e425 100644 (file)
@@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
                "Enable power-saving render C-state 6 (default: true)");
 
-unsigned int i915_enable_fbc __read_mostly = 1;
+unsigned int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
 MODULE_PARM_DESC(i915_enable_fbc,
                "Enable frame buffer compression for power savings "
-               "(default: false)");
+               "(default: -1 (use per-chip default))");
 
 unsigned int i915_lvds_downclock __read_mostly = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
index 56a8554..04411ad 100644 (file)
@@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev)
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
+       int enable_fbc;
 
        DRM_DEBUG_KMS("\n");
 
@@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev)
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;
 
-       if (!i915_enable_fbc) {
-               DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
+       enable_fbc = i915_enable_fbc;
+       if (enable_fbc < 0) {
+               DRM_DEBUG_KMS("fbc set to per-chip default\n");
+               enable_fbc = 1;
+               if (INTEL_INFO(dev)->gen <= 5)
+                       enable_fbc = 0;
+       }
+       if (!enable_fbc) {
+               DRM_DEBUG_KMS("fbc disabled per module param\n");
                dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
                goto out_disable;
        }
@@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                bpc = 6; /* min is 18bpp */
                break;
        case 24:
-               bpc = min((unsigned int)8, display_bpc);
+               bpc = 8;
                break;
        case 30:
-               bpc = min((unsigned int)10, display_bpc);
+               bpc = 10;
                break;
        case 48:
-               bpc = min((unsigned int)12, display_bpc);
+               bpc = 12;
                break;
        default:
                DRM_DEBUG("unsupported depth, assuming 24 bits\n");
@@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                break;
        }
 
+       display_bpc = min(display_bpc, bpc);
+
        DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
                         bpc, display_bpc);
 
-       *pipe_bpp = bpc * 3;
+       *pipe_bpp = display_bpc * 3;
 
        return display_bpc != bpc;
 }
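
The hunk clamps display_bpc to the depth-derived bpc and programs the pipe with display_bpc * 3 bits per pixel, returning whether dithering is needed. Worked through in a hedged sketch (the 18-bpp case and the HDMI/EDID handling are omitted): a 24-bpp framebuffer gives bpc = 8, a 6-bpc panel clamps display_bpc to 6, so pipe_bpp becomes 18 and dithering is enabled.

/* Illustration of the clamp; not the driver function itself. */
#include <stdbool.h>
#include <stdio.h>

static bool choose_pipe_bpp(unsigned int depth, unsigned int display_bpc,
			    unsigned int *pipe_bpp)
{
	unsigned int bpc;

	switch (depth) {
	case 30: bpc = 10; break;
	case 48: bpc = 12; break;
	case 24:
	default: bpc = 8;  break;	/* the driver also handles a 6-bpc case */
	}

	if (display_bpc > bpc)
		display_bpc = bpc;	/* never exceed what the framebuffer provides */

	*pipe_bpp = display_bpc * 3;
	return display_bpc != bpc;	/* true => dither down to display_bpc */
}

int main(void)
{
	unsigned int bpp;
	bool dither = choose_pipe_bpp(24, 6, &bpp);

	printf("pipe_bpp=%u dither=%d\n", bpp, dither);	/* prints 18, 1 */
	return 0;
}
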
index 0b2ee9d..fe1099d 100644 (file)
@@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
                                           struct drm_connector *connector,
                                           struct intel_load_detect_pipe *old);
 
-extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
-extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
-extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
 extern void intelfb_restore(void);
 extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                                    u16 blue, int regno);
index 30fe554..6348c49 100644 (file)
@@ -92,6 +92,11 @@ struct intel_sdvo {
        */
        uint16_t attached_output;
 
+       /*
+        * Hotplug activation bits for this device
+        */
+       uint8_t hotplug_active[2];
+
        /**
         * This is used to select the color range of RBG outputs in HDMI mode.
         * It is only valid when using TMDS encoding and 8 bit per color mode.
@@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
        return true;
 }
 
-/* No use! */
-#if 0
-struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
-       struct drm_connector *connector = NULL;
-       struct intel_sdvo *iout = NULL;
-       struct intel_sdvo *sdvo;
-
-       /* find the sdvo connector */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               iout = to_intel_sdvo(connector);
-
-               if (iout->type != INTEL_OUTPUT_SDVO)
-                       continue;
-
-               sdvo = iout->dev_priv;
-
-               if (sdvo->sdvo_reg == SDVOB && sdvoB)
-                       return connector;
-
-               if (sdvo->sdvo_reg == SDVOC && !sdvoB)
-                       return connector;
-
-       }
-
-       return NULL;
-}
-
-int intel_sdvo_supports_hotplug(struct drm_connector *connector)
+static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
 {
        u8 response[2];
-       u8 status;
-       struct intel_sdvo *intel_sdvo;
-       DRM_DEBUG_KMS("\n");
-
-       if (!connector)
-               return 0;
-
-       intel_sdvo = to_intel_sdvo(connector);
 
        return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
                                    &response, 2) && response[0];
 }
 
-void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
 {
-       u8 response[2];
-       u8 status;
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
-
-       intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-       intel_sdvo_read_response(intel_sdvo, &response, 2);
-
-       if (on) {
-               intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
-               status = intel_sdvo_read_response(intel_sdvo, &response, 2);
-
-               intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
-       } else {
-               response[0] = 0;
-               response[1] = 0;
-               intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
-       }
+       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
 
-       intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-       intel_sdvo_read_response(intel_sdvo, &response, 2);
+       intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
 }
-#endif
 
 static bool
 intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
@@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 {
        struct drm_encoder *encoder = &intel_sdvo->base.base;
        struct drm_connector *connector;
+       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
 
@@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 
        intel_connector = &intel_sdvo_connector->base;
        connector = &intel_connector->base;
-       connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+       if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+               connector->polled = DRM_CONNECTOR_POLL_HPD;
+               intel_sdvo->hotplug_active[0] |= 1 << device;
+               /* Some SDVO devices have one-shot hotplug interrupts.
+                * Ensure that they get re-enabled when an interrupt happens.
+                */
+               intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+               intel_sdvo_enable_hotplug(intel_encoder);
+       }
+       else
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
        encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
        connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
@@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
        if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
                goto err;
 
+       /* Set up hotplug command - note paranoia about contents of reply.
+        * We assume that the hardware is in a sane state, and only touch
+        * the bits we think we understand.
+        */
+       intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
+                            &intel_sdvo->hotplug_active, 2);
+       intel_sdvo->hotplug_active[0] &= ~0x3;
+
        if (intel_sdvo_output_setup(intel_sdvo,
                                    intel_sdvo->caps.output_flags) != true) {
                DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
index 8d02d87..c919cfc 100644 (file)
@@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
                nouveau_gpuobj_ref(NULL, &obj);
                if (ret)
                        return ret;
-       } else {
+       } else
+       if (USE_SEMA(dev)) {
                /* map fence bo into channel's vm */
                ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
                                         &chan->fence.vma);
index c444cad..2706cb3 100644 (file)
@@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                return -ENOMEM;
 
        nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
-       if (!nvbe->ttm_alloced)
+       if (!nvbe->ttm_alloced) {
+               kfree(nvbe->pages);
+               nvbe->pages = NULL;
                return -ENOMEM;
+       }
 
        nvbe->nr_pages = 0;
        while (num_pages--) {
@@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
                        nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-                       dma_offset += NV_CTXDMA_PAGE_SIZE;
+                       offset_l += NV_CTXDMA_PAGE_SIZE;
                }
        }
 
index 118261d..5e45398 100644 (file)
@@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
-       struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+       struct drm_framebuffer *drm_fb;
+       struct nouveau_framebuffer *fb;
        int arb_burst, arb_lwm;
        int ret;
 
+       NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+
+       /* no fb bound */
+       if (!atomic && !crtc->fb) {
+               NV_DEBUG_KMS(dev, "No FB bound\n");
+               return 0;
+       }
+
+
        /* If atomic, we want to switch to the fb we were passed, so
         * now we update pointers to do that.  (We don't pin; just
         * assume we're already pinned and update the base address.)
@@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
                drm_fb = passed_fb;
                fb = nouveau_framebuffer(passed_fb);
        } else {
+               drm_fb = crtc->fb;
+               fb = nouveau_framebuffer(crtc->fb);
                /* If not atomic, we can go ahead and pin, and unpin the
                 * old fb we were passed.
                 */
index 46ad59e..5d98907 100644 (file)
@@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
        struct drm_device *dev = nv_crtc->base.dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
-       struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+       struct drm_framebuffer *drm_fb;
+       struct nouveau_framebuffer *fb;
        int ret;
 
        NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
+       /* no fb bound */
+       if (!atomic && !crtc->fb) {
+               NV_DEBUG_KMS(dev, "No FB bound\n");
+               return 0;
+       }
+
        /* If atomic, we want to switch to the fb we were passed, so
         * now we update pointers to do that.  (We don't pin; just
         * assume we're already pinned and update the base address.)
@@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
                drm_fb = passed_fb;
                fb = nouveau_framebuffer(passed_fb);
        } else {
+               drm_fb = crtc->fb;
+               fb = nouveau_framebuffer(crtc->fb);
                /* If not atomic, we can go ahead and pin, and unpin the
                 * old fb we were passed.
                 */
index 7ad43c6..4da2388 100644 (file)
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
        u8 msg[20];
        int msg_bytes = send_bytes + 4;
        u8 ack;
+       unsigned retry;
 
        if (send_bytes > 16)
                return -1;
@@ -125,20 +126,20 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
        msg[3] = (msg_bytes << 4) | (send_bytes - 1);
        memcpy(&msg[4], send, send_bytes);
 
-       while (1) {
+       for (retry = 0; retry < 4; retry++) {
                ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                                            msg, msg_bytes, NULL, 0, delay, &ack);
                if (ret < 0)
                        return ret;
                if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
-                       break;
+                       return send_bytes;
                else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
                        udelay(400);
                else
                        return -EIO;
        }
 
-       return send_bytes;
+       return -EIO;
 }
 
 static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
@@ -149,26 +150,29 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
        int msg_bytes = 4;
        u8 ack;
        int ret;
+       unsigned retry;
 
        msg[0] = address;
        msg[1] = address >> 8;
        msg[2] = AUX_NATIVE_READ << 4;
        msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
 
-       while (1) {
+       for (retry = 0; retry < 4; retry++) {
                ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                                            msg, msg_bytes, recv, recv_bytes, delay, &ack);
-               if (ret == 0)
-                       return -EPROTO;
                if (ret < 0)
                        return ret;
                if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
                        return ret;
                else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
                        udelay(400);
+               else if (ret == 0)
+                       return -EPROTO;
                else
                        return -EIO;
        }
+
+       return -EIO;
 }
 
 static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
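
The two hunks above bound the DP AUX helpers: the endless while (1) loops become four attempts, an ACK returns success immediately, a DEFER waits and retries, and anything else fails with -EIO, as does running out of attempts. The read path also checks for a zero-length transfer only after the DEFER case, so a deferred transaction is retried instead of being reported as -EPROTO. A standalone sketch of that retry policy, with a stubbed transfer function standing in for radeon_process_aux_ch:

    #include <errno.h>
    #include <stdio.h>

    enum aux_reply { AUX_ACK, AUX_DEFER, AUX_NAK };

    /* stand-in for the hardware transaction: defers twice, then acks */
    static int do_aux_transfer(enum aux_reply *reply)
    {
            static int calls;
            *reply = (++calls < 3) ? AUX_DEFER : AUX_ACK;
            return 4;                       /* bytes transferred */
    }

    static int aux_native_write(void)
    {
            enum aux_reply reply;
            unsigned int retry;
            int ret;

            for (retry = 0; retry < 4; retry++) {
                    ret = do_aux_transfer(&reply);
                    if (ret < 0)
                            return ret;
                    if (reply == AUX_ACK)
                            return ret;     /* success */
                    if (reply == AUX_DEFER)
                            continue;       /* sink busy; real driver delays first */
                    return -EIO;            /* NAK: give up immediately */
            }
            return -EIO;                    /* retries exhausted */
    }

    int main(void)
    {
            printf("aux write returned %d\n", aux_native_write());
            return 0;
    }
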
index dc0a5b5..c4ffa14 100644 (file)
@@ -1404,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       WREG32(CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
@@ -1426,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
        evergreen_cp_start(rdev);
        rdev->cp.ready = true;
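
This hunk, like the matching cayman, r100 and r600 resume paths further down, zeroes the driver's cached write pointer and programs CP_RB_WPTR from that cached value, then drops the later read-back of the register into rdev->cp.wptr, so the shadow copy can no longer pick up a stale value on resume. A tiny standalone sketch of that shadow-register pattern, with stand-in names rather than the radeon structures:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cp_rb_wptr_reg;         /* stand-in for the MMIO register */

    struct ring { uint32_t wptr; };         /* shadow copy kept by the driver */

    static void wreg32(uint32_t *reg, uint32_t val)
    {
            *reg = val;
    }

    static void cp_resume(struct ring *ring)
    {
            /* set the shadow first, then program the register from it,
             * instead of writing 0 and reading the register back later */
            ring->wptr = 0;
            wreg32(&cp_rb_wptr_reg, ring->wptr);
    }

    int main(void)
    {
            struct ring r;

            cp_resume(&r);
            printf("shadow wptr=%u, register=%u\n", r.wptr, cp_rb_wptr_reg);
            return 0;
    }
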
@@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
        return backend_map;
 }
 
-static void evergreen_program_channel_remap(struct radeon_device *rdev)
-{
-       u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-
-       tmp = RREG32(MC_SHARED_CHMAP);
-       switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-       case 0:
-       case 1:
-       case 2:
-       case 3:
-       default:
-               /* default mapping */
-               mc_shared_chremap = 0x00fac688;
-               break;
-       }
-
-       switch (rdev->family) {
-       case CHIP_HEMLOCK:
-       case CHIP_CYPRESS:
-       case CHIP_BARTS:
-               tcp_chan_steer_lo = 0x54763210;
-               tcp_chan_steer_hi = 0x0000ba98;
-               break;
-       case CHIP_JUNIPER:
-       case CHIP_REDWOOD:
-       case CHIP_CEDAR:
-       case CHIP_PALM:
-       case CHIP_SUMO:
-       case CHIP_SUMO2:
-       case CHIP_TURKS:
-       case CHIP_CAICOS:
-       default:
-               tcp_chan_steer_lo = 0x76543210;
-               tcp_chan_steer_hi = 0x0000ba98;
-               break;
-       }
-
-       WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
-       WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
-       WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
        u32 cc_rb_backend_disable = 0;
@@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-       evergreen_program_channel_remap(rdev);
-
        num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
        grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
 
@@ -3171,21 +3127,23 @@ int evergreen_suspend(struct radeon_device *rdev)
 }
 
 int evergreen_copy_blit(struct radeon_device *rdev,
-                       uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_pages, struct radeon_fence *fence)
+                       uint64_t src_offset,
+                       uint64_t dst_offset,
+                       unsigned num_gpu_pages,
+                       struct radeon_fence *fence)
 {
        int r;
 
        mutex_lock(&rdev->r600_blit.mutex);
        rdev->r600_blit.vb_ib = NULL;
-       r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        if (r) {
                if (rdev->r600_blit.vb_ib)
                        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
                mutex_unlock(&rdev->r600_blit.mutex);
                return r;
        }
-       evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+       evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        evergreen_blit_done_copy(rdev, fence);
        mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
index cbf57d7..8c79ca9 100644 (file)
@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
        return backend_map;
 }
 
-static void cayman_program_channel_remap(struct radeon_device *rdev)
-{
-       u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-
-       tmp = RREG32(MC_SHARED_CHMAP);
-       switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-       case 0:
-       case 1:
-       case 2:
-       case 3:
-       default:
-               /* default mapping */
-               mc_shared_chremap = 0x00fac688;
-               break;
-       }
-
-       switch (rdev->family) {
-       case CHIP_CAYMAN:
-       default:
-               //tcp_chan_steer_lo = 0x54763210
-               tcp_chan_steer_lo = 0x76543210;
-               tcp_chan_steer_hi = 0x0000ba98;
-               break;
-       }
-
-       WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
-       WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
-       WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
 static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
                                            u32 disable_mask_per_se,
                                            u32 max_disable_mask_per_se,
@@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-       cayman_program_channel_remap(rdev);
-
        /* primary versions */
        WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
        WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
@@ -1187,7 +1155,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB0_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB0_WPTR, rdev->cp.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1207,7 +1176,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
 
        rdev->cp.rptr = RREG32(CP_RB0_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB0_WPTR);
 
        /* ring1  - compute only */
        /* Set ring buffer size */
@@ -1220,7 +1188,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB1_WPTR, 0);
+       rdev->cp1.wptr = 0;
+       WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1232,7 +1201,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
 
        rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
-       rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
 
        /* ring2 - compute only */
        /* Set ring buffer size */
@@ -1245,7 +1213,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB2_WPTR, 0);
+       rdev->cp2.wptr = 0;
+       WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1257,7 +1226,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
 
        rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
-       rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
 
        /* start the rings */
        cayman_cp_start(rdev);
index f2204cb..7fcdbbb 100644 (file)
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
-                  unsigned num_pages,
+                  unsigned num_gpu_pages,
                   struct radeon_fence *fence)
 {
        uint32_t cur_pages;
-       uint32_t stride_bytes = PAGE_SIZE;
+       uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
        uint32_t pitch;
        uint32_t stride_pixels;
        unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
        /* radeon pitch is /64 */
        pitch = stride_bytes / 64;
        stride_pixels = stride_bytes / 4;
-       num_loops = DIV_ROUND_UP(num_pages, 8191);
+       num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
 
        /* Ask for enough room for blit + flush + fence */
        ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
                DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
                return -EINVAL;
        }
-       while (num_pages > 0) {
-               cur_pages = num_pages;
+       while (num_gpu_pages > 0) {
+               cur_pages = num_gpu_pages;
                if (cur_pages > 8191) {
                        cur_pages = 8191;
                }
-               num_pages -= cur_pages;
+               num_gpu_pages -= cur_pages;
 
                /* pages are in Y direction - height
                   page width in X direction - width */
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
                radeon_ring_write(rdev, 0);
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-               radeon_ring_write(rdev, num_pages);
-               radeon_ring_write(rdev, num_pages);
+               radeon_ring_write(rdev, num_gpu_pages);
+               radeon_ring_write(rdev, num_gpu_pages);
                radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
        }
        radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        /* Force read & write ptr to 0 */
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
-       WREG32(RADEON_CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        WREG32(RADEON_CP_RB_CNTL, tmp);
        udelay(10);
        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
-       /* protect against crazy HW on resume */
-       rdev->cp.wptr &= rdev->cp.ptr_mask;
        /* Set cp mode to bus mastering & enable cp*/
        WREG32(RADEON_CP_CSQ_MODE,
               REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
index f240583..a1f3ba0 100644 (file)
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
 int r200_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
-                 unsigned num_pages,
+                 unsigned num_gpu_pages,
                  struct radeon_fence *fence)
 {
        uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
        int r = 0;
 
        /* radeon pitch is /64 */
-       size = num_pages << PAGE_SHIFT;
+       size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
        r = radeon_ring_lock(rdev, num_loops * 4 + 64);
        if (r) {
index aa5571b..720dd99 100644 (file)
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       WREG32(CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
        r600_cp_start(rdev);
        rdev->cp.ready = true;
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
-                  uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_pages, struct radeon_fence *fence)
+                  uint64_t src_offset,
+                  uint64_t dst_offset,
+                  unsigned num_gpu_pages,
+                  struct radeon_fence *fence)
 {
        int r;
 
        mutex_lock(&rdev->r600_blit.mutex);
        rdev->r600_blit.vb_ib = NULL;
-       r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        if (r) {
                if (rdev->r600_blit.vb_ib)
                        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
                mutex_unlock(&rdev->r600_blit.mutex);
                return r;
        }
-       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        r600_blit_done_copy(rdev, fence);
        mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
index 32807ba..c1e056b 100644 (file)
@@ -322,6 +322,7 @@ union radeon_gart_table {
 
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
 
 struct radeon_gart {
        dma_addr_t                      table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
        int (*copy_blit)(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
-                        unsigned num_pages,
+                        unsigned num_gpu_pages,
                         struct radeon_fence *fence);
        int (*copy_dma)(struct radeon_device *rdev,
                        uint64_t src_offset,
                        uint64_t dst_offset,
-                       unsigned num_pages,
+                       unsigned num_gpu_pages,
                        struct radeon_fence *fence);
        int (*copy)(struct radeon_device *rdev,
                    uint64_t src_offset,
                    uint64_t dst_offset,
-                   unsigned num_pages,
+                   unsigned num_gpu_pages,
                    struct radeon_fence *fence);
        uint32_t (*get_engine_clock)(struct radeon_device *rdev);
        void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
index 3d7a0d7..3dedaa0 100644 (file)
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
 int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
-                  unsigned num_pages,
+                  unsigned num_gpu_pages,
                   struct radeon_fence *fence);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 extern int r200_copy_dma(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
-                        unsigned num_pages,
+                        unsigned num_gpu_pages,
                         struct radeon_fence *fence);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_pages, struct radeon_fence *fence);
+                  unsigned num_gpu_pages, struct radeon_fence *fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
                        uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_pages, struct radeon_fence *fence);
+                       unsigned num_gpu_pages, struct radeon_fence *fence);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
index 4f0c1ec..bce63fd 100644 (file)
@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
        if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
                int saved_dpms = connector->dpms;
 
-               if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
-                   radeon_dp_needs_link_train(radeon_connector))
-                       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-               else
+               /* Only turn off the display if it's physically disconnected */
+               if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
                        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+               else if (radeon_dp_needs_link_train(radeon_connector))
+                       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
                connector->dpms = saved_dpms;
        }
 }
@@ -1297,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
                if (!radeon_dig_connector->edp_on)
                        atombios_set_edp_panel_power(connector,
                                                     ATOM_TRANSMITTER_ACTION_POWER_OFF);
-       } else {
-               /* need to setup ddc on the bridge */
-               if (radeon_connector_encoder_is_dp_bridge(connector)) {
+       } else if (radeon_connector_encoder_is_dp_bridge(connector)) {
+               /* DP bridges are always DP */
+               radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+               /* get the DPCD from the bridge */
+               radeon_dp_getdpcd(radeon_connector);
+
+               if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+                       ret = connector_status_connected;
+               else {
+                       /* need to setup ddc on the bridge */
                        if (encoder)
                                radeon_atom_ext_encoder_setup_ddc(encoder);
+                       if (radeon_ddc_probe(radeon_connector,
+                                            radeon_connector->requires_extended_probe))
+                               ret = connector_status_connected;
                }
+
+               if ((ret == connector_status_disconnected) &&
+                   radeon_connector->dac_load_detect) {
+                       struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+                       struct drm_encoder_helper_funcs *encoder_funcs;
+                       if (encoder) {
+                               encoder_funcs = encoder->helper_private;
+                               ret = encoder_funcs->detect(encoder, connector);
+                       }
+               }
+       } else {
                radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
                if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
                        ret = connector_status_connected;
@@ -1318,16 +1339,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
                                        ret = connector_status_connected;
                        }
                }
-
-               if ((ret == connector_status_disconnected) &&
-                   radeon_connector->dac_load_detect) {
-                       struct drm_encoder *encoder = radeon_best_single_encoder(connector);
-                       struct drm_encoder_helper_funcs *encoder_funcs;
-                       if (encoder) {
-                               encoder_funcs = encoder->helper_private;
-                               ret = encoder_funcs->detect(encoder, connector);
-                       }
-               }
        }
 
        radeon_connector_update_scratch_regs(connector, ret);
index 3189a7e..fde25c0 100644 (file)
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
        int xorigin = 0, yorigin = 0;
        int w = radeon_crtc->cursor_width;
 
-       if (x < 0)
-               xorigin = -x + 1;
-       if (y < 0)
-               yorigin = -y + 1;
-       if (xorigin >= CURSOR_WIDTH)
-               xorigin = CURSOR_WIDTH - 1;
-       if (yorigin >= CURSOR_HEIGHT)
-               yorigin = CURSOR_HEIGHT - 1;
-
        if (ASIC_IS_AVIVO(rdev)) {
-               int i = 0;
-               struct drm_crtc *crtc_p;
-
                /* avivo cursor are offset into the total surface */
                x += crtc->x;
                y += crtc->y;
-               DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+       }
+       DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+       if (x < 0) {
+               xorigin = min(-x, CURSOR_WIDTH - 1);
+               x = 0;
+       }
+       if (y < 0) {
+               yorigin = min(-y, CURSOR_HEIGHT - 1);
+               y = 0;
+       }
+
+       if (ASIC_IS_AVIVO(rdev)) {
+               int i = 0;
+               struct drm_crtc *crtc_p;
 
                /* avivo cursor image can't end on 128 pixel boundary or
                 * go past the end of the frame if both crtcs are enabled
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 
        radeon_lock_cursor(crtc, true);
        if (ASIC_IS_DCE4(rdev)) {
-               WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
-                      ((xorigin ? 0 : x) << 16) |
-                      (yorigin ? 0 : y));
+               WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
                WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
                WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
                       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
        } else if (ASIC_IS_AVIVO(rdev)) {
-               WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
-                            ((xorigin ? 0 : x) << 16) |
-                            (yorigin ? 0 : y));
+               WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
                WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
                WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
                       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                        | yorigin));
                WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
                       (RADEON_CUR_LOCK
-                       | ((xorigin ? 0 : x) << 16)
-                       | (yorigin ? 0 : y)));
+                       | (x << 16)
+                       | y));
                /* offset is from DISP(2)_BASE_ADDRESS */
                WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
                                                                      (yorigin * 256)));
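
The cursor-move hunk above reworks negative cursor coordinates: instead of the old "-x + 1" arithmetic and the "xorigin ? 0 : x" register writes, a negative position is folded into the hot-spot origin, clamped to the cursor dimensions, and the on-screen position itself is clamped to zero, on all ASIC generations. A small standalone sketch of that clamping; the CURSOR_WIDTH and CURSOR_HEIGHT values are illustrative:

    #include <stdio.h>

    #define CURSOR_WIDTH  64
    #define CURSOR_HEIGHT 64
    #define MIN(a, b)     ((a) < (b) ? (a) : (b))

    /* fold a possibly negative cursor position into position + hot spot */
    static void clamp_cursor(int *x, int *y, int *xorigin, int *yorigin)
    {
            *xorigin = 0;
            *yorigin = 0;

            if (*x < 0) {
                    *xorigin = MIN(-*x, CURSOR_WIDTH - 1);
                    *x = 0;
            }
            if (*y < 0) {
                    *yorigin = MIN(-*y, CURSOR_HEIGHT - 1);
                    *y = 0;
            }
    }

    int main(void)
    {
            int x = -10, y = 5, xo, yo;

            clamp_cursor(&x, &y, &xo, &yo);
            printf("pos=(%d,%d) hotspot=(%d,%d)\n", x, y, xo, yo);
            return 0;                       /* prints pos=(0,5) hotspot=(10,0) */
    }
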
index 1a85894..6adb3e5 100644 (file)
@@ -473,8 +473,8 @@ pflip_cleanup:
        spin_lock_irqsave(&dev->event_lock, flags);
        radeon_crtc->unpin_work = NULL;
 unlock_free:
-       drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
        spin_unlock_irqrestore(&dev->event_lock, flags);
+       drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
        radeon_fence_unref(&work->fence);
        kfree(work);
 
@@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
                radeon_router_select_ddc_port(radeon_connector);
 
        if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
-           (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+           (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
+           radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
                struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
                if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
                     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
-                       radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
-       }
-       if (!radeon_connector->ddc_bus)
-               return -1;
-       if (!radeon_connector->edid) {
-               radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+                       radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+                                                             &dig->dp_i2c_bus->adapter);
+               else if (radeon_connector->ddc_bus && !radeon_connector->edid)
+                       radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+                                                             &radeon_connector->ddc_bus->adapter);
+       } else {
+               if (radeon_connector->ddc_bus && !radeon_connector->edid)
+                       radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+                                                             &radeon_connector->ddc_bus->adapter);
        }
 
        if (!radeon_connector->edid) {
index 319d85d..13690f3 100644 (file)
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
                switch (mode) {
                case DRM_MODE_DPMS_ON:
                        args.ucAction = ATOM_ENABLE;
-                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       /* workaround for DVOOutputControl on some RS690 systems */
+                       if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+                               u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+                               WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                               WREG32(RADEON_BIOS_3_SCRATCH, reg);
+                       } else
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
                        if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                                args.ucAction = ATOM_LCD_BLON;
                                atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
index 9b86fb0..0b5468b 100644 (file)
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
                DRM_ERROR("Trying to move memory with CP turned off.\n");
                return -EINVAL;
        }
-       r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
+       BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+       r = radeon_copy(rdev, old_start, new_start,
+                       new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+                       fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                      evict, no_wait_reserve, no_wait_gpu, new_mem);
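
The TTM move hunk above converts the buffer size from CPU pages to GPU pages before calling radeon_copy(), since the copy hooks now take num_gpu_pages; the BUILD_BUG_ON records the assumption that the CPU page size is a whole multiple of the 4 KiB GPU page (a ratio of 1 on x86, larger on 64 KiB-page architectures). A standalone sketch of the conversion; CPU_PAGE_SIZE here is an illustrative constant, not the kernel's PAGE_SIZE macro:

    #include <assert.h>
    #include <stdio.h>

    #define CPU_PAGE_SIZE        4096UL     /* assumed host page size */
    #define RADEON_GPU_PAGE_SIZE 4096UL

    /* convert a size expressed in CPU pages into GPU pages */
    static unsigned long cpu_pages_to_gpu_pages(unsigned long num_cpu_pages)
    {
            /* only valid when the CPU page is a multiple of the GPU page */
            assert(CPU_PAGE_SIZE % RADEON_GPU_PAGE_SIZE == 0);
            return num_cpu_pages * (CPU_PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
    }

    int main(void)
    {
            printf("%lu GPU pages\n", cpu_pages_to_gpu_pages(8));
            return 0;
    }
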
index 4720d00..b13c2ee 100644 (file)
@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
        return backend_map;
 }
 
-static void rv770_program_channel_remap(struct radeon_device *rdev)
-{
-       u32 tcp_chan_steer, mc_shared_chremap, tmp;
-       bool force_no_swizzle;
-
-       switch (rdev->family) {
-       case CHIP_RV770:
-       case CHIP_RV730:
-               force_no_swizzle = false;
-               break;
-       case CHIP_RV710:
-       case CHIP_RV740:
-       default:
-               force_no_swizzle = true;
-               break;
-       }
-
-       tmp = RREG32(MC_SHARED_CHMAP);
-       switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-       case 0:
-       case 1:
-       default:
-               /* default mapping */
-               mc_shared_chremap = 0x00fac688;
-               break;
-       case 2:
-       case 3:
-               if (force_no_swizzle)
-                       mc_shared_chremap = 0x00fac688;
-               else
-                       mc_shared_chremap = 0x00bbc298;
-               break;
-       }
-
-       if (rdev->family == CHIP_RV740)
-               tcp_chan_steer = 0x00ef2a60;
-       else
-               tcp_chan_steer = 0x00fac688;
-
-       /* RV770 CE has special chremap setup */
-       if (rdev->pdev->device == 0x944e) {
-               tcp_chan_steer = 0x00b08b08;
-               mc_shared_chremap = 0x00b08b08;
-       }
-
-       WREG32(TCP_CHAN_STEER, tcp_chan_steer);
-       WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
 static void rv770_gpu_init(struct radeon_device *rdev)
 {
        int i, j, num_qd_pipes;
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
        WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
-       rv770_program_channel_remap(rdev);
-
        WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
        WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
        WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
index a4d38d8..ef06194 100644 (file)
@@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (bo->ttm == NULL) {
-                       ret = ttm_bo_add_ttm(bo, false);
+                       bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+                       ret = ttm_bo_add_ttm(bo, zero);
                        if (ret)
                                goto out_err;
                }
index 1130a89..2b006bf 100644 (file)
@@ -69,7 +69,7 @@ config HID_ACRUX
        Say Y here if you want to enable support for ACRUX game controllers.
 
 config HID_ACRUX_FF
-       tristate "ACRUX force feedback support"
+       bool "ACRUX force feedback support"
        depends on HID_ACRUX
        select INPUT_FF_MEMLESS
        ---help---
@@ -245,6 +245,15 @@ config HID_LOGITECH
        ---help---
        Support for Logitech devices that are not fully compliant with HID standard.
 
+config HID_LOGITECH_DJ
+       tristate "Logitech Unifying receivers full support"
+       depends on HID_LOGITECH
+       default m
+       ---help---
+       Say Y if you want support for Logitech Unifying receivers and devices.
+       Unifying receivers are capable of pairing up to 6 Logitech compliant
+       devices to the same receiver.
+
 config LOGITECH_FF
        bool "Logitech force feedback support"
        depends on HID_LOGITECH
@@ -278,13 +287,21 @@ config LOGIG940_FF
          Say Y here if you want to enable force feedback support for Logitech
          Flight System G940 devices.
 
-config LOGIWII_FF
-       bool "Logitech Speed Force Wireless force feedback support"
+config LOGIWHEELS_FF
+       bool "Logitech wheels configuration and force feedback support"
        depends on HID_LOGITECH
        select INPUT_FF_MEMLESS
+       default LOGITECH_FF
        help
-         Say Y here if you want to enable force feedback support for Logitech
-         Speed Force Wireless (Wii) devices.
+         Say Y here if you want to enable force feedback and range setting
+         support for the following Logitech wheels:
+         - Logitech Driving Force
+         - Logitech Driving Force Pro
+         - Logitech Driving Force GT
+         - Logitech G25
+         - Logitech G27
+         - Logitech MOMO/MOMO 2
+         - Logitech Formula Force EX
 
 config HID_MAGICMOUSE
        tristate "Apple MagicMouse multi-touch support"
@@ -328,6 +345,7 @@ config HID_MULTITOUCH
          - Hanvon dual touch panels
          - Ilitek dual touch panels
          - IrTouch Infrared USB panels
+         - LG Display panels (Dell ST2220Tc)
          - Lumio CrystalTouch panels
          - MosArt dual-touch panels
          - PenMount dual touch panels
@@ -590,6 +608,7 @@ config HID_WIIMOTE
        tristate "Nintendo Wii Remote support"
        depends on BT_HIDP
        depends on LEDS_CLASS
+       select POWER_SUPPLY
        ---help---
        Support for the Nintendo Wii Remote bluetooth device.
 
index 0a0a38e..b7ddabb 100644 (file)
@@ -21,7 +21,7 @@ endif
 ifdef CONFIG_LOGIG940_FF
        hid-logitech-y  += hid-lg3ff.o
 endif
-ifdef CONFIG_LOGIWII_FF
+ifdef CONFIG_LOGIWHEELS_FF
        hid-logitech-y  += hid-lg4ff.o
 endif
 
@@ -43,6 +43,7 @@ obj-$(CONFIG_HID_KEYTOUCH)    += hid-keytouch.o
 obj-$(CONFIG_HID_KYE)          += hid-kye.o
 obj-$(CONFIG_HID_LCPOWER)       += hid-lcpower.o
 obj-$(CONFIG_HID_LOGITECH)     += hid-logitech.o
+obj-$(CONFIG_HID_LOGITECH_DJ)  += hid-logitech-dj.o
 obj-$(CONFIG_HID_MAGICMOUSE)    += hid-magicmouse.o
 obj-$(CONFIG_HID_MICROSOFT)    += hid-microsoft.o
 obj-$(CONFIG_HID_MONTEREY)     += hid-monterey.o
index 1215141..3bdb450 100644 (file)
@@ -6,7 +6,7 @@
  * Xbox 360 controller.
  *
  * 1a34:0802 "ACRUX USB GAMEPAD 8116"
- *  - tested with a EXEQ EQ-PCU-02090 game controller.
+ *  - tested with an EXEQ EQ-PCU-02090 game controller.
  *
  * Copyright (c) 2010 Sergei Kolzun <x0r@dv-life.ru>
  */
@@ -45,7 +45,10 @@ static int axff_play(struct input_dev *dev, void *data, struct ff_effect *effect
 {
        struct hid_device *hid = input_get_drvdata(dev);
        struct axff_device *axff = data;
+       struct hid_report *report = axff->report;
+       int field_count = 0;
        int left, right;
+       int i, j;
 
        left = effect->u.rumble.strong_magnitude;
        right = effect->u.rumble.weak_magnitude;
@@ -55,10 +58,14 @@ static int axff_play(struct input_dev *dev, void *data, struct ff_effect *effect
        left = left * 0xff / 0xffff;
        right = right * 0xff / 0xffff;
 
-       axff->report->field[0]->value[0] = left;
-       axff->report->field[1]->value[0] = right;
-       axff->report->field[2]->value[0] = left;
-       axff->report->field[3]->value[0] = right;
+       for (i = 0; i < report->maxfield; i++) {
+               for (j = 0; j < report->field[i]->report_count; j++) {
+                       report->field[i]->value[j] =
+                               field_count % 2 ? right : left;
+                       field_count++;
+               }
+       }
+
        dbg_hid("running with 0x%02x 0x%02x", left, right);
        usbhid_submit_report(hid, axff->report, USB_DIR_OUT);
 
@@ -72,6 +79,8 @@ static int axff_init(struct hid_device *hid)
        struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
        struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
        struct input_dev *dev = hidinput->input;
+       int field_count = 0;
+       int i, j;
        int error;
 
        if (list_empty(report_list)) {
@@ -80,9 +89,16 @@ static int axff_init(struct hid_device *hid)
        }
 
        report = list_first_entry(report_list, struct hid_report, list);
+       for (i = 0; i < report->maxfield; i++) {
+               for (j = 0; j < report->field[i]->report_count; j++) {
+                       report->field[i]->value[j] = 0x00;
+                       field_count++;
+               }
+       }
 
-       if (report->maxfield < 4) {
-               hid_err(hid, "no fields in the report: %d\n", report->maxfield);
+       if (field_count < 4) {
+               hid_err(hid, "not enough fields in the report: %d\n",
+                       field_count);
                return -ENODEV;
        }
 
@@ -97,13 +113,9 @@ static int axff_init(struct hid_device *hid)
                goto err_free_mem;
 
        axff->report = report;
-       axff->report->field[0]->value[0] = 0x00;
-       axff->report->field[1]->value[0] = 0x00;
-       axff->report->field[2]->value[0] = 0x00;
-       axff->report->field[3]->value[0] = 0x00;
        usbhid_submit_report(hid, axff->report, USB_DIR_OUT);
 
-       hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun<x0r@dv-life.ru>\n");
+       hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun <x0r@dv-life.ru>\n");
 
        return 0;
 
index 242353d..c560672 100644 (file)
@@ -1212,6 +1212,12 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
        if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
                                connect_mask & HID_CONNECT_HIDINPUT_FORCE))
                hdev->claimed |= HID_CLAIMED_INPUT;
+       if (hdev->quirks & HID_QUIRK_MULTITOUCH) {
+               /* this device should be handled by hid-multitouch, skip it */
+               hdev->quirks &= ~HID_QUIRK_MULTITOUCH;
+               return -ENODEV;
+       }
+
        if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
                        !hdev->hiddev_connect(hdev,
                                connect_mask & HID_CONNECT_HIDDEV_FORCE))
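
The hunk above makes the generic HID driver step aside for devices that hidinput has flagged as multitouch (the HID_QUIRK_MULTITOUCH quirk is set further down, in hidinput_configure_usage, when a ContactID usage is seen): hid_connect clears the flag and returns -ENODEV so hid-multitouch can claim the device instead. A minimal standalone sketch of that decline-and-hand-off decision; QUIRK_MULTITOUCH and generic_connect are stand-in names, not the kernel's:

    #include <stdio.h>

    #define QUIRK_MULTITOUCH 0x1

    struct hid_dev { unsigned int quirks; };

    /* generic connect path: decline devices flagged for the multitouch driver */
    static int generic_connect(struct hid_dev *hdev)
    {
            if (hdev->quirks & QUIRK_MULTITOUCH) {
                    hdev->quirks &= ~QUIRK_MULTITOUCH;
                    return -1;              /* stands in for -ENODEV */
            }
            return 0;                       /* generic driver keeps the device */
    }

    int main(void)
    {
            struct hid_dev d = { .quirks = QUIRK_MULTITOUCH };

            printf("generic driver %s the device\n",
                   generic_connect(&d) ? "declines" : "claims");
            return 0;
    }
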
@@ -1391,6 +1397,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
@@ -1399,6 +1406,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MULTITOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1420,8 +1428,11 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
index 7d27d2b..d5dbb1a 100644 (file)
 #define USB_DEVICE_ID_PENPOWER         0x00f4
 
 #define USB_VENDOR_ID_GREENASIA                0x0e8f
+#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD        0x3013
 
 #define USB_VENDOR_ID_GRETAGMACBETH    0x0971
 #define USB_DEVICE_ID_GRETAGMACBETH_HUEY       0x2005
 #define USB_DEVICE_ID_UGCI_FLYING      0x0020
 #define USB_DEVICE_ID_UGCI_FIGHTING    0x0030
 
+#define USB_VENDOR_ID_IDEACOM          0x1cb6
+#define USB_DEVICE_ID_IDEACOM_IDC6650  0x6650
+
 #define USB_VENDOR_ID_ILITEK           0x222a
 #define USB_DEVICE_ID_ILITEK_MULTITOUCH        0x0001
 
 #define USB_DEVICE_ID_LD_HYBRID                0x2090
 #define USB_DEVICE_ID_LD_HEATCONTROL   0x20A0
 
+#define USB_VENDOR_ID_LG               0x1fd2
+#define USB_DEVICE_ID_LG_MULTITOUCH    0x0064
+
 #define USB_VENDOR_ID_LOGITECH         0x046d
 #define USB_DEVICE_ID_LOGITECH_RECEIVER        0xc101
 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST  0xc110
 #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL      0xc295
 #define USB_DEVICE_ID_LOGITECH_DFP_WHEEL       0xc298
 #define USB_DEVICE_ID_LOGITECH_G25_WHEEL       0xc299
+#define USB_DEVICE_ID_LOGITECH_DFGT_WHEEL      0xc29a
 #define USB_DEVICE_ID_LOGITECH_G27_WHEEL       0xc29b
 #define USB_DEVICE_ID_LOGITECH_WII_WHEEL       0xc29c
 #define USB_DEVICE_ID_LOGITECH_ELITE_KBD       0xc30a
 #define USB_DEVICE_ID_S510_RECEIVER_2  0xc517
 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500  0xc512
 #define USB_DEVICE_ID_MX3000_RECEIVER  0xc513
+#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER       0xc52b
+#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2     0xc532
 #define USB_DEVICE_ID_SPACETRAVELLER   0xc623
 #define USB_DEVICE_ID_SPACENAVIGATOR   0xc626
 #define USB_DEVICE_ID_DINOVO_DESKTOP   0xc704
index 6559e2e..f333139 100644 (file)
@@ -474,6 +474,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                        map_key_clear(BTN_STYLUS2);
                        break;
 
+               case 0x51: /* ContactID */
+                       device->quirks |= HID_QUIRK_MULTITOUCH;
+                       goto unknown;
+
                default:  goto unknown;
                }
                break;
@@ -978,6 +982,13 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
                }
        }
 
+       if (hid->quirks & HID_QUIRK_MULTITOUCH) {
+               /* generic hid does not know how to handle multitouch devices */
+               if (hidinput)
+                       goto out_cleanup;
+               goto out_unwind;
+       }
+
        if (hidinput && input_register_device(hidinput->input))
                goto out_cleanup;
 
index a7f916e..e7a7bd1 100644 (file)
@@ -363,7 +363,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto err_free;
        }
 
-       if (quirks & (LG_FF | LG_FF2 | LG_FF3))
+       if (quirks & (LG_FF | LG_FF2 | LG_FF3 | LG_FF4))
                connect_mask &= ~HID_CONNECT_FF;
 
        ret = hid_hw_start(hdev, connect_mask);
@@ -372,7 +372,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto err_free;
        }
 
-       if (quirks & LG_FF4) {
+       /* Setup wireless link with Logitech Wii wheel */
+       if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
                unsigned char buf[] = { 0x00, 0xAF,  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
 
                ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
@@ -405,6 +406,15 @@ err_free:
        return ret;
 }
 
+static void lg_remove(struct hid_device *hdev)
+{
+       unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+       if (quirks & LG_FF4)
+               lg4ff_deinit(hdev);
+
+       hid_hw_stop(hdev);
+}
+
 static const struct hid_device_id lg_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
                .driver_data = LG_RDESC | LG_WIRELESS },
@@ -431,7 +441,7 @@ static const struct hid_device_id lg_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D),
                .driver_data = LG_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL),
-               .driver_data = LG_NOGET | LG_FF },
+               .driver_data = LG_NOGET | LG_FF4 },
 
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD),
                .driver_data = LG_FF2 },
@@ -444,15 +454,17 @@ static const struct hid_device_id lg_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO),
                .driver_data = LG_FF },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL),
-               .driver_data = LG_FF },
+               .driver_data = LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
-               .driver_data = LG_FF },
+               .driver_data = LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
-               .driver_data = LG_FF },
+               .driver_data = LG_FF4 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
+               .driver_data = LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL),
-               .driver_data = LG_FF },
+               .driver_data = LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL),
-               .driver_data = LG_NOGET | LG_FF },
+               .driver_data = LG_NOGET | LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),
                .driver_data = LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ),
@@ -478,6 +490,7 @@ static struct hid_driver lg_driver = {
        .input_mapped = lg_input_mapped,
        .event = lg_event,
        .probe = lg_probe,
+       .remove = lg_remove,
 };
 
 static int __init lg_init(void)
index b0100ba..4b09728 100644 (file)
@@ -19,10 +19,12 @@ int lg3ff_init(struct hid_device *hdev);
 static inline int lg3ff_init(struct hid_device *hdev) { return -1; }
 #endif
 
-#ifdef CONFIG_LOGIWII_FF
+#ifdef CONFIG_LOGIWHEELS_FF
 int lg4ff_init(struct hid_device *hdev);
+int lg4ff_deinit(struct hid_device *hdev);
 #else
 static inline int lg4ff_init(struct hid_device *hdev) { return -1; }
+static inline int lg4ff_deinit(struct hid_device *hdev) { return -1; }
 #endif
 
 #endif
index fa550c8..103f30d 100644 (file)
 
 #include "usbhid/usbhid.h"
 #include "hid-lg.h"
+#include "hid-ids.h"
 
-struct lg4ff_device {
-       struct hid_report *report;
+#define DFGT_REV_MAJ 0x13
+#define DFGT_REV_MIN 0x22
+#define DFP_REV_MAJ 0x11
+#define DFP_REV_MIN 0x06
+#define FFEX_REV_MAJ 0x21
+#define FFEX_REV_MIN 0x00
+#define G25_REV_MAJ 0x12
+#define G25_REV_MIN 0x22
+#define G27_REV_MAJ 0x12
+#define G27_REV_MIN 0x38
+
+#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
+
+static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
+static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range);
+static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf);
+static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
+
+static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IRWXO, lg4ff_range_show, lg4ff_range_store);
+
+static bool list_inited;
+
+struct lg4ff_device_entry {
+       char  *device_id;       /* Use name in respective kobject structure's address as the ID */
+       __u16 range;
+       __u16 min_range;
+       __u16 max_range;
+       __u8  leds;
+       struct list_head list;
+       void (*set_range)(struct hid_device *hid, u16 range);
 };
 
-static const signed short ff4_wheel_ac[] = {
+static struct lg4ff_device_entry device_list;
+
+static const signed short lg4ff_wheel_effects[] = {
        FF_CONSTANT,
        FF_AUTOCENTER,
        -1
 };
 
-static int hid_lg4ff_play(struct input_dev *dev, void *data,
-                        struct ff_effect *effect)
+struct lg4ff_wheel {
+       const __u32 product_id;
+       const signed short *ff_effects;
+       const __u16 min_range;
+       const __u16 max_range;
+       void (*set_range)(struct hid_device *hid, u16 range);
+};
+
+static const struct lg4ff_wheel lg4ff_devices[] = {
+       {USB_DEVICE_ID_LOGITECH_WHEEL,       lg4ff_wheel_effects, 40, 270, NULL},
+       {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL,  lg4ff_wheel_effects, 40, 270, NULL},
+       {USB_DEVICE_ID_LOGITECH_DFP_WHEEL,   lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_dfp},
+       {USB_DEVICE_ID_LOGITECH_G25_WHEEL,   lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+       {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL,  lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+       {USB_DEVICE_ID_LOGITECH_G27_WHEEL,   lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+       {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL},
+       {USB_DEVICE_ID_LOGITECH_WII_WHEEL,   lg4ff_wheel_effects, 40, 270, NULL}
+};
+
+struct lg4ff_native_cmd {
+       const __u8 cmd_num;     /* Number of commands to send */
+       const __u8 cmd[];
+};
+
+struct lg4ff_usb_revision {
+       const __u16 rev_maj;
+       const __u16 rev_min;
+       const struct lg4ff_native_cmd *command;
+};
+
+static const struct lg4ff_native_cmd native_dfp = {
+       1,
+       {0xf8, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct lg4ff_native_cmd native_dfgt = {
+       2,
+       {0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,      /* 1st command */
+        0xf8, 0x09, 0x03, 0x01, 0x00, 0x00, 0x00}      /* 2nd command */
+};
+
+static const struct lg4ff_native_cmd native_g25 = {
+       1,
+       {0xf8, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct lg4ff_native_cmd native_g27 = {
+       2,
+       {0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,      /* 1st command */
+        0xf8, 0x09, 0x04, 0x01, 0x00, 0x00, 0x00}      /* 2nd command */
+};
+
+static const struct lg4ff_usb_revision lg4ff_revs[] = {
+       {DFGT_REV_MAJ, DFGT_REV_MIN, &native_dfgt},     /* Driving Force GT */
+       {DFP_REV_MAJ,  DFP_REV_MIN,  &native_dfp},      /* Driving Force Pro */
+       {G25_REV_MAJ,  G25_REV_MIN,  &native_g25},      /* G25 */
+       {G27_REV_MAJ,  G27_REV_MIN,  &native_g27},      /* G27 */
+};
+
+static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
 {
        struct hid_device *hid = input_get_drvdata(dev);
        struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -55,13 +144,12 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data,
                x = effect->u.ramp.start_level + 0x80;  /* 0x80 is no force */
                CLAMP(x);
                report->field[0]->value[0] = 0x11;      /* Slot 1 */
-               report->field[0]->value[1] = 0x10;
+               report->field[0]->value[1] = 0x08;
                report->field[0]->value[2] = x;
-               report->field[0]->value[3] = 0x00;
+               report->field[0]->value[3] = 0x80;
                report->field[0]->value[4] = 0x00;
-               report->field[0]->value[5] = 0x08;
+               report->field[0]->value[5] = 0x00;
                report->field[0]->value[6] = 0x00;
-               dbg_hid("Autocenter, x=0x%02X\n", x);
 
                usbhid_submit_report(hid, report, USB_DIR_OUT);
                break;
@@ -69,24 +157,184 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data,
        return 0;
 }
 
-static void hid_lg4ff_set_autocenter(struct input_dev *dev, u16 magnitude)
+/* Sends default autocentering command compatible with
+ * all wheels except Formula Force EX */
+static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
 {
        struct hid_device *hid = input_get_drvdata(dev);
        struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
        struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
-       __s32 *value = report->field[0]->value;
 
-       *value++ = 0xfe;
-       *value++ = 0x0d;
-       *value++ = 0x07;
-       *value++ = 0x07;
-       *value++ = (magnitude >> 8) & 0xff;
-       *value++ = 0x00;
-       *value = 0x00;
+       report->field[0]->value[0] = 0xfe;
+       report->field[0]->value[1] = 0x0d;
+       report->field[0]->value[2] = magnitude >> 13;
+       report->field[0]->value[3] = magnitude >> 13;
+       report->field[0]->value[4] = magnitude >> 8;
+       report->field[0]->value[5] = 0x00;
+       report->field[0]->value[6] = 0x00;
+
+       usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends autocentering command compatible with Formula Force EX */
+static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude)
+{
+       struct hid_device *hid = input_get_drvdata(dev);
+       struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+       struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+       magnitude = magnitude * 90 / 65535;
+
+
+       report->field[0]->value[0] = 0xfe;
+       report->field[0]->value[1] = 0x03;
+       report->field[0]->value[2] = magnitude >> 14;
+       report->field[0]->value[3] = magnitude >> 14;
+       report->field[0]->value[4] = magnitude;
+       report->field[0]->value[5] = 0x00;
+       report->field[0]->value[6] = 0x00;
+
+       usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends command to set range compatible with G25/G27/Driving Force GT */
+static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range)
+{
+       struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+       struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+       dbg_hid("G25/G27/DFGT: setting range to %u\n", range);
+
+       report->field[0]->value[0] = 0xf8;
+       report->field[0]->value[1] = 0x81;
+       report->field[0]->value[2] = range & 0x00ff;
+       report->field[0]->value[3] = (range & 0xff00) >> 8;
+       report->field[0]->value[4] = 0x00;
+       report->field[0]->value[5] = 0x00;
+       report->field[0]->value[6] = 0x00;
+
+       usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends commands to set range compatible with Driving Force Pro wheel */
+static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range)
+{
+       struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+       struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+       int start_left, start_right, full_range;
+       dbg_hid("Driving Force Pro: setting range to %u\n", range);
+
+       /* Prepare "coarse" limit command */
+       report->field[0]->value[0] = 0xf8;
+       report->field[0]->value[1] = 0x00;      /* Set later */
+       report->field[0]->value[2] = 0x00;
+       report->field[0]->value[3] = 0x00;
+       report->field[0]->value[4] = 0x00;
+       report->field[0]->value[5] = 0x00;
+       report->field[0]->value[6] = 0x00;
+
+       if (range > 200) {
+               report->field[0]->value[1] = 0x03;
+               full_range = 900;
+       } else {
+               report->field[0]->value[1] = 0x02;
+               full_range = 200;
+       }
+       usbhid_submit_report(hid, report, USB_DIR_OUT);
+
+       /* Prepare "fine" limit command */
+       report->field[0]->value[0] = 0x81;
+       report->field[0]->value[1] = 0x0b;
+       report->field[0]->value[2] = 0x00;
+       report->field[0]->value[3] = 0x00;
+       report->field[0]->value[4] = 0x00;
+       report->field[0]->value[5] = 0x00;
+       report->field[0]->value[6] = 0x00;
+
+       if (range == 200 || range == 900) {     /* Do not apply any fine limit */
+               usbhid_submit_report(hid, report, USB_DIR_OUT);
+               return;
+       }
+
+       /* Construct fine limit command */
+       start_left = (((full_range - range + 1) * 2047) / full_range);
+       start_right = 0xfff - start_left;
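+       /* For example, for range = 540 with full_range = 900:
+        * start_left = (361 * 2047) / 900 = 821 (0x335), start_right = 0xfff - 0x335 = 0xcca */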
+
+       report->field[0]->value[2] = start_left >> 4;
+       report->field[0]->value[3] = start_right >> 4;
+       report->field[0]->value[4] = 0xff;
+       report->field[0]->value[5] = (start_right & 0xe) << 4 | (start_left & 0xe);
+       report->field[0]->value[6] = 0xff;
 
        usbhid_submit_report(hid, report, USB_DIR_OUT);
 }
 
+static void hid_lg4ff_switch_native(struct hid_device *hid, const struct lg4ff_native_cmd *cmd)
+{
+       struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+       struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+       __u8 i, j;
+
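+       /* cmd->cmd holds cmd_num consecutive 7-byte command blocks;
+        * each block is copied into the report and submitted separately */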
+       j = 0;
+       while (j < 7*cmd->cmd_num) {
+               for (i = 0; i < 7; i++)
+                       report->field[0]->value[i] = cmd->cmd[j++];
+
+               usbhid_submit_report(hid, report, USB_DIR_OUT);
+       }
+}
+
+/* Read the current range and report it through the sysfs "range" attribute */
+static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct lg4ff_device_entry *uninitialized_var(entry);
+       struct list_head *h;
+       struct hid_device *hid = to_hid_device(dev);
+       size_t count;
+
+       list_for_each(h, &device_list.list) {
+               entry = list_entry(h, struct lg4ff_device_entry, list);
+               if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0)
+                       break;
+       }
+       if (h == &device_list.list) {
+               dbg_hid("Device not found!\n");
+               return 0;
+       }
+
+       count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->range);
+       return count;
+}
+
+/* Set the range to a user-specified value, calling the range-setting function
+ * appropriate for the type of wheel */
+static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct lg4ff_device_entry *uninitialized_var(entry);
+       struct list_head *h;
+       struct hid_device *hid = to_hid_device(dev);
+       __u16 range = simple_strtoul(buf, NULL, 10);
+
+       list_for_each(h, &device_list.list) {
+               entry = list_entry(h, struct lg4ff_device_entry, list);
+               if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0)
+                       break;
+       }
+       if (h == &device_list.list) {
+               dbg_hid("Device not found!\n");
+               return count;
+       }
+
+       if (range == 0)
+               range = entry->max_range;
+
+       /* Check if the wheel supports range setting
+        * and that the range is within limits for the wheel */
+       if (entry->set_range != NULL && range >= entry->min_range && range <= entry->max_range) {
+               entry->set_range(hid, range);
+               entry->range = range;
+       }
+
+       return count;
+}
 
 int lg4ff_init(struct hid_device *hid)
 {
@@ -95,9 +343,10 @@ int lg4ff_init(struct hid_device *hid)
        struct input_dev *dev = hidinput->input;
        struct hid_report *report;
        struct hid_field *field;
-       const signed short *ff_bits = ff4_wheel_ac;
-       int error;
-       int i;
+       struct lg4ff_device_entry *entry;
+       struct usb_device_descriptor *udesc;
+       int error, i, j;
+       __u16 bcdDevice, rev_maj, rev_min;
 
        /* Find the report to use */
        if (list_empty(report_list)) {
@@ -118,18 +367,122 @@ int lg4ff_init(struct hid_device *hid)
                return -1;
        }
 
-       for (i = 0; ff_bits[i] >= 0; i++)
-               set_bit(ff_bits[i], dev->ffbit);
+       /* Check what wheel has been connected */
+       for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
+               if (hid->product == lg4ff_devices[i].product_id) {
+                       dbg_hid("Found compatible device, product ID %04X\n", lg4ff_devices[i].product_id);
+                       break;
+               }
+       }
+
+       if (i == ARRAY_SIZE(lg4ff_devices)) {
+               hid_err(hid, "Device is not supported by lg4ff driver. If you think it should be, consider reporting a bug to "
+                            "LKML, Simon Wood <simon@mungewell.org> or Michal Maly <madcatxster@gmail.com>\n");
+               return -1;
+       }
+
+       /* Attempt to switch wheel to native mode when applicable */
+       udesc = &(hid_to_usb_dev(hid)->descriptor);
+       if (!udesc) {
+               hid_err(hid, "NULL USB device descriptor\n");
+               return -1;
+       }
+       bcdDevice = le16_to_cpu(udesc->bcdDevice);
+       rev_maj = bcdDevice >> 8;
+       rev_min = bcdDevice & 0xff;
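+       /* bcdDevice is binary-coded decimal, e.g. 0x1203 decodes to USB revision 12.03 */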
+
+       if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_WHEEL) {
+               dbg_hid("Generic wheel detected, can it do native?\n");
+               dbg_hid("USB revision: %2x.%02x\n", rev_maj, rev_min);
+
+               for (j = 0; j < ARRAY_SIZE(lg4ff_revs); j++) {
+                       if (lg4ff_revs[j].rev_maj == rev_maj && lg4ff_revs[j].rev_min == rev_min) {
+                               hid_lg4ff_switch_native(hid, lg4ff_revs[j].command);
+                               hid_info(hid, "Switched to native mode\n");
+                       }
+               }
+       }
+
+       /* Set supported force feedback capabilities */
+       for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++)
+               set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit);
 
        error = input_ff_create_memless(dev, NULL, hid_lg4ff_play);
 
        if (error)
                return error;
 
-       if (test_bit(FF_AUTOCENTER, dev->ffbit))
-               dev->ff->set_autocenter = hid_lg4ff_set_autocenter;
+       /* Check if autocentering is available and
+        * set the centering force to zero by default */
+       if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
+               if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects a different autocentering command */
+                       dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
+               else
+                       dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
+
+               dev->ff->set_autocenter(dev, 0);
+       }
+
+       /* Initialize device_list if this is the first device handled by lg4ff */
+       if (!list_inited) {
+               INIT_LIST_HEAD(&device_list.list);
+               list_inited = 1;
+       }
+
+       /* Add the device to device_list */
+       entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
+       if (!entry) {
+               hid_err(hid, "Cannot add device, insufficient memory.\n");
+               return -ENOMEM;
+       }
+       entry->device_id = kstrdup((&hid->dev)->kobj.name, GFP_KERNEL);
+       if (!entry->device_id) {
+               hid_err(hid, "Cannot set device_id, insufficient memory.\n");
+               kfree(entry);
+               return -ENOMEM;
+       }
+       entry->min_range = lg4ff_devices[i].min_range;
+       entry->max_range = lg4ff_devices[i].max_range;
+       entry->set_range = lg4ff_devices[i].set_range;
+       list_add(&entry->list, &device_list.list);
+
+       /* Create sysfs interface */
+       error = device_create_file(&hid->dev, &dev_attr_range);
+       if (error)
+               return error;
+       dbg_hid("sysfs interface created\n");
+
+       /* Set the maximum range to start with */
+       entry->range = entry->max_range;
+       if (entry->set_range != NULL)
+               entry->set_range(hid, entry->range);
 
        hid_info(hid, "Force feedback for Logitech Speed Force Wireless by Simon Wood <simon@mungewell.org>\n");
        return 0;
 }
 
+int lg4ff_deinit(struct hid_device *hid)
+{
+       bool found = false;
+       struct lg4ff_device_entry *entry;
+       struct list_head *h, *g;
+       list_for_each_safe(h, g, &device_list.list) {
+               entry = list_entry(h, struct lg4ff_device_entry, list);
+               if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0) {
+                       list_del(h);
+                       kfree(entry->device_id);
+                       kfree(entry);
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               dbg_hid("Device entry not found!\n");
+               return -1;
+       }
+
+       device_remove_file(&hid->dev, &dev_attr_range);
+       dbg_hid("Device successfully unregistered\n");
+       return 0;
+}
index 088f850..27bc54f 100644 (file)
@@ -58,12 +58,6 @@ static const signed short ff_joystick_ac[] = {
        -1
 };
 
-static const signed short ff_wheel[] = {
-       FF_CONSTANT,
-       FF_AUTOCENTER,
-       -1
-};
-
 static const struct dev_type devices[] = {
        { 0x046d, 0xc211, ff_rumble },
        { 0x046d, 0xc219, ff_rumble },
@@ -71,14 +65,7 @@ static const struct dev_type devices[] = {
        { 0x046d, 0xc286, ff_joystick_ac },
        { 0x046d, 0xc287, ff_joystick_ac },
        { 0x046d, 0xc293, ff_joystick },
-       { 0x046d, 0xc294, ff_wheel },
-       { 0x046d, 0xc298, ff_wheel },
-       { 0x046d, 0xc299, ff_wheel },
-       { 0x046d, 0xc29b, ff_wheel },
        { 0x046d, 0xc295, ff_joystick },
-       { 0x046d, 0xc298, ff_wheel },
-       { 0x046d, 0xc299, ff_wheel },
-       { 0x046d, 0xca03, ff_wheel },
 };
 
 static int hid_lgff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
new file mode 100644 (file)
index 0000000..38b12e4
--- /dev/null
@@ -0,0 +1,922 @@
+/*
+ *  HID driver for Logitech Unifying receivers
+ *
+ *  Copyright (c) 2011 Logitech
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+#include "hid-logitech-dj.h"
+
+/* Keyboard descriptor (1) */
+static const char kbd_descriptor[] = {
+       0x05, 0x01,             /* USAGE_PAGE (generic Desktop)     */
+       0x09, 0x06,             /* USAGE (Keyboard)         */
+       0xA1, 0x01,             /* COLLECTION (Application)     */
+       0x85, 0x01,             /* REPORT_ID (1)            */
+       0x95, 0x08,             /*   REPORT_COUNT (8)           */
+       0x75, 0x01,             /*   REPORT_SIZE (1)            */
+       0x15, 0x00,             /*   LOGICAL_MINIMUM (0)        */
+       0x25, 0x01,             /*   LOGICAL_MAXIMUM (1)        */
+       0x05, 0x07,             /*   USAGE_PAGE (Keyboard)      */
+       0x19, 0xE0,             /*   USAGE_MINIMUM (Left Control)   */
+       0x29, 0xE7,             /*   USAGE_MAXIMUM (Right GUI)      */
+       0x81, 0x02,             /*   INPUT (Data,Var,Abs)       */
+       0x95, 0x05,             /*   REPORT COUNT (5)           */
+       0x05, 0x08,             /*   USAGE PAGE (LED page)      */
+       0x19, 0x01,             /*   USAGE MINIMUM (1)          */
+       0x29, 0x05,             /*   USAGE MAXIMUM (5)          */
+       0x91, 0x02,             /*   OUTPUT (Data, Variable, Absolute)  */
+       0x95, 0x01,             /*   REPORT COUNT (1)           */
+       0x75, 0x03,             /*   REPORT SIZE (3)            */
+       0x91, 0x01,             /*   OUTPUT (Constant)          */
+       0x95, 0x06,             /*   REPORT_COUNT (6)           */
+       0x75, 0x08,             /*   REPORT_SIZE (8)            */
+       0x15, 0x00,             /*   LOGICAL_MINIMUM (0)        */
+       0x26, 0xFF, 0x00,       /*   LOGICAL_MAXIMUM (255)      */
+       0x05, 0x07,             /*   USAGE_PAGE (Keyboard)      */
+       0x19, 0x00,             /*   USAGE_MINIMUM (no event)       */
+       0x2A, 0xFF, 0x00,       /*   USAGE_MAXIMUM (reserved)       */
+       0x81, 0x00,             /*   INPUT (Data,Ary,Abs)       */
+       0xC0
+};
+
+/* Mouse descriptor (2)     */
+static const char mse_descriptor[] = {
+       0x05, 0x01,             /*  USAGE_PAGE (Generic Desktop)        */
+       0x09, 0x02,             /*  USAGE (Mouse)                       */
+       0xA1, 0x01,             /*  COLLECTION (Application)            */
+       0x85, 0x02,             /*    REPORT_ID = 2                     */
+       0x09, 0x01,             /*    USAGE (pointer)                   */
+       0xA1, 0x00,             /*    COLLECTION (physical)             */
+       0x05, 0x09,             /*      USAGE_PAGE (buttons)            */
+       0x19, 0x01,             /*      USAGE_MIN (1)                   */
+       0x29, 0x10,             /*      USAGE_MAX (16)                  */
+       0x15, 0x00,             /*      LOGICAL_MIN (0)                 */
+       0x25, 0x01,             /*      LOGICAL_MAX (1)                 */
+       0x95, 0x10,             /*      REPORT_COUNT (16)               */
+       0x75, 0x01,             /*      REPORT_SIZE (1)                 */
+       0x81, 0x02,             /*      INPUT (data var abs)            */
+       0x05, 0x01,             /*      USAGE_PAGE (generic desktop)    */
+       0x16, 0x01, 0xF8,       /*      LOGICAL_MIN (-2047)             */
+       0x26, 0xFF, 0x07,       /*      LOGICAL_MAX (2047)              */
+       0x75, 0x0C,             /*      REPORT_SIZE (12)                */
+       0x95, 0x02,             /*      REPORT_COUNT (2)                */
+       0x09, 0x30,             /*      USAGE (X)                       */
+       0x09, 0x31,             /*      USAGE (Y)                       */
+       0x81, 0x06,             /*      INPUT                           */
+       0x15, 0x81,             /*      LOGICAL_MIN (-127)              */
+       0x25, 0x7F,             /*      LOGICAL_MAX (127)               */
+       0x75, 0x08,             /*      REPORT_SIZE (8)                 */
+       0x95, 0x01,             /*      REPORT_COUNT (1)                */
+       0x09, 0x38,             /*      USAGE (wheel)                   */
+       0x81, 0x06,             /*      INPUT                           */
+       0x05, 0x0C,             /*      USAGE_PAGE(consumer)            */
+       0x0A, 0x38, 0x02,       /*      USAGE(AC Pan)                   */
+       0x95, 0x01,             /*      REPORT_COUNT (1)                */
+       0x81, 0x06,             /*      INPUT                           */
+       0xC0,                   /*    END_COLLECTION                    */
+       0xC0,                   /*  END_COLLECTION                      */
+};
+
+/* Consumer Control descriptor (3) */
+static const char consumer_descriptor[] = {
+       0x05, 0x0C,             /* USAGE_PAGE (Consumer Devices)       */
+       0x09, 0x01,             /* USAGE (Consumer Control)            */
+       0xA1, 0x01,             /* COLLECTION (Application)            */
+       0x85, 0x03,             /* REPORT_ID = 3                       */
+       0x75, 0x10,             /* REPORT_SIZE (16)                    */
+       0x95, 0x02,             /* REPORT_COUNT (2)                    */
+       0x15, 0x01,             /* LOGICAL_MIN (1)                     */
+       0x26, 0x8C, 0x02,       /* LOGICAL_MAX (652)                   */
+       0x19, 0x01,             /* USAGE_MIN (1)                       */
+       0x2A, 0x8C, 0x02,       /* USAGE_MAX (652)                     */
+       0x81, 0x00,             /* INPUT (Data Ary Abs)                */
+       0xC0,                   /* END_COLLECTION                      */
+};                             /*                                     */
+
+/* System control descriptor (4) */
+static const char syscontrol_descriptor[] = {
+       0x05, 0x01,             /*   USAGE_PAGE (Generic Desktop)      */
+       0x09, 0x80,             /*   USAGE (System Control)            */
+       0xA1, 0x01,             /*   COLLECTION (Application)          */
+       0x85, 0x04,             /*   REPORT_ID = 4                     */
+       0x75, 0x02,             /*   REPORT_SIZE (2)                   */
+       0x95, 0x01,             /*   REPORT_COUNT (1)                  */
+       0x15, 0x01,             /*   LOGICAL_MIN (1)                   */
+       0x25, 0x03,             /*   LOGICAL_MAX (3)                   */
+       0x09, 0x82,             /*   USAGE (System Sleep)              */
+       0x09, 0x81,             /*   USAGE (System Power Down)         */
+       0x09, 0x83,             /*   USAGE (System Wake Up)            */
+       0x81, 0x60,             /*   INPUT (Data Ary Abs NPrf Null)    */
+       0x75, 0x06,             /*   REPORT_SIZE (6)                   */
+       0x81, 0x03,             /*   INPUT (Cnst Var Abs)              */
+       0xC0,                   /*   END_COLLECTION                    */
+};
+
+/* Media descriptor (8) */
+static const char media_descriptor[] = {
+       0x06, 0xbc, 0xff,       /* Usage Page 0xffbc                   */
+       0x09, 0x88,             /* Usage 0x0088                        */
+       0xa1, 0x01,             /* BeginCollection                     */
+       0x85, 0x08,             /*   Report ID 8                       */
+       0x19, 0x01,             /*   Usage Min 0x0001                  */
+       0x29, 0xff,             /*   Usage Max 0x00ff                  */
+       0x15, 0x01,             /*   Logical Min 1                     */
+       0x26, 0xff, 0x00,       /*   Logical Max 255                   */
+       0x75, 0x08,             /*   Report Size 8                     */
+       0x95, 0x01,             /*   Report Count 1                    */
+       0x81, 0x00,             /*   Input                             */
+       0xc0,                   /* EndCollection                       */
+};                             /*                                     */
+
+/* Maximum size of all defined hid reports in bytes (including report id) */
+#define MAX_REPORT_SIZE 8
+
+/* Number of possible hid report types that can be created by this driver.
+ *
+ * Right now, RF report types have the same report types (or report ids)
+ * as the hid reports created from those RF reports. In the future
+ * this doesn't have to be true.
+ *
+ * For instance, RF report type 0x01, which has a size of 8 bytes, corresponds
+ * to hid report id 0x01, the standard keyboard. The same applies to mouse
+ * reports, consumer control, etc. If a new RF report is created, it doesn't
+ * have to have the same report id as its corresponding hid report, so a
+ * translation may have to take place for future report types.
+ */
+#define NUMBER_OF_HID_REPORTS 32
+static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = {
+       [1] = 8,                /* Standard keyboard */
+       [2] = 8,                /* Standard mouse */
+       [3] = 5,                /* Consumer control */
+       [4] = 2,                /* System control */
+       [8] = 2,                /* Media Center */
+};
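+/* For example, an RF mouse report (type 0x02) is forwarded to the paired
+ * device as an 8-byte HID report with report id 2 */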
+
+
+#define LOGITECH_DJ_INTERFACE_NUMBER 0x02
+
+static struct hid_ll_driver logi_dj_ll_driver;
+
+static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
+                                       size_t count,
+                                       unsigned char report_type);
+
+static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
+                                               struct dj_report *dj_report)
+{
+       /* Called in delayed work context */
+       struct dj_device *dj_dev;
+       unsigned long flags;
+
+       spin_lock_irqsave(&djrcv_dev->lock, flags);
+       dj_dev = djrcv_dev->paired_dj_devices[dj_report->device_index];
+       djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+       spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+       if (dj_dev != NULL) {
+               hid_destroy_device(dj_dev->hdev);
+               kfree(dj_dev);
+       } else {
+               dev_err(&djrcv_dev->hdev->dev, "%s: can't destroy a NULL device\n",
+                       __func__);
+       }
+}
+
+static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+                                         struct dj_report *dj_report)
+{
+       /* Called in delayed work context */
+       struct hid_device *djrcv_hdev = djrcv_dev->hdev;
+       struct usb_interface *intf = to_usb_interface(djrcv_hdev->dev.parent);
+       struct usb_device *usbdev = interface_to_usbdev(intf);
+       struct hid_device *dj_hiddev;
+       struct dj_device *dj_dev;
+
+       /* Device index goes from 1 to 6, we need 3 bytes to store the
+        * colon, the index, and a null terminator
+        */
+       unsigned char tmpstr[3];
+
+       if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
+           SPFUNCTION_DEVICE_LIST_EMPTY) {
+               dbg_hid("%s: device list is empty\n", __func__);
+               return;
+       }
+
+       if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+           (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+               dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
+                       __func__, dj_report->device_index);
+               return;
+       }
+
+       dj_hiddev = hid_allocate_device();
+       if (IS_ERR(dj_hiddev)) {
+               dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
+                       __func__);
+               return;
+       }
+
+       dj_hiddev->ll_driver = &logi_dj_ll_driver;
+       dj_hiddev->hid_output_raw_report = logi_dj_output_hidraw_report;
+
+       dj_hiddev->dev.parent = &djrcv_hdev->dev;
+       dj_hiddev->bus = BUS_USB;
+       dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor);
+       dj_hiddev->product = le16_to_cpu(usbdev->descriptor.idProduct);
+       snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
+               "Logitech Unifying Device. Wireless PID:%02x%02x",
+               dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB],
+               dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB]);
+
+       usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys));
+       snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index);
+       strlcat(dj_hiddev->phys, tmpstr, sizeof(dj_hiddev->phys));
+
+       dj_dev = kzalloc(sizeof(struct dj_device), GFP_KERNEL);
+
+       if (!dj_dev) {
+               dev_err(&djrcv_hdev->dev, "%s: failed allocating dj_device\n",
+                       __func__);
+               goto dj_device_allocate_fail;
+       }
+
+       dj_dev->reports_supported = le32_to_cpu(
+               dj_report->report_params[DEVICE_PAIRED_RF_REPORT_TYPE]);
+       dj_dev->hdev = dj_hiddev;
+       dj_dev->dj_receiver_dev = djrcv_dev;
+       dj_dev->device_index = dj_report->device_index;
+       dj_hiddev->driver_data = dj_dev;
+
+       djrcv_dev->paired_dj_devices[dj_report->device_index] = dj_dev;
+
+       if (hid_add_device(dj_hiddev)) {
+               dev_err(&djrcv_hdev->dev, "%s: failed adding dj_device\n",
+                       __func__);
+               goto hid_add_device_fail;
+       }
+
+       return;
+
+hid_add_device_fail:
+       djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+       kfree(dj_dev);
+dj_device_allocate_fail:
+       hid_destroy_device(dj_hiddev);
+}
+
+static void delayedwork_callback(struct work_struct *work)
+{
+       struct dj_receiver_dev *djrcv_dev =
+               container_of(work, struct dj_receiver_dev, work);
+
+       struct dj_report dj_report;
+       unsigned long flags;
+       int count;
+
+       dbg_hid("%s\n", __func__);
+
+       spin_lock_irqsave(&djrcv_dev->lock, flags);
+
+       count = kfifo_out(&djrcv_dev->notif_fifo, &dj_report,
+                               sizeof(struct dj_report));
+
+       if (count != sizeof(struct dj_report)) {
+               dev_err(&djrcv_dev->hdev->dev, "%s: workitem triggered without "
+                       "notifications available\n", __func__);
+               spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+               return;
+       }
+
+       if (!kfifo_is_empty(&djrcv_dev->notif_fifo)) {
+               if (schedule_work(&djrcv_dev->work) == 0) {
+                       dbg_hid("%s: did not schedule the work item, was "
+                               "already queued\n", __func__);
+               }
+       }
+
+       spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+       switch (dj_report.report_type) {
+       case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+               logi_dj_recv_add_djhid_device(djrcv_dev, &dj_report);
+               break;
+       case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+               logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
+               break;
+       default:
+               dbg_hid("%s: unexpected report type\n", __func__);
+       }
+}
+
+static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
+                                          struct dj_report *dj_report)
+{
+       /* We are called from atomic context (tasklet && djrcv->lock held) */
+
+       kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+
+       if (schedule_work(&djrcv_dev->work) == 0) {
+               dbg_hid("%s: did not schedule the work item, was already "
+                       "queued\n", __func__);
+       }
+}
+
+static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
+                                            struct dj_report *dj_report)
+{
+       /* We are called from atomic context (tasklet && djrcv->lock held) */
+       unsigned int i;
+       u8 reportbuffer[MAX_REPORT_SIZE];
+       struct dj_device *djdev;
+
+       djdev = djrcv_dev->paired_dj_devices[dj_report->device_index];
+
+       if (!djdev) {
+               dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+                       " is NULL, index %d\n", dj_report->device_index);
+               return;
+       }
+
+       memset(reportbuffer, 0, sizeof(reportbuffer));
+
+       for (i = 0; i < NUMBER_OF_HID_REPORTS; i++) {
+               if (djdev->reports_supported & (1 << i)) {
+                       reportbuffer[0] = i;
+                       if (hid_input_report(djdev->hdev,
+                                            HID_INPUT_REPORT,
+                                            reportbuffer,
+                                            hid_reportid_size_map[i], 1)) {
+                               dbg_hid("hid_input_report error sending null "
+                                       "report\n");
+                       }
+               }
+       }
+}
+
+static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
+                                       struct dj_report *dj_report)
+{
+       /* We are called from atomic context (tasklet && djrcv->lock held) */
+       struct dj_device *dj_device;
+
+       dj_device = djrcv_dev->paired_dj_devices[dj_report->device_index];
+
+       if (dj_device == NULL) {
+               dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+                       " is NULL, index %d\n", dj_report->device_index);
+               return;
+       }
+
+       if ((dj_report->report_type > ARRAY_SIZE(hid_reportid_size_map) - 1) ||
+           (hid_reportid_size_map[dj_report->report_type] == 0)) {
+               dbg_hid("invalid report type:%x\n", dj_report->report_type);
+               return;
+       }
+
+       if (hid_input_report(dj_device->hdev,
+                       HID_INPUT_REPORT, &dj_report->report_type,
+                       hid_reportid_size_map[dj_report->report_type], 1)) {
+               dbg_hid("hid_input_report error\n");
+       }
+}
+
+
+static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
+                                   struct dj_report *dj_report)
+{
+       struct hid_device *hdev = djrcv_dev->hdev;
+       int sent_bytes;
+
+       if (!hdev->hid_output_raw_report) {
+               dev_err(&hdev->dev, "%s:"
+                       "hid_output_raw_report is null\n", __func__);
+               return -ENODEV;
+       }
+
+       sent_bytes = hdev->hid_output_raw_report(hdev, (u8 *) dj_report,
+                                                sizeof(struct dj_report),
+                                                HID_OUTPUT_REPORT);
+
+       return (sent_bytes < 0) ? sent_bytes : 0;
+}
+
+static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
+{
+       struct dj_report dj_report;
+
+       memset(&dj_report, 0, sizeof(dj_report));
+       dj_report.report_id = REPORT_ID_DJ_SHORT;
+       dj_report.device_index = 0xFF;
+       dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+       return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+}
+
+static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+                                         unsigned timeout)
+{
+       struct dj_report dj_report;
+
+       memset(&dj_report, 0, sizeof(dj_report));
+       dj_report.report_id = REPORT_ID_DJ_SHORT;
+       dj_report.device_index = 0xFF;
+       dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
+       dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x1F;
+       dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+       return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+}
+
+
+static int logi_dj_ll_open(struct hid_device *hid)
+{
+       dbg_hid("%s:%s\n", __func__, hid->phys);
+       return 0;
+
+}
+
+static void logi_dj_ll_close(struct hid_device *hid)
+{
+       dbg_hid("%s:%s\n", __func__, hid->phys);
+}
+
+static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
+                                       size_t count,
+                                       unsigned char report_type)
+{
+       /* Called by hid raw to send data */
+       dbg_hid("%s\n", __func__);
+
+       return 0;
+}
+
+static int logi_dj_ll_parse(struct hid_device *hid)
+{
+       struct dj_device *djdev = hid->driver_data;
+       int retval;
+
+       dbg_hid("%s\n", __func__);
+
+       djdev->hdev->version = 0x0111;
+       djdev->hdev->country = 0x00;
+
+       if (djdev->reports_supported & STD_KEYBOARD) {
+               dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n",
+                       __func__, djdev->reports_supported);
+               retval = hid_parse_report(hid,
+                                         (u8 *) kbd_descriptor,
+                                         sizeof(kbd_descriptor));
+               if (retval) {
+                       dbg_hid("%s: sending a kbd descriptor, hid_parse failed"
+                               " error: %d\n", __func__, retval);
+                       return retval;
+               }
+       }
+
+       if (djdev->reports_supported & STD_MOUSE) {
+               dbg_hid("%s: sending a mouse descriptor, reports_supported: "
+                       "%x\n", __func__, djdev->reports_supported);
+               retval = hid_parse_report(hid,
+                                         (u8 *) mse_descriptor,
+                                         sizeof(mse_descriptor));
+               if (retval) {
+                       dbg_hid("%s: sending a mouse descriptor, hid_parse "
+                               "failed error: %d\n", __func__, retval);
+                       return retval;
+               }
+       }
+
+       if (djdev->reports_supported & MULTIMEDIA) {
+               dbg_hid("%s: sending a multimedia report descriptor: %x\n",
+                       __func__, djdev->reports_supported);
+               retval = hid_parse_report(hid,
+                                         (u8 *) consumer_descriptor,
+                                         sizeof(consumer_descriptor));
+               if (retval) {
+                       dbg_hid("%s: sending a consumer_descriptor, hid_parse "
+                               "failed error: %d\n", __func__, retval);
+                       return retval;
+               }
+       }
+
+       if (djdev->reports_supported & POWER_KEYS) {
+               dbg_hid("%s: sending a power keys report descriptor: %x\n",
+                       __func__, djdev->reports_supported);
+               retval = hid_parse_report(hid,
+                                         (u8 *) syscontrol_descriptor,
+                                         sizeof(syscontrol_descriptor));
+               if (retval) {
+                       dbg_hid("%s: sending a syscontrol_descriptor, "
+                               "hid_parse failed error: %d\n",
+                               __func__, retval);
+                       return retval;
+               }
+       }
+
+       if (djdev->reports_supported & MEDIA_CENTER) {
+               dbg_hid("%s: sending a media center report descriptor: %x\n",
+                       __func__, djdev->reports_supported);
+               retval = hid_parse_report(hid,
+                                         (u8 *) media_descriptor,
+                                         sizeof(media_descriptor));
+               if (retval) {
+                       dbg_hid("%s: sending a media_descriptor, hid_parse "
+                               "failed error: %d\n", __func__, retval);
+                       return retval;
+               }
+       }
+
+       if (djdev->reports_supported & KBD_LEDS) {
+               dbg_hid("%s: need to send kbd leds report descriptor: %x\n",
+                       __func__, djdev->reports_supported);
+       }
+
+       return 0;
+}
+
+static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
+                                 unsigned int code, int value)
+{
+       /* Sent by the input layer to handle leds and Force Feedback */
+       struct hid_device *dj_hiddev = input_get_drvdata(dev);
+       struct dj_device *dj_dev = dj_hiddev->driver_data;
+
+       struct dj_receiver_dev *djrcv_dev =
+           dev_get_drvdata(dj_hiddev->dev.parent);
+       struct hid_device *dj_rcv_hiddev = djrcv_dev->hdev;
+       struct hid_report_enum *output_report_enum;
+
+       struct hid_field *field;
+       struct hid_report *report;
+       unsigned char data[8];
+       int offset;
+
+       dbg_hid("%s: %s, type:%d | code:%d | value:%d\n",
+               __func__, dev->phys, type, code, value);
+
+       if (type != EV_LED)
+               return -1;
+
+       offset = hidinput_find_field(dj_hiddev, type, code, &field);
+
+       if (offset == -1) {
+               dev_warn(&dev->dev, "event field not found\n");
+               return -1;
+       }
+       hid_set_field(field, offset, value);
+       hid_output_report(field->report, &data[0]);
+
+       output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT];
+       report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
+       hid_set_field(report->field[0], 0, dj_dev->device_index);
+       hid_set_field(report->field[0], 1, REPORT_TYPE_LEDS);
+       hid_set_field(report->field[0], 2, data[1]);
+
+       usbhid_submit_report(dj_rcv_hiddev, report, USB_DIR_OUT);
+
+       return 0;
+
+}
+
+static int logi_dj_ll_start(struct hid_device *hid)
+{
+       dbg_hid("%s\n", __func__);
+       return 0;
+}
+
+static void logi_dj_ll_stop(struct hid_device *hid)
+{
+       dbg_hid("%s\n", __func__);
+}
+
+
+static struct hid_ll_driver logi_dj_ll_driver = {
+       .parse = logi_dj_ll_parse,
+       .start = logi_dj_ll_start,
+       .stop = logi_dj_ll_stop,
+       .open = logi_dj_ll_open,
+       .close = logi_dj_ll_close,
+       .hidinput_input_event = logi_dj_ll_input_event,
+};
+
+
+static int logi_dj_raw_event(struct hid_device *hdev,
+                            struct hid_report *report, u8 *data,
+                            int size)
+{
+       struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+       struct dj_report *dj_report = (struct dj_report *) data;
+       unsigned long flags;
+       bool report_processed = false;
+
+       dbg_hid("%s, size:%d\n", __func__, size);
+
+       /* Here we receive all data coming from iface 2; there are 4 cases:
+        *
+        * 1) Data should continue its normal processing i.e. data does not
+        * come from the DJ collection, in which case we do nothing and
+        * return 0, so hid-core can continue normal processing (will forward
+        * to associated hidraw device)
+        *
+        * 2) Data is from the DJ collection and is intended for this driver, i.e.
+        * data contains arrival, departure, etc. notifications, in which case
+        * we queue them for delayed processing by the work queue. We return 1
+        * to hid-core as no further processing is required from it.
+        *
+        * 3) Data is from the DJ collection and reports a connection change;
+        * if the change means RF link loss, then we must send a null report
+        * to the upper layer to discard potentially pressed keys that may be
+        * repeated forever by the input layer. Return 1 to hid-core as no
+        * further processing is required.
+        *
+        * 4) Data is from the DJ collection and is an actual input event from
+        * a paired DJ device, in which case we forward it to the correct hid
+        * device (via hid_input_report()) and return 1 so hid-core does not do
+        * anything else with it.
+        */
+
+       spin_lock_irqsave(&djrcv_dev->lock, flags);
+       if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
+               switch (dj_report->report_type) {
+               case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+               case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+                       logi_dj_recv_queue_notification(djrcv_dev, dj_report);
+                       break;
+               case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
+                       if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
+                           STATUS_LINKLOSS) {
+                               logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
+                       }
+                       break;
+               default:
+                       logi_dj_recv_forward_report(djrcv_dev, dj_report);
+               }
+               report_processed = true;
+       }
+       spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+       return report_processed;
+}
+
+static int logi_dj_probe(struct hid_device *hdev,
+                        const struct hid_device_id *id)
+{
+       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+       struct dj_receiver_dev *djrcv_dev;
+       int retval;
+
+       if (is_dj_device((struct dj_device *)hdev->driver_data))
+               return -ENODEV;
+
+       dbg_hid("%s called for ifnum %d\n", __func__,
+               intf->cur_altsetting->desc.bInterfaceNumber);
+
+       /* Ignore interfaces 0 and 1, they will not carry any data; don't create
+        * any hid_device for them */
+       if (intf->cur_altsetting->desc.bInterfaceNumber !=
+           LOGITECH_DJ_INTERFACE_NUMBER) {
+               dbg_hid("%s: ignoring ifnum %d\n", __func__,
+                       intf->cur_altsetting->desc.bInterfaceNumber);
+               return -ENODEV;
+       }
+
+       /* Treat interface 2 */
+
+       djrcv_dev = kzalloc(sizeof(struct dj_receiver_dev), GFP_KERNEL);
+       if (!djrcv_dev) {
+               dev_err(&hdev->dev,
+                       "%s:failed allocating dj_receiver_dev\n", __func__);
+               return -ENOMEM;
+       }
+       djrcv_dev->hdev = hdev;
+       INIT_WORK(&djrcv_dev->work, delayedwork_callback);
+       spin_lock_init(&djrcv_dev->lock);
+       if (kfifo_alloc(&djrcv_dev->notif_fifo,
+                       DJ_MAX_NUMBER_NOTIFICATIONS * sizeof(struct dj_report),
+                       GFP_KERNEL)) {
+               dev_err(&hdev->dev,
+                       "%s:failed allocating notif_fifo\n", __func__);
+               kfree(djrcv_dev);
+               return -ENOMEM;
+       }
+       hid_set_drvdata(hdev, djrcv_dev);
+
+       /* Call usbhid to fetch the HID descriptors of interface 2 and
+        * subsequently call hid-core to parse the fetched descriptors;
+        * this will in turn create the hidraw and hiddev nodes
+        * for interface 2 of the receiver */
+       retval = hid_parse(hdev);
+       if (retval) {
+               dev_err(&hdev->dev,
+                       "%s:parse of interface 2 failed\n", __func__);
+               goto hid_parse_fail;
+       }
+
+       /* Start the USB device and connect it to the upper-layer interfaces,
+        * hiddev and hidraw */
+       retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+       if (retval) {
+               dev_err(&hdev->dev,
+                       "%s:hid_hw_start returned error\n", __func__);
+               goto hid_hw_start_fail;
+       }
+
+       retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+       if (retval < 0) {
+               dev_err(&hdev->dev,
+                       "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+                       __func__, retval);
+               goto switch_to_dj_mode_fail;
+       }
+
+       /* This enables the polling URB on the IN endpoint */
+       retval = hdev->ll_driver->open(hdev);
+       if (retval < 0) {
+               dev_err(&hdev->dev, "%s:hdev->ll_driver->open returned "
+                       "error:%d\n", __func__, retval);
+               goto llopen_failed;
+       }
+
+       retval = logi_dj_recv_query_paired_devices(djrcv_dev);
+       if (retval < 0) {
+               dev_err(&hdev->dev, "%s:logi_dj_recv_query_paired_devices "
+                       "error:%d\n", __func__, retval);
+               goto logi_dj_recv_query_paired_devices_failed;
+       }
+
+       return retval;
+
+logi_dj_recv_query_paired_devices_failed:
+       hdev->ll_driver->close(hdev);
+
+llopen_failed:
+switch_to_dj_mode_fail:
+       hid_hw_stop(hdev);
+
+hid_hw_start_fail:
+hid_parse_fail:
+       kfifo_free(&djrcv_dev->notif_fifo);
+       kfree(djrcv_dev);
+       hid_set_drvdata(hdev, NULL);
+       return retval;
+
+}
+
+#ifdef CONFIG_PM
+static int logi_dj_reset_resume(struct hid_device *hdev)
+{
+       int retval;
+       struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+
+       retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+       if (retval < 0) {
+               dev_err(&hdev->dev,
+                       "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+                       __func__, retval);
+       }
+
+       return 0;
+}
+#endif
+
+static void logi_dj_remove(struct hid_device *hdev)
+{
+       struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+       struct dj_device *dj_dev;
+       int i;
+
+       dbg_hid("%s\n", __func__);
+
+       cancel_work_sync(&djrcv_dev->work);
+
+       hdev->ll_driver->close(hdev);
+       hid_hw_stop(hdev);
+
+       /* At this point the only context that can access djrcv_dev should be
+        * this thread: the work item is guaranteed to have finished and no
+        * more raw_event callbacks should arrive after the remove callback
+        * was triggered, so no locks are taken around the code below */
+       for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) {
+               dj_dev = djrcv_dev->paired_dj_devices[i];
+               if (dj_dev != NULL) {
+                       hid_destroy_device(dj_dev->hdev);
+                       kfree(dj_dev);
+                       djrcv_dev->paired_dj_devices[i] = NULL;
+               }
+       }
+
+       kfifo_free(&djrcv_dev->notif_fifo);
+       kfree(djrcv_dev);
+       hid_set_drvdata(hdev, NULL);
+}
+
+static int logi_djdevice_probe(struct hid_device *hdev,
+                        const struct hid_device_id *id)
+{
+       int ret;
+       struct dj_device *dj_dev = hdev->driver_data;
+
+       if (!is_dj_device(dj_dev))
+               return -ENODEV;
+
+       ret = hid_parse(hdev);
+       if (!ret)
+               ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+       return ret;
+}
+
+static const struct hid_device_id logi_dj_receivers[] = {
+       {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+               USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+       {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+               USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+       {}
+};
+
+MODULE_DEVICE_TABLE(hid, logi_dj_receivers);
+
+static struct hid_driver logi_djreceiver_driver = {
+       .name = "logitech-djreceiver",
+       .id_table = logi_dj_receivers,
+       .probe = logi_dj_probe,
+       .remove = logi_dj_remove,
+       .raw_event = logi_dj_raw_event,
+#ifdef CONFIG_PM
+       .reset_resume = logi_dj_reset_resume,
+#endif
+};
+
+
+static const struct hid_device_id logi_dj_devices[] = {
+       {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+               USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+       {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+               USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+       {}
+};
+
+static struct hid_driver logi_djdevice_driver = {
+       .name = "logitech-djdevice",
+       .id_table = logi_dj_devices,
+       .probe = logi_djdevice_probe,
+};
+
+
+static int __init logi_dj_init(void)
+{
+       int retval;
+
+       dbg_hid("Logitech-DJ:%s\n", __func__);
+
+       retval = hid_register_driver(&logi_djreceiver_driver);
+       if (retval)
+               return retval;
+
+       retval = hid_register_driver(&logi_djdevice_driver);
+       if (retval)
+               hid_unregister_driver(&logi_djreceiver_driver);
+
+       return retval;
+
+}
+
+static void __exit logi_dj_exit(void)
+{
+       dbg_hid("Logitech-DJ:%s\n", __func__);
+
+       hid_unregister_driver(&logi_djdevice_driver);
+       hid_unregister_driver(&logi_djreceiver_driver);
+
+}
+
+module_init(logi_dj_init);
+module_exit(logi_dj_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Logitech");
+MODULE_AUTHOR("Nestor Lopez Casado");
+MODULE_AUTHOR("nlopezcasad@logitech.com");
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
new file mode 100644 (file)
index 0000000..fd28a5e
--- /dev/null
@@ -0,0 +1,123 @@
+#ifndef __HID_LOGITECH_DJ_H
+#define __HID_LOGITECH_DJ_H
+
+/*
+ *  HID driver for Logitech Unifying receivers
+ *
+ *  Copyright (c) 2011 Logitech
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kfifo.h>
+
+#define DJ_MAX_PAIRED_DEVICES                  6
+#define DJ_MAX_NUMBER_NOTIFICATIONS            8
+#define DJ_DEVICE_INDEX_MIN                    1
+#define DJ_DEVICE_INDEX_MAX                    6
+
+#define DJREPORT_SHORT_LENGTH                  15
+#define DJREPORT_LONG_LENGTH                   32
+
+#define REPORT_ID_DJ_SHORT                     0x20
+#define REPORT_ID_DJ_LONG                      0x21
+
+#define REPORT_TYPE_RFREPORT_FIRST             0x01
+#define REPORT_TYPE_RFREPORT_LAST              0x1F
+
+/* Command Switch to DJ mode */
+#define REPORT_TYPE_CMD_SWITCH                 0x80
+#define CMD_SWITCH_PARAM_DEVBITFIELD           0x00
+#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS       0x01
+#define TIMEOUT_NO_KEEPALIVE                   0x00
+
+/* Command to Get the list of Paired devices */
+#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES     0x81
+
+/* Device Paired Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_PAIRED                0x41
+#define SPFUNCTION_MORE_NOTIF_EXPECTED         0x01
+#define SPFUNCTION_DEVICE_LIST_EMPTY           0x02
+#define DEVICE_PAIRED_PARAM_SPFUNCTION         0x00
+#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB       0x01
+#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB       0x02
+#define DEVICE_PAIRED_RF_REPORT_TYPE           0x03
+
+/* Device Un-Paired Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED      0x40
+
+
+/* Connection Status Notification */
+#define REPORT_TYPE_NOTIF_CONNECTION_STATUS    0x42
+#define CONNECTION_STATUS_PARAM_STATUS         0x00
+#define STATUS_LINKLOSS                                0x01
+
+/* Error Notification */
+#define REPORT_TYPE_NOTIF_ERROR                        0x7F
+#define NOTIF_ERROR_PARAM_ETYPE                        0x00
+#define ETYPE_KEEPALIVE_TIMEOUT                        0x01
+
+/* supported DJ HID && RF report types */
+#define REPORT_TYPE_KEYBOARD                   0x01
+#define REPORT_TYPE_MOUSE                      0x02
+#define REPORT_TYPE_CONSUMER_CONTROL           0x03
+#define REPORT_TYPE_SYSTEM_CONTROL             0x04
+#define REPORT_TYPE_MEDIA_CENTER               0x08
+#define REPORT_TYPE_LEDS                       0x0E
+
+/* RF Report types bitfield */
+#define STD_KEYBOARD                           0x00000002
+#define STD_MOUSE                              0x00000004
+#define MULTIMEDIA                             0x00000008
+#define POWER_KEYS                             0x00000010
+#define MEDIA_CENTER                           0x00000100
+#define KBD_LEDS                               0x00004000
+
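+/* Short DJ report layout: report id, device index, report type, then
+ * DJREPORT_SHORT_LENGTH - 3 = 12 parameter bytes */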
+struct dj_report {
+       u8 report_id;
+       u8 device_index;
+       u8 report_type;
+       u8 report_params[DJREPORT_SHORT_LENGTH - 3];
+};
+
+struct dj_receiver_dev {
+       struct hid_device *hdev;
+       struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
+                                           DJ_DEVICE_INDEX_MIN];
+       struct work_struct work;
+       struct kfifo notif_fifo;
+       spinlock_t lock;
+};
+
+struct dj_device {
+       struct hid_device *hdev;
+       struct dj_receiver_dev *dj_receiver_dev;
+       u32 reports_supported;
+       u8 device_index;
+};
+
+/**
+ * is_dj_device - tell whether the given dj_device is a paired device rather than the receiver.
+ * @dj_dev: the dj device to test
+ *
+ * This macro tests whether a struct dj_device pointer refers to a device
+ * created by the bus enumerator.
+ */
+#define is_dj_device(dj_dev) \
+       (&(dj_dev)->dj_receiver_dev->hdev->dev == (dj_dev)->hdev->dev.parent)
+
+#endif
index 0ec91c1..f0fbd7b 100644 (file)
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
 #define NO_TOUCHES -1
 #define SINGLE_TOUCH_UP -2
 
+/* Touch surface information. Dimensions are in hundredths of a mm, min and max
+ * are in raw device units. */
+#define MOUSE_DIMENSION_X (float)9056
+#define MOUSE_MIN_X -1100
+#define MOUSE_MAX_X 1258
+#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
+#define MOUSE_DIMENSION_Y (float)5152
+#define MOUSE_MIN_Y -1589
+#define MOUSE_MAX_Y 2047
+#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))
+
+#define TRACKPAD_DIMENSION_X (float)13000
+#define TRACKPAD_MIN_X -2909
+#define TRACKPAD_MAX_X 3167
+#define TRACKPAD_RES_X \
+       ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
+#define TRACKPAD_DIMENSION_Y (float)11000
+#define TRACKPAD_MIN_Y -2456
+#define TRACKPAD_MAX_Y 2565
+#define TRACKPAD_RES_Y \
+       ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
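+/* For example, MOUSE_RES_X = (1258 - (-1100)) / (9056 / 100) ~= 26 units/mm,
+ * TRACKPAD_RES_X = (3167 - (-2909)) / (13000 / 100) ~= 47 units/mm */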
+
 /**
  * struct magicmouse_sc - Tracks Magic Mouse-specific data.
  * @input: Input device through which we report events.
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
                 * inverse of the reported Y.
                 */
                if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
-                       input_set_abs_params(input, ABS_MT_POSITION_X, -1100,
-                               1358, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_Y, -1589,
-                               2047, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_X,
+                               MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_Y,
+                               MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
+
+                       input_abs_set_res(input, ABS_MT_POSITION_X,
+                               MOUSE_RES_X);
+                       input_abs_set_res(input, ABS_MT_POSITION_Y,
+                               MOUSE_RES_Y);
                } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
-                       input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0);
-                       input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_X, -2909,
-                               3167, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_Y, -2456,
-                               2565, 4, 0);
+                       input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
+                               TRACKPAD_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
+                               TRACKPAD_MAX_Y, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_X,
+                               TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_Y,
+                               TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
+
+                       input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
+                       input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
+                       input_abs_set_res(input, ABS_MT_POSITION_X,
+                               TRACKPAD_RES_X);
+                       input_abs_set_res(input, ABS_MT_POSITION_Y,
+                               TRACKPAD_RES_Y);
                }
 
                input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
        }
        report->size = 6;
 
+       /*
+        * Some devices respond with 'invalid report id' when the feature
+        * report switching them into multitouch mode is sent to them.
+        *
+        * This results in -EIO from the _raw low-level transport callback,
+        * but there seems to be no other way of switching the mode.
+        * Thus the super-ugly hacky success check below.
+        */
        ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
                        HID_FEATURE_REPORT);
-       if (ret != sizeof(feature)) {
+       if (ret != -EIO && ret != sizeof(feature)) {
                hid_err(hdev, "unable to request touch data (%d)\n", ret);
                goto err_stop_hw;
        }
index 58d0e7a..fa5d7a1 100644 (file)
@@ -47,10 +47,11 @@ MODULE_LICENSE("GPL");
 #define MT_QUIRK_SLOT_IS_CONTACTID     (1 << 1)
 #define MT_QUIRK_CYPRESS               (1 << 2)
 #define MT_QUIRK_SLOT_IS_CONTACTNUMBER (1 << 3)
-#define MT_QUIRK_VALID_IS_INRANGE      (1 << 4)
-#define MT_QUIRK_VALID_IS_CONFIDENCE   (1 << 5)
-#define MT_QUIRK_EGALAX_XYZ_FIXUP      (1 << 6)
-#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE   (1 << 7)
+#define MT_QUIRK_ALWAYS_VALID          (1 << 4)
+#define MT_QUIRK_VALID_IS_INRANGE      (1 << 5)
+#define MT_QUIRK_VALID_IS_CONFIDENCE   (1 << 6)
+#define MT_QUIRK_EGALAX_XYZ_FIXUP      (1 << 7)
+#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE   (1 << 8)
 
 struct mt_slot {
        __s32 x, y, p, w, h;
@@ -86,11 +87,12 @@ struct mt_class {
 /* classes of device behavior */
 #define MT_CLS_DEFAULT                         0x0001
 
-#define MT_CLS_CONFIDENCE                      0x0002
-#define MT_CLS_CONFIDENCE_MINUS_ONE            0x0003
-#define MT_CLS_DUAL_INRANGE_CONTACTID          0x0004
-#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER      0x0005
-#define MT_CLS_DUAL_NSMU_CONTACTID             0x0006
+#define MT_CLS_SERIAL                          0x0002
+#define MT_CLS_CONFIDENCE                      0x0003
+#define MT_CLS_CONFIDENCE_MINUS_ONE            0x0004
+#define MT_CLS_DUAL_INRANGE_CONTACTID          0x0005
+#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER      0x0006
+#define MT_CLS_DUAL_NSMU_CONTACTID             0x0007
 
 /* vendor specific classes */
 #define MT_CLS_3M                              0x0101
@@ -134,6 +136,8 @@ static int find_slot_from_contactid(struct mt_device *td)
 struct mt_class mt_classes[] = {
        { .name = MT_CLS_DEFAULT,
                .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
+       { .name = MT_CLS_SERIAL,
+               .quirks = MT_QUIRK_ALWAYS_VALID },
        { .name = MT_CLS_CONFIDENCE,
                .quirks = MT_QUIRK_VALID_IS_CONFIDENCE },
        { .name = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -213,6 +217,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        struct mt_class *cls = td->mtclass;
        __s32 quirks = cls->quirks;
 
+       /* Only map fields from TouchScreen or TouchPad collections.
+        * We need to ignore fields that belong to other collections
+        * such as Mouse that might have the same GenericDesktop usages. */
+       if (field->application == HID_DG_TOUCHSCREEN)
+               set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
+       else if (field->application == HID_DG_TOUCHPAD)
+               set_bit(INPUT_PROP_POINTER, hi->input->propbit);
+       else
+               return 0;
+
        switch (usage->hid & HID_USAGE_PAGE) {
 
        case HID_UP_GENDESK:
@@ -277,6 +291,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        td->last_slot_field = usage->hid;
                        td->last_field_index = field->index;
                        td->last_mt_collection = usage->collection_index;
+                       hdev->quirks &= ~HID_QUIRK_MULTITOUCH;
                        return 1;
                case HID_DG_WIDTH:
                        hid_map_usage(hi, usage, bit, max,
@@ -435,7 +450,9 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
        if (hid->claimed & HID_CLAIMED_INPUT && td->slots) {
                switch (usage->hid) {
                case HID_DG_INRANGE:
-                       if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+                       if (quirks & MT_QUIRK_ALWAYS_VALID)
+                               td->curvalid = true;
+                       else if (quirks & MT_QUIRK_VALID_IS_INRANGE)
                                td->curvalid = value;
                        break;
                case HID_DG_TIPSWITCH:
@@ -513,12 +530,44 @@ static void mt_set_input_mode(struct hid_device *hdev)
        }
 }
 
+/* a list of devices for which there is a specialized multitouch driver */
+static const struct hid_device_id mt_have_special_driver[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0001) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0006) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+                       USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+                       USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+       { }
+};
+
+static bool mt_match_one_id(struct hid_device *hdev,
+               const struct hid_device_id *id)
+{
+       return id->bus == hdev->bus &&
+               (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
+               (id->product == HID_ANY_ID || id->product == hdev->product);
+}
+
+static const struct hid_device_id *mt_match_id(struct hid_device *hdev,
+               const struct hid_device_id *id)
+{
+       for (; id->bus; id++)
+               if (mt_match_one_id(hdev, id))
+                       return id;
+
+       return NULL;
+}
+
 static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int ret, i;
        struct mt_device *td;
        struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
 
+       if (mt_match_id(hdev, mt_have_special_driver))
+               return -ENODEV;
+
        for (i = 0; mt_classes[i].name ; i++) {
                if (id->driver_data == mt_classes[i].name) {
                        mtclass = &(mt_classes[i]);
@@ -526,10 +575,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
                }
        }
 
-       /* This allows the driver to correctly support devices
-        * that emit events over several HID messages.
-        */
-       hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
 
        td = kzalloc(sizeof(struct mt_device), GFP_KERNEL);
        if (!td) {
@@ -545,10 +590,16 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (ret != 0)
                goto fail;
 
+       hdev->quirks |= HID_QUIRK_MULTITOUCH;
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret)
                goto fail;
 
+       /* This allows the driver to correctly support devices
+        * that emit events over several HID messages.
+        */
+       hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
        td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
                                GFP_KERNEL);
        if (!td->slots) {
@@ -662,6 +713,11 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
                        USB_DEVICE_ID_GOODTOUCH_000f) },
 
+       /* Ideacom panel */
+       { .driver_data = MT_CLS_SERIAL,
+               HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
+                       USB_DEVICE_ID_IDEACOM_IDC6650) },
+
        /* Ilitek dual touch panel */
        {  .driver_data = MT_CLS_DEFAULT,
                HID_USB_DEVICE(USB_VENDOR_ID_ILITEK,
@@ -672,6 +728,11 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,
                        USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
 
+       /* LG Display panels */
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_LG,
+                       USB_DEVICE_ID_LG_MULTITOUCH) },
+
        /* Lumio panels */
        { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
                HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
@@ -732,6 +793,10 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_XAT,
                        USB_DEVICE_ID_XAT_CSR) },
 
+       /* Rest of the world */
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+
        { }
 };
 MODULE_DEVICE_TABLE(hid, mt_devices);
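
The catch-all HID_ANY_ID entry above is only safe because mt_probe() first consults mt_have_special_driver through mt_match_id() and returns -ENODEV for devices that a dedicated driver already handles. For illustration, a standalone mock-up of that table-driven wildcard match (simplified types; the wildcard constant and the IDs are invented, the real definitions live in hid.h and hid-ids.h):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ANY 0xffffffffu                 /* stand-in for HID_ANY_ID */

    struct dev_id { uint32_t bus, vendor, product; };

    /* mirrors mt_match_one_id(): wildcard vendor/product always match */
    static bool match_one(const struct dev_id *dev, const struct dev_id *id)
    {
        return id->bus == dev->bus &&
               (id->vendor == ANY || id->vendor == dev->vendor) &&
               (id->product == ANY || id->product == dev->product);
    }

    /* mirrors mt_match_id(): walk the table until the empty terminator */
    static const struct dev_id *match(const struct dev_id *dev,
                                      const struct dev_id *table)
    {
        for (; table->bus; table++)
            if (match_one(dev, table))
                return table;
        return NULL;
    }

    int main(void)
    {
        static const struct dev_id table[] = {
            { .bus = 3, .vendor = 0x1234, .product = 0x0001 },  /* specific */
            { .bus = 3, .vendor = ANY,    .product = ANY },     /* catch-all */
            { }                                                 /* terminator */
        };
        struct dev_id dev = { .bus = 3, .vendor = 0xabcd, .product = 0x9999 };
        const struct dev_id *hit = match(&dev, table);

        printf("matched entry %d\n", hit ? (int)(hit - table) : -1); /* -> 1 */
        return 0;
    }
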
index 0688832..72ca689 100644 (file)
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
        if (ret) {
                hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
                         ret);
-               /*
-                * battery attribute is not critical for the tablet, but if it
-                * failed then there is no need to create ac attribute
-                */
-               goto move_on;
+               goto err_battery;
        }
 
        wdata->ac.properties = wacom_ac_props;
@@ -371,18 +367,14 @@ static int wacom_probe(struct hid_device *hdev,
        if (ret) {
                hid_warn(hdev,
                         "can't create ac battery attribute, err: %d\n", ret);
-               /*
-                * ac attribute is not critical for the tablet, but if it
-                * failed then we don't want to battery attribute to exist
-                */
-               power_supply_unregister(&wdata->battery);
+               goto err_ac;
        }
-
-move_on:
 #endif
        hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
        input = hidinput->input;
 
+       __set_bit(INPUT_PROP_POINTER, input->propbit);
+
        /* Basics */
        input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
 
@@ -416,6 +408,13 @@ move_on:
 
        return 0;
 
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+err_ac:
+       power_supply_unregister(&wdata->battery);
+err_battery:
+       device_remove_file(&hdev->dev, &dev_attr_speed);
+       hid_hw_stop(hdev);
+#endif
 err_free:
        kfree(wdata);
        return ret;
@@ -426,6 +425,7 @@ static void wacom_remove(struct hid_device *hdev)
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
        struct wacom_data *wdata = hid_get_drvdata(hdev);
 #endif
+       device_remove_file(&hdev->dev, &dev_attr_speed);
        hid_hw_stop(hdev);
 
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
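
The wacom hunks above drop the old 'move_on' soft-failure: an AC registration failure now unregisters the battery, and a battery failure removes the speed attribute and stops the HID hardware before the common kfree. For illustration, a self-contained mock-up of that reverse-order goto unwind (the resource names are placeholders, not the driver's real objects):

    #include <stdio.h>

    static int acquire(const char *name, int fail)
    {
        printf("%s %s\n", name, fail ? "failed" : "acquired");
        return fail ? -1 : 0;
    }

    static void release(const char *name)
    {
        printf("%s released\n", name);
    }

    static int probe(void)
    {
        int ret;

        if ((ret = acquire("battery", 0)))
            goto err;
        if ((ret = acquire("ac", 0)))
            goto err_battery;
        if ((ret = acquire("input", 1)))        /* last step fails */
            goto err_ac;
        return 0;

    err_ac:
        release("ac");                          /* undo in reverse order */
    err_battery:
        release("battery");
    err:
        return ret;
    }

    int main(void)
    {
        probe();
        return 0;
    }
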
index 85a02e5..76739c0 100644 (file)
  * any later version.
  */
 
+#include <linux/completion.h>
 #include <linux/device.h>
 #include <linux/hid.h>
 #include <linux/input.h>
 #include <linux/leds.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
 #include <linux/spinlock.h>
 #include "hid-ids.h"
 
-#define WIIMOTE_VERSION "0.1"
+#define WIIMOTE_VERSION "0.2"
 #define WIIMOTE_NAME "Nintendo Wii Remote"
 #define WIIMOTE_BUFSIZE 32
 
@@ -30,12 +33,26 @@ struct wiimote_buf {
 struct wiimote_state {
        spinlock_t lock;
        __u8 flags;
+       __u8 accel_split[2];
+
+       /* synchronous cmd requests */
+       struct mutex sync;
+       struct completion ready;
+       int cmd;
+       __u32 opt;
+
+       /* results of synchronous requests */
+       __u8 cmd_battery;
+       __u8 cmd_err;
 };
 
 struct wiimote_data {
        struct hid_device *hdev;
        struct input_dev *input;
        struct led_classdev *leds[4];
+       struct input_dev *accel;
+       struct input_dev *ir;
+       struct power_supply battery;
 
        spinlock_t qlock;
        __u8 head;
@@ -46,23 +63,47 @@ struct wiimote_data {
        struct wiimote_state state;
 };
 
-#define WIIPROTO_FLAG_LED1 0x01
-#define WIIPROTO_FLAG_LED2 0x02
-#define WIIPROTO_FLAG_LED3 0x04
-#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAG_LED1             0x01
+#define WIIPROTO_FLAG_LED2             0x02
+#define WIIPROTO_FLAG_LED3             0x04
+#define WIIPROTO_FLAG_LED4             0x08
+#define WIIPROTO_FLAG_RUMBLE           0x10
+#define WIIPROTO_FLAG_ACCEL            0x20
+#define WIIPROTO_FLAG_IR_BASIC         0x40
+#define WIIPROTO_FLAG_IR_EXT           0x80
+#define WIIPROTO_FLAG_IR_FULL          0xc0 /* IR_BASIC | IR_EXT */
 #define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
                                        WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
+                                                       WIIPROTO_FLAG_IR_FULL)
 
 /* return flag for led \num */
 #define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
 
 enum wiiproto_reqs {
        WIIPROTO_REQ_NULL = 0x0,
+       WIIPROTO_REQ_RUMBLE = 0x10,
        WIIPROTO_REQ_LED = 0x11,
        WIIPROTO_REQ_DRM = 0x12,
+       WIIPROTO_REQ_IR1 = 0x13,
+       WIIPROTO_REQ_SREQ = 0x15,
+       WIIPROTO_REQ_WMEM = 0x16,
+       WIIPROTO_REQ_RMEM = 0x17,
+       WIIPROTO_REQ_IR2 = 0x1a,
        WIIPROTO_REQ_STATUS = 0x20,
+       WIIPROTO_REQ_DATA = 0x21,
        WIIPROTO_REQ_RETURN = 0x22,
        WIIPROTO_REQ_DRM_K = 0x30,
+       WIIPROTO_REQ_DRM_KA = 0x31,
+       WIIPROTO_REQ_DRM_KE = 0x32,
+       WIIPROTO_REQ_DRM_KAI = 0x33,
+       WIIPROTO_REQ_DRM_KEE = 0x34,
+       WIIPROTO_REQ_DRM_KAE = 0x35,
+       WIIPROTO_REQ_DRM_KIE = 0x36,
+       WIIPROTO_REQ_DRM_KAIE = 0x37,
+       WIIPROTO_REQ_DRM_E = 0x3d,
+       WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
+       WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
 };
 
 enum wiiproto_keys {
@@ -94,6 +135,56 @@ static __u16 wiiproto_keymap[] = {
        BTN_MODE,       /* WIIPROTO_KEY_HOME */
 };
 
+static enum power_supply_property wiimote_battery_props[] = {
+       POWER_SUPPLY_PROP_CAPACITY
+};
+
+/* requires the state.lock spinlock to be held */
+static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
+                                                               __u32 opt)
+{
+       return wdata->state.cmd == cmd && wdata->state.opt == opt;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
+{
+       wdata->state.cmd = WIIPROTO_REQ_NULL;
+       complete(&wdata->state.ready);
+}
+
+static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
+{
+       return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
+                                                               __u32 opt)
+{
+       INIT_COMPLETION(wdata->state.ready);
+       wdata->state.cmd = cmd;
+       wdata->state.opt = opt;
+}
+
+static inline void wiimote_cmd_release(struct wiimote_data *wdata)
+{
+       mutex_unlock(&wdata->state.sync);
+}
+
+static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
+{
+       int ret;
+
+       ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
+       if (ret < 0)
+               return -ERESTARTSYS;
+       else if (ret == 0)
+               return -EIO;
+       else
+               return 0;
+}
+
 static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
                                                                size_t count)
 {
@@ -172,6 +263,39 @@ static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer,
        spin_unlock_irqrestore(&wdata->qlock, flags);
 }
 
+/*
+ * This sets the rumble bit on the given output report if rumble is
+ * currently enabled.
+ * \cmd1 must point to the second byte in the output report => &cmd[1]
+ * This must be called on nearly every output report before passing it
+ * into the output queue!
+ */
+static inline void wiiproto_keep_rumble(struct wiimote_data *wdata, __u8 *cmd1)
+{
+       if (wdata->state.flags & WIIPROTO_FLAG_RUMBLE)
+               *cmd1 |= 0x01;
+}
+
+static void wiiproto_req_rumble(struct wiimote_data *wdata, __u8 rumble)
+{
+       __u8 cmd[2];
+
+       rumble = !!rumble;
+       if (rumble == !!(wdata->state.flags & WIIPROTO_FLAG_RUMBLE))
+               return;
+
+       if (rumble)
+               wdata->state.flags |= WIIPROTO_FLAG_RUMBLE;
+       else
+               wdata->state.flags &= ~WIIPROTO_FLAG_RUMBLE;
+
+       cmd[0] = WIIPROTO_REQ_RUMBLE;
+       cmd[1] = 0;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
 static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
 {
        __u8 cmd[2];
@@ -193,6 +317,7 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
        if (leds & WIIPROTO_FLAG_LED4)
                cmd[1] |= 0x80;
 
+       wiiproto_keep_rumble(wdata, &cmd[1]);
        wiimote_queue(wdata, cmd, sizeof(cmd));
 }
 
@@ -203,7 +328,23 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
  */
 static __u8 select_drm(struct wiimote_data *wdata)
 {
-       return WIIPROTO_REQ_DRM_K;
+       __u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR;
+
+       if (ir == WIIPROTO_FLAG_IR_BASIC) {
+               if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
+                       return WIIPROTO_REQ_DRM_KAIE;
+               else
+                       return WIIPROTO_REQ_DRM_KIE;
+       } else if (ir == WIIPROTO_FLAG_IR_EXT) {
+               return WIIPROTO_REQ_DRM_KAI;
+       } else if (ir == WIIPROTO_FLAG_IR_FULL) {
+               return WIIPROTO_REQ_DRM_SKAI1;
+       } else {
+               if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
+                       return WIIPROTO_REQ_DRM_KA;
+               else
+                       return WIIPROTO_REQ_DRM_K;
+       }
 }
 
 static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
@@ -217,9 +358,256 @@ static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
        cmd[1] = 0;
        cmd[2] = drm;
 
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_status(struct wiimote_data *wdata)
+{
+       __u8 cmd[2];
+
+       cmd[0] = WIIPROTO_REQ_SREQ;
+       cmd[1] = 0;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_accel(struct wiimote_data *wdata, __u8 accel)
+{
+       accel = !!accel;
+       if (accel == !!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
+               return;
+
+       if (accel)
+               wdata->state.flags |= WIIPROTO_FLAG_ACCEL;
+       else
+               wdata->state.flags &= ~WIIPROTO_FLAG_ACCEL;
+
+       wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+}
+
+static void wiiproto_req_ir1(struct wiimote_data *wdata, __u8 flags)
+{
+       __u8 cmd[2];
+
+       cmd[0] = WIIPROTO_REQ_IR1;
+       cmd[1] = flags;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_ir2(struct wiimote_data *wdata, __u8 flags)
+{
+       __u8 cmd[2];
+
+       cmd[0] = WIIPROTO_REQ_IR2;
+       cmd[1] = flags;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+#define wiiproto_req_wreg(wdata, os, buf, sz) \
+                       wiiproto_req_wmem((wdata), false, (os), (buf), (sz))
+
+#define wiiproto_req_weeprom(wdata, os, buf, sz) \
+                       wiiproto_req_wmem((wdata), true, (os), (buf), (sz))
+
+static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom,
+                               __u32 offset, const __u8 *buf, __u8 size)
+{
+       __u8 cmd[22];
+
+       if (size > 16 || size == 0) {
+               hid_warn(wdata->hdev, "Invalid length %d wmem request\n", size);
+               return;
+       }
+
+       memset(cmd, 0, sizeof(cmd));
+       cmd[0] = WIIPROTO_REQ_WMEM;
+       cmd[2] = (offset >> 16) & 0xff;
+       cmd[3] = (offset >> 8) & 0xff;
+       cmd[4] = offset & 0xff;
+       cmd[5] = size;
+       memcpy(&cmd[6], buf, size);
+
+       if (!eeprom)
+               cmd[1] |= 0x04;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
        wiimote_queue(wdata, cmd, sizeof(cmd));
 }
 
+/* requires the cmd-mutex to be held */
+static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+                                               const __u8 *wmem, __u8 size)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiimote_cmd_set(wdata, WIIPROTO_REQ_WMEM, 0);
+       wiiproto_req_wreg(wdata, offset, wmem, size);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_wait(wdata);
+       if (!ret && wdata->state.cmd_err)
+               ret = -EIO;
+
+       return ret;
+}
+
+static int wiimote_battery_get_property(struct power_supply *psy,
+                                               enum power_supply_property psp,
+                                               union power_supply_propval *val)
+{
+       struct wiimote_data *wdata = container_of(psy,
+                                               struct wiimote_data, battery);
+       int ret = 0, state;
+       unsigned long flags;
+
+       ret = wiimote_cmd_acquire(wdata);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0);
+       wiiproto_req_status(wdata);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_wait(wdata);
+       state = wdata->state.cmd_battery;
+       wiimote_cmd_release(wdata);
+
+       if (ret)
+               return ret;
+
+       switch (psp) {
+               case POWER_SUPPLY_PROP_CAPACITY:
+                       val->intval = state * 100 / 255;
+                       break;
+               default:
+                       ret = -EINVAL;
+                       break;
+       }
+
+       return ret;
+}
+
+static int wiimote_init_ir(struct wiimote_data *wdata, __u16 mode)
+{
+       int ret;
+       unsigned long flags;
+       __u8 format = 0;
+       static const __u8 data_enable[] = { 0x01 };
+       static const __u8 data_sens1[] = { 0x02, 0x00, 0x00, 0x71, 0x01,
+                                               0x00, 0xaa, 0x00, 0x64 };
+       static const __u8 data_sens2[] = { 0x63, 0x03 };
+       static const __u8 data_fin[] = { 0x08 };
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+
+       if (mode == (wdata->state.flags & WIIPROTO_FLAGS_IR)) {
+               spin_unlock_irqrestore(&wdata->state.lock, flags);
+               return 0;
+       }
+
+       if (mode == 0) {
+               wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
+               wiiproto_req_ir1(wdata, 0);
+               wiiproto_req_ir2(wdata, 0);
+               wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+               spin_unlock_irqrestore(&wdata->state.lock, flags);
+               return 0;
+       }
+
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_acquire(wdata);
+       if (ret)
+               return ret;
+
+       /* send PIXEL CLOCK ENABLE cmd first */
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiimote_cmd_set(wdata, WIIPROTO_REQ_IR1, 0);
+       wiiproto_req_ir1(wdata, 0x06);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_wait(wdata);
+       if (ret)
+               goto unlock;
+       if (wdata->state.cmd_err) {
+               ret = -EIO;
+               goto unlock;
+       }
+
+       /* enable IR LOGIC */
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiimote_cmd_set(wdata, WIIPROTO_REQ_IR2, 0);
+       wiiproto_req_ir2(wdata, 0x06);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_wait(wdata);
+       if (ret)
+               goto unlock;
+       if (wdata->state.cmd_err) {
+               ret = -EIO;
+               goto unlock;
+       }
+
+       /* enable IR cam but do not make it send data, yet */
+       ret = wiimote_cmd_write(wdata, 0xb00030, data_enable,
+                                                       sizeof(data_enable));
+       if (ret)
+               goto unlock;
+
+       /* write first sensitivity block */
+       ret = wiimote_cmd_write(wdata, 0xb00000, data_sens1,
+                                                       sizeof(data_sens1));
+       if (ret)
+               goto unlock;
+
+       /* write second sensitivity block */
+       ret = wiimote_cmd_write(wdata, 0xb0001a, data_sens2,
+                                                       sizeof(data_sens2));
+       if (ret)
+               goto unlock;
+
+       /* put IR cam into desired state */
+       switch (mode) {
+               case WIIPROTO_FLAG_IR_FULL:
+                       format = 5;
+                       break;
+               case WIIPROTO_FLAG_IR_EXT:
+                       format = 3;
+                       break;
+               case WIIPROTO_FLAG_IR_BASIC:
+                       format = 1;
+                       break;
+       }
+       ret = wiimote_cmd_write(wdata, 0xb00033, &format, sizeof(format));
+       if (ret)
+               goto unlock;
+
+       /* make IR cam send data */
+       ret = wiimote_cmd_write(wdata, 0xb00030, data_fin, sizeof(data_fin));
+       if (ret)
+               goto unlock;
+
+       /* request new DRM mode compatible with the IR mode */
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
+       wdata->state.flags |= mode & WIIPROTO_FLAGS_IR;
+       wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+unlock:
+       wiimote_cmd_release(wdata);
+       return ret;
+}
+
 static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
 {
        struct wiimote_data *wdata;
@@ -268,9 +656,28 @@ static void wiimote_leds_set(struct led_classdev *led_dev,
        }
 }
 
-static int wiimote_input_event(struct input_dev *dev, unsigned int type,
-                                               unsigned int code, int value)
+static int wiimote_ff_play(struct input_dev *dev, void *data,
+                                                       struct ff_effect *eff)
 {
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+       __u8 value;
+       unsigned long flags;
+
+       /*
+        * The wiimote supports only a single rumble motor, so if either
+        * magnitude is non-zero we start the rumble motor. If both are zero,
+        * we stop the rumble motor.
+        */
+
+       if (eff->u.rumble.strong_magnitude || eff->u.rumble.weak_magnitude)
+               value = 1;
+       else
+               value = 0;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiiproto_req_rumble(wdata, value);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
        return 0;
 }
 
@@ -288,6 +695,61 @@ static void wiimote_input_close(struct input_dev *dev)
        hid_hw_close(wdata->hdev);
 }
 
+static int wiimote_accel_open(struct input_dev *dev)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+       int ret;
+       unsigned long flags;
+
+       ret = hid_hw_open(wdata->hdev);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiiproto_req_accel(wdata, true);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       return 0;
+}
+
+static void wiimote_accel_close(struct input_dev *dev)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiiproto_req_accel(wdata, false);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       hid_hw_close(wdata->hdev);
+}
+
+static int wiimote_ir_open(struct input_dev *dev)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+       int ret;
+
+       ret = hid_hw_open(wdata->hdev);
+       if (ret)
+               return ret;
+
+       ret = wiimote_init_ir(wdata, WIIPROTO_FLAG_IR_BASIC);
+       if (ret) {
+               hid_hw_close(wdata->hdev);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void wiimote_ir_close(struct input_dev *dev)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+
+       wiimote_init_ir(wdata, 0);
+       hid_hw_close(wdata->hdev);
+}
+
 static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
 {
        input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT],
@@ -315,12 +777,100 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
        input_sync(wdata->input);
 }
 
+static void handler_accel(struct wiimote_data *wdata, const __u8 *payload)
+{
+       __u16 x, y, z;
+
+       if (!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
+               return;
+
+       /*
+        * payload is: BB BB XX YY ZZ
+        * Accelerometer data is encoded into 3 10-bit values. XX, YY and ZZ
+        * contain the upper 8 bits of each value. The lower 2 bits are
+        * contained in the buttons data BB BB.
+        * Bits 6 and 7 of the first buttons byte BB are the lower 2 bits of
+        * the X accel value. Bit 5 of the second buttons byte is the 2nd bit
+        * of the Y accel value and bit 6 is the 2nd bit of the Z value.
+        * The lowest bit of the Y and Z values is not available and is always
+        * set to 0. 0x200 is reported when there is no movement.
+        */
+
+       x = payload[2] << 2;
+       y = payload[3] << 2;
+       z = payload[4] << 2;
+
+       x |= (payload[0] >> 5) & 0x3;
+       y |= (payload[1] >> 4) & 0x2;
+       z |= (payload[1] >> 5) & 0x2;
+
+       input_report_abs(wdata->accel, ABS_RX, x - 0x200);
+       input_report_abs(wdata->accel, ABS_RY, y - 0x200);
+       input_report_abs(wdata->accel, ABS_RZ, z - 0x200);
+       input_sync(wdata->accel);
+}
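
For illustration, a standalone worked example of the 10-bit decode handler_accel() performs above (the payload bytes are invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t p[5] = { 0x60, 0x20, 0x84, 0x80, 0x7f }; /* BB BB XX YY ZZ */
        uint16_t x = (p[2] << 2) | ((p[0] >> 5) & 0x3);
        uint16_t y = (p[3] << 2) | ((p[1] >> 4) & 0x2);
        uint16_t z = (p[4] << 2) | ((p[1] >> 5) & 0x2);

        /* 0x200 is the no-movement centre, so report signed offsets */
        printf("x=%d y=%d z=%d\n", x - 0x200, y - 0x200, z - 0x200);
        /* -> x=19 y=2 z=-4 */
        return 0;
    }
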
+
+#define ir_to_input0(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+                                                       ABS_HAT0X, ABS_HAT0Y)
+#define ir_to_input1(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+                                                       ABS_HAT1X, ABS_HAT1Y)
+#define ir_to_input2(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+                                                       ABS_HAT2X, ABS_HAT2Y)
+#define ir_to_input3(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+                                                       ABS_HAT3X, ABS_HAT3Y)
+
+static void __ir_to_input(struct wiimote_data *wdata, const __u8 *ir,
+                                               bool packed, __u8 xid, __u8 yid)
+{
+       __u16 x, y;
+
+       if (!(wdata->state.flags & WIIPROTO_FLAGS_IR))
+               return;
+
+       /*
+        * Basic IR data is encoded into 3 bytes. The first two bytes are the
+        * upper 8 bits of the X/Y data, the 3rd byte contains the lower 2
+        * bits of both.
+        * If the data is packed, the 3rd byte is put first and slightly
+        * reordered. This allows interleaving packed and non-packed data so
+        * that two IR sets fit into 5 bytes instead of 6.
+        * The resulting 10-bit X/Y values are passed to the ABS_HATXY input dev.
+        */
+
+       if (packed) {
+               x = ir[1] << 2;
+               y = ir[2] << 2;
+
+               x |= ir[0] & 0x3;
+               y |= (ir[0] >> 2) & 0x3;
+       } else {
+               x = ir[0] << 2;
+               y = ir[1] << 2;
+
+               x |= (ir[2] >> 4) & 0x3;
+               y |= (ir[2] >> 6) & 0x3;
+       }
+
+       input_report_abs(wdata->ir, xid, x);
+       input_report_abs(wdata->ir, yid, y);
+}
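
Likewise, a standalone worked example of the basic-IR decode in __ir_to_input(), once for plain and once for packed data (the bytes are invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t plain[3]  = { 0x7f, 0x40, 0x90 };  /* X, Y, low bits last */
        const uint8_t packed[3] = { 0x0e, 0x7f, 0x40 };  /* low bits first */
        uint16_t x, y;

        x = (plain[0] << 2) | ((plain[2] >> 4) & 0x3);
        y = (plain[1] << 2) | ((plain[2] >> 6) & 0x3);
        printf("plain:  x=%d y=%d\n", x, y);    /* -> x=509 y=258 */

        x = (packed[1] << 2) | (packed[0] & 0x3);
        y = (packed[2] << 2) | ((packed[0] >> 2) & 0x3);
        printf("packed: x=%d y=%d\n", x, y);    /* -> x=510 y=259 */
        return 0;
    }
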
+
 static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
 {
        handler_keys(wdata, payload);
 
        /* on status reports the drm is reset so we need to resend the drm */
        wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+
+       if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) {
+               wdata->state.cmd_battery = payload[5];
+               wiimote_cmd_complete(wdata);
+       }
+}
+
+static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
 }
 
 static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
@@ -330,9 +880,105 @@ static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
 
        handler_keys(wdata, payload);
 
-       if (err)
+       if (wiimote_cmd_pending(wdata, cmd, 0)) {
+               wdata->state.cmd_err = err;
+               wiimote_cmd_complete(wdata);
+       } else if (err) {
                hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
                                                                        cmd);
+       }
+}
+
+static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       handler_accel(wdata, payload);
+}
+
+static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+}
+
+static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       handler_accel(wdata, payload);
+       ir_to_input0(wdata, &payload[5], false);
+       ir_to_input1(wdata, &payload[8], false);
+       ir_to_input2(wdata, &payload[11], false);
+       ir_to_input3(wdata, &payload[14], false);
+       input_sync(wdata->ir);
+}
+
+static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+}
+
+static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       ir_to_input0(wdata, &payload[2], false);
+       ir_to_input1(wdata, &payload[4], true);
+       ir_to_input2(wdata, &payload[7], false);
+       ir_to_input3(wdata, &payload[9], true);
+       input_sync(wdata->ir);
+}
+
+static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       handler_accel(wdata, payload);
+}
+
+static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       handler_accel(wdata, payload);
+       ir_to_input0(wdata, &payload[5], false);
+       ir_to_input1(wdata, &payload[7], true);
+       ir_to_input2(wdata, &payload[10], false);
+       ir_to_input3(wdata, &payload[12], true);
+       input_sync(wdata->ir);
+}
+
+static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
+{
+}
+
+static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+
+       wdata->state.accel_split[0] = payload[2];
+       wdata->state.accel_split[1] = (payload[0] >> 1) & (0x10 | 0x20);
+       wdata->state.accel_split[1] |= (payload[1] << 1) & (0x40 | 0x80);
+
+       ir_to_input0(wdata, &payload[3], false);
+       ir_to_input1(wdata, &payload[12], false);
+       input_sync(wdata->ir);
+}
+
+static void handler_drm_SKAI2(struct wiimote_data *wdata, const __u8 *payload)
+{
+       __u8 buf[5];
+
+       handler_keys(wdata, payload);
+
+       wdata->state.accel_split[1] |= (payload[0] >> 5) & (0x01 | 0x02);
+       wdata->state.accel_split[1] |= (payload[1] >> 3) & (0x04 | 0x08);
+
+       buf[0] = 0;
+       buf[1] = 0;
+       buf[2] = wdata->state.accel_split[0];
+       buf[3] = payload[2];
+       buf[4] = wdata->state.accel_split[1];
+       handler_accel(wdata, buf);
+
+       ir_to_input2(wdata, &payload[3], false);
+       ir_to_input3(wdata, &payload[12], false);
+       input_sync(wdata->ir);
 }
 
 struct wiiproto_handler {
@@ -343,8 +989,19 @@ struct wiiproto_handler {
 
 static struct wiiproto_handler handlers[] = {
        { .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
+       { .id = WIIPROTO_REQ_DATA, .size = 21, .func = handler_data },
        { .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
        { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
+       { .id = WIIPROTO_REQ_DRM_KA, .size = 5, .func = handler_drm_KA },
+       { .id = WIIPROTO_REQ_DRM_KE, .size = 10, .func = handler_drm_KE },
+       { .id = WIIPROTO_REQ_DRM_KAI, .size = 17, .func = handler_drm_KAI },
+       { .id = WIIPROTO_REQ_DRM_KEE, .size = 21, .func = handler_drm_KEE },
+       { .id = WIIPROTO_REQ_DRM_KAE, .size = 21, .func = handler_drm_KAE },
+       { .id = WIIPROTO_REQ_DRM_KIE, .size = 21, .func = handler_drm_KIE },
+       { .id = WIIPROTO_REQ_DRM_KAIE, .size = 21, .func = handler_drm_KAIE },
+       { .id = WIIPROTO_REQ_DRM_E, .size = 21, .func = handler_drm_E },
+       { .id = WIIPROTO_REQ_DRM_SKAI1, .size = 21, .func = handler_drm_SKAI1 },
+       { .id = WIIPROTO_REQ_DRM_SKAI2, .size = 21, .func = handler_drm_SKAI2 },
        { .id = 0 }
 };
 
@@ -355,6 +1012,7 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
        struct wiiproto_handler *h;
        int i;
        unsigned long flags;
+       bool handled = false;
 
        if (size < 1)
                return -EINVAL;
@@ -363,10 +1021,16 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
 
        for (i = 0; handlers[i].id; ++i) {
                h = &handlers[i];
-               if (h->id == raw_data[0] && h->size < size)
+               if (h->id == raw_data[0] && h->size < size) {
                        h->func(wdata, &raw_data[1]);
+                       handled = true;
+               }
        }
 
+       if (!handled)
+               hid_warn(hdev, "Unhandled report %hhu size %d\n", raw_data[0],
+                                                                       size);
+
        spin_unlock_irqrestore(&wdata->state.lock, flags);
 
        return 0;
@@ -434,16 +1098,13 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
                return NULL;
 
        wdata->input = input_allocate_device();
-       if (!wdata->input) {
-               kfree(wdata);
-               return NULL;
-       }
+       if (!wdata->input)
+               goto err;
 
        wdata->hdev = hdev;
        hid_set_drvdata(hdev, wdata);
 
        input_set_drvdata(wdata->input, wdata);
-       wdata->input->event = wiimote_input_event;
        wdata->input->open = wiimote_input_open;
        wdata->input->close = wiimote_input_close;
        wdata->input->dev.parent = &wdata->hdev->dev;
@@ -457,18 +1118,89 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
        for (i = 0; i < WIIPROTO_KEY_COUNT; ++i)
                set_bit(wiiproto_keymap[i], wdata->input->keybit);
 
+       set_bit(FF_RUMBLE, wdata->input->ffbit);
+       if (input_ff_create_memless(wdata->input, NULL, wiimote_ff_play))
+               goto err_input;
+
+       wdata->accel = input_allocate_device();
+       if (!wdata->accel)
+               goto err_input;
+
+       input_set_drvdata(wdata->accel, wdata);
+       wdata->accel->open = wiimote_accel_open;
+       wdata->accel->close = wiimote_accel_close;
+       wdata->accel->dev.parent = &wdata->hdev->dev;
+       wdata->accel->id.bustype = wdata->hdev->bus;
+       wdata->accel->id.vendor = wdata->hdev->vendor;
+       wdata->accel->id.product = wdata->hdev->product;
+       wdata->accel->id.version = wdata->hdev->version;
+       wdata->accel->name = WIIMOTE_NAME " Accelerometer";
+
+       set_bit(EV_ABS, wdata->accel->evbit);
+       set_bit(ABS_RX, wdata->accel->absbit);
+       set_bit(ABS_RY, wdata->accel->absbit);
+       set_bit(ABS_RZ, wdata->accel->absbit);
+       input_set_abs_params(wdata->accel, ABS_RX, -500, 500, 2, 4);
+       input_set_abs_params(wdata->accel, ABS_RY, -500, 500, 2, 4);
+       input_set_abs_params(wdata->accel, ABS_RZ, -500, 500, 2, 4);
+
+       wdata->ir = input_allocate_device();
+       if (!wdata->ir)
+               goto err_ir;
+
+       input_set_drvdata(wdata->ir, wdata);
+       wdata->ir->open = wiimote_ir_open;
+       wdata->ir->close = wiimote_ir_close;
+       wdata->ir->dev.parent = &wdata->hdev->dev;
+       wdata->ir->id.bustype = wdata->hdev->bus;
+       wdata->ir->id.vendor = wdata->hdev->vendor;
+       wdata->ir->id.product = wdata->hdev->product;
+       wdata->ir->id.version = wdata->hdev->version;
+       wdata->ir->name = WIIMOTE_NAME " IR";
+
+       set_bit(EV_ABS, wdata->ir->evbit);
+       set_bit(ABS_HAT0X, wdata->ir->absbit);
+       set_bit(ABS_HAT0Y, wdata->ir->absbit);
+       set_bit(ABS_HAT1X, wdata->ir->absbit);
+       set_bit(ABS_HAT1Y, wdata->ir->absbit);
+       set_bit(ABS_HAT2X, wdata->ir->absbit);
+       set_bit(ABS_HAT2Y, wdata->ir->absbit);
+       set_bit(ABS_HAT3X, wdata->ir->absbit);
+       set_bit(ABS_HAT3Y, wdata->ir->absbit);
+       input_set_abs_params(wdata->ir, ABS_HAT0X, 0, 1023, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT0Y, 0, 767, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT1X, 0, 1023, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT1Y, 0, 767, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT2X, 0, 1023, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT2Y, 0, 767, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT3X, 0, 1023, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT3Y, 0, 767, 2, 4);
+
        spin_lock_init(&wdata->qlock);
        INIT_WORK(&wdata->worker, wiimote_worker);
 
        spin_lock_init(&wdata->state.lock);
+       init_completion(&wdata->state.ready);
+       mutex_init(&wdata->state.sync);
 
        return wdata;
+
+err_ir:
+       input_free_device(wdata->accel);
+err_input:
+       input_free_device(wdata->input);
+err:
+       kfree(wdata);
+       return NULL;
 }
 
 static void wiimote_destroy(struct wiimote_data *wdata)
 {
        wiimote_leds_destroy(wdata);
 
+       power_supply_unregister(&wdata->battery);
+       input_unregister_device(wdata->accel);
+       input_unregister_device(wdata->ir);
        input_unregister_device(wdata->input);
        cancel_work_sync(&wdata->worker);
        hid_hw_stop(wdata->hdev);
@@ -500,12 +1232,37 @@ static int wiimote_hid_probe(struct hid_device *hdev,
                goto err;
        }
 
-       ret = input_register_device(wdata->input);
+       ret = input_register_device(wdata->accel);
        if (ret) {
                hid_err(hdev, "Cannot register input device\n");
                goto err_stop;
        }
 
+       ret = input_register_device(wdata->ir);
+       if (ret) {
+               hid_err(hdev, "Cannot register input device\n");
+               goto err_ir;
+       }
+
+       ret = input_register_device(wdata->input);
+       if (ret) {
+               hid_err(hdev, "Cannot register input device\n");
+               goto err_input;
+       }
+
+       wdata->battery.properties = wiimote_battery_props;
+       wdata->battery.num_properties = ARRAY_SIZE(wiimote_battery_props);
+       wdata->battery.get_property = wiimote_battery_get_property;
+       wdata->battery.name = "wiimote_battery";
+       wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+       wdata->battery.use_for_apm = 0;
+
+       ret = power_supply_register(&wdata->hdev->dev, &wdata->battery);
+       if (ret) {
+               hid_err(hdev, "Cannot register battery device\n");
+               goto err_battery;
+       }
+
        ret = wiimote_leds_create(wdata);
        if (ret)
                goto err_free;
@@ -523,9 +1280,20 @@ err_free:
        wiimote_destroy(wdata);
        return ret;
 
+err_battery:
+       input_unregister_device(wdata->input);
+       wdata->input = NULL;
+err_input:
+       input_unregister_device(wdata->ir);
+       wdata->ir = NULL;
+err_ir:
+       input_unregister_device(wdata->accel);
+       wdata->accel = NULL;
 err_stop:
        hid_hw_stop(hdev);
 err:
+       input_free_device(wdata->ir);
+       input_free_device(wdata->accel);
        input_free_device(wdata->input);
        kfree(wdata);
        return ret;
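
The reworked probe error path above unregisters the sub-devices in reverse order and clears each pointer once its device has been unregistered (unregistering also releases it), so the shared err: tail can call input_free_device() on all of them unconditionally; input_free_device() is expected to tolerate a NULL pointer. For illustration, a self-contained mock-up of the idiom with plain malloc/free standing in for the input-core calls:

    #include <stdlib.h>

    struct data { void *accel, *ir, *input; };

    static int probe(struct data *d, int fail_last)
    {
        d->accel = malloc(1);           /* stands in for *_allocate_device() */
        d->ir = malloc(1);
        d->input = malloc(1);

        /* "register" accel, then ir, then input; assume the last one fails */
        if (fail_last) {
            free(d->ir);                /* unregister also releases the device */
            d->ir = NULL;               /* so the common tail must skip it */
            free(d->accel);
            d->accel = NULL;
            goto err;
        }
        return 0;

    err:
        free(d->ir);                    /* NULL: no-op */
        free(d->accel);                 /* NULL: no-op */
        free(d->input);                 /* still owned, actually freed here */
        return -1;
    }

    int main(void)
    {
        struct data d = { 0 };
        probe(&d, 1);
        return 0;
    }
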
index 4bdb5d4..3146fdc 100644 (file)
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
 
        { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
        { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
index 59d83e8..9323837 100644 (file)
 #include <linux/cpu.h>
 #include <linux/pci.h>
 #include <linux/smp.h>
+#include <linux/moduleparam.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
 
 #define DRVNAME        "coretemp"
 
+/*
+ * force_tjmax only matters when TjMax can't be read from the CPU itself.
+ * When set, it replaces the driver's suboptimal heuristic.
+ */
+static int force_tjmax;
+module_param_named(tjmax, force_tjmax, int, 0444);
+MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+
 #define BASE_SYSFS_ATTR_NO     2       /* Sysfs Base attr no for coretemp */
 #define NUM_REAL_CORES         16      /* Number of Real cores per cpu */
 #define CORETEMP_NAME_LENGTH   17      /* String Length of attrs */
 #define MAX_CORE_ATTRS         4       /* Maximum no of basic attrs */
-#define MAX_THRESH_ATTRS       3       /* Maximum no of Threshold attrs */
-#define TOTAL_ATTRS            (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
+#define TOTAL_ATTRS            (MAX_CORE_ATTRS + 1)
 #define MAX_CORE_DATA          (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
 
 #ifdef CONFIG_SMP
@@ -69,8 +77,6 @@
  *             This value is passed as "id" field to rdmsr/wrmsr functions.
  * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
  *             from where the temperature values should be read.
- * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
- *             from where the thresholds are read.
  * @attr_size:  Total number of pre-core attrs displayed in the sysfs.
  * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
  *             Otherwise, temp_data holds coretemp data.
 struct temp_data {
        int temp;
        int ttarget;
-       int tmin;
        int tjmax;
        unsigned long last_updated;
        unsigned int cpu;
        u32 cpu_core_id;
        u32 status_reg;
-       u32 intrpt_reg;
        int attr_size;
        bool is_pkg_data;
        bool valid;
@@ -143,19 +147,6 @@ static ssize_t show_crit_alarm(struct device *dev,
        return sprintf(buf, "%d\n", (eax >> 5) & 1);
 }
 
-static ssize_t show_max_alarm(struct device *dev,
-                               struct device_attribute *devattr, char *buf)
-{
-       u32 eax, edx;
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-       struct platform_data *pdata = dev_get_drvdata(dev);
-       struct temp_data *tdata = pdata->core_data[attr->index];
-
-       rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
-
-       return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
-}
-
 static ssize_t show_tjmax(struct device *dev,
                        struct device_attribute *devattr, char *buf)
 {
@@ -174,83 +165,6 @@ static ssize_t show_ttarget(struct device *dev,
        return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
 }
 
-static ssize_t store_ttarget(struct device *dev,
-                               struct device_attribute *devattr,
-                               const char *buf, size_t count)
-{
-       struct platform_data *pdata = dev_get_drvdata(dev);
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-       struct temp_data *tdata = pdata->core_data[attr->index];
-       u32 eax, edx;
-       unsigned long val;
-       int diff;
-
-       if (strict_strtoul(buf, 10, &val))
-               return -EINVAL;
-
-       /*
-        * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
-        * of milli degree celsius. Hence don't accept val > (127 * 1000)
-        */
-       if (val > tdata->tjmax || val > 127000)
-               return -EINVAL;
-
-       diff = (tdata->tjmax - val) / 1000;
-
-       mutex_lock(&tdata->update_lock);
-       rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
-       eax = (eax & ~THERM_MASK_THRESHOLD1) |
-                               (diff << THERM_SHIFT_THRESHOLD1);
-       wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
-       tdata->ttarget = val;
-       mutex_unlock(&tdata->update_lock);
-
-       return count;
-}
-
-static ssize_t show_tmin(struct device *dev,
-                       struct device_attribute *devattr, char *buf)
-{
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-       struct platform_data *pdata = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
-}
-
-static ssize_t store_tmin(struct device *dev,
-                               struct device_attribute *devattr,
-                               const char *buf, size_t count)
-{
-       struct platform_data *pdata = dev_get_drvdata(dev);
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-       struct temp_data *tdata = pdata->core_data[attr->index];
-       u32 eax, edx;
-       unsigned long val;
-       int diff;
-
-       if (strict_strtoul(buf, 10, &val))
-               return -EINVAL;
-
-       /*
-        * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
-        * of milli degree celsius. Hence don't accept val > (127 * 1000)
-        */
-       if (val > tdata->tjmax || val > 127000)
-               return -EINVAL;
-
-       diff = (tdata->tjmax - val) / 1000;
-
-       mutex_lock(&tdata->update_lock);
-       rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
-       eax = (eax & ~THERM_MASK_THRESHOLD0) |
-                               (diff << THERM_SHIFT_THRESHOLD0);
-       wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
-       tdata->tmin = val;
-       mutex_unlock(&tdata->update_lock);
-
-       return count;
-}
-
 static ssize_t show_temp(struct device *dev,
                        struct device_attribute *devattr, char *buf)
 {
@@ -374,7 +288,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
 
 static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
 {
-       /* The 100C is default for both mobile and non mobile CPUs */
        int err;
        u32 eax, edx;
        u32 val;
@@ -385,7 +298,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
         */
        err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
        if (err) {
-               dev_warn(dev, "Unable to read TjMax from CPU.\n");
+               if (c->x86_model > 0xe && c->x86_model != 0x1c)
+                       dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
        } else {
                val = (eax >> 16) & 0xff;
                /*
@@ -393,11 +307,17 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
                 * will be used
                 */
                if (val) {
-                       dev_info(dev, "TjMax is %d C.\n", val);
+                       dev_dbg(dev, "TjMax is %d degrees C\n", val);
                        return val * 1000;
                }
        }
 
+       if (force_tjmax) {
+               dev_notice(dev, "TjMax forced to %d degrees C by user\n",
+                          force_tjmax);
+               return force_tjmax * 1000;
+       }
+
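
The resulting TjMax precedence is: a non-zero value read from MSR_IA32_TEMPERATURE_TARGET wins, the user-supplied tjmax= module parameter (e.g. loading the module with 'modprobe coretemp tjmax=95') is the fallback, and the old heuristic remains the last resort. For illustration, a standalone mock-up of that ordering (values invented):

    #include <stdio.h>

    static int force_tjmax;                     /* tjmax= module parameter */

    static int pick_tjmax(int msr_readable, int msr_val)
    {
        if (msr_readable && msr_val)
            return msr_val * 1000;              /* millidegrees Celsius */
        if (force_tjmax)
            return force_tjmax * 1000;
        return 100000;                          /* heuristic default, 100 C */
    }

    int main(void)
    {
        force_tjmax = 95;
        printf("%d\n", pick_tjmax(0, 0));       /* -> 95000, parameter used */
        printf("%d\n", pick_tjmax(1, 105));     /* -> 105000, MSR wins */
        return 0;
    }
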
        /*
         * An assumption is made for early CPUs and unreadable MSR.
         * NOTE: the calculated value may not be correct.
@@ -414,21 +334,6 @@ static void __devinit get_ucode_rev_on_cpu(void *edx)
        rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
 }
 
-static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
-{
-       int err;
-       u32 eax, edx, val;
-
-       err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
-       if (!err) {
-               val = (eax >> 16) & 0xff;
-               if (val)
-                       return val * 1000;
-       }
-       dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
-       return 100000; /* Default TjMax: 100 degree celsius */
-}
-
 static int create_name_attr(struct platform_data *pdata, struct device *dev)
 {
        sysfs_attr_init(&pdata->name_attr.attr);
@@ -442,19 +347,14 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
                                int attr_no)
 {
        int err, i;
-       static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev,
+       static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
                        struct device_attribute *devattr, char *buf) = {
                        show_label, show_crit_alarm, show_temp, show_tjmax,
-                       show_max_alarm, show_ttarget, show_tmin };
-       static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
-                       struct device_attribute *devattr, const char *buf,
-                       size_t count) = { NULL, NULL, NULL, NULL, NULL,
-                                       store_ttarget, store_tmin };
-       static const char *names[TOTAL_ATTRS] = {
+                       show_ttarget };
+       static const char *const names[TOTAL_ATTRS] = {
                                        "temp%d_label", "temp%d_crit_alarm",
                                        "temp%d_input", "temp%d_crit",
-                                       "temp%d_max_alarm", "temp%d_max",
-                                       "temp%d_max_hyst" };
+                                       "temp%d_max" };
 
        for (i = 0; i < tdata->attr_size; i++) {
                snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
@@ -462,10 +362,6 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
                sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
                tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
                tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
-               if (rw_ptr[i]) {
-                       tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
-                       tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
-               }
                tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
                tdata->sd_attrs[i].index = attr_no;
                err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
@@ -481,9 +377,9 @@ exit_free:
 }
 
 
-static int __devinit chk_ucode_version(struct platform_device *pdev)
+static int __cpuinit chk_ucode_version(unsigned int cpu)
 {
-       struct cpuinfo_x86 *c = &cpu_data(pdev->id);
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
        int err;
        u32 edx;
 
@@ -494,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev)
         */
        if (c->x86_model == 0xe && c->x86_mask < 0xc) {
                /* check for microcode update */
-               err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu,
+               err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,
                                               &edx, 1);
                if (err) {
-                       dev_err(&pdev->dev,
-                               "Cannot determine microcode revision of "
-                               "CPU#%u (%d)!\n", pdev->id, err);
+                       pr_err("Cannot determine microcode revision of "
+                              "CPU#%u (%d)!\n", cpu, err);
                        return -ENODEV;
                } else if (edx < 0x39) {
-                       dev_err(&pdev->dev,
-                               "Errata AE18 not fixed, update BIOS or "
-                               "microcode of the CPU!\n");
+                       pr_err("Errata AE18 not fixed, update BIOS or "
+                              "microcode of the CPU!\n");
                        return -ENODEV;
                }
        }
@@ -538,8 +432,6 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
 
        tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
                                                        MSR_IA32_THERM_STATUS;
-       tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
-                                               MSR_IA32_THERM_INTERRUPT;
        tdata->is_pkg_data = pkg_flag;
        tdata->cpu = cpu;
        tdata->cpu_core_id = TO_CORE_ID(cpu);
@@ -548,11 +440,11 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
        return tdata;
 }
 
-static int create_core_data(struct platform_data *pdata,
-                               struct platform_device *pdev,
+static int create_core_data(struct platform_device *pdev,
                                unsigned int cpu, int pkg_flag)
 {
        struct temp_data *tdata;
+       struct platform_data *pdata = platform_get_drvdata(pdev);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 eax, edx;
        int err, attr_no;
@@ -588,20 +480,21 @@ static int create_core_data(struct platform_data *pdata,
                goto exit_free;
 
        /* We can access status register. Get Critical Temperature */
-       if (pkg_flag)
-               tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev);
-       else
-               tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
+       tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
 
        /*
-        * Test if we can access the intrpt register. If so, increase the
-        * 'size' enough to have ttarget/tmin/max_alarm interfaces.
-        * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT
+        * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET.
+        * The target temperature is available on older CPUs but not in this
+        * register. Atoms don't have the register at all.
         */
-       err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
-       if (!err) {
-               tdata->attr_size += MAX_THRESH_ATTRS;
-               tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+       if (c->x86_model > 0xe && c->x86_model != 0x1c) {
+               err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET,
+                                       &eax, &edx);
+               if (!err) {
+                       tdata->ttarget
+                         = tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
+                       tdata->attr_size++;
+               }
        }
 
        pdata->core_data[attr_no] = tdata;
@@ -613,22 +506,20 @@ static int create_core_data(struct platform_data *pdata,
 
        return 0;
 exit_free:
+       pdata->core_data[attr_no] = NULL;
        kfree(tdata);
        return err;
 }
 
 static void coretemp_add_core(unsigned int cpu, int pkg_flag)
 {
-       struct platform_data *pdata;
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        int err;
 
        if (!pdev)
                return;
 
-       pdata = platform_get_drvdata(pdev);
-
-       err = create_core_data(pdata, pdev, cpu, pkg_flag);
+       err = create_core_data(pdev, cpu, pkg_flag);
        if (err)
                dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
 }
@@ -652,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
        struct platform_data *pdata;
        int err;
 
-       /* Check the microcode version of the CPU */
-       err = chk_ucode_version(pdev);
-       if (err)
-               return err;
-
        /* Initialize the per-package data structures */
        pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
        if (!pdata)
@@ -666,7 +552,7 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
        if (err)
                goto exit_free;
 
-       pdata->phys_proc_id = TO_PHYS_ID(pdev->id);
+       pdata->phys_proc_id = pdev->id;
        platform_set_drvdata(pdev, pdata);
 
        pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
@@ -718,7 +604,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
 
        mutex_lock(&pdev_list_mutex);
 
-       pdev = platform_device_alloc(DRVNAME, cpu);
+       pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
        if (!pdev) {
                err = -ENOMEM;
                pr_err("Device allocation failed\n");
@@ -738,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
        }
 
        pdev_entry->pdev = pdev;
-       pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
+       pdev_entry->phys_proc_id = pdev->id;
 
        list_add_tail(&pdev_entry->list, &pdev_list);
        mutex_unlock(&pdev_list_mutex);
@@ -799,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu)
                return;
 
        if (!pdev) {
+               /* Check the microcode version of the CPU */
+               if (chk_ucode_version(cpu))
+                       return;
+
                /*
                 * Alright, we have DTS support.
                 * We are bringing the _first_ core in this pkg
index 257957c..4f7c3fc 100644 (file)
@@ -72,7 +72,7 @@ struct ds620_data {
        char valid;             /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
 
-       u16 temp[3];            /* Register values, word */
+       s16 temp[3];            /* Register values, word */
 };
 
 /*
index d94a24f..dd2d7b9 100644 (file)
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
 
 static inline int ADC_TO_CURR(int adc, int gain)
 {
-       return adc * 1400000 / gain * 255;
+       return adc * 1400000 / (gain * 255);
 }
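A quick worked example of the precedence fix above (values made up for illustration): with adc = 100 and gain = 2, the old expression groups as ((adc * 1400000 / gain) * 255) = 17,850,000,000, which is also large enough to overflow a 32-bit int, while the intended adc * 1400000 / (gain * 255) yields 274,509.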
 
 /*
index a561c3a..397fc59 100644 (file)
@@ -978,6 +978,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,
 struct pmbus_limit_attr {
        u16 reg;                /* Limit register */
        bool update;            /* True if register needs updates */
+       bool low;               /* True if low limit; for limits with compare
+                                  functions only */
        const char *attr;       /* Attribute name */
        const char *alarm;      /* Alarm attribute name */
        u32 sbit;               /* Alarm attribute status bit */
@@ -1029,7 +1031,8 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
                                if (attr->compare) {
                                        pmbus_add_boolean_cmp(data, name,
                                                l->alarm, index,
-                                               cbase, cindex,
+                                               l->low ? cindex : cbase,
+                                               l->low ? cbase : cindex,
                                                attr->sbase + page, l->sbit);
                                } else {
                                        pmbus_add_boolean_reg(data, name,
@@ -1366,11 +1369,13 @@ static const struct pmbus_sensor_attr power_attributes[] = {
 static const struct pmbus_limit_attr temp_limit_attrs[] = {
        {
                .reg = PMBUS_UT_WARN_LIMIT,
+               .low = true,
                .attr = "min",
                .alarm = "min_alarm",
                .sbit = PB_TEMP_UT_WARNING,
        }, {
                .reg = PMBUS_UT_FAULT_LIMIT,
+               .low = true,
                .attr = "lcrit",
                .alarm = "lcrit_alarm",
                .sbit = PB_TEMP_UT_FAULT,
@@ -1399,11 +1404,13 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
 static const struct pmbus_limit_attr temp_limit_attrs23[] = {
        {
                .reg = PMBUS_UT_WARN_LIMIT,
+               .low = true,
                .attr = "min",
                .alarm = "min_alarm",
                .sbit = PB_TEMP_UT_WARNING,
        }, {
                .reg = PMBUS_UT_FAULT_LIMIT,
+               .low = true,
                .attr = "lcrit",
                .alarm = "lcrit_alarm",
                .sbit = PB_TEMP_UT_FAULT,
index ace1c73..d0ddb60 100644 (file)
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
        block_buffer[ret] = '\0';
        dev_info(&client->dev, "Device ID %s\n", block_buffer);
 
-       mid = NULL;
-       for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
-               mid = &ucd9000_id[i];
+       for (mid = ucd9000_id; mid->name[0]; mid++) {
                if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
                        break;
        }
-       if (!mid || !strlen(mid->name)) {
+       if (!mid->name[0]) {
                dev_err(&client->dev, "Unsupported device\n");
                return -ENODEV;
        }
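For reference, a minimal standalone sketch of the sentinel-terminated table walk the loop above switches to; the chip names and the 16-byte field width here are illustrative, not taken from the driver:

#include <string.h>
#include <strings.h>    /* strncasecmp */

struct id_entry {
        char name[16];
};

/* The table ends with an entry whose name is empty. */
static const struct id_entry ids[] = {
        { "chipA" }, { "chipB" }, { "" }
};

static const struct id_entry *match_id(const char *probed)
{
        const struct id_entry *mid;

        for (mid = ids; mid->name[0]; mid++)
                if (!strncasecmp(mid->name, probed, strlen(mid->name)))
                        break;

        return mid->name[0] ? mid : NULL;       /* NULL means unsupported */
}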
index ffcc1cf..c65e9da 100644 (file)
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
        block_buffer[ret] = '\0';
        dev_info(&client->dev, "Device ID %s\n", block_buffer);
 
-       mid = NULL;
-       for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
-               mid = &ucd9200_id[i];
+       for (mid = ucd9200_id; mid->name[0]; mid++) {
                if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
                        break;
        }
-       if (!mid || !strlen(mid->name)) {
+       if (!mid->name[0]) {
                dev_err(&client->dev, "Unsupported device\n");
                return -ENODEV;
        }
index 17cf1ab..8c2844e 100644 (file)
@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client,
                          struct i2c_board_info *info);
 static int w83791d_remove(struct i2c_client *client);
 
-static int w83791d_read(struct i2c_client *client, u8 register);
-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
+static int w83791d_read(struct i2c_client *client, u8 reg);
+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
 static struct w83791d_data *w83791d_update_device(struct device *dev);
 
 #ifdef DEBUG
index 6659d26..b73da6c 100644 (file)
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
                return -EINVAL;
        }
        sds = kzalloc(sizeof(*sds), GFP_KERNEL);
-       if (!sds)
+       if (!sds) {
+               ret = -ENOMEM;
                goto err_mem;
+       }
 
        for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
                sds->pdev[i] = add_i2c_device(dev, i);
                if (IS_ERR(sds->pdev[i])) {
+                       ret = PTR_ERR(sds->pdev[i]);
                        while (--i >= 0)
                                platform_device_unregister(sds->pdev[i]);
                        goto err_dev_add;
index 2440b74..3c94c4a 100644 (file)
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
 
        /* Rounds down to not include partial word at the end of buf */
        words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
-       if (words_to_transfer > tx_fifo_avail)
-               words_to_transfer = tx_fifo_avail;
 
-       i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
-
-       buf += words_to_transfer * BYTES_PER_FIFO_WORD;
-       buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
-       tx_fifo_avail -= words_to_transfer;
+       /* It's very common to have < 4 bytes, so optimize that case. */
+       if (words_to_transfer) {
+               if (words_to_transfer > tx_fifo_avail)
+                       words_to_transfer = tx_fifo_avail;
+
+               /*
+                * Update state before writing to FIFO.  If this causes us
+                * to finish writing all bytes (AKA buf_remaining goes to 0) we
+                * have a potential for an interrupt (PACKET_XFER_COMPLETE is
+                * not maskable).  We need to make sure that the isr sees
+                * buf_remaining as 0 and doesn't call us back re-entrantly.
+                */
+               buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+               tx_fifo_avail -= words_to_transfer;
+               i2c_dev->msg_buf_remaining = buf_remaining;
+               i2c_dev->msg_buf = buf +
+                       words_to_transfer * BYTES_PER_FIFO_WORD;
+               barrier();
+
+               i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+               buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+       }
 
        /*
         * If there is a partial word at the end of buf, handle it manually to
@@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
        if (tx_fifo_avail > 0 && buf_remaining > 0) {
                BUG_ON(buf_remaining > 3);
                memcpy(&val, buf, buf_remaining);
+
+               /* Again, update before writing to the FIFO so the isr sees it. */
+               i2c_dev->msg_buf_remaining = 0;
+               i2c_dev->msg_buf = NULL;
+               barrier();
+
                i2c_writel(i2c_dev, val, I2C_TX_FIFO);
-               buf_remaining = 0;
-               tx_fifo_avail--;
        }
 
-       BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
-       i2c_dev->msg_buf_remaining = buf_remaining;
-       i2c_dev->msg_buf = buf;
        return 0;
 }
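As a rough illustration of the ordering the hunks above rely on, here is a self-contained userspace sketch (not the driver itself; the FIFO write is stubbed out): the bookkeeping that the interrupt handler reads is published before the write that can raise the completion interrupt, with a compiler barrier in between.

#include <stddef.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct xfer {
        volatile size_t remaining;          /* read by the completion handler */
        const unsigned char *volatile buf;
};

/* Stand-in for the hardware FIFO write that may trigger the interrupt. */
static void fifo_write(const unsigned char *p, size_t n)
{
        (void)p;
        (void)n;
}

static void push_words(struct xfer *x, const unsigned char *buf, size_t n)
{
        /* Publish the new state first: if this write drains the buffer,
         * the handler must already see remaining == 0. */
        x->remaining -= n;
        x->buf = buf + n;
        barrier();

        fifo_write(buf, n);
}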
 
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
                        tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
        }
 
-       if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
-                       !i2c_dev->msg_buf_remaining)
+       if (status & I2C_INT_PACKET_XFER_COMPLETE) {
+               BUG_ON(i2c_dev->msg_buf_remaining);
                complete(&i2c_dev->msg_complete);
+       }
 
        i2c_writel(i2c_dev, status, I2C_INT_STATUS);
        if (i2c_dev->is_dvc)
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 
 static u32 tegra_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm tegra_i2c_algo = {
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
 }
 #endif
 
+#if defined(CONFIG_OF)
+/* Match table for of_platform binding */
+static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
+       { .compatible = "nvidia,tegra20-i2c", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
+#else
+#define tegra_i2c_of_match NULL
+#endif
+
 static struct platform_driver tegra_i2c_driver = {
        .probe   = tegra_i2c_probe,
        .remove  = tegra_i2c_remove,
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
        .driver  = {
                .name  = "tegra-i2c",
                .owner = THIS_MODULE,
+               .of_match_table = tegra_i2c_of_match,
        },
 };
 
index 2747980..16f69be 100644 (file)
@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
        if (!(rq->cmd_flags & REQ_FLUSH))
                return BLKPREP_OK;
 
-       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (rq->special) {
+               cmd = rq->special;
+               memset(cmd, 0, sizeof(*cmd));
+       } else {
+               cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+       }
 
        /* FIXME: map struct ide_taskfile on rq->cmd[] */
        BUG_ON(cmd == NULL);
index 17bf9d9..6cd642a 100644 (file)
@@ -287,7 +287,7 @@ void __free_ep(struct kref *kref)
        if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
                cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
                dst_release(ep->dst);
-               l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+               l2t_release(ep->com.tdev, ep->l2t);
        }
        kfree(ep);
 }
@@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                release_tid(ep->com.tdev, GET_TID(rpl), NULL);
        cxgb3_free_atid(ep->com.tdev, ep->atid);
        dst_release(ep->dst);
-       l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+       l2t_release(ep->com.tdev, ep->l2t);
        put_ep(&ep->com);
        return CPL_RET_BUF_DONE;
 }
@@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        if (!child_ep) {
                printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
                       __func__);
-               l2t_release(L2DATA(tdev), l2t);
+               l2t_release(tdev, l2t);
                dst_release(dst);
                goto reject;
        }
@@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        if (!err)
                goto out;
 
-       l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
+       l2t_release(h->rdev.t3cdev_p, ep->l2t);
 fail4:
        dst_release(ep->dst);
 fail3:
@@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
        PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
             l2t);
        dst_hold(new);
-       l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+       l2t_release(ep->com.tdev, ep->l2t);
        ep->l2t = l2t;
        dst_release(old);
        ep->dst = new;
index 7b404e5..e34eeb8 100644 (file)
@@ -668,4 +668,3 @@ module_exit(adp5588_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("ADP5588/87 Keypad driver");
-MODULE_ALIAS("platform:adp5588-keys");
index b09c7d1..ab86051 100644 (file)
@@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on)
                                le16_to_cpu(dev->ctl_req->wIndex),
                                dev->ctl_data,
                                USB_PKT_LEN, USB_CTRL_SET_TIMEOUT);
-       if (error && error != EINTR)
+       if (error < 0 && error != -EINTR)
                err("%s: usb_control_msg() failed %d", __func__, error);
 }
 
index da28018..5ec617e 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI   0x0245
 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO    0x0246
 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS    0x0247
+/* MacbookAir4,1 (unibody, July 2011) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI  0x0249
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO   0x024a
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS   0x024b
 /* MacbookAir4,2 (unibody, July 2011) */
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI   0x024c
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO    0x024d
@@ -112,6 +116,10 @@ static const struct usb_device_id bcm5974_table[] = {
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
+       /* MacbookAir4,1 */
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
        /* MacbookAir4,2 */
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
@@ -334,6 +342,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
                { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
        },
+       {
+               USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI,
+               USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO,
+               USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
+               HAS_INTEGRATED_BUTTON,
+               0x84, sizeof(struct bt_data),
+               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+               { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+               { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+               { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+       },
        {}
 };
 
index d27c9d9..958b4eb 100644 (file)
@@ -229,13 +229,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
                                                        get_unaligned_le16(&report[i + 3]);
                                                i += 4;
                                        }
-                               } else if (usage == WCM_DIGITIZER) {
-                                       /* max pressure isn't reported
-                                       features->pressure_max = (unsigned short)
-                                                       (report[i+4] << 8  | report[i + 3]);
-                                       */
-                                       features->pressure_max = 255;
-                                       i += 4;
                                }
                                break;
 
@@ -291,13 +284,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
                                pen = 1;
                                i++;
                                break;
-
-                       case HID_USAGE_UNDEFINED:
-                               if (usage == WCM_DESKTOP && finger) /* capacity */
-                                       features->pressure_max =
-                                               get_unaligned_le16(&report[i + 3]);
-                               i += 4;
-                               break;
                        }
                        break;
 
index c1c2f7b..9dea718 100644 (file)
@@ -800,25 +800,26 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
        int i;
 
        for (i = 0; i < 2; i++) {
-               int p = data[9 * i + 2];
-               bool touch = p && !wacom->shared->stylus_in_proximity;
+               int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
+               bool touch = data[offset + 3] & 0x80;
 
-               input_mt_slot(input, i);
-               input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
                /*
                 * Touch events need to be disabled while stylus is
                 * in proximity because user's hand is resting on touchpad
                 * and sending unwanted events.  User expects tablet buttons
                 * to continue working though.
                 */
+               touch = touch && !wacom->shared->stylus_in_proximity;
+
+               input_mt_slot(input, i);
+               input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
                if (touch) {
-                       int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff;
-                       int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff;
+                       int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff;
+                       int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff;
                        if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
                                x <<= 5;
                                y <<= 5;
                        }
-                       input_report_abs(input, ABS_MT_PRESSURE, p);
                        input_report_abs(input, ABS_MT_POSITION_X, x);
                        input_report_abs(input, ABS_MT_POSITION_Y, y);
                }
@@ -1056,10 +1057,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                             features->x_fuzz, 0);
        input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
                             features->y_fuzz, 0);
-       input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
-                            features->pressure_fuzz, 0);
 
        if (features->device_type == BTN_TOOL_PEN) {
+               input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
+                            features->pressure_fuzz, 0);
+
                /* penabled devices have fixed resolution for each model */
                input_abs_set_res(input_dev, ABS_X, features->x_resolution);
                input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
@@ -1098,6 +1100,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
                __set_bit(BTN_STYLUS, input_dev->keybit);
                __set_bit(BTN_STYLUS2, input_dev->keybit);
+
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                break;
 
        case WACOM_21UX2:
@@ -1120,12 +1124,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                for (i = 0; i < 8; i++)
                        __set_bit(BTN_0 + i, input_dev->keybit);
 
-               if (wacom_wac->features.type != WACOM_21UX2) {
-                       input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
-                       input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
-               }
-
+               input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+               input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
                input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
                wacom_setup_cintiq(wacom_wac);
                break;
 
@@ -1150,6 +1154,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                /* fall through */
 
        case INTUOS:
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
                wacom_setup_intuos(wacom_wac);
                break;
 
@@ -1165,6 +1171,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
 
                input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
                wacom_setup_intuos(wacom_wac);
+
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                break;
 
        case TABLETPC2FG:
@@ -1183,26 +1191,40 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
        case TABLETPC:
                __clear_bit(ABS_MISC, input_dev->absbit);
 
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
                if (features->device_type != BTN_TOOL_PEN)
                        break;  /* no need to process stylus stuff */
 
                /* fall through */
 
        case PL:
-       case PTU:
        case DTU:
                __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+               __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
                __set_bit(BTN_STYLUS, input_dev->keybit);
                __set_bit(BTN_STYLUS2, input_dev->keybit);
+
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+               break;
+
+       case PTU:
+               __set_bit(BTN_STYLUS2, input_dev->keybit);
                /* fall through */
 
        case PENPARTNER:
+               __set_bit(BTN_TOOL_PEN, input_dev->keybit);
                __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+               __set_bit(BTN_STYLUS, input_dev->keybit);
+
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                break;
 
        case BAMBOO_PT:
                __clear_bit(ABS_MISC, input_dev->absbit);
 
+               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
                if (features->device_type == BTN_TOOL_DOUBLETAP) {
                        __set_bit(BTN_LEFT, input_dev->keybit);
                        __set_bit(BTN_FORWARD, input_dev->keybit);
index c14412e..9941d39 100644 (file)
@@ -383,6 +383,8 @@ static int w8001_setup(struct w8001 *w8001)
        dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
        strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));
 
+       __set_bit(INPUT_PROP_DIRECT, dev->propbit);
+
        /* penabled? */
        error = w8001_command(w8001, W8001_CMD_QUERY, true);
        if (!error) {
index a14f8dc..0e4227f 100644 (file)
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
  * Writes the command to the IOMMUs command buffer and informs the
  * hardware about the new command.
  */
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+                                   struct iommu_cmd *cmd,
+                                   bool sync)
 {
        u32 left, tail, head, next_tail;
        unsigned long flags;
@@ -639,13 +641,18 @@ again:
        copy_cmd_to_buffer(iommu, cmd, tail);
 
        /* We need to sync now to make sure all commands are processed */
-       iommu->need_sync = true;
+       iommu->need_sync = sync;
 
        spin_unlock_irqrestore(&iommu->lock, flags);
 
        return 0;
 }
 
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+{
+       return iommu_queue_command_sync(iommu, cmd, true);
+}
+
 /*
  * This function queues a completion wait command into the command
  * buffer of an IOMMU
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
        build_completion_wait(&cmd, (u64)&sem);
 
-       ret = iommu_queue_command(iommu, &cmd);
+       ret = iommu_queue_command_sync(iommu, &cmd, false);
        if (ret)
                return ret;
 
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
 static void domain_flush_devices(struct protection_domain *domain)
 {
        struct iommu_dev_data *dev_data;
-       unsigned long flags;
-
-       spin_lock_irqsave(&domain->lock, flags);
 
        list_for_each_entry(dev_data, &domain->dev_list, list)
                device_flush_dte(dev_data);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 /****************************************************************************
index 3dc9bef..6dcc7e2 100644 (file)
@@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
                return ret;
        }
 
-       ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+       ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                printk(KERN_ERR "IOMMU: can't request irq\n");
        return ret;
index d87c9d0..328c64c 100644 (file)
@@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev,
 
        if (count == size) {
                led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
+               led_cdev->blink_delay_on = state;
                ret = count;
        }
 
@@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev,
 
        if (count == size) {
                led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
+               led_cdev->blink_delay_off = state;
                ret = count;
        }
 
index 49da55c..8c2a000 100644 (file)
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        ti->num_flush_requests = 1;
+       ti->discard_zeroes_data_unsupported = 1;
+
        return 0;
 
 bad:
index 89f73ca..f84c080 100644 (file)
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
                 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
                 */
                if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
-                       if (!argc)
+                       if (!argc) {
                                ti->error = "Feature corrupt_bio_byte requires parameters";
+                               return -EINVAL;
+                       }
 
                        r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
                        if (r)
index a002dd8..86df8b2 100644 (file)
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                                rs->ti->error = "write_mostly option is only valid for RAID1";
                                return -EINVAL;
                        }
-                       if (value > rs->md.raid_disks) {
+                       if (value >= rs->md.raid_disks) {
                                rs->ti->error = "Invalid write_mostly drive index given";
                                return -EINVAL;
                        }
index 986b875..bc04518 100644 (file)
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t)
                return;
 
        template_disk = dm_table_get_integrity_disk(t, true);
-       if (!template_disk &&
-           blk_integrity_is_initialized(dm_disk(t->md))) {
+       if (template_disk)
+               blk_integrity_register(dm_disk(t->md),
+                                      blk_get_integrity(template_disk));
+       else if (blk_integrity_is_initialized(dm_disk(t->md)))
                DMWARN("%s: device no longer has a valid integrity profile",
                       dm_device_name(t->md));
-               return;
-       }
-       blk_integrity_register(dm_disk(t->md),
-                              blk_get_integrity(template_disk));
+       else
+               DMWARN("%s: unable to establish an integrity profile",
+                      dm_device_name(t->md));
 }
 
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
        return 0;
 }
 
+static bool dm_table_discard_zeroes_data(struct dm_table *t)
+{
+       struct dm_target *ti;
+       unsigned i = 0;
+
+       /* Ensure that all targets supports discard_zeroes_data. */
+       while (i < dm_table_get_num_targets(t)) {
+               ti = dm_table_get_target(t, i++);
+
+               if (ti->discard_zeroes_data_unsupported)
+                       return 0;
+       }
+
+       return 1;
+}
+
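The rule the new helper encodes is simply an AND across all targets; a stripped-down sketch with simplified types (struct and function names here are illustrative):

#include <stdbool.h>
#include <stddef.h>

struct target {
        bool discard_zeroes_data_unsupported;
};

static bool table_discard_zeroes_data(const struct target *tgt, size_t count)
{
        size_t i;

        for (i = 0; i < count; i++)
                if (tgt[i].discard_zeroes_data_unsupported)
                        return false;   /* one opt-out disables it table-wide */

        return true;
}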
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                               struct queue_limits *limits)
 {
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
        }
        blk_queue_flush(q, flush);
 
+       if (!dm_table_discard_zeroes_data(t))
+               q->limits.discard_zeroes_data = 0;
+
        dm_table_set_integrity(t);
 
        /*
index 3742ce8..5c95ccb 100644 (file)
 static void autostart_arrays(int part);
 #endif
 
+/* pers_list is a list of registered personalities protected
+ * by pers_lock.
+ * pers_lock does extra service to protect accesses to
+ * mddev->thread when the mutex cannot be held.
+ */
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
 
@@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev)
        } else
                mutex_unlock(&mddev->reconfig_mutex);
 
+       /* As we've dropped the mutex we need a spinlock to
+        * make sure the thread doesn't disappear
+        */
+       spin_lock(&pers_lock);
        md_wakeup_thread(mddev->thread);
+       spin_unlock(&pers_lock);
 }
 
 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
@@ -1138,8 +1148,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
                        ret = 0;
        }
        rdev->sectors = rdev->sb_start;
+       /* Limit to 4TB as metadata cannot record more than that */
+       if (rdev->sectors >= (2ULL << 32))
+               rdev->sectors = (2ULL << 32) - 2;
 
-       if (rdev->sectors < sb->size * 2 && sb->level > 1)
+       if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
                /* "this cannot possibly happen" ... */
                ret = -EINVAL;
 
@@ -1173,7 +1186,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->clevel[0] = 0;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
-               mddev->dev_sectors = sb->size * 2;
+               mddev->dev_sectors = ((sector_t)sb->size) * 2;
                mddev->events = ev1;
                mddev->bitmap_info.offset = 0;
                mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1428,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
        rdev->sb_start = calc_dev_sboffset(rdev);
        if (!num_sectors || num_sectors > rdev->sb_start)
                num_sectors = rdev->sb_start;
+       /* Limit to 4TB as metadata cannot record more than that.
+        * 4TB == 2^32 KB, or 2*2^32 sectors.
+        */
+       if (num_sectors >= (2ULL << 32))
+               num_sectors = (2ULL << 32) - 2;
        md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                       rdev->sb_page);
        md_super_wait(rdev->mddev);
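Working out the limit in the comment above: the 0.90 superblock records the device size as a 32-bit count of kilobytes, so the largest size it can describe is 2^32 KB = 2^42 bytes = 4 TB, which is 2 * 2^32 sectors of 512 bytes; capping at (2ULL << 32) - 2 sectors stops one kilobyte short of that so the value still fits in the field.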
@@ -6421,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
        return thread;
 }
 
-void md_unregister_thread(mdk_thread_t *thread)
+void md_unregister_thread(mdk_thread_t **threadp)
 {
+       mdk_thread_t *thread = *threadp;
        if (!thread)
                return;
        dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
+       /* Locking ensures that mddev_unlock does not wake_up a
+        * non-existent thread
+        */
+       spin_lock(&pers_lock);
+       *threadp = NULL;
+       spin_unlock(&pers_lock);
 
        kthread_stop(thread->tsk);
        kfree(thread);
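A self-contained userspace sketch of the idiom being introduced here, with a pthread mutex standing in for pers_lock (names are illustrative): the waker and the teardown path serialize on the same lock, and the published pointer is cleared before the memory is freed, so a waker can never touch a freed thread.

#include <pthread.h>
#include <stdlib.h>

struct worker {
        int unused;
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

static void poke(struct worker *w)
{
        (void)w;
}

/* Waker side: only dereference the published pointer under the lock. */
static void wake_worker(struct worker **slot)
{
        pthread_mutex_lock(&registry_lock);
        if (*slot)
                poke(*slot);
        pthread_mutex_unlock(&registry_lock);
}

/* Teardown side: unpublish under the same lock, then free. */
static void unregister_worker(struct worker **slot)
{
        struct worker *w;

        pthread_mutex_lock(&registry_lock);
        w = *slot;
        *slot = NULL;
        pthread_mutex_unlock(&registry_lock);

        free(w);
}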
@@ -7332,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev)
        mdk_rdev_t *rdev;
 
        /* resync has finished, collect result */
-       md_unregister_thread(mddev->sync_thread);
-       mddev->sync_thread = NULL;
+       md_unregister_thread(&mddev->sync_thread);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                /* success...*/
index 1e586bb..0a309dc 100644 (file)
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p);
 extern int unregister_md_personality(struct mdk_personality *p);
 extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
                                mddev_t *mddev, const char *name);
-extern void md_unregister_thread(mdk_thread_t *thread);
+extern void md_unregister_thread(mdk_thread_t **threadp);
 extern void md_wakeup_thread(mdk_thread_t *thread);
 extern void md_check_recovery(mddev_t *mddev);
 extern void md_write_start(mddev_t *mddev, struct bio *bi);
index 3535c23..d5b5fb3 100644 (file)
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)
 {
        multipath_conf_t *conf = mddev->private;
 
-       md_unregister_thread(mddev->thread);
-       mddev->thread = NULL;
+       md_unregister_thread(&mddev->thread);
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
        mempool_destroy(conf->pool);
        kfree(conf->multipaths);
index 32323f0..d9587df 100644 (file)
@@ -1099,12 +1099,11 @@ read_again:
                bio_list_add(&conf->pending_bio_list, mbio);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
-       r1_bio_write_done(r1_bio);
-
-       /* In case raid1d snuck in to freeze_array */
-       wake_up(&conf->wait_barrier);
-
+       /* Mustn't call r1_bio_write_done before this next test,
+        * as it could result in the bio being freed.
+        */
        if (sectors_handled < (bio->bi_size >> 9)) {
+               r1_bio_write_done(r1_bio);
                /* We need another r1_bio.  It has already been counted
                 * in bio->bi_phys_segments
                 */
@@ -1117,6 +1116,11 @@ read_again:
                goto retry_write;
        }
 
+       r1_bio_write_done(r1_bio);
+
+       /* In case raid1d snuck in to freeze_array */
+       wake_up(&conf->wait_barrier);
+
        if (do_sync || !bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
 
@@ -2558,8 +2562,7 @@ static int stop(mddev_t *mddev)
        raise_barrier(conf);
        lower_barrier(conf);
 
-       md_unregister_thread(mddev->thread);
-       mddev->thread = NULL;
+       md_unregister_thread(&mddev->thread);
        if (conf->r1bio_pool)
                mempool_destroy(conf->r1bio_pool);
        kfree(conf->mirrors);
index 8b29cd4..0cd9672 100644 (file)
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
        md_write_end(r10_bio->mddev);
 }
 
+static void one_write_done(r10bio_t *r10_bio)
+{
+       if (atomic_dec_and_test(&r10_bio->remaining)) {
+               if (test_bit(R10BIO_WriteError, &r10_bio->state))
+                       reschedule_retry(r10_bio);
+               else {
+                       close_write(r10_bio);
+                       if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+                               reschedule_retry(r10_bio);
+                       else
+                               raid_end_bio_io(r10_bio);
+               }
+       }
+}
+
 static void raid10_end_write_request(struct bio *bio, int error)
 {
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
         * Let's see if all mirrored write operations have finished
         * already.
         */
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               if (test_bit(R10BIO_WriteError, &r10_bio->state))
-                       reschedule_retry(r10_bio);
-               else {
-                       close_write(r10_bio);
-                       if (test_bit(R10BIO_MadeGood, &r10_bio->state))
-                               reschedule_retry(r10_bio);
-                       else
-                               raid_end_bio_io(r10_bio);
-               }
-       }
+       one_write_done(r10_bio);
        if (dec_rdev)
                rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
@@ -1127,20 +1132,12 @@ retry_write:
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
 
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               /* This matches the end of raid10_end_write_request() */
-               bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
-                               r10_bio->sectors,
-                               !test_bit(R10BIO_Degraded, &r10_bio->state),
-                               0);
-               md_write_end(mddev);
-               raid_end_bio_io(r10_bio);
-       }
-
-       /* In case raid10d snuck in to freeze_array */
-       wake_up(&conf->wait_barrier);
+       /* Don't remove the bias on 'remaining' (one_write_done) until
+        * after checking if we need to go around again.
+        */
 
        if (sectors_handled < (bio->bi_size >> 9)) {
+               one_write_done(r10_bio);
                /* We need another r10_bio.  It has already been counted
                 * in bio->bi_phys_segments.
                 */
@@ -1154,6 +1151,10 @@ retry_write:
                r10_bio->state = 0;
                goto retry_write;
        }
+       one_write_done(r10_bio);
+
+       /* In case raid10d snuck in to freeze_array */
+       wake_up(&conf->wait_barrier);
 
        if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
@@ -2954,7 +2955,7 @@ static int run(mddev_t *mddev)
        return 0;
 
 out_free_conf:
-       md_unregister_thread(mddev->thread);
+       md_unregister_thread(&mddev->thread);
        if (conf->r10bio_pool)
                mempool_destroy(conf->r10bio_pool);
        safe_put_page(conf->tmppage);
@@ -2972,8 +2973,7 @@ static int stop(mddev_t *mddev)
        raise_barrier(conf, 0);
        lower_barrier(conf);
 
-       md_unregister_thread(mddev->thread);
-       mddev->thread = NULL;
+       md_unregister_thread(&mddev->thread);
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
        if (conf->r10bio_pool)
                mempool_destroy(conf->r10bio_pool);
index 43709fa..ac5e8b5 100644 (file)
@@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev)
 
        return 0;
 abort:
-       md_unregister_thread(mddev->thread);
-       mddev->thread = NULL;
+       md_unregister_thread(&mddev->thread);
        if (conf) {
                print_raid5_conf(conf);
                free_conf(conf);
@@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev)
 {
        raid5_conf_t *conf = mddev->private;
 
-       md_unregister_thread(mddev->thread);
-       mddev->thread = NULL;
+       md_unregister_thread(&mddev->thread);
        if (mddev->queue)
                mddev->queue->backing_dev_info.congested_fn = NULL;
        free_conf(conf);
index 3db89e3..536c16c 100644 (file)
@@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties;
 static int vp7045_usb_probe(struct usb_interface *intf,
                const struct usb_device_id *id)
 {
-       struct dvb_usb_device *d;
-       int ret = dvb_usb_device_init(intf, &vp7045_properties,
-                                  THIS_MODULE, &d, adapter_nr);
-       if (ret)
-               return ret;
-
-       d->priv = kmalloc(20, GFP_KERNEL);
-       if (!d->priv) {
-               dvb_usb_device_exit(intf);
-               return -ENOMEM;
-       }
-
-       return ret;
-}
-
-static void vp7045_usb_disconnect(struct usb_interface *intf)
-{
-       struct dvb_usb_device *d = usb_get_intfdata(intf);
-       kfree(d->priv);
-       dvb_usb_device_exit(intf);
+       return dvb_usb_device_init(intf, &vp7045_properties,
+                                  THIS_MODULE, NULL, adapter_nr);
 }
 
 static struct usb_device_id vp7045_usb_table [] = {
@@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table);
 static struct dvb_usb_device_properties vp7045_properties = {
        .usb_ctrl = CYPRESS_FX2,
        .firmware = "dvb-usb-vp7045-01.fw",
-       .size_of_priv = sizeof(u8 *),
+       .size_of_priv = 20,
 
        .num_adapters = 1,
        .adapter = {
@@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = {
 static struct usb_driver vp7045_usb_driver = {
        .name           = "dvb_usb_vp7045",
        .probe          = vp7045_usb_probe,
-       .disconnect     = vp7045_usb_disconnect,
+       .disconnect     = dvb_usb_device_exit,
        .id_table       = vp7045_usb_table,
 };
 
index eae05b5..144f3f5 100644 (file)
@@ -618,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
 static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
 {
        DEFINE_IR_RAW_EVENT(rawir);
-       unsigned int count;
        u32 carrier;
        u8 sample;
        int i;
@@ -631,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
        if (nvt->carrier_detect_enabled)
                carrier = nvt_rx_carrier_detect(nvt);
 
-       count = nvt->pkts;
-       nvt_dbg_verbose("Processing buffer of len %d", count);
+       nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
 
        init_ir_raw_event(&rawir);
 
-       for (i = 0; i < count; i++) {
-               nvt->pkts--;
+       for (i = 0; i < nvt->pkts; i++) {
                sample = nvt->buf[i];
 
                rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
                rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
                                          * SAMPLE_PERIOD);
 
-               if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
-                       if (nvt->rawir.pulse == rawir.pulse)
-                               nvt->rawir.duration += rawir.duration;
-                       else {
-                               nvt->rawir.duration = rawir.duration;
-                               nvt->rawir.pulse = rawir.pulse;
-                       }
-                       continue;
-               }
-
-               rawir.duration += nvt->rawir.duration;
+               nvt_dbg("Storing %s with duration %d",
+                       rawir.pulse ? "pulse" : "space", rawir.duration);
 
-               init_ir_raw_event(&nvt->rawir);
-               nvt->rawir.duration = 0;
-               nvt->rawir.pulse = rawir.pulse;
-
-               if (sample == BUF_PULSE_BIT)
-                       rawir.pulse = false;
-
-               if (rawir.duration) {
-                       nvt_dbg("Storing %s with duration %d",
-                               rawir.pulse ? "pulse" : "space",
-                               rawir.duration);
-
-                       ir_raw_event_store_with_filter(nvt->rdev, &rawir);
-               }
+               ir_raw_event_store_with_filter(nvt->rdev, &rawir);
 
                /*
                 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
                 * indicates end of IR signal, but new data incoming. In both
                 * cases, it means we're ready to call ir_raw_event_handle
                 */
-               if ((sample == BUF_PULSE_BIT) && nvt->pkts) {
+               if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
                        nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
                        ir_raw_event_handle(nvt->rdev);
                }
        }
 
+       nvt->pkts = 0;
+
        nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
        ir_raw_event_handle(nvt->rdev);
 
-       if (nvt->pkts) {
-               nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
-               nvt->pkts = 0;
-       }
-
        nvt_dbg_verbose("%s done", __func__);
 }
 
@@ -1048,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 
        spin_lock_init(&nvt->nvt_lock);
        spin_lock_init(&nvt->tx.lock);
-       init_ir_raw_event(&nvt->rawir);
 
        ret = -EBUSY;
        /* now claim resources */
index 1241fc8..0d5e087 100644 (file)
@@ -67,7 +67,6 @@ static int debug;
 struct nvt_dev {
        struct pnp_dev *pdev;
        struct rc_dev *rdev;
-       struct ir_raw_event rawir;
 
        spinlock_t nvt_lock;
 
index 0800433..18305c8 100644 (file)
@@ -2858,7 +2858,6 @@ static void ov7xx0_configure(struct sd *sd)
                        case 0x60:
                                PDEBUG(D_PROBE, "Sensor is a OV7660");
                                sd->sensor = SEN_OV7660;
-                               sd->invert_led = 0;
                                break;
                        default:
                                PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
@@ -3337,7 +3336,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
        case BRIDGE_OV519:
                cam->cam_mode = ov519_vga_mode;
                cam->nmodes = ARRAY_SIZE(ov519_vga_mode);
-               sd->invert_led = !sd->invert_led;
                break;
        case BRIDGE_OVFX2:
                cam->cam_mode = ov519_vga_mode;
@@ -5005,24 +5003,24 @@ static const struct sd_desc sd_desc = {
 /* -- module initialisation -- */
 static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
-       {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
-       {USB_DEVICE(0x041e, 0x405f),
+       {USB_DEVICE(0x041e, 0x4052),
                .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+       {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
-       {USB_DEVICE(0x041e, 0x4064),
-               .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+       {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 },
-       {USB_DEVICE(0x041e, 0x4068),
+       {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
+       {USB_DEVICE(0x045e, 0x028c),
                .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
-       {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
-       {USB_DEVICE(0x054c, 0x0155),
-               .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+       {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
        {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
-       {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
-       {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
+       {USB_DEVICE(0x05a9, 0x0519),
+               .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+       {USB_DEVICE(0x05a9, 0x0530),
+               .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
        {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 },
        {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
index 81b8a60..c477ad1 100644 (file)
@@ -2386,7 +2386,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
                reg_w1(gspca_dev, 0x01, 0x22);
                msleep(100);
                reg01 = SCL_SEL_OD | S_PDN_INV;
-               reg17 &= MCK_SIZE_MASK;
+               reg17 &= ~MCK_SIZE_MASK;
                reg17 |= 0x04;          /* clock / 4 */
                break;
        }
@@ -2532,6 +2532,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
                if (!mode) {                    /* if 640x480 */
                        reg17 &= ~MCK_SIZE_MASK;
                        reg17 |= 0x04;          /* clock / 4 */
+               } else {
+                       reg01 &= ~SYS_SEL_48M;  /* clk 24 MHz */
+                       reg17 &= ~MCK_SIZE_MASK;
+                       reg17 |= 0x02;          /* clock / 2 */
                }
                break;
        case SENSOR_OV7630:
index b5ef362..b3a5ecd 100644 (file)
@@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev)
                                        "'%s' Display already enabled\n",
                                        def_display->name);
                        }
-                       /* set the update mode */
-                       if (def_display->caps &
-                                       OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
-                               if (dssdrv->enable_te)
-                                       dssdrv->enable_te(def_display, 0);
-                               if (dssdrv->set_update_mode)
-                                       dssdrv->set_update_mode(def_display,
-                                                       OMAP_DSS_UPDATE_MANUAL);
-                       } else {
-                               if (dssdrv->set_update_mode)
-                                       dssdrv->set_update_mode(def_display,
-                                                       OMAP_DSS_UPDATE_AUTO);
-                       }
                }
        }
 
index 9d3459d..80796eb 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <media/v4l2-event.h>
 
 #include "isp.h"
index e9a0e94..8c70e64 100644 (file)
@@ -338,7 +338,7 @@ int pwc_init_controls(struct pwc_device *pdev)
        if (pdev->restore_factory)
                pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;
 
-       if (!pdev->features & FEATURE_MOTOR_PANTILT)
+       if (!(pdev->features & FEATURE_MOTOR_PANTILT))
                return hdl->error;
 
        /* Motor pan / tilt / reset */
index d29f9c2..e4100b1 100644 (file)
@@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
 
        list_for_each_entry(stream, &dev->streams, list) {
                if (stream->intf == intf)
-                       return uvc_video_resume(stream);
+                       return uvc_video_resume(stream, reset);
        }
 
        uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
index 48fea37..29e2399 100644 (file)
@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
                if (remote == NULL)
                        return -EINVAL;
 
-               source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
+               source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
                       ? (remote->vdev ? &remote->vdev->entity : NULL)
                       : &remote->subdev.entity;
                if (source == NULL)
index 8244167..ffd1158 100644 (file)
@@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream)
  * buffers, making sure userspace applications are notified of the problem
  * instead of waiting forever.
  */
-int uvc_video_resume(struct uvc_streaming *stream)
+int uvc_video_resume(struct uvc_streaming *stream, int reset)
 {
        int ret;
 
+       /* If the bus has been reset on resume, set the alternate setting to 0.
+        * This should be the default value, but some devices crash or otherwise
+        * misbehave if they don't receive a SET_INTERFACE request before any
+        * other video control request.
+        */
+       if (reset)
+               usb_set_interface(stream->dev->udev, stream->intfnum, 0);
+
        stream->frozen = 0;
 
        ret = uvc_commit_video(stream, &stream->ctrl);
index df32a43..cbdd49b 100644 (file)
@@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
 /* Video */
 extern int uvc_video_init(struct uvc_streaming *stream);
 extern int uvc_video_suspend(struct uvc_streaming *stream);
-extern int uvc_video_resume(struct uvc_streaming *stream);
+extern int uvc_video_resume(struct uvc_streaming *stream, int reset);
 extern int uvc_video_enable(struct uvc_streaming *stream, int enable);
 extern int uvc_probe_video(struct uvc_streaming *stream,
                struct uvc_streaming_control *probe);
index 06f1400..d721565 100644 (file)
@@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd)
                media_device_unregister_entity(&vdev->entity);
 #endif
 
+       /* Do not call v4l2_device_put if there is no release callback set.
+        * Drivers that have no v4l2_device release callback might free the
+        * v4l2_dev instance in the video_device release callback below, so we
+        * must perform this check here.
+        *
+        * TODO: In the long run all drivers that use v4l2_device should use the
+        * v4l2_device release callback. This check will then be unnecessary.
+        */
+       if (v4l2_dev->release == NULL)
+               v4l2_dev = NULL;
+
        /* Release video_device and perform other
           cleanups as needed. */
        vdev->release(vdev);
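For context on the TODO above, a hypothetical driver that does set the v4l2_device release callback (all names below are illustrative) would be structured roughly as follows, letting the refcount machinery free the containing structure instead of the video_device release callback:

	struct mydrv {
		struct v4l2_device v4l2_dev;
		struct video_device vdev;
		/* ... */
	};

	static void mydrv_v4l2_release(struct v4l2_device *v4l2_dev)
	{
		struct mydrv *drv = container_of(v4l2_dev, struct mydrv, v4l2_dev);

		kfree(drv);	/* the video_device has already been released here */
	}

	/* in probe():
	 *	drv->v4l2_dev.release = mydrv_v4l2_release;
	 *	v4l2_device_register(&pdev->dev, &drv->v4l2_dev);
	 */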
index c72856c..e6a2c3b 100644 (file)
@@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
        mutex_init(&v4l2_dev->ioctl_lock);
        v4l2_prio_init(&v4l2_dev->prio);
        kref_init(&v4l2_dev->ref);
+       get_device(dev);
        v4l2_dev->dev = dev;
        if (dev == NULL) {
                /* If dev == NULL, then name must be filled in by the caller */
@@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
 
        if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
                dev_set_drvdata(v4l2_dev->dev, NULL);
+       put_device(v4l2_dev->dev);
        v4l2_dev->dev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
index 85d3048..bb7f17f 100644 (file)
@@ -1332,6 +1332,8 @@ static __devinit bool viacam_serial_is_enabled(void)
        struct pci_bus *pbus = pci_find_bus(0, 0);
        u8 cbyte;
 
+       if (!pbus)
+               return false;
        pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
                        VIACAM_SERIAL_CREG, &cbyte);
        if ((cbyte & VIACAM_SERIAL_BIT) == 0)
index 21131c7..563654c 100644 (file)
@@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
        ct->regs.ack = JZ_REG_ADC_STATUS;
        ct->chip.irq_mask = irq_gc_mask_set_bit;
        ct->chip.irq_unmask = irq_gc_mask_clr_bit;
-       ct->chip.irq_ack = irq_gc_ack;
+       ct->chip.irq_ack = irq_gc_ack_set_bit;
 
        irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
 
index 5d1fca0..f83103b 100644 (file)
@@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
        max8997->dev = &i2c->dev;
        max8997->i2c = i2c;
        max8997->type = id->driver_data;
+       max8997->irq = i2c->irq;
 
        if (!pdata)
                goto err;
 
+       max8997->irq_base = pdata->irq_base;
+       max8997->ono = pdata->ono;
        max8997->wakeup = pdata->wakeup;
 
        mutex_init(&max8997->iolock);
@@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
 
        pm_runtime_set_active(max8997->dev);
 
+       max8997_irq_init(max8997);
+
        mfd_add_devices(max8997->dev, -1, max8997_devs,
                        ARRAY_SIZE(max8997_devs),
                        NULL, 0);
index 29601e7..86e1458 100644 (file)
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
                                | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
                                | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
 
-                       reg |= (1 << (i + 1));
                } else
                        continue;
 
index 2bfad5c..a56be93 100644 (file)
@@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
        switch (tps65910_chip_id(tps65910)) {
        case TPS65910:
                tps65910->irq_num = TPS65910_NUM_IRQ;
+               break;
        case TPS65911:
                tps65910->irq_num = TPS65911_NUM_IRQ;
+               break;
        }
 
        /* Register with genirq */
index b5d598c..7cbf2aa 100644 (file)
@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
        u8 ch_msb, ch_lsb;
        int ret;
 
-       if (!req)
+       if (!req || !twl4030_madc)
                return -EINVAL;
+
        mutex_lock(&twl4030_madc->lock);
        if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
                ret = -EINVAL;
@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
        if (!madc)
                return -ENOMEM;
 
+       madc->dev = &pdev->dev;
+
        /*
         * Phoenix provides 2 interrupt lines. The first one is connected to
         * the OMAP. The other one can be connected to the other processor such
index ebf99be..d584f6b 100644 (file)
@@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)
        return ret;
 }
 
-static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
+static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
 {
        if (db == WM8350_GPIO_DEBOUNCE_ON)
                return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE,
@@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,
                goto err;
        if (gpio_set_polarity(wm8350, gpio, pol))
                goto err;
-       if (gpio_set_debounce(wm8350, gpio, debounce))
+       if (wm8350_gpio_set_debounce(wm8350, gpio, debounce))
                goto err;
        if (gpio_set_dir(wm8350, gpio, dir))
                goto err;
index b928bc1..8b51cd6 100644 (file)
@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
         *      both have been read. So the value read will always be correct.
         * Set BOOT bit to refresh factory tuning values.
         */
-       lis3->read(lis3, CTRL_REG2, &reg);
-       if (lis3->whoami ==  WAI_12B)
-               reg |= CTRL2_BDU | CTRL2_BOOT;
-       else
-               reg |= CTRL2_BOOT_8B;
-       lis3->write(lis3, CTRL_REG2, reg);
+       if (lis3->pdata) {
+               lis3->read(lis3, CTRL_REG2, &reg);
+               if (lis3->whoami ==  WAI_12B)
+                       reg |= CTRL2_BDU | CTRL2_BOOT;
+               else
+                       reg |= CTRL2_BOOT_8B;
+               lis3->write(lis3, CTRL_REG2, reg);
+       }
 
        /* LIS3 power on delay is quite long */
        msleep(lis3->pwron_delay / lis3lv02d_get_odr());
index 06df187..0b56e3f 100644 (file)
@@ -165,6 +165,11 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc,
 static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
                                             const char *thread_name)
 {
+       /*
+        * Since we access the comm member in current's task_struct, we only
+        * need to be as large as what 'comm' in that structure is.
+        */
+       char comm[TASK_COMM_LEN];
        struct pti_masterchannel mccontrol = {.master = CONTROL_ID,
                                              .channel = 0};
        const char *thread_name_p;
@@ -172,13 +177,6 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
        u8 control_frame[CONTROL_FRAME_LEN];
 
        if (!thread_name) {
-               /*
-                * Since we access the comm member in current's task_struct,
-                * we only need to be as large as what 'comm' in that
-                * structure is.
-                */
-               char comm[TASK_COMM_LEN];
-
                if (!in_interrupt())
                        get_task_comm(comm, current);
                else
index 1ff5486..4c1a648 100644 (file)
@@ -926,6 +926,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
        /*
         * Reliable writes are used to implement Forced Unit Access and
         * REQ_META accesses, and are supported only on MMCs.
+        *
+        * XXX: this really needs a good explanation of why REQ_META
+        * is treated specially.
         */
        bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
                          (req->cmd_flags & REQ_META)) &&
index 91a0a74..b27b940 100644 (file)
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
                if (mrq->done)
                        mrq->done(mrq);
 
-               mmc_host_clk_gate(host);
+               mmc_host_clk_release(host);
        }
 }
 
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
                        mrq->stop->mrq = mrq;
                }
        }
-       mmc_host_clk_ungate(host);
+       mmc_host_clk_hold(host);
        led_trigger_event(host->led, LED_FULL);
        host->ops->request(host, mrq);
 }
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
  */
 void mmc_set_chip_select(struct mmc_host *host, int mode)
 {
+       mmc_host_clk_hold(host);
        host->ios.chip_select = mode;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
  * Sets the host clock to the highest possible frequency that
  * is below "hz".
  */
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
 {
        WARN_ON(hz < host->f_min);
 
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
        mmc_set_ios(host);
 }
 
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+       mmc_host_clk_hold(host);
+       __mmc_set_clock(host, hz);
+       mmc_host_clk_release(host);
+}
+
 #ifdef CONFIG_MMC_CLKGATE
 /*
  * This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
        if (host->clk_old) {
                BUG_ON(host->ios.clock);
                /* This call will also set host->clk_gated to false */
-               mmc_set_clock(host, host->clk_old);
+               __mmc_set_clock(host, host->clk_old);
        }
 }
 
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
  */
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 {
+       mmc_host_clk_hold(host);
        host->ios.bus_mode = mode;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
  */
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 {
+       mmc_host_clk_hold(host);
        host->ios.bus_width = width;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
 
                ocr &= 3 << bit;
 
+               mmc_host_clk_hold(host);
                host->ios.vdd = bit;
                mmc_set_ios(host);
+               mmc_host_clk_release(host);
        } else {
                pr_warning("%s: host doesn't support card's voltages\n",
                                mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
  */
 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 {
+       mmc_host_clk_hold(host);
        host->ios.timing = timing;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
  */
 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
 {
+       mmc_host_clk_hold(host);
        host->ios.drv_type = drv_type;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
 {
        int bit;
 
+       mmc_host_clk_hold(host);
+
        /* If ocr is set, we use it */
        if (host->ocr)
                bit = ffs(host->ocr) - 1;
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
         * time required to reach a stable voltage.
         */
        mmc_delay(10);
+
+       mmc_host_clk_release(host);
 }
 
 static void mmc_power_off(struct mmc_host *host)
 {
+       mmc_host_clk_hold(host);
+
        host->ios.clock = 0;
        host->ios.vdd = 0;
 
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);
+
+       mmc_host_clk_release(host);
 }
 
 /*
index b29d3e8..793d0a0 100644 (file)
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
 }
 
 /**
- *     mmc_host_clk_ungate - ungate hardware MCI clocks
+ *     mmc_host_clk_hold - ungate hardware MCI clocks
  *     @host: host to ungate.
  *
  *     Makes sure the host ios.clock is restored to a non-zero value
  *     past this call. Increase clock reference count and ungate clock
  *     if we're the first user.
  */
-void mmc_host_clk_ungate(struct mmc_host *host)
+void mmc_host_clk_hold(struct mmc_host *host)
 {
        unsigned long flags;
 
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
 }
 
 /**
- *     mmc_host_clk_gate - gate off hardware MCI clocks
+ *     mmc_host_clk_release - gate off hardware MCI clocks
  *     @host: host to gate.
  *
  *     Calls the host driver with ios.clock set to zero as often as possible
  *     in order to gate off hardware MCI clocks. Decrease clock reference
  *     count and schedule disabling of clock.
  */
-void mmc_host_clk_gate(struct mmc_host *host)
+void mmc_host_clk_release(struct mmc_host *host)
 {
        unsigned long flags;
 
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
        host->clk_requests--;
        if (mmc_host_may_gate_card(host->card) &&
            !host->clk_requests)
-               schedule_work(&host->clk_gate_work);
+               queue_work(system_nrt_wq, &host->clk_gate_work);
        spin_unlock_irqrestore(&host->clk_lock, flags);
 }
 
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
        if (cancel_work_sync(&host->clk_gate_work))
                mmc_host_clk_gate_delayed(host);
        if (host->clk_gated)
-               mmc_host_clk_ungate(host);
+               mmc_host_clk_hold(host);
        /* There should be only one user now */
        WARN_ON(host->clk_requests > 1);
 }
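Taken together with the core.c changes above, the renamed helpers behave as a reference count around the gated MCI clock. A condensed sketch of the calling pattern (assuming hold/release pairs nest, as the clk_requests counter suggests):

	mmc_host_clk_hold(host);	/* clk_requests: 0 -> 1, clock ungated        */
	mmc_set_clock(host, hz);	/* takes and drops its own nested reference   */
	mmc_host_clk_release(host);	/* clk_requests: 1 -> 0, gating work queued   */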
index de199f9..fb8a5cd 100644 (file)
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
 #ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_ungate(struct mmc_host *host);
-void mmc_host_clk_gate(struct mmc_host *host);
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
 unsigned int mmc_host_clk_rate(struct mmc_host *host);
 
 #else
-static inline void mmc_host_clk_ungate(struct mmc_host *host)
+static inline void mmc_host_clk_hold(struct mmc_host *host)
 {
 }
 
-static inline void mmc_host_clk_gate(struct mmc_host *host)
+static inline void mmc_host_clk_release(struct mmc_host *host)
 {
 }
 
index 633975f..0370e03 100644 (file)
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
        return 0;
 }
 
-static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+static void sd_update_bus_speed_mode(struct mmc_card *card)
 {
-       unsigned int bus_speed = 0, timing = 0;
-       int err;
-
        /*
         * If the host doesn't support any of the UHS-I modes, fallback on
         * default speed.
         */
        if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
-           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
-               return 0;
+           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
+               card->sd_bus_speed = 0;
+               return;
+       }
 
        if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
            (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
-                       bus_speed = UHS_SDR104_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR104;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
        } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
                   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
-                       bus_speed = UHS_DDR50_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_DDR50;
-                       card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+                       card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
                    SD_MODE_UHS_SDR50)) {
-                       bus_speed = UHS_SDR50_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR50;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
                   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
-                       bus_speed = UHS_SDR25_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR25;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
                    MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
                    SD_MODE_UHS_SDR12)) {
-                       bus_speed = UHS_SDR12_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR12;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+       }
+}
+
+static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+{
+       int err;
+       unsigned int timing = 0;
+
+       switch (card->sd_bus_speed) {
+       case UHS_SDR104_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR104;
+               card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+               break;
+       case UHS_DDR50_BUS_SPEED:
+               timing = MMC_TIMING_UHS_DDR50;
+               card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+               break;
+       case UHS_SDR50_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR50;
+               card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+               break;
+       case UHS_SDR25_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR25;
+               card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+               break;
+       case UHS_SDR12_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR12;
+               card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+               break;
+       default:
+               return 0;
        }
 
-       card->sd_bus_speed = bus_speed;
-       err = mmc_sd_switch(card, 1, 0, bus_speed, status);
+       err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
        if (err)
                return err;
 
-       if ((status[16] & 0xF) != bus_speed)
+       if ((status[16] & 0xF) != card->sd_bus_speed)
                printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
                        mmc_hostname(card->host));
        else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
                mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
        }
 
+       /*
+        * Select the bus speed mode depending on host
+        * and card capability.
+        */
+       sd_update_bus_speed_mode(card);
+
        /* Set the driver strength for the card */
        err = sd_select_driver_type(card, status);
        if (err)
                goto out;
 
-       /* Set bus speed mode of the card */
-       err = sd_set_bus_speed_mode(card, status);
+       /* Set current limit for the card */
+       err = sd_set_current_limit(card, status);
        if (err)
                goto out;
 
-       /* Set current limit for the card */
-       err = sd_set_current_limit(card, status);
+       /* Set bus speed mode of the card */
+       err = sd_set_bus_speed_mode(card, status);
        if (err)
                goto out;
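Condensed, the reordered UHS-I initialisation in mmc_sd_init_uhs_card() now proceeds roughly as follows (a sketch of the calls shown above, with error handling and the 4-bit bus setup omitted):

	sd_update_bus_speed_mode(card);			/* pick card->sd_bus_speed from host/card caps */
	err = sd_select_driver_type(card, status);	/* driver strength */
	err = sd_set_current_limit(card, status);	/* current limit, now before the speed switch */
	err = sd_set_bus_speed_mode(card, status);	/* CMD6 switch, then timing/clock on success */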
 
index 0e9780f..4dc0028 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/gpio.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
index 2bd7bf4..fe886d6 100644 (file)
@@ -302,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
                ctrl &= ~SDHCI_CTRL_8BITBUS;
                break;
        default:
+               ctrl &= ~SDHCI_CTRL_4BITBUS;
+               ctrl &= ~SDHCI_CTRL_8BITBUS;
                break;
        }
 
index 774f643..0c4a672 100644 (file)
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
        mmc_data->hclk = clk_get_rate(priv->clk);
        mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
        mmc_data->get_cd = sh_mobile_sdhi_get_cd;
-       if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
-               mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
        mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
        if (p) {
                mmc_data->flags = p->tmio_flags;
+               if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
+                       mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
                mmc_data->ocr_mask = p->tmio_ocr_mask;
                mmc_data->capabilities |= p->tmio_caps;
 
index 65b5b76..64fbb00 100644 (file)
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
 
 #define ubi_dbg_msg(fmt, ...) do {                                           \
        if (0)                                                               \
-               pr_debug(fmt "\n", ##__VA_ARGS__);                           \
+               printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__);                  \
 } while (0)
 
 #define dbg_msg(fmt, ...)  ubi_dbg_msg(fmt, ##__VA_ARGS__)
index 8d0314d..a44874e 100644 (file)
@@ -2535,7 +2535,7 @@ config S6GMAC
 source "drivers/net/stmmac/Kconfig"
 
 config PCH_GBE
-       tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
+       tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
        depends on PCI
        select MII
        ---help---
@@ -2548,10 +2548,11 @@ config PCH_GBE
          This driver enables Gigabit Ethernet function.
 
          This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7223.
-         ML7223 IOH is for MP(Media Phone) use.
-         ML7223 is companion chip for Intel Atom E6xx series.
-         ML7223 is completely compatible for Intel EG20T PCH.
+         Output Hub), ML7223/ML7831.
+         ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
+         purpose use.
+         ML7223/ML7831 is a companion chip for the Intel Atom E6xx series.
+         ML7223/ML7831 is fully compatible with the Intel EG20T PCH.
 
 config FTGMAC100
        tristate "Faraday FTGMAC100 Gigabit Ethernet support"
index 52fe21e..3b1416e 100644 (file)
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
        struct net_device *dev = (struct net_device *)data;
        struct dev_priv *priv = netdev_priv(dev);
        unsigned int lnkstat, carrier;
+       unsigned long flags;
 
+       spin_lock_irqsave(&priv->chip_lock, flags);
        lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
+       spin_unlock_irqrestore(&priv->chip_lock, flags);
        carrier = netif_carrier_ok(dev);
 
        if (lnkstat && !carrier) {
index c423504..e46df53 100644 (file)
@@ -315,6 +315,14 @@ union db_prod {
        u32             raw;
 };
 
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(bp)           (CHIP_IS_E3(bp) ? 1024 : 512)
+#define MAX_AGG_QS(bp)         (CHIP_IS_E1(bp) ? \
+                                       ETH_MAX_AGGREGATION_QUEUES_E1 :\
+                                       ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+#define FW_DROP_LEVEL(bp)      (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+#define FW_PREFETCH_CNT                16
+#define DROPLESS_FC_HEADROOM   100
 
 /* MC hsi */
 #define BCM_PAGE_SHIFT         12
@@ -331,15 +339,35 @@ union db_prod {
 /* SGE ring related macros */
 #define NUM_RX_SGE_PAGES       2
 #define RX_SGE_CNT             (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT         (RX_SGE_CNT - 2)
+#define NEXT_PAGE_SGE_DESC_CNT 2
+#define MAX_RX_SGE_CNT         (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
 /* RX_SGE_CNT is promised to be a power of 2 */
 #define RX_SGE_MASK            (RX_SGE_CNT - 1)
 #define NUM_RX_SGE             (RX_SGE_CNT * NUM_RX_SGE_PAGES)
 #define MAX_RX_SGE             (NUM_RX_SGE - 1)
 #define NEXT_SGE_IDX(x)                ((((x) & RX_SGE_MASK) == \
-                                 (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+                                 (MAX_RX_SGE_CNT - 1)) ? \
+                                       (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+                                       (x) + 1)
 #define RX_SGE(x)              ((x) & MAX_RX_SGE)
 
+/*
+ * The number of required SGEs is the sum of two terms:
+ * 1. The number of possible open aggregations (the next packet for
+ *    each of these aggregations will probably consume an SGE immediately)
+ * 2. The rest of the BRB blocks divided by 2 (a block consumes a new SGE
+ *    only after placement on a BD for a new TPA aggregation)
+ *
+ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
+ */
+#define NUM_SGE_REQ            (MAX_AGG_QS(bp) + \
+                                       (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+#define NUM_SGE_PG_REQ         ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+                                               MAX_RX_SGE_CNT)
+#define SGE_TH_LO(bp)          (NUM_SGE_REQ + \
+                                NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+#define SGE_TH_HI(bp)          (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
 /* Manipulate a bit vector defined as an array of u64 */
 
 /* Number of bits in one sge_mask array element */
@@ -551,24 +579,43 @@ struct bnx2x_fastpath {
 
 #define NUM_TX_RINGS           16
 #define TX_DESC_CNT            (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define MAX_TX_DESC_CNT                (TX_DESC_CNT - 1)
+#define NEXT_PAGE_TX_DESC_CNT  1
+#define MAX_TX_DESC_CNT                (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
 #define NUM_TX_BD              (TX_DESC_CNT * NUM_TX_RINGS)
 #define MAX_TX_BD              (NUM_TX_BD - 1)
 #define MAX_TX_AVAIL           (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
 #define NEXT_TX_IDX(x)         ((((x) & MAX_TX_DESC_CNT) == \
-                                 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+                                 (MAX_TX_DESC_CNT - 1)) ? \
+                                       (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+                                       (x) + 1)
 #define TX_BD(x)               ((x) & MAX_TX_BD)
 #define TX_BD_POFF(x)          ((x) & MAX_TX_DESC_CNT)
 
 /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
 #define NUM_RX_RINGS           8
 #define RX_DESC_CNT            (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT                (RX_DESC_CNT - 2)
+#define NEXT_PAGE_RX_DESC_CNT  2
+#define MAX_RX_DESC_CNT                (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
 #define RX_DESC_MASK           (RX_DESC_CNT - 1)
 #define NUM_RX_BD              (RX_DESC_CNT * NUM_RX_RINGS)
 #define MAX_RX_BD              (NUM_RX_BD - 1)
 #define MAX_RX_AVAIL           (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_AVAIL           128
+
+/* dropless fc calculations for BDs
+ *
+ * The number of BDs should equal the number of buffers in the BRB.
+ * The low threshold additionally accounts for the NEXT_PAGE_RX_DESC_CNT
+ * "next" elements on each page.
+ */
+#define NUM_BD_REQ             BRB_SIZE(bp)
+#define NUM_BD_PG_REQ          ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+                                             MAX_RX_DESC_CNT)
+#define BD_TH_LO(bp)           (NUM_BD_REQ + \
+                                NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+                                FW_DROP_LEVEL(bp))
+#define BD_TH_HI(bp)           (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+#define MIN_RX_AVAIL           ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
 
 #define MIN_RX_SIZE_TPA_HW     (CHIP_IS_E1(bp) ? \
                                        ETH_MIN_RX_CQES_WITH_TPA_E1 : \
@@ -579,7 +626,9 @@ struct bnx2x_fastpath {
                                                                MIN_RX_AVAIL))
 
 #define NEXT_RX_IDX(x)         ((((x) & RX_DESC_MASK) == \
-                                 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+                                 (MAX_RX_DESC_CNT - 1)) ? \
+                                       (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+                                       (x) + 1)
 #define RX_BD(x)               ((x) & MAX_RX_BD)
 
 /*
@@ -589,14 +638,31 @@ struct bnx2x_fastpath {
 #define CQE_BD_REL     (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
 #define NUM_RCQ_RINGS          (NUM_RX_RINGS * CQE_BD_REL)
 #define RCQ_DESC_CNT           (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT       (RCQ_DESC_CNT - 1)
+#define NEXT_PAGE_RCQ_DESC_CNT 1
+#define MAX_RCQ_DESC_CNT       (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
 #define NUM_RCQ_BD             (RCQ_DESC_CNT * NUM_RCQ_RINGS)
 #define MAX_RCQ_BD             (NUM_RCQ_BD - 1)
 #define MAX_RCQ_AVAIL          (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
 #define NEXT_RCQ_IDX(x)                ((((x) & MAX_RCQ_DESC_CNT) == \
-                                 (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+                                 (MAX_RCQ_DESC_CNT - 1)) ? \
+                                       (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+                                       (x) + 1)
 #define RCQ_BD(x)              ((x) & MAX_RCQ_BD)
 
+/* dropless fc calculations for RCQs
+ *
+ * The number of RCQs should equal the number of buffers in the BRB.
+ * The low threshold additionally accounts for the NEXT_PAGE_RCQ_DESC_CNT
+ * "next" elements on each page.
+ */
+#define NUM_RCQ_REQ            BRB_SIZE(bp)
+#define NUM_RCQ_PG_REQ         ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
+                                             MAX_RCQ_DESC_CNT)
+#define RCQ_TH_LO(bp)          (NUM_RCQ_REQ + \
+                                NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+                                FW_DROP_LEVEL(bp))
+#define RCQ_TH_HI(bp)          (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
 
 /* This is needed for determining of last_max */
 #define SUB_S16(a, b)          (s16)((s16)(a) - (s16)(b))
@@ -685,24 +751,17 @@ struct bnx2x_fastpath {
 #define FP_CSB_FUNC_OFF        \
                        offsetof(struct cstorm_status_block_c, func)
 
-#define HC_INDEX_TOE_RX_CQ_CONS                0 /* Formerly Ustorm TOE CQ index */
-                                         /* (HC_INDEX_U_TOE_RX_CQ_CONS)  */
-#define HC_INDEX_ETH_RX_CQ_CONS                1 /* Formerly Ustorm ETH CQ index */
-                                         /* (HC_INDEX_U_ETH_RX_CQ_CONS)  */
-#define HC_INDEX_ETH_RX_BD_CONS                2 /* Formerly Ustorm ETH BD index */
-                                         /* (HC_INDEX_U_ETH_RX_BD_CONS)  */
-
-#define HC_INDEX_TOE_TX_CQ_CONS                4 /* Formerly Cstorm TOE CQ index   */
-                                         /* (HC_INDEX_C_TOE_TX_CQ_CONS)    */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS0   5 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS1   6 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS2   7 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
+#define HC_INDEX_ETH_RX_CQ_CONS                1
 
-#define HC_INDEX_ETH_FIRST_TX_CQ_CONS  HC_INDEX_ETH_TX_CQ_CONS_COS0
+#define HC_INDEX_OOO_TX_CQ_CONS                4
 
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0   5
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1   6
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2   7
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS  HC_INDEX_ETH_TX_CQ_CONS_COS0
 
 #define BNX2X_RX_SB_INDEX \
        (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
@@ -1100,11 +1159,12 @@ struct bnx2x {
 #define BP_PORT(bp)                    (bp->pfid & 1)
 #define BP_FUNC(bp)                    (bp->pfid)
 #define BP_ABS_FUNC(bp)                        (bp->pf_num)
-#define BP_E1HVN(bp)                   (bp->pfid >> 1)
-#define BP_VN(bp)                      (BP_E1HVN(bp)) /*remove when approved*/
-#define BP_L_ID(bp)                    (BP_E1HVN(bp) << 2)
-#define BP_FW_MB_IDX(bp)               (BP_PORT(bp) +\
-         BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1))
+#define BP_VN(bp)                      ((bp)->pfid >> 1)
+#define BP_MAX_VN_NUM(bp)              (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+#define BP_L_ID(bp)                    (BP_VN(bp) << 2)
+#define BP_FW_MB_IDX_VN(bp, vn)                (BP_PORT(bp) +\
+         (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1))
+#define BP_FW_MB_IDX(bp)               BP_FW_MB_IDX_VN(bp, BP_VN(bp))
 
        struct net_device       *dev;
        struct pci_dev          *pdev;
@@ -1767,7 +1827,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 #define MAX_DMAE_C_PER_PORT            8
 #define INIT_DMAE_C(bp)                        (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
-                                        BP_E1HVN(bp))
+                                        BP_VN(bp))
 #define PMF_DMAE_C(bp)                 (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
                                         E1HVN_MAX)
 
@@ -1793,7 +1853,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 /* must be used on a CID before placing it on a HW ring */
 #define HW_CID(bp, x)                  ((BP_PORT(bp) << 23) | \
-                                        (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+                                        (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
                                         (x))
 
 #define SP_DESC_CNT            (BCM_PAGE_SIZE / sizeof(struct eth_spe))
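To make the dropless flow-control threshold arithmetic above concrete, here is an illustrative calculation. The values assumed for MAX_AGG_QS and MAX_SPQ_PENDING are placeholders chosen only to show the shape of the math, not the real firmware constants:

	/*
	 * Assume an E2 device: BRB_SIZE = 512, with MAX_AGG_QS = 64 (assumed)
	 * and MAX_SPQ_PENDING = 8 (assumed), so FW_DROP_LEVEL = 3 + 8 + 64 = 75.
	 *
	 *   NUM_SGE_REQ = 64 + (512 - 64) / 2 = 288
	 *   SGE_TH_LO   = 288 + ceil(288 / MAX_RX_SGE_CNT) * NEXT_PAGE_SGE_DESC_CNT
	 *   SGE_TH_HI   = SGE_TH_LO + 100		(DROPLESS_FC_HEADROOM)
	 *
	 *   NUM_BD_REQ  = 512
	 *   BD_TH_LO    = 512 + ceil(512 / MAX_RX_DESC_CNT) * NEXT_PAGE_RX_DESC_CNT + 75
	 *   BD_TH_HI    = BD_TH_LO + 100
	 */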
index 37e5790..c4cbf97 100644 (file)
@@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
        int func = BP_FUNC(bp);
-       int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                             ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
        u16 ring_prod;
        int i, j;
 
@@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 
                if (!fp->disable_tpa) {
                        /* Fill the per-aggregtion pool */
-                       for (i = 0; i < max_agg_queues; i++) {
+                       for (i = 0; i < MAX_AGG_QS(bp); i++) {
                                struct bnx2x_agg_info *tpa_info =
                                        &fp->tpa_info[i];
                                struct sw_rx_bd *first_buf =
@@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                                        bnx2x_free_rx_sge_range(bp, fp,
                                                                ring_prod);
                                        bnx2x_free_tpa_pool(bp, fp,
-                                                           max_agg_queues);
+                                                           MAX_AGG_QS(bp));
                                        fp->disable_tpa = 1;
                                        ring_prod = 0;
                                        break;
@@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
                bnx2x_free_rx_bds(fp);
 
                if (!fp->disable_tpa)
-                       bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
-                                           ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                           ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+                       bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
        }
 }
 
@@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
        struct bnx2x_fastpath *fp = &bp->fp[index];
        int ring_size = 0;
        u8 cos;
+       int rx_ring_size = 0;
 
        /* if rx_ring_size specified - use it */
-       int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
-                          MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+       if (!bp->rx_ring_size) {
 
-       /* allocate at least number of buffers required by FW */
-       rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
-                                                   MIN_RX_SIZE_TPA,
-                                 rx_ring_size);
+               rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+               /* allocate at least number of buffers required by FW */
+               rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+                                    MIN_RX_SIZE_TPA, rx_ring_size);
+
+               bp->rx_ring_size = rx_ring_size;
+       } else
+               rx_ring_size = bp->rx_ring_size;
 
        /* Common */
        sb = &bnx2x_fp(bp, index, status_blk);
index a1e004a..0b4acf6 100644 (file)
@@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
                        break;
                case DCB_CAP_ATTR_DCBX:
                        *cap = BNX2X_DCBX_CAPS;
+                       break;
                default:
                        rval = -EINVAL;
                        break;
index 2218630..cf3e479 100644 (file)
@@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                }
 
                /* advertise the requested speed and duplex if supported */
-               cmd->advertising &= bp->port.supported[cfg_idx];
+               if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+                       DP(NETIF_MSG_LINK, "Advertisement parameters "
+                                          "are not supported\n");
+                       return -EINVAL;
+               }
 
                bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
-               bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
-               bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
+               bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+               bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
                                         cmd->advertising);
+               if (cmd->advertising) {
+
+                       bp->link_params.speed_cap_mask[cfg_idx] = 0;
+                       if (cmd->advertising & ADVERTISED_10baseT_Half) {
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+                       }
+                       if (cmd->advertising & ADVERTISED_10baseT_Full)
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
 
+                       if (cmd->advertising & ADVERTISED_100baseT_Full)
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+                       if (cmd->advertising & ADVERTISED_100baseT_Half) {
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+                       }
+                       if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+                       }
+                       if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+                                               ADVERTISED_1000baseKX_Full))
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+                       if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+                                               ADVERTISED_10000baseKX4_Full |
+                                               ADVERTISED_10000baseKR_Full))
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+               }
        } else { /* forced speed */
                /* advertise the requested speed and duplex if supported */
                switch (speed) {
@@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
        if (bp->rx_ring_size)
                ering->rx_pending = bp->rx_ring_size;
        else
-               if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
-                       ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
-               else
-                       ering->rx_pending = MAX_RX_AVAIL;
+               ering->rx_pending = MAX_RX_AVAIL;
 
        ering->rx_mini_pending = 0;
        ering->rx_jumbo_pending = 0;
index d45b155..ba15bdc 100644 (file)
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
 {
        u32 nig_reg_adress_crd_weight = 0;
        u32 pbf_reg_adress_crd_weight = 0;
-       /* Calculate and set BW for this COS*/
-       const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
-       const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+       /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+       const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+       const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
 
        switch (cos_entry) {
        case 0:
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
        /* Calculate total BW requested */
        for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
                if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
-
-                       if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
-                               DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
-                                                  "was set to 0\n");
-                       return -EINVAL;
+                       *total_bw +=
+                               ets_params->cos[cos_idx].params.bw_params.bw;
                }
-               *total_bw +=
-                   ets_params->cos[cos_idx].params.bw_params.bw;
-           }
        }
 
-       /*Check taotl BW is valid */
+       /* Check total BW is valid */
        if ((100 != *total_bw) || (0 == *total_bw)) {
                if (0 == *total_bw) {
                        DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW"
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
 
        /* Check loopback mode */
        if (lb)
-               val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+               val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
        REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
        bnx2x_set_xumac_nig(params,
                            ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
                         MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
 
+       /* Advertised and set FEC (Forward Error Correction) */
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+                        MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+                        (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+                         MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
        /* Enable CL37 BAM */
        if (REG_RD(bp, params->shmem_base +
                   offsetof(struct shmem_region, dev_info.
@@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params,
                                        (tmp | EMAC_LED_OVERRIDE));
                                /*
                                 * return here without enabling traffic
-                                * LED blink andsetting rate in ON mode.
+                                * LED blink and setting rate in ON mode.
                                 * In oper mode, enabling LED blink
                                 * and setting rate is needed.
                                 */
@@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params,
                         * This is a work-around for HW issue found when link
                         * is up in CL73
                         */
-                       REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+                       if ((!CHIP_IS_E3(bp)) ||
+                           (CHIP_IS_E3(bp) &&
+                            mode == LED_MODE_ON))
+                               REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
                        if (CHIP_IS_E1x(bp) ||
                            CHIP_IS_E2(bp) ||
                            (mode == LED_MODE_ON))
@@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = (FLAGS_HW_LOCK_REQUIRED |
-                          FLAGS_TX_ERROR_CHECK),
+       .flags          = FLAGS_HW_LOCK_REQUIRED,
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = (FLAGS_INIT_XGXS_FIRST |
-                          FLAGS_TX_ERROR_CHECK),
+       .flags          = FLAGS_INIT_XGXS_FIRST,
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = {
        .addr           = 0xff,
        .def_md_devad   = 0,
        .flags          = (FLAGS_HW_LOCK_REQUIRED |
-                          FLAGS_INIT_XGXS_FIRST |
-                          FLAGS_TX_ERROR_CHECK),
+                          FLAGS_INIT_XGXS_FIRST),
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = (FLAGS_FAN_FAILURE_DET_REQ |
-                          FLAGS_TX_ERROR_CHECK),
+       .flags          = FLAGS_FAN_FAILURE_DET_REQ,
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
index f74582a..15f8000 100644 (file)
@@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
 
        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
-       opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
-                  (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+       opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+                  (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
 
 #ifdef __BIG_ENDIAN
@@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
        if (!CHIP_IS_E1(bp)) {
                /* init leading/trailing edge */
                if (IS_MF(bp)) {
-                       val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+                       val = (0xee0f | (1 << (BP_VN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
@@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
 
        /* init leading/trailing edge */
        if (IS_MF(bp)) {
-               val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+               val = (0xee0f | (1 << (BP_VN(bp) + 4)));
                if (bp->port.pmf)
                        /* enable nig and gpio3 attention */
                        val |= 0x1100;
@@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
        int vn;
 
        bp->vn_weight_sum = 0;
-       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                u32 vn_cfg = bp->mf_config[vn];
                u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+       return 2 * vn + BP_PORT(bp);
+}
+
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 {
        struct rate_shaping_vars_per_vn m_rs_vn;
        struct fairness_vars_per_vn m_fair_vn;
        u32 vn_cfg = bp->mf_config[vn];
-       int func = 2*vn + BP_PORT(bp);
+       int func = func_by_vn(bp, vn);
        u16 vn_min_rate, vn_max_rate;
        int i;
 
@@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
         *
         *      and there are 2 functions per port
         */
-       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
 
                if (func >= E1H_FUNC_MAX)
@@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
                /* calculate and set min-max rate for each vn */
                if (bp->port.pmf)
-                       for (vn = VN_0; vn < E1HVN_MAX; vn++)
+                       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
                                bnx2x_init_vn_minmax(bp, vn);
 
                /* always enable rate shaping and fairness */
@@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
 {
-       int port = BP_PORT(bp);
        int func;
        int vn;
 
        /* Set the attention towards other drivers on the same port */
-       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-               if (vn == BP_E1HVN(bp))
+       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+               if (vn == BP_VN(bp))
                        continue;
 
-               func = ((vn << 1) | port);
+               func = func_by_vn(bp, vn);
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
        }
@@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
        bnx2x_dcbx_pmf_update(bp);
 
        /* enable nig attention */
-       val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+       val = (0xff0f | (1 << (BP_VN(bp) + 4)));
        if (bp->common.int_block == INT_BLOCK_HC) {
                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
        u16 tpa_agg_size = 0;
 
        if (!fp->disable_tpa) {
-               pause->sge_th_hi = 250;
-               pause->sge_th_lo = 150;
+               pause->sge_th_lo = SGE_TH_LO(bp);
+               pause->sge_th_hi = SGE_TH_HI(bp);
+
+               /* validate SGE ring has enough to cross high threshold */
+               WARN_ON(bp->dropless_fc &&
+                               pause->sge_th_hi + FW_PREFETCH_CNT >
+                               MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
                tpa_agg_size = min_t(u32,
                        (min_t(u32, 8, MAX_SKB_FRAGS) *
                        SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 
        /* pause - not for e1 */
        if (!CHIP_IS_E1(bp)) {
-               pause->bd_th_hi = 350;
-               pause->bd_th_lo = 250;
-               pause->rcq_th_hi = 350;
-               pause->rcq_th_lo = 250;
+               pause->bd_th_lo = BD_TH_LO(bp);
+               pause->bd_th_hi = BD_TH_HI(bp);
+
+               pause->rcq_th_lo = RCQ_TH_LO(bp);
+               pause->rcq_th_hi = RCQ_TH_HI(bp);
+               /*
+                * validate that rings have enough entries to cross
+                * high thresholds
+                */
+               WARN_ON(bp->dropless_fc &&
+                               pause->bd_th_hi + FW_PREFETCH_CNT >
+                               bp->rx_ring_size);
+               WARN_ON(bp->dropless_fc &&
+                               pause->rcq_th_hi + FW_PREFETCH_CNT >
+                               NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
 
                pause->pri_map = 1;
        }
@@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
         * For PF Clients it should be the maximum avaliable number.
         * VF driver(s) may want to define it to a smaller value.
         */
-       rxq_init->max_tpa_queues =
-               (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-               ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+       rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
 
        rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
        rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
        hc_sm->time_to_expire = 0xFFFFFFFF;
 }
 
+
+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+       /* zero out state machine indices */
+       /* rx indices */
+       index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+       /* tx indices */
+       index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+       /* map indices */
+       /* rx indices */
+       index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+               SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+       /* tx indices */
+       index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+               SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+               SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+               SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+               SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                          u8 vf_valid, int fw_sb_id, int igu_sb_id)
 {
@@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                hc_sm_p = sb_data_e2.common.state_machine;
                sb_data_p = (u32 *)&sb_data_e2;
                data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+               bnx2x_map_sb_state_machines(sb_data_e2.index_data);
        } else {
                memset(&sb_data_e1x, 0,
                       sizeof(struct hc_status_block_data_e1x));
@@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                hc_sm_p = sb_data_e1x.common.state_machine;
                sb_data_p = (u32 *)&sb_data_e1x;
                data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+               bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
        }
 
        bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -4890,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
        int igu_seg_id;
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
-       int reg_offset;
+       int reg_offset, reg_offset_en5;
        u64 section;
        int index;
        struct hc_sp_status_block_data sp_sb_data;
@@ -4913,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
 
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+       reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
+                                MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
        for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
                int sindex;
                /* take care of sig[0]..sig[4] */
@@ -4927,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
                         * and not 16 between the different groups
                         */
                        bp->attn_group[index].sig[4] = REG_RD(bp,
-                                       reg_offset + 0x10 + 0x4*index);
+                                       reg_offset_en5 + 0x4*index);
                else
                        bp->attn_group[index].sig[4] = 0;
        }
@@ -5802,7 +5857,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
         * take the UNDI lock to protect undi_unload flow from accessing
         * registers while we're resetting the chip
         */
-       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 
        bnx2x_reset_common(bp);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5814,7 +5869,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
        }
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
 
-       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 
        bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
 
@@ -6671,12 +6726,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
                        if (CHIP_MODE_IS_4_PORT(bp))
                                dsb_idx = BP_FUNC(bp);
                        else
-                               dsb_idx = BP_E1HVN(bp);
+                               dsb_idx = BP_VN(bp);
 
                        prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
                                       IGU_BC_BASE_DSB_PROD + dsb_idx :
                                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
 
+                       /*
+                        * IGU prods come in chunks of E1HVN_MAX (4),
+                        * regardless of the current chip mode.
+                        */
                        for (i = 0; i < (num_segs * E1HVN_MAX);
                             i += E1HVN_MAX) {
                                addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7568,9 +7627,12 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
                u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
                u8 *mac_addr = bp->dev->dev_addr;
                u32 val;
+               u16 pmc;
+
                /* The mac address is written to entries 1-4 to
-                  preserve entry 0 which is used by the PMF */
-               u8 entry = (BP_E1HVN(bp) + 1)*8;
+                * preserve entry 0 which is used by the PMF
+                */
+               u8 entry = (BP_VN(bp) + 1)*8;
 
                val = (mac_addr[0] << 8) | mac_addr[1];
                EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -7579,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
                      (mac_addr[4] << 8) | mac_addr[5];
                EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
 
+               /* Enable the PME and clear the status */
+               pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+               pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
+               pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+
                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
 
        } else
@@ -8546,10 +8613,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
        /* Check if there is any driver already loaded */
        val = REG_RD(bp, MISC_REG_UNPREPARED);
        if (val == 0x1) {
-               /* Check if it is the UNDI driver
+
+               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+               /*
+                * Check if it is the UNDI driver
                 * UNDI driver initializes CID offset for normal bell to 0x7
                 */
-               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
                val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
                if (val == 0x7) {
                        u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8587,9 +8656,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
                                bnx2x_fw_command(bp, reset_code, 0);
                        }
 
-                       /* now it's safe to release the lock */
-                       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
                        bnx2x_undi_int_disable(bp);
                        port = BP_PORT(bp);
 
@@ -8639,8 +8705,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
                        bp->fw_seq =
                              (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
                                DRV_MSG_SEQ_NUMBER_MASK);
-               } else
-                       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+               }
+
+               /* now it's safe to release the lock */
+               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
        }
 }
 
@@ -8777,13 +8845,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
 {
        int pfid = BP_FUNC(bp);
-       int vn = BP_E1HVN(bp);
        int igu_sb_id;
        u32 val;
        u8 fid, igu_sb_cnt = 0;
 
        bp->igu_base_sb = 0xff;
        if (CHIP_INT_MODE_IS_BC(bp)) {
+               int vn = BP_VN(bp);
                igu_sb_cnt = bp->igu_sb_cnt;
                bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
                        FP_SB_MAX_E1x;
@@ -9416,6 +9484,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
                bp->igu_base_sb = 0;
        } else {
                bp->common.int_block = INT_BLOCK_IGU;
+
+               /* do not allow device reset during IGU info processing */
+               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
                val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
 
                if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9447,6 +9519,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 
                bnx2x_get_igu_cam_info(bp);
 
+               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
        }
 
        /*
@@ -9473,7 +9546,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 
        bp->mf_ov = 0;
        bp->mf_mode = 0;
-       vn = BP_E1HVN(bp);
+       vn = BP_VN(bp);
 
        if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
                BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9593,13 +9666,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
        /* port info */
        bnx2x_get_port_hwinfo(bp);
 
-       if (!BP_NOMCP(bp)) {
-               bp->fw_seq =
-                       (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
-                        DRV_MSG_SEQ_NUMBER_MASK);
-               BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-       }
-
        /* Get MAC addresses */
        bnx2x_get_mac_hwinfo(bp);
 
@@ -9765,6 +9831,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
        if (!BP_NOMCP(bp))
                bnx2x_undi_unload(bp);
 
+       /* init fw_seq after undi_unload! */
+       if (!BP_NOMCP(bp)) {
+               bp->fw_seq =
+                       (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+                        DRV_MSG_SEQ_NUMBER_MASK);
+               BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+       }
+
        if (CHIP_REV_IS_FPGA(bp))
                dev_err(&bp->pdev->dev, "FPGA detected\n");
 
@@ -10259,17 +10333,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        /* clean indirect addresses */
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
-       /* Clean the following indirect addresses for all functions since it
+       /*
+        * Clean the following indirect addresses for all functions since it
         * is not used by the driver.
         */
        REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+
+       if (CHIP_IS_E1x(bp)) {
+               REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+               REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+               REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+               REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+       }
 
        /*
         * Enable internal target-read (in case we are probed after PF FLR).
index 40266c1..fc7bd0f 100644 (file)
    Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
 #define MISC_REG_AEU_ENABLE4_PXP_0                              0xa108
 #define MISC_REG_AEU_ENABLE4_PXP_1                              0xa1a8
+/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0                       0xa688
+/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0                       0xa6b0
 /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
    128 bit vector */
 #define MISC_REG_AEU_GENERAL_ATTN_0                             0xa000
 #define XCM_REG_XX_OVFL_EVNT_ID                                 0x20058
 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS   (0x1<<0)
 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS  (0x1<<1)
-#define XMAC_CTRL_REG_CORE_LOCAL_LPBK                           (0x1<<3)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK                           (0x1<<2)
 #define XMAC_CTRL_REG_RX_EN                                     (0x1<<1)
 #define XMAC_CTRL_REG_SOFT_RESET                                (0x1<<6)
 #define XMAC_CTRL_REG_TX_EN                                     (0x1<<0)
 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0                      8
 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1                      9
 #define HW_LOCK_RESOURCE_SPIO                                   2
-#define HW_LOCK_RESOURCE_UNDI                                   5
+#define HW_LOCK_RESOURCE_RESET                                  5
 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT                   (0x1<<4)
 #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR                   (0x1<<5)
 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR                   (0x1<<18)
@@ -6853,6 +6865,9 @@ The other bits are reserved and should be zero */
 #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP                 0x7
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0      0x10
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1      0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2      0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY    0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ                0x8000
 #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150  0x96
 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL               0x8000
 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1              0x800e
index 771f680..9908f2b 100644 (file)
@@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
                break;
 
        case MAC_TYPE_NONE: /* unreached */
-               BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+               DP(BNX2X_MSG_STATS,
+                  "stats updated by DMAE but no MAC active\n");
                return -1;
 
        default: /* unreached */
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
 
 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
 {
-       int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
+       int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
        u32 func_stx;
 
        /* sanity */
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
        func_stx = bp->func_stx;
 
        for (vn = VN_0; vn < vn_max; vn++) {
-               int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
+               int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
 
                bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
                bnx2x_func_stats_init(bp);
index a047eb9..47b928e 100644 (file)
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        }
 
 re_arm:
-       queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
+       if (!bond->kill_timers)
+               queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
 out:
        read_unlock(&bond->lock);
 }
index 7f8b20a..d4fbd2e 100644 (file)
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work)
        }
 
 re_arm:
-       queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
+       if (!bond->kill_timers)
+               queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
 out:
        read_unlock(&bond->lock);
 }
index 43f2ea5..6d79b78 100644 (file)
@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
 
        read_lock(&bond->lock);
 
+       if (bond->kill_timers)
+               goto out;
+
        /* rejoin all groups on bond device */
        __bond_resend_igmp_join_requests(bond->dev);
 
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
                        __bond_resend_igmp_join_requests(vlan_dev);
        }
 
-       if (--bond->igmp_retrans > 0)
+       if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
                queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+out:
        read_unlock(&bond->lock);
 }
 
@@ -2538,7 +2541,7 @@ void bond_mii_monitor(struct work_struct *work)
        }
 
 re_arm:
-       if (bond->params.miimon)
+       if (bond->params.miimon && !bond->kill_timers)
                queue_delayed_work(bond->wq, &bond->mii_work,
                                   msecs_to_jiffies(bond->params.miimon));
 out:
@@ -2886,7 +2889,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
        }
 
 re_arm:
-       if (bond->params.arp_interval)
+       if (bond->params.arp_interval && !bond->kill_timers)
                queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
        read_unlock(&bond->lock);
@@ -3154,7 +3157,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
        bond_ab_arp_probe(bond);
 
 re_arm:
-       if (bond->params.arp_interval)
+       if (bond->params.arp_interval && !bond->kill_timers)
                queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
        read_unlock(&bond->lock);
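
The bonding hunks above all add the same guard: a delayed-work monitor must not re-queue itself once bond->kill_timers has been set by the teardown path. The fragment below is a condensed, kernel-style sketch of that re-arm pattern rather than a standalone module; struct my_bond and my_monitor() are made-up stand-ins, while container_of(), queue_delayed_work() and the rwlock calls are the real kernel APIs the hunks rely on.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct my_bond {                        /* stand-in for struct bonding */
        struct workqueue_struct *wq;
        struct delayed_work mon_work;
        rwlock_t lock;
        int kill_timers;                /* set by the close/teardown path */
};

static void my_monitor(struct work_struct *work)
{
        struct my_bond *bond = container_of(work, struct my_bond,
                                            mon_work.work);

        read_lock(&bond->lock);
        if (bond->kill_timers)          /* teardown already in progress */
                goto out;

        /* ... periodic link/ARP monitoring would run here ... */

        /* re-arm only while the bond is still alive */
        if (!bond->kill_timers)
                queue_delayed_work(bond->wq, &bond->mon_work, HZ);
out:
        read_unlock(&bond->lock);
}
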
index a812492..2adc294 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/skbuff.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/io.h>
 
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
index 805076c..da5a5d9 100644 (file)
@@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
                if (te && te->ctx && te->client && te->client->redirect) {
                        update_tcb = te->client->redirect(te->ctx, old, new, e);
                        if (update_tcb) {
+                               rcu_read_lock();
                                l2t_hold(L2DATA(tdev), e);
+                               rcu_read_unlock();
                                set_l2t_ix(tdev, tid, e);
                        }
                }
        }
-       l2t_release(L2DATA(tdev), e);
+       l2t_release(tdev, e);
 }
 
 /*
@@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
                goto out_free;
 
        err = -ENOMEM;
-       L2DATA(dev) = t3_init_l2t(l2t_capacity);
+       RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
        if (!L2DATA(dev))
                goto out_free;
 
@@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter)
 
 out_free_l2t:
        t3_free_l2t(L2DATA(dev));
-       L2DATA(dev) = NULL;
+       rcu_assign_pointer(dev->l2opt, NULL);
 out_free:
        kfree(t);
        return err;
 }
 
+static void clean_l2_data(struct rcu_head *head)
+{
+       struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
+       t3_free_l2t(d);
+}
+
+
 void cxgb3_offload_deactivate(struct adapter *adapter)
 {
        struct t3cdev *tdev = &adapter->tdev;
        struct t3c_data *t = T3C_DATA(tdev);
+       struct l2t_data *d;
 
        remove_adapter(adapter);
        if (list_empty(&adapter_list))
@@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
 
        free_tid_maps(&t->tid_maps);
        T3C_DATA(tdev) = NULL;
-       t3_free_l2t(L2DATA(tdev));
-       L2DATA(tdev) = NULL;
+       rcu_read_lock();
+       d = L2DATA(tdev);
+       rcu_read_unlock();
+       rcu_assign_pointer(tdev->l2opt, NULL);
+       call_rcu(&d->rcu_head, clean_l2_data);
        if (t->nofail_skb)
                kfree_skb(t->nofail_skb);
        kfree(t);
index f452c40..4154097 100644 (file)
@@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
 struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
                             struct net_device *dev)
 {
-       struct l2t_entry *e;
-       struct l2t_data *d = L2DATA(cdev);
+       struct l2t_entry *e = NULL;
+       struct l2t_data *d;
+       int hash;
        u32 addr = *(u32 *) neigh->primary_key;
        int ifidx = neigh->dev->ifindex;
-       int hash = arp_hash(addr, ifidx, d);
        struct port_info *p = netdev_priv(dev);
        int smt_idx = p->port_id;
 
+       rcu_read_lock();
+       d = L2DATA(cdev);
+       if (!d)
+               goto done_rcu;
+
+       hash = arp_hash(addr, ifidx, d);
+
        write_lock_bh(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (e->addr == addr && e->ifindex == ifidx &&
@@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
        }
 done:
        write_unlock_bh(&d->lock);
+done_rcu:
+       rcu_read_unlock();
        return e;
 }
 
index 7a12d52..c5f5479 100644 (file)
@@ -76,6 +76,7 @@ struct l2t_data {
        atomic_t nfree;         /* number of free entries */
        rwlock_t lock;
        struct l2t_entry l2tab[0];
+       struct rcu_head rcu_head;       /* to handle rcu cleanup */
 };
 
 typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
@@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
 /*
  * Getting to the L2 data from an offload device.
  */
-#define L2DATA(dev) ((dev)->l2opt)
+#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
 
 #define W_TCB_L2T_IX    0
 #define S_TCB_L2T_IX    7
@@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
        return t3_l2t_send_slow(dev, skb, e);
 }
 
-static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
+static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
 {
-       if (atomic_dec_and_test(&e->refcnt))
+       struct l2t_data *d;
+
+       rcu_read_lock();
+       d = L2DATA(t);
+
+       if (atomic_dec_and_test(&e->refcnt) && d)
                t3_l2e_free(d, e);
+
+       rcu_read_unlock();
 }
 
 static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
 {
-       if (atomic_add_return(1, &e->refcnt) == 1)      /* 0 -> 1 transition */
+       if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
                atomic_dec(&d->nfree);
 }
 
index c9957b7..b4efa29 100644 (file)
@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev,
                setup_debugfs(adapter);
        }
 
+       /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+       pdev->needs_freset = 1;
+
        if (is_offload(adapter))
                attach_ulds(adapter);
 
index 8545c7a..a5a89ec 100644 (file)
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
                checksum += eeprom_data;
        }
 
+#ifdef CONFIG_PARISC
+       /* This is a signature and not a checksum on HP c8000 */
+       if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+               return E1000_SUCCESS;
+
+#endif
        if (checksum == (u16) EEPROM_SUM)
                return E1000_SUCCESS;
        else {
index 25a8c2a..0caf3c3 100644 (file)
@@ -1669,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv,
        u32 i = 0;
 
        list_for_each_entry(comp, &priv->rx_list.list, list) {
-               if (i <= cmd->rule_cnt) {
-                       rule_locs[i] = comp->fs.location;
-                       i++;
-               }
+               if (i == cmd->rule_cnt)
+                       return -EMSGSIZE;
+               rule_locs[i] = comp->fs.location;
+               i++;
        }
 
        cmd->data = MAX_FILER_IDX;
index 16ce45c..52a3900 100644 (file)
@@ -428,6 +428,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 
        status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
+       greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
 
        /* Wrap around descriptor ring */
        if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -490,7 +491,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
        if (nr_frags != 0)
                status = GRETH_TXBD_MORE;
 
-       status |= GRETH_TXBD_CSALL;
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               status |= GRETH_TXBD_CSALL;
        status |= skb_headlen(skb) & GRETH_BD_LEN;
        if (greth->tx_next == GRETH_TXBD_NUM_MASK)
                status |= GRETH_BD_WR;
@@ -513,7 +515,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
                greth->tx_skbuff[curr_tx] = NULL;
                bdp = greth->tx_bd_base + curr_tx;
 
-               status = GRETH_TXBD_CSALL | GRETH_BD_EN;
+               status = GRETH_BD_EN;
+               if (skb->ip_summed == CHECKSUM_PARTIAL)
+                       status |= GRETH_TXBD_CSALL;
                status |= frag->size & GRETH_BD_LEN;
 
                /* Wrap around descriptor ring */
@@ -641,6 +645,7 @@ static void greth_clean_tx(struct net_device *dev)
                                dev->stats.tx_fifo_errors++;
                }
                dev->stats.tx_packets++;
+               dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
                greth->tx_last = NEXT_TX(greth->tx_last);
                greth->tx_free++;
        }
@@ -695,6 +700,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
                greth->tx_skbuff[greth->tx_last] = NULL;
 
                greth_update_tx_stats(dev, stat);
+               dev->stats.tx_bytes += skb->len;
 
                bdp = greth->tx_bd_base + greth->tx_last;
 
@@ -796,6 +802,7 @@ static int greth_rx(struct net_device *dev, int limit)
                                memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
 
                                skb->protocol = eth_type_trans(skb, dev);
+                               dev->stats.rx_bytes += pkt_len;
                                dev->stats.rx_packets++;
                                netif_receive_skb(skb);
                        }
@@ -910,6 +917,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 
                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_packets++;
+                               dev->stats.rx_bytes += pkt_len;
                                netif_receive_skb(skb);
 
                                greth->rx_skbuff[greth->rx_cur] = newskb;
index 9a0040d..232a622 100644 (file)
@@ -103,6 +103,7 @@ struct greth_private {
 
        unsigned char *tx_bufs[GRETH_TXBD_NUM];
        unsigned char *rx_bufs[GRETH_RXBD_NUM];
+       u16 tx_bufs_length[GRETH_TXBD_NUM];
 
        u16 tx_next;
        u16 tx_last;
index 3e66792..d393f1e 100644 (file)
@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev)
                netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
                           netdev->irq, rc);
                do {
-                       rc = h_free_logical_lan(adapter->vdev->unit_address);
-               } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+                       lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+               } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 
                goto err_out;
        }
@@ -757,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        unsigned long set_attr, clr_attr, ret_attr;
        unsigned long set_attr6, clr_attr6;
-       long ret, ret6;
+       long ret, ret4, ret6;
        int rc1 = 0, rc2 = 0;
        int restart = 0;
 
@@ -770,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 
        set_attr = 0;
        clr_attr = 0;
+       set_attr6 = 0;
+       clr_attr6 = 0;
 
        if (data) {
                set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
@@ -784,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
        if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
            !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
            (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
-               ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+               ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
                                         set_attr, &ret_attr);
 
-               if (ret != H_SUCCESS) {
+               if (ret4 != H_SUCCESS) {
                        netdev_err(dev, "unable to change IPv4 checksum "
                                        "offload settings. %d rc=%ld\n",
-                                       data, ret);
+                                       data, ret4);
+
+                       h_illan_attributes(adapter->vdev->unit_address,
+                                          set_attr, clr_attr, &ret_attr);
+
+                       if (data == 1)
+                               dev->features &= ~NETIF_F_IP_CSUM;
 
-                       ret = h_illan_attributes(adapter->vdev->unit_address,
-                                                set_attr, clr_attr, &ret_attr);
                } else {
                        adapter->fw_ipv4_csum_support = data;
                }
@@ -804,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
                if (ret6 != H_SUCCESS) {
                        netdev_err(dev, "unable to change IPv6 checksum "
                                        "offload settings. %d rc=%ld\n",
-                                       data, ret);
+                                       data, ret6);
+
+                       h_illan_attributes(adapter->vdev->unit_address,
+                                          set_attr6, clr_attr6, &ret_attr);
+
+                       if (data == 1)
+                               dev->features &= ~NETIF_F_IPV6_CSUM;
 
-                       ret = h_illan_attributes(adapter->vdev->unit_address,
-                                                set_attr6, clr_attr6,
-                                                &ret_attr);
                } else
                        adapter->fw_ipv6_csum_support = data;
 
-               if (ret != H_SUCCESS || ret6 != H_SUCCESS)
+               if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
                        adapter->rx_csum = data;
                else
                        rc1 = -EIO;
@@ -930,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
        union ibmveth_buf_desc descs[6];
        int last, i;
        int force_bounce = 0;
+       dma_addr_t dma_addr;
 
        /*
         * veth handles a maximum of 6 segments including the header, so
@@ -994,17 +1004,16 @@ retry_bounce:
        }
 
        /* Map the header */
-       descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-                                                skb_headlen(skb),
-                                                DMA_TO_DEVICE);
-       if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+       dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+                                 skb_headlen(skb), DMA_TO_DEVICE);
+       if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                goto map_failed;
 
        descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+       descs[0].fields.address = dma_addr;
 
        /* Map the frags */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               unsigned long dma_addr;
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
@@ -1026,7 +1035,12 @@ retry_bounce:
                netdev->stats.tx_bytes += skb->len;
        }
 
-       for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+       dma_unmap_single(&adapter->vdev->dev,
+                        descs[0].fields.address,
+                        descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+                        DMA_TO_DEVICE);
+
+       for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
                dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
                               descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                               DMA_TO_DEVICE);
index 2279039..e1fcc95 100644 (file)
@@ -1321,8 +1321,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                if (ring_is_rsc_enabled(rx_ring))
                        pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
 
-               /* if this is a skb from previous receive DMA will be 0 */
-               if (rx_buffer_info->dma) {
+               /* linear means we are building an skb from multiple pages */
+               if (!skb_is_nonlinear(skb)) {
                        u16 hlen;
                        if (pkt_is_rsc &&
                            !(staterr & IXGBE_RXD_STAT_EOP) &&
index 05172c3..376e3e9 100644 (file)
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
                dest = macvlan_hash_lookup(port, eth->h_dest);
                if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
                        /* send to lowerdev first for its network taps */
-                       vlan->forward(vlan->lowerdev, skb);
+                       dev_forward_skb(vlan->lowerdev, skb);
 
                        return NET_XMIT_SUCCESS;
                }
index dfc8272..ed2a397 100644 (file)
@@ -799,5 +799,11 @@ static void __exit cleanup_netconsole(void)
        }
 }
 
-module_init(init_netconsole);
+/*
+ * Use late_initcall to ensure netconsole is initialized after the
+ * network device drivers when built into the kernel.
+ *
+ * late_initcall() and module_init() are identical when built as a module.
+ */
+late_initcall(init_netconsole);
 module_exit(cleanup_netconsole);
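
For reference, the effect of the initcall change: a built-in netconsole now initializes in the late_initcall phase, after device_initcall-level network drivers have probed, while a modular build is unaffected because late_initcall() collapses to module_init() there. The skeleton below only illustrates that pairing; demo_init/demo_exit are made-up names.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
        pr_info("demo: initialized\n");
        return 0;
}

static void __exit demo_exit(void)
{
        pr_info("demo: removed\n");
}

/* Built-in: runs after device_initcall-level drivers.
 * Built as a module: behaves exactly like module_init(). */
late_initcall(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
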
index 59fac77..a09a071 100644 (file)
@@ -127,8 +127,8 @@ struct pch_gbe_regs {
 
 /* Reset */
 #define PCH_GBE_ALL_RST         0x80000000  /* All reset */
-#define PCH_GBE_TX_RST          0x40000000  /* TX MAC, TX FIFO, TX DMA reset */
-#define PCH_GBE_RX_RST          0x04000000  /* RX MAC, RX FIFO, RX DMA reset */
+#define PCH_GBE_TX_RST          0x00008000  /* TX MAC, TX FIFO, TX DMA reset */
+#define PCH_GBE_RX_RST          0x00004000  /* RX MAC, RX FIFO, RX DMA reset */
 
 /* TCP/IP Accelerator Control */
 #define PCH_GBE_EX_LIST_EN      0x00000008  /* External List Enable */
@@ -276,6 +276,9 @@ struct pch_gbe_regs {
 #define PCH_GBE_RX_DMA_EN       0x00000002   /* Enables Receive DMA */
 #define PCH_GBE_TX_DMA_EN       0x00000001   /* Enables Transmission DMA */
 
+/* RX DMA STATUS */
+#define PCH_GBE_IDLE_CHECK       0xFFFFFFFE
+
 /* Wake On LAN Status */
 #define PCH_GBE_WLS_BR          0x00000008 /* Broadcast Address */
 #define PCH_GBE_WLS_MLT         0x00000004 /* Multicast Address */
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
 struct pch_gbe_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
+       unsigned char *rx_buffer;
        unsigned long time_stamp;
        u16 length;
        bool mapped;
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
 struct pch_gbe_rx_ring {
        struct pch_gbe_rx_desc *desc;
        dma_addr_t dma;
+       unsigned char *rx_buff_pool;
+       dma_addr_t rx_buff_pool_logic;
+       unsigned int rx_buff_pool_size;
        unsigned int size;
        unsigned int count;
        unsigned int next_to_use;
@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
        unsigned long rx_buffer_len;
        unsigned long tx_queue_len;
        bool have_msi;
+       bool rx_stop_flag;
 };
 
 extern const char pch_driver_version[];
index eac3c5c..b8b4ba2 100644 (file)
@@ -20,7 +20,6 @@
 
 #include "pch_gbe.h"
 #include "pch_gbe_api.h"
-#include <linux/prefetch.h>
 
 #define DRV_VERSION     "1.00"
 const char pch_driver_version[] = DRV_VERSION;
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_WATCHDOG_PERIOD                (1 * HZ)        /* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT      256
 #define PCH_GBE_PCI_BAR                        1
+#define PCH_GBE_RESERVE_MEMORY         0x200000        /* 2MB */
 
 /* Macros for ML7223 */
 #define PCI_VENDOR_ID_ROHM                     0x10db
 #define PCI_DEVICE_ID_ROHM_ML7223_GBE          0x8013
 
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_GBE          0x8802
+
 #define PCH_GBE_TX_WEIGHT         64
 #define PCH_GBE_RX_WEIGHT         64
 #define PCH_GBE_RX_BUFFER_WRITE   16
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;
        )
 
 /* Ethertype field values */
+#define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
 #define PCH_GBE_FRAME_SIZE_2048         2048
 #define PCH_GBE_FRAME_SIZE_4096         4096
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_INT_ENABLE_MASK ( \
        PCH_GBE_INT_RX_DMA_CMPLT |    \
        PCH_GBE_INT_RX_DSC_EMP   |    \
+       PCH_GBE_INT_RX_FIFO_ERR  |    \
        PCH_GBE_INT_WOL_DET      |    \
        PCH_GBE_INT_TX_CMPLT          \
        )
 
+#define PCH_GBE_INT_DISABLE_ALL                0
 
 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 
@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
        if (!tmp)
                pr_err("Error: busy bit is not cleared\n");
 }
+
+/**
+ * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
+ * @reg:       Pointer of register
+ * @bit:       Busy bit
+ */
+static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
+{
+       u32 tmp;
+       int ret = -1;
+       /* wait busy */
+       tmp = 20;
+       while ((ioread32(reg) & bit) && --tmp)
+               udelay(5);
+       if (!tmp)
+               pr_err("Error: busy bit is not cleared\n");
+       else
+               ret = 0;
+       return ret;
+}
+
 /**
  * pch_gbe_mac_mar_set - Set MAC address register
  * @hw:            Pointer to the HW structure
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
        return;
 }
 
+static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+{
+       /* Read the MAC address. and store to the private data */
+       pch_gbe_mac_read_mac_addr(hw);
+       iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
+       pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
+       /* Setup the MAC address */
+       pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+       return;
+}
+
 /**
  * pch_gbe_mac_init_rx_addrs - Initialize receive address's
  * @hw:        Pointer to the HW structure
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
 
        tcpip = ioread32(&hw->reg->TCPIP_ACC);
 
-       if (netdev->features & NETIF_F_RXCSUM) {
-               tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
-               tcpip |= PCH_GBE_RX_TCPIPACC_EN;
-       } else {
-               tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
-               tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
-       }
+       tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+       tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
        iowrite32(tcpip, &hw->reg->TCPIP_ACC);
        return;
 }
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
        iowrite32(rdba, &hw->reg->RX_DSC_BASE);
        iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
        iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-
-       /* Enables Receive DMA */
-       rxdma = ioread32(&hw->reg->DMA_CTRL);
-       rxdma |= PCH_GBE_RX_DMA_EN;
-       iowrite32(rxdma, &hw->reg->DMA_CTRL);
-       /* Enables Receive */
-       iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
 }
 
 /**
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
        spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
+static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+{
+       struct pch_gbe_hw *hw = &adapter->hw;
+       u32 rxdma;
+       u16 value;
+       int ret;
+
+       /* Disable Receive DMA */
+       rxdma = ioread32(&hw->reg->DMA_CTRL);
+       rxdma &= ~PCH_GBE_RX_DMA_EN;
+       iowrite32(rxdma, &hw->reg->DMA_CTRL);
+       /* Wait Rx DMA BUS is IDLE */
+       ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
+       if (ret) {
+               /* Disable Bus master */
+               pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+               value &= ~PCI_COMMAND_MASTER;
+               pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+               /* Stop Receive */
+               pch_gbe_mac_reset_rx(hw);
+               /* Enable Bus master */
+               value |= PCI_COMMAND_MASTER;
+               pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+       } else {
+               /* Stop Receive */
+               pch_gbe_mac_reset_rx(hw);
+       }
+}
+
+static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+{
+       u32 rxdma;
+
+       /* Enables Receive DMA */
+       rxdma = ioread32(&hw->reg->DMA_CTRL);
+       rxdma |= PCH_GBE_RX_DMA_EN;
+       iowrite32(rxdma, &hw->reg->DMA_CTRL);
+       /* Enables Receive */
+       iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+       return;
+}
+
 /**
  * pch_gbe_intr - Interrupt Handler
  * @irq:   Interrupt number
@@ -1123,7 +1191,17 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
        if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
                adapter->stats.intr_rx_frame_err_count++;
        if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
-               adapter->stats.intr_rx_fifo_err_count++;
+               if (!adapter->rx_stop_flag) {
+                       adapter->stats.intr_rx_fifo_err_count++;
+                       pr_debug("Rx fifo over run\n");
+                       adapter->rx_stop_flag = true;
+                       int_en = ioread32(&hw->reg->INT_EN);
+                       iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
+                                 &hw->reg->INT_EN);
+                       pch_gbe_stop_receive(adapter);
+                       int_st |= ioread32(&hw->reg->INT_ST);
+                       int_st = int_st & ioread32(&hw->reg->INT_EN);
+               }
        if (int_st & PCH_GBE_INT_RX_DMA_ERR)
                adapter->stats.intr_rx_dma_err_count++;
        if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
@@ -1135,21 +1213,18 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
        /* When Rx descriptor is empty  */
        if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
                adapter->stats.intr_rx_dsc_empty_count++;
-               pr_err("Rx descriptor is empty\n");
+               pr_debug("Rx descriptor is empty\n");
                int_en = ioread32(&hw->reg->INT_EN);
                iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
                if (hw->mac.tx_fc_enable) {
                        /* Set Pause packet */
                        pch_gbe_mac_set_pause_packet(hw);
                }
-               if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
-                   == 0) {
-                       return IRQ_HANDLED;
-               }
        }
 
        /* When request status is Receive interruption */
-       if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
+       if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
+           (adapter->rx_stop_flag == true)) {
                if (likely(napi_schedule_prep(&adapter->napi))) {
                        /* Enable only Rx Descriptor empty */
                        atomic_inc(&adapter->irq_sem);
@@ -1185,29 +1260,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
        unsigned int i;
        unsigned int bufsz;
 
-       bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
+       bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
        i = rx_ring->next_to_use;
 
        while ((cleaned_count--)) {
                buffer_info = &rx_ring->buffer_info[i];
-               skb = buffer_info->skb;
-               if (skb) {
-                       skb_trim(skb, 0);
-               } else {
-                       skb = netdev_alloc_skb(netdev, bufsz);
-                       if (unlikely(!skb)) {
-                               /* Better luck next round */
-                               adapter->stats.rx_alloc_buff_failed++;
-                               break;
-                       }
-                       /* 64byte align */
-                       skb_reserve(skb, PCH_GBE_DMA_ALIGN);
-
-                       buffer_info->skb = skb;
-                       buffer_info->length = adapter->rx_buffer_len;
+               skb = netdev_alloc_skb(netdev, bufsz);
+               if (unlikely(!skb)) {
+                       /* Better luck next round */
+                       adapter->stats.rx_alloc_buff_failed++;
+                       break;
                }
+               /* align */
+               skb_reserve(skb, NET_IP_ALIGN);
+               buffer_info->skb = skb;
+
                buffer_info->dma = dma_map_single(&pdev->dev,
-                                                 skb->data,
+                                                 buffer_info->rx_buffer,
                                                  buffer_info->length,
                                                  DMA_FROM_DEVICE);
                if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
@@ -1240,6 +1309,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
        return;
 }
 
+static int
+pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
+                        struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct pch_gbe_buffer *buffer_info;
+       unsigned int i;
+       unsigned int bufsz;
+       unsigned int size;
+
+       bufsz = adapter->rx_buffer_len;
+
+       size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
+       rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
+                                               &rx_ring->rx_buff_pool_logic,
+                                               GFP_KERNEL);
+       if (!rx_ring->rx_buff_pool) {
+               pr_err("Unable to allocate memory for the receive buffer pool\n");
+               return -ENOMEM;
+       }
+       memset(rx_ring->rx_buff_pool, 0, size);
+       rx_ring->rx_buff_pool_size = size;
+       for (i = 0; i < rx_ring->count; i++) {
+               buffer_info = &rx_ring->buffer_info[i];
+               buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
+               buffer_info->length = bufsz;
+       }
+       return 0;
+}
+
 /**
  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
  * @adapter:   Board private structure
@@ -1285,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
        struct sk_buff *skb;
        unsigned int i;
        unsigned int cleaned_count = 0;
-       bool cleaned = false;
+       bool cleaned = true;
 
        pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
 
@@ -1296,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 
        while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
                pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
-               cleaned = true;
                buffer_info = &tx_ring->buffer_info[i];
                skb = buffer_info->skb;
 
@@ -1339,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
                tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 
                /* weight of a sort for tx, to avoid endless transmit cleanup */
-               if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
+               if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+                       cleaned = false;
                        break;
+               }
        }
        pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
                 cleaned_count);
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
        unsigned int i;
        unsigned int cleaned_count = 0;
        bool cleaned = false;
-       struct sk_buff *skb, *new_skb;
+       struct sk_buff *skb;
        u8 dma_status;
        u16 gbec_status;
        u32 tcp_ip_status;
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
                rx_desc->gbec_status = DSC_INIT16;
                buffer_info = &rx_ring->buffer_info[i];
                skb = buffer_info->skb;
+               buffer_info->skb = NULL;
 
                /* unmap dma */
                dma_unmap_single(&pdev->dev, buffer_info->dma,
                                   buffer_info->length, DMA_FROM_DEVICE);
                buffer_info->mapped = false;
-               /* Prefetch the packet */
-               prefetch(skb->data);
 
                pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
                         "TCP:0x%08x]  BufInf = 0x%p\n",
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
                        pr_err("Receive CRC Error\n");
                } else {
                        /* get receive length */
-                       /* length convert[-3] */
-                       length = (rx_desc->rx_words_eob) - 3;
-
-                       /* Decide the data conversion method */
-                       if (!(netdev->features & NETIF_F_RXCSUM)) {
-                               /* [Header:14][payload] */
-                               if (NET_IP_ALIGN) {
-                                       /* Because alignment differs,
-                                        * the new_skb is newly allocated,
-                                        * and data is copied to new_skb.*/
-                                       new_skb = netdev_alloc_skb(netdev,
-                                                        length + NET_IP_ALIGN);
-                                       if (!new_skb) {
-                                               /* dorrop error */
-                                               pr_err("New skb allocation "
-                                                       "Error\n");
-                                               goto dorrop;
-                                       }
-                                       skb_reserve(new_skb, NET_IP_ALIGN);
-                                       memcpy(new_skb->data, skb->data,
-                                              length);
-                                       skb = new_skb;
-                               } else {
-                                       /* DMA buffer is used as SKB as it is.*/
-                                       buffer_info->skb = NULL;
-                               }
-                       } else {
-                               /* [Header:14][padding:2][payload] */
-                               /* The length includes padding length */
-                               length = length - PCH_GBE_DMA_PADDING;
-                               if ((length < copybreak) ||
-                                   (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
-                                       /* Because alignment differs,
-                                        * the new_skb is newly allocated,
-                                        * and data is copied to new_skb.
-                                        * Padding data is deleted
-                                        * at the time of a copy.*/
-                                       new_skb = netdev_alloc_skb(netdev,
-                                                        length + NET_IP_ALIGN);
-                                       if (!new_skb) {
-                                               /* dorrop error */
-                                               pr_err("New skb allocation "
-                                                       "Error\n");
-                                               goto dorrop;
-                                       }
-                                       skb_reserve(new_skb, NET_IP_ALIGN);
-                                       memcpy(new_skb->data, skb->data,
-                                              ETH_HLEN);
-                                       memcpy(&new_skb->data[ETH_HLEN],
-                                              &skb->data[ETH_HLEN +
-                                              PCH_GBE_DMA_PADDING],
-                                              length - ETH_HLEN);
-                                       skb = new_skb;
-                               } else {
-                                       /* Padding data is deleted
-                                        * by moving header data.*/
-                                       memmove(&skb->data[PCH_GBE_DMA_PADDING],
-                                               &skb->data[0], ETH_HLEN);
-                                       skb_reserve(skb, NET_IP_ALIGN);
-                                       buffer_info->skb = NULL;
-                               }
-                       }
-                       /* The length includes FCS length */
-                       length = length - ETH_FCS_LEN;
+                       /* length convert[-3], length includes FCS length */
+                       length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
+                       if (rx_desc->rx_words_eob & 0x02)
+                               length = length - 4;
+                       /*
+                        * buffer_info->rx_buffer: [Header:14][payload]
+                        * skb->data: [Reserve:2][Header:14][payload]
+                        */
+                       memcpy(skb->data, buffer_info->rx_buffer, length);
+
                        /* update status of driver */
                        adapter->stats.rx_bytes += length;
                        adapter->stats.rx_packets++;
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
                        pr_debug("Receive skb->ip_summed: %d length: %d\n",
                                 skb->ip_summed, length);
                }
-dorrop:
                /* return some buffers to hardware, one at a time is too slow */
                if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
                        pch_gbe_alloc_rx_buffers(adapter, rx_ring,
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
                pr_err("Error: can't bring device up\n");
                return err;
        }
+       err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
+       if (err) {
+               pr_err("Error: can't bring device up\n");
+               return err;
+       }
        pch_gbe_alloc_tx_buffers(adapter, tx_ring);
        pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
        adapter->tx_queue_len = netdev->tx_queue_len;
+       pch_gbe_start_receive(&adapter->hw);
 
        mod_timer(&adapter->watchdog_timer, jiffies);
 
@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 void pch_gbe_down(struct pch_gbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
 
        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
        pch_gbe_reset(adapter);
        pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
        pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+
+       pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
+                           rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+       rx_ring->rx_buff_pool_logic = 0;
+       rx_ring->rx_buff_pool_size = 0;
+       rx_ring->rx_buff_pool = NULL;
 }
 
 /**
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        int max_frame;
+       unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
+       int err;
 
        max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
        else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
                adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
        else
-               adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
-       netdev->mtu = new_mtu;
-       adapter->hw.mac.max_frame_size = max_frame;
+               adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
 
-       if (netif_running(netdev))
-               pch_gbe_reinit_locked(adapter);
-       else
+       if (netif_running(netdev)) {
+               pch_gbe_down(adapter);
+               err = pch_gbe_up(adapter);
+               if (err) {
+                       adapter->rx_buffer_len = old_rx_buffer_len;
+                       pch_gbe_up(adapter);
+                       return -ENOMEM;
+               } else {
+                       netdev->mtu = new_mtu;
+                       adapter->hw.mac.max_frame_size = max_frame;
+               }
+       } else {
                pch_gbe_reset(adapter);
+               netdev->mtu = new_mtu;
+               adapter->hw.mac.max_frame_size = max_frame;
+       }
 
        pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
                 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
@@ -2099,33 +2168,39 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 {
        struct pch_gbe_adapter *adapter =
            container_of(napi, struct pch_gbe_adapter, napi);
-       struct net_device *netdev = adapter->netdev;
        int work_done = 0;
        bool poll_end_flag = false;
        bool cleaned = false;
+       u32 int_en;
 
        pr_debug("budget : %d\n", budget);
 
-       /* Keep link state information with original netdev */
-       if (!netif_carrier_ok(netdev)) {
-               poll_end_flag = true;
-       } else {
-               cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
-               pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+       pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+       cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
 
-               if (cleaned)
-                       work_done = budget;
-               /* If no Tx and not enough Rx work done,
-                * exit the polling mode
-                */
-               if ((work_done < budget) || !netif_running(netdev))
-                       poll_end_flag = true;
-       }
+       if (!cleaned)
+               work_done = budget;
+       /* If no Tx and not enough Rx work done,
+        * exit the polling mode
+        */
+       if (work_done < budget)
+               poll_end_flag = true;
 
        if (poll_end_flag) {
                napi_complete(napi);
+               if (adapter->rx_stop_flag) {
+                       adapter->rx_stop_flag = false;
+                       pch_gbe_start_receive(&adapter->hw);
+               }
                pch_gbe_irq_enable(adapter);
-       }
+       } else
+               if (adapter->rx_stop_flag) {
+                       adapter->rx_stop_flag = false;
+                       pch_gbe_start_receive(&adapter->hw);
+                       int_en = ioread32(&adapter->hw.reg->INT_EN);
+                       iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
+                               &adapter->hw.reg->INT_EN);
+               }
 
        pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
                 poll_end_flag, work_done, budget);
@@ -2452,6 +2527,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
         .class_mask = (0xFFFF00)
         },
+       {.vendor = PCI_VENDOR_ID_ROHM,
+        .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+        .class_mask = (0xFFFF00)
+        },
        /* required last entry */
        {0}
 };
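
The pch_gbe_change_mtu() hunk above stops calling pch_gbe_reinit_locked() and instead does an explicit down/up so the receive buffer pool can be resized for the new frame size, rolling back to the old buffer length if the interface cannot be brought back up. A minimal user-space sketch of that rollback pattern; bring_down(), bring_up() and pick_buffer_len() are made-up stand-ins, and the failure threshold is invented purely so the rollback branch runs:

#include <stdio.h>

/* Hypothetical stand-ins for pch_gbe_down()/pch_gbe_up(); they only
 * illustrate the control flow, not the real hardware handling. */
static void bring_down(void) { }

static int bring_up(unsigned long buf_len)
{
        /* Pretend allocations above 8 KiB fail so the rollback path runs. */
        return buf_len > 8192 ? -1 : 0;
}

/* Mirror the <=2048 / <=4096 / <=8192 / jumbo buckets from the hunk. */
static unsigned long pick_buffer_len(int max_frame)
{
        if (max_frame <= 2048)
                return 2048;
        if (max_frame <= 4096)
                return 4096;
        if (max_frame <= 8192)
                return 8192;
        return 10240;
}

static unsigned long rx_buffer_len = 2048;

static int change_mtu(int new_mtu, int running)
{
        int max_frame = new_mtu + 14 + 4;       /* Ethernet header + FCS */
        unsigned long old_len = rx_buffer_len;

        rx_buffer_len = pick_buffer_len(max_frame);
        if (!running)
                return 0;

        bring_down();
        if (bring_up(rx_buffer_len)) {
                /* Allocation failed: restore the old length and recover. */
                rx_buffer_len = old_len;
                bring_up(rx_buffer_len);
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("mtu 1500: rc=%d len=%lu\n", change_mtu(1500, 1), rx_buffer_len);
        printf("mtu 9000: rc=%d len=%lu\n", change_mtu(9000, 1), rx_buffer_len);
        return 0;
}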
index cb6e0b4..edd7304 100644 (file)
@@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
        prune_rx_ts(dp83640);
 
        if (list_empty(&dp83640->rxpool)) {
-               pr_warning("dp83640: rx timestamp pool is empty\n");
+               pr_debug("dp83640: rx timestamp pool is empty\n");
                goto out;
        }
        rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640,
        skb = skb_dequeue(&dp83640->tx_queue);
 
        if (!skb) {
-               pr_warning("dp83640: have timestamp but tx_queue empty\n");
+               pr_debug("dp83640: have timestamp but tx_queue empty\n");
                return;
        }
        ns = phy2txts(phy_txts);
index 10e5d98..edfa15d 100644 (file)
@@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
                        continue;
                }
 
-               mtu = pch->chan->mtu - hdrlen;
+               /*
+                * hdrlen includes the 2-byte PPP protocol field, but the
+                * MTU counts only the payload excluding the protocol field.
+                * (RFC1661 Section 2)
+                */
+               mtu = pch->chan->mtu - (hdrlen - 2);
                if (mtu < 4)
                        mtu = 4;
                if (flen > mtu)
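
For the ppp_mp_explode() fix above, a quick numeric check: hdrlen includes the 2-byte PPP protocol field, while a channel's MTU counts only the payload that follows it, so the usable fragment size is mtu - (hdrlen - 2). The hdrlen value below is an illustrative assumption, not taken from the driver:

#include <stdio.h>

int main(void)
{
        int chan_mtu = 1500;    /* channel MTU: payload after the protocol field */
        int hdrlen   = 6;       /* e.g. 4-byte multilink header + 2-byte protocol field */

        int before = chan_mtu - hdrlen;         /* old code: 2 bytes too small */
        int after  = chan_mtu - (hdrlen - 2);   /* fixed: protocol field not double-counted */

        printf("fragment payload: old %d, fixed %d\n", before, after);
        return 0;
}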
index 1a3033d..d17d062 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/clk.h>
 #include <linux/phy.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 #include <linux/types.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
index 02339b3..c236670 100644 (file)
@@ -407,6 +407,7 @@ enum rtl_register_content {
        RxOK            = 0x0001,
 
        /* RxStatusDesc */
+       RxBOVF  = (1 << 24),
        RxFOVF  = (1 << 23),
        RxRWT   = (1 << 22),
        RxRES   = (1 << 21),
@@ -682,6 +683,7 @@ struct rtl8169_private {
        struct mii_if_info mii;
        struct rtl8169_counters counters;
        u32 saved_wolopts;
+       u32 opts1_mask;
 
        struct rtl_fw {
                const struct firmware *fw;
@@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1);
 MODULE_FIRMWARE(FIRMWARE_8168D_2);
 MODULE_FIRMWARE(FIRMWARE_8168E_1);
 MODULE_FIRMWARE(FIRMWARE_8168E_2);
+MODULE_FIRMWARE(FIRMWARE_8168E_3);
 MODULE_FIRMWARE(FIRMWARE_8105E_1);
 
 static int rtl8169_open(struct net_device *dev);
@@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev,
        netif_err(tp, link, dev, "PHY reset failed\n");
 }
 
+static bool rtl_tbi_enabled(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
+           (RTL_R8(PHYstatus) & TBI_Enable);
+}
+
 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 {
        void __iomem *ioaddr = tp->mmio_addr;
@@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
                           ADVERTISED_1000baseT_Half |
                           ADVERTISED_1000baseT_Full : 0));
 
-       if (RTL_R8(PHYstatus) & TBI_Enable)
+       if (rtl_tbi_enabled(tp))
                netif_info(tp, link, dev, "TBI auto-negotiating\n");
 }
 
@@ -3319,9 +3330,16 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
 
 static void r810x_pll_power_down(struct rtl8169_private *tp)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
+
        if (__rtl8169_get_wol(tp) & WAKE_ANY) {
                rtl_writephy(tp, 0x1f, 0x0000);
                rtl_writephy(tp, MII_BMCR, 0x0000);
+
+               if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
+                   tp->mac_version == RTL_GIGA_MAC_VER_30)
+                       RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
+                               AcceptMulticast | AcceptMyPhys);
                return;
        }
 
@@ -3417,7 +3435,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
                rtl_writephy(tp, MII_BMCR, 0x0000);
 
                if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
-                   tp->mac_version == RTL_GIGA_MAC_VER_33)
+                   tp->mac_version == RTL_GIGA_MAC_VER_33 ||
+                   tp->mac_version == RTL_GIGA_MAC_VER_34)
                        RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
                                AcceptMulticast | AcceptMyPhys);
                return;
@@ -3727,8 +3746,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
-       if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
-           (RTL_R8(PHYstatus) & TBI_Enable)) {
+       if (rtl_tbi_enabled(tp)) {
                tp->set_speed = rtl8169_set_speed_tbi;
                tp->get_settings = rtl8169_gset_tbi;
                tp->phy_reset_enable = rtl8169_tbi_reset_enable;
@@ -3777,6 +3795,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->intr_event = cfg->intr_event;
        tp->napi_event = cfg->napi_event;
 
+       tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
+               ~(RxBOVF | RxFOVF) : ~0;
+
        init_timer(&tp->timer);
        tp->timer.data = (unsigned long) dev;
        tp->timer.function = rtl8169_phy_timer;
@@ -3988,6 +4009,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
                while (RTL_R8(TxPoll) & NPQ)
                        udelay(20);
        } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+               RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
                while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
                        udelay(100);
        } else {
@@ -5314,7 +5336,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                u32 status;
 
                rmb();
-               status = le32_to_cpu(desc->opts1);
+               status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
 
                if (status & DescOwn)
                        break;
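
The r8169 change above computes an opts1_mask at probe time and applies it to each RX descriptor status word, so the RxBOVF/RxFOVF overflow bits are ignored on every chip except the original RTL_GIGA_MAC_VER_01. A standalone sketch of the same masking idea; the overflow bit positions match the hunk, the ownership bit position is assumed for illustration:

#include <stdio.h>
#include <stdint.h>

#define RXBOVF  (1u << 24)
#define RXFOVF  (1u << 23)
#define DESCOWN (1u << 31)      /* illustrative position for the ownership bit */

int main(void)
{
        int is_ver_01 = 0;
        uint32_t opts1_mask = is_ver_01 ? ~0u : ~(RXBOVF | RXFOVF);

        uint32_t raw = RXFOVF | 0x5dc;          /* overflow bit set, length 1500 */
        uint32_t status = raw & opts1_mask;     /* overflow bit ignored on newer chips */

        printf("raw=0x%08x masked=0x%08x own=%d\n",
               (unsigned)raw, (unsigned)status, !!(status & DESCOWN));
        return 0;
}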
index faca764..b59abc7 100644 (file)
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)
 {
        struct pci_dev *pci_dev = efx->pci_dev;
        dma_addr_t dma_mask = efx->type->max_dma_mask;
-       bool use_wc;
        int rc;
 
        netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)
                rc = -EIO;
                goto fail3;
        }
-
-       /* bug22643: If SR-IOV is enabled then tx push over a write combined
-        * mapping is unsafe. We need to disable write combining in this case.
-        * MSI is unsupported when SR-IOV is enabled, and the firmware will
-        * have removed the MSI capability. So write combining is safe if
-        * there is an MSI capability.
-        */
-       use_wc = (!EFX_WORKAROUND_22643(efx) ||
-                 pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
-       if (use_wc)
-               efx->membase = ioremap_wc(efx->membase_phys,
-                                         efx->type->mem_map_size);
-       else
-               efx->membase = ioremap_nocache(efx->membase_phys,
-                                              efx->type->mem_map_size);
+       efx->membase = ioremap_nocache(efx->membase_phys,
+                                      efx->type->mem_map_size);
        if (!efx->membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not map memory BAR at %llx+%x\n",
index cc97880..751d1ec 100644 (file)
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
        _efx_writed(efx, value->u32[2], reg + 8);
        _efx_writed(efx, value->u32[3], reg + 12);
 #endif
-       wmb();
        mmiowb();
        spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
        __raw_writel((__force u32)value->u32[0], membase + addr);
        __raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
-       wmb();
        mmiowb();
        spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
 
        /* No lock required */
        _efx_writed(efx, value->u32[0], reg);
-       wmb();
 }
 
 /* Read a 128-bit CSR, locking as appropriate. */
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
 
        spin_lock_irqsave(&efx->biu_lock, flags);
        value->u32[0] = _efx_readd(efx, reg + 0);
-       rmb();
        value->u32[1] = _efx_readd(efx, reg + 4);
        value->u32[2] = _efx_readd(efx, reg + 8);
        value->u32[3] = _efx_readd(efx, reg + 12);
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
        value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
        value->u32[0] = (__force __le32)__raw_readl(membase + addr);
-       rmb();
        value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
        spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
        _efx_writed(efx, value->u32[2], reg + 8);
        _efx_writed(efx, value->u32[3], reg + 12);
 #endif
-       wmb();
 }
 #define efx_writeo_page(efx, value, reg, page)                         \
        _efx_writeo_page(efx, value,                                    \
index 3dd45ed..81a4253 100644 (file)
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
        return &nic_data->mcdi;
 }
 
-static inline void
-efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
-{
-       struct siena_nic_data *nic_data = efx->nic_data;
-       value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
-}
-
-static inline void
-efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
-{
-       struct siena_nic_data *nic_data = efx->nic_data;
-       __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
-}
-
 void efx_mcdi_init(struct efx_nic *efx)
 {
        struct efx_mcdi_iface *mcdi;
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                            const u8 *inbuf, size_t inlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       unsigned pdu = MCDI_PDU(efx);
-       unsigned doorbell = MCDI_DOORBELL(efx);
+       unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+       unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
        unsigned int i;
        efx_dword_t hdr;
        u32 xflags, seqno;
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                             MCDI_HEADER_SEQ, seqno,
                             MCDI_HEADER_XFLAGS, xflags);
 
-       efx_mcdi_writed(efx, &hdr, pdu);
+       efx_writed(efx, &hdr, pdu);
 
        for (i = 0; i < inlen; i += 4)
-               efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
-                               pdu + 4 + i);
+               _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+
+       /* Ensure the header and payload are written out before the doorbell */
+       wmb();
 
        /* ring the doorbell with a distinctive value */
-       EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
-       efx_mcdi_writed(efx, &hdr, doorbell);
+       _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
 }
 
 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       unsigned int pdu = MCDI_PDU(efx);
+       unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
        int i;
 
        BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
        BUG_ON(outlen & 3 || outlen >= 0x100);
 
        for (i = 0; i < outlen; i += 4)
-               efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
+               *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
 }
 
 static int efx_mcdi_poll(struct efx_nic *efx)
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        unsigned int time, finish;
        unsigned int respseq, respcmd, error;
-       unsigned int pdu = MCDI_PDU(efx);
+       unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
        unsigned int rc, spins;
        efx_dword_t reg;
 
@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)
 
                time = get_seconds();
 
-               efx_mcdi_readd(efx, &reg, pdu);
+               rmb();
+               efx_readd(efx, &reg, pdu);
 
                /* All 1's indicates that shared memory is in reset (and is
                 * not a valid header). Wait for it to come out reset before
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
                          respseq, mcdi->seqno);
                rc = EIO;
        } else if (error) {
-               efx_mcdi_readd(efx, &reg, pdu + 4);
+               efx_readd(efx, &reg, pdu + 4);
                switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
 #define TRANSLATE_ERROR(name)                                  \
                case MC_CMD_ERR_ ## name:                       \
@@ -222,21 +210,21 @@ out:
 /* Test and clear MC-rebooted flag for this port/function */
 int efx_mcdi_poll_reboot(struct efx_nic *efx)
 {
-       unsigned int addr = MCDI_REBOOT_FLAG(efx);
+       unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
        efx_dword_t reg;
        uint32_t value;
 
        if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
                return false;
 
-       efx_mcdi_readd(efx, &reg, addr);
+       efx_readd(efx, &reg, addr);
        value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
 
        if (value == 0)
                return 0;
 
        EFX_ZERO_DWORD(reg);
-       efx_mcdi_writed(efx, &reg, addr);
+       efx_writed(efx, &reg, addr);
 
        if (value == MC_STATUS_DWORD_ASSERT)
                return -EINTR;
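
The MCDI copyin path above switches to plain efx_writed()/_efx_writed() stores and keeps exactly one write barrier before the doorbell, instead of a barrier after every word. A rough user-space analogue using a C11 release fence; the buffer layout, names and doorbell value are illustrative, not the Siena register map:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t shared_mem[64];         /* stand-in for the MCDI shared memory */
static _Atomic uint32_t doorbell;       /* stand-in for the doorbell register */

static void mcdi_copyin(uint32_t hdr, const uint8_t *inbuf, size_t inlen)
{
        size_t i;

        shared_mem[0] = hdr;
        for (i = 0; i < inlen; i += 4)
                memcpy(&shared_mem[1 + i / 4], inbuf + i, 4);

        /* One barrier: header and payload must be visible before the
         * doorbell write, mirroring the single wmb() in the hunk. */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&doorbell, 0x45789abc, memory_order_relaxed);
}

int main(void)
{
        uint8_t payload[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

        mcdi_copyin(0xabcd0001, payload, sizeof(payload));
        printf("doorbell=0x%08x word1=0x%08x\n",
               (unsigned)atomic_load(&doorbell), (unsigned)shared_mem[1]);
        return 0;
}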
index bafa23a..3edfbaf 100644 (file)
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
 
                size = min_t(size_t, table->step, 16);
 
-               if (table->offset >= efx->type->mem_map_size) {
-                       /* No longer mapped; return dummy data */
-                       memcpy(buf, "\xde\xc0\xad\xde", 4);
-                       buf += table->rows * size;
-                       continue;
-               }
-
                for (i = 0; i < table->rows; i++) {
                        switch (table->step) {
                        case 4: /* 32-bit register or SRAM */
index 4bd1f28..7443f99 100644 (file)
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 /**
  * struct siena_nic_data - Siena NIC state
  * @mcdi: Management-Controller-to-Driver Interface
- * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
  * @wol_filter_id: Wake-on-LAN packet filter id
  */
 struct siena_nic_data {
        struct efx_mcdi_iface mcdi;
-       void __iomem *mcdi_smem;
        int wol_filter_id;
 };
 
index 5735e84..2c3bd93 100644 (file)
@@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx)
        efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
        efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
 
-       /* Initialise MCDI */
-       nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
-                                             FR_CZ_MC_TREG_SMEM,
-                                             FR_CZ_MC_TREG_SMEM_STEP *
-                                             FR_CZ_MC_TREG_SMEM_ROWS);
-       if (!nic_data->mcdi_smem) {
-               netif_err(efx, probe, efx->net_dev,
-                         "could not map MCDI at %llx+%x\n",
-                         (unsigned long long)efx->membase_phys +
-                         FR_CZ_MC_TREG_SMEM,
-                         FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
-               rc = -ENOMEM;
-               goto fail1;
-       }
        efx_mcdi_init(efx);
 
        /* Recover from a failed assertion before probing */
        rc = efx_mcdi_handle_assertion(efx);
        if (rc)
-               goto fail2;
+               goto fail1;
 
        /* Let the BMC know that the driver is now in charge of link and
         * filter settings. We must do this before we reset the NIC */
@@ -324,7 +310,6 @@ fail4:
 fail3:
        efx_mcdi_drv_attach(efx, false, NULL);
 fail2:
-       iounmap(nic_data->mcdi_smem);
 fail1:
        kfree(efx->nic_data);
        return rc;
@@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx)
 
 static void siena_remove_nic(struct efx_nic *efx)
 {
-       struct siena_nic_data *nic_data = efx->nic_data;
-
        efx_nic_free_buffer(efx, &efx->irq_status);
 
        siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx)
                efx_mcdi_drv_attach(efx, false, NULL);
 
        /* Tear down the private nic state */
-       iounmap(nic_data->mcdi_smem);
-       kfree(nic_data);
+       kfree(efx->nic_data);
        efx->nic_data = NULL;
 }
 
@@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = {
        .default_mac_ops = &efx_mcdi_mac_operations,
 
        .revision = EFX_REV_SIENA_A0,
-       .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
+       .mem_map_size = (FR_CZ_MC_TREG_SMEM +
+                        FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
        .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
        .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
        .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
index 99ff114..e4dd3a7 100644 (file)
@@ -38,8 +38,6 @@
 #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
 /* Legacy interrupt storm when interrupt fifo fills */
 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
-/* Write combining and sriov=enabled are incompatible */
-#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
 
 /* Spurious parity errors in TSORT buffers */
 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
index dc3fbf6..4a1374d 100644 (file)
@@ -6234,12 +6234,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
-#ifdef BCM_KERNEL_SUPPORTS_8021Q
        if (vlan_tx_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
                vlan = vlan_tx_tag_get(skb);
        }
-#endif
 
        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
index 15772b1..13c1f04 100644 (file)
@@ -59,6 +59,7 @@
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
 #define USB_PRODUCT_IPHONE_4   0x1297
+#define USB_PRODUCT_IPHONE_4_VZW 0x129c
 
 #define IPHETH_USBINTF_CLASS    255
 #define IPHETH_USBINTF_SUBCLASS 253
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
                IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, ipheth_table);
index 2d4c091..2d394af 100644 (file)
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
        case ADC_DC_CAL:
                /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
                if (!IS_CHAN_B(chan) &&
-                   !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
+                   !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+                     IS_CHAN_HT20(chan)))
                        supported = true;
                break;
        }
index 2339728..3e69c63 100644 (file)
@@ -1514,7 +1514,7 @@ static const u32 ar9300_2p2_mac_core[][2] = {
        {0x00008258, 0x00000000},
        {0x0000825c, 0x40000000},
        {0x00008260, 0x00080922},
-       {0x00008264, 0x9bc00010},
+       {0x00008264, 0x9d400010},
        {0x00008268, 0xffffffff},
        {0x0000826c, 0x0000ffff},
        {0x00008270, 0x00000000},
index 1baca8e..fcafec0 100644 (file)
@@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
                REG_WRITE_ARRAY(&ah->iniModesAdditional,
                                modesIndex, regWrites);
 
-       if (AR_SREV_9300(ah))
+       if (AR_SREV_9330(ah))
                REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
 
        if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
index 6530694..722967b 100644 (file)
@@ -2303,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
        mutex_lock(&sc->mutex);
        cancel_delayed_work_sync(&sc->tx_complete_work);
 
+       if (ah->ah_flags & AH_UNPLUGGED) {
+               ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
+               mutex_unlock(&sc->mutex);
+               return;
+       }
+
        if (sc->sc_flags & SC_OP_INVALID) {
                ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
                mutex_unlock(&sc->mutex);
index 9a48501..4c21f8c 100644 (file)
@@ -205,14 +205,22 @@ static void ath_rx_remove_buffer(struct ath_softc *sc,
 
 static void ath_rx_edma_cleanup(struct ath_softc *sc)
 {
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf;
 
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
 
        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
-               if (bf->bf_mpdu)
+               if (bf->bf_mpdu) {
+                       dma_unmap_single(sc->dev, bf->bf_buf_addr,
+                                       common->rx_bufsize,
+                                       DMA_BIDIRECTIONAL);
                        dev_kfree_skb_any(bf->bf_mpdu);
+                       bf->bf_buf_addr = 0;
+                       bf->bf_mpdu = NULL;
+               }
        }
 
        INIT_LIST_HEAD(&sc->rx.rxbuf);
index 26f1ab8..e293a79 100644 (file)
@@ -1632,7 +1632,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
        u32 cmd, beacon0_valid, beacon1_valid;
 
        if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
-           !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
+           !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) &&
+           !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
                return;
 
        /* This is the bottom half of the asynchronous beacon update. */
index 3774dd0..ef9ad79 100644 (file)
@@ -1901,17 +1901,19 @@ static void ipw2100_down(struct ipw2100_priv *priv)
 
 /* Called by register_netdev() */
 static int ipw2100_net_init(struct net_device *dev)
+{
+       struct ipw2100_priv *priv = libipw_priv(dev);
+
+       return ipw2100_up(priv, 1);
+}
+
+static int ipw2100_wdev_init(struct net_device *dev)
 {
        struct ipw2100_priv *priv = libipw_priv(dev);
        const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
        struct wireless_dev *wdev = &priv->ieee->wdev;
-       int ret;
        int i;
 
-       ret = ipw2100_up(priv, 1);
-       if (ret)
-               return ret;
-
        memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
 
        /* fill-out priv->ieee->bg_band */
@@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
                       "Error calling register_netdev.\n");
                goto fail;
        }
+       registered = 1;
+
+       err = ipw2100_wdev_init(dev);
+       if (err)
+               goto fail;
 
        mutex_lock(&priv->action_mutex);
-       registered = 1;
 
        IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
 
@@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
 
       fail_unlock:
        mutex_unlock(&priv->action_mutex);
-
+       wiphy_unregister(priv->ieee->wdev.wiphy);
+       kfree(priv->ieee->bg_band.channels);
       fail:
        if (dev) {
                if (registered)
index 87813c3..4ffebed 100644 (file)
@@ -11424,17 +11424,24 @@ static void ipw_bg_down(struct work_struct *work)
 
 /* Called by register_netdev() */
 static int ipw_net_init(struct net_device *dev)
+{
+       int rc = 0;
+       struct ipw_priv *priv = libipw_priv(dev);
+
+       mutex_lock(&priv->mutex);
+       if (ipw_up(priv))
+               rc = -EIO;
+       mutex_unlock(&priv->mutex);
+
+       return rc;
+}
+
+static int ipw_wdev_init(struct net_device *dev)
 {
        int i, rc = 0;
        struct ipw_priv *priv = libipw_priv(dev);
        const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
        struct wireless_dev *wdev = &priv->ieee->wdev;
-       mutex_lock(&priv->mutex);
-
-       if (ipw_up(priv)) {
-               rc = -EIO;
-               goto out;
-       }
 
        memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
 
@@ -11519,13 +11526,9 @@ static int ipw_net_init(struct net_device *dev)
        set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
 
        /* With that information in place, we can now register the wiphy... */
-       if (wiphy_register(wdev->wiphy)) {
+       if (wiphy_register(wdev->wiphy))
                rc = -EIO;
-               goto out;
-       }
-
 out:
-       mutex_unlock(&priv->mutex);
        return rc;
 }
 
@@ -11832,14 +11835,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
                goto out_remove_sysfs;
        }
 
+       err = ipw_wdev_init(net_dev);
+       if (err) {
+               IPW_ERROR("failed to register wireless device\n");
+               goto out_unregister_netdev;
+       }
+
 #ifdef CONFIG_IPW2200_PROMISCUOUS
        if (rtap_iface) {
                err = ipw_prom_alloc(priv);
                if (err) {
                        IPW_ERROR("Failed to register promiscuous network "
                                  "device (error %d).\n", err);
-                       unregister_netdev(priv->net_dev);
-                       goto out_remove_sysfs;
+                       wiphy_unregister(priv->ieee->wdev.wiphy);
+                       kfree(priv->ieee->a_band.channels);
+                       kfree(priv->ieee->bg_band.channels);
+                       goto out_unregister_netdev;
                }
        }
 #endif
@@ -11851,6 +11862,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
 
        return 0;
 
+      out_unregister_netdev:
+       unregister_netdev(priv->net_dev);
       out_remove_sysfs:
        sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
       out_release_irq:
index 977bd24..164bcae 100644 (file)
@@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
 
  out:
 
-       rs_sta->last_txrate_idx = index;
-       if (sband->band == IEEE80211_BAND_5GHZ)
-               info->control.rates[0].idx = rs_sta->last_txrate_idx -
-                               IWL_FIRST_OFDM_RATE;
-       else
+       if (sband->band == IEEE80211_BAND_5GHZ) {
+               if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
+                       index = IWL_FIRST_OFDM_RATE;
+               rs_sta->last_txrate_idx = index;
+               info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
+       } else {
+               rs_sta->last_txrate_idx = index;
                info->control.rates[0].idx = rs_sta->last_txrate_idx;
+       }
 
        IWL_DEBUG_RATE(priv, "leave: %d\n", index);
 }
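
The 3945 rate-scaling hunk above clamps the chosen index to the first OFDM rate on the 5 GHz band, where subtracting IWL_FIRST_OFDM_RATE from a CCK index would otherwise yield a negative mac80211 rate index. A tiny sketch of the clamp; the rate numbering below is assumed for illustration:

#include <stdio.h>

#define FIRST_OFDM_RATE 4       /* illustrative: rates 0-3 are CCK, 4+ are OFDM */

static int rate_for_5ghz(int index)
{
        if (index < FIRST_OFDM_RATE)    /* WARN_ON_ONCE() in the driver */
                index = FIRST_OFDM_RATE;
        return index - FIRST_OFDM_RATE; /* 5 GHz tables start at the first OFDM rate */
}

int main(void)
{
        printf("index 2 -> %d, index 7 -> %d\n", rate_for_5ghz(2), rate_for_5ghz(7));
        return 0;
}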
index 35cd253..e5971fe 100644 (file)
@@ -937,7 +937,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
                                        &priv->contexts[IWL_RXON_CTX_BSS]);
 #endif
 
-       wake_up_interruptible(&priv->wait_command_queue);
+       wake_up(&priv->wait_command_queue);
 
        /* Keep the restart process from trying to send host
         * commands by clearing the INIT status bit */
@@ -1746,7 +1746,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
 
        /* Set the FW error flag -- cleared on iwl_down */
        set_bit(STATUS_FW_ERROR, &priv->status);
-       wake_up_interruptible(&priv->wait_command_queue);
+       wake_up(&priv->wait_command_queue);
        /*
         * Keep the restart process from trying to send host
         * commands by clearing the INIT status bit
index 62b4b09..ce1fc9f 100644 (file)
@@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                goto out;
        }
 
-       ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+       ret = wait_event_timeout(priv->wait_command_queue,
                        !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
                        HOST_COMPLETE_TIMEOUT);
        if (!ret) {
index 4fff995..ef9e268 100644 (file)
@@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];
 
+       txq->time_stamp = jiffies;
+
        pci_unmap_single(priv->pci_dev,
                         dma_unmap_addr(meta, mapping),
                         dma_unmap_len(meta, len),
@@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
                clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
                IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
                               iwl_legacy_get_cmd_string(cmd->hdr.cmd));
-               wake_up_interruptible(&priv->wait_command_queue);
+               wake_up(&priv->wait_command_queue);
        }
 
        /* Mark as unmapped */
index 795826a..66ee156 100644 (file)
@@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
                wiphy_rfkill_set_hw_state(priv->hw->wiphy,
                                test_bit(STATUS_RF_KILL_HW, &priv->status));
        else
-               wake_up_interruptible(&priv->wait_command_queue);
+               wake_up(&priv->wait_command_queue);
 }
 
 /**
@@ -2269,7 +2269,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
        iwl3945_reg_txpower_periodic(priv);
 
        IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
-       wake_up_interruptible(&priv->wait_command_queue);
+       wake_up(&priv->wait_command_queue);
 
        return;
 
@@ -2300,7 +2300,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
        iwl_legacy_clear_driver_stations(priv);
 
        /* Unblock any waiting calls */
-       wake_up_interruptible_all(&priv->wait_command_queue);
+       wake_up_all(&priv->wait_command_queue);
 
        /* Wipe out the EXIT_PENDING status bit if we are not actually
         * exiting the module */
@@ -2853,7 +2853,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
 
        /* Wait for START_ALIVE from ucode. Otherwise callbacks from
         * mac80211 will not be run successfully. */
-       ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+       ret = wait_event_timeout(priv->wait_command_queue,
                        test_bit(STATUS_READY, &priv->status),
                        UCODE_READY_TIMEOUT);
        if (!ret) {
index 1433466..aa0c253 100644 (file)
@@ -576,7 +576,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
                wiphy_rfkill_set_hw_state(priv->hw->wiphy,
                        test_bit(STATUS_RF_KILL_HW, &priv->status));
        else
-               wake_up_interruptible(&priv->wait_command_queue);
+               wake_up(&priv->wait_command_queue);
 }
 
 /**
@@ -926,7 +926,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
                handled |= CSR_INT_BIT_FH_TX;
                /* Wake up uCode load routine, now that load is complete */
                priv->ucode_write_complete = 1;
-               wake_up_interruptible(&priv->wait_command_queue);
+               wake_up(&priv->wait_command_queue);
        }
 
        if (inta & ~handled) {
@@ -1795,7 +1795,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
        iwl4965_rf_kill_ct_config(priv);
 
        IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
-       wake_up_interruptible(&priv->wait_command_queue);
+       wake_up(&priv->wait_command_queue);
 
        iwl_legacy_power_update_mode(priv, true);
        IWL_DEBUG_INFO(priv, "Updated power mode\n");
@@ -1828,7 +1828,7 @@ static void __iwl4965_down(struct iwl_priv *priv)
        iwl_legacy_clear_driver_stations(priv);
 
        /* Unblock any waiting calls */
-       wake_up_interruptible_all(&priv->wait_command_queue);
+       wake_up_all(&priv->wait_command_queue);
 
        /* Wipe out the EXIT_PENDING status bit if we are not actually
         * exiting the module */
@@ -2266,7 +2266,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw)
 
        /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
         * mac80211 will not be run successfully. */
-       ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+       ret = wait_event_timeout(priv->wait_command_queue,
                        test_bit(STATUS_READY, &priv->status),
                        UCODE_READY_TIMEOUT);
        if (!ret) {
index a895a09..5621100 100644 (file)
@@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
 
        memset(&cmd, 0, sizeof(cmd));
        iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
-       memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib));
+       memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
        if (!(cmd.radio_sensor_offset))
                cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
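
The one-character calibration fix above is the classic sizeof(pointer) versus sizeof(*pointer) mistake: memcpy() was sized by the pointer, not by the pointed-to offset value. A minimal stand-alone illustration, assuming a 16-bit offset for the example:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
        uint16_t src = 0xBEEF, dst1 = 0, dst2 = 0;
        uint16_t *offset_calib = &src;

        /* Buggy: copies sizeof(uint16_t *) bytes (8 on 64-bit), overrunning dst1. */
        /* memcpy(&dst1, offset_calib, sizeof(offset_calib)); */

        /* Fixed: copies sizeof(uint16_t) bytes, exactly the offset value. */
        memcpy(&dst2, offset_calib, sizeof(*offset_calib));

        printf("copied %zu bytes, value 0x%04X\n", sizeof(*offset_calib), dst2);
        (void)dst1;
        return 0;
}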
 
index b0ae4de..f9c3cd9 100644 (file)
@@ -2140,7 +2140,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
                    IEEE80211_HW_SPECTRUM_MGMT |
                    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
 
+       /*
+        * Including the following line will crash some APs.  This
+        * workaround removes the stimulus which causes the crash until
+        * the AP software can be fixed.
        hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+        */
 
        hw->flags |= IEEE80211_HW_SUPPORTS_PS |
                     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
index dd6937e..77e528f 100644 (file)
@@ -405,31 +405,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
 
        mutex_lock(&priv->mutex);
 
-       if (test_bit(STATUS_SCANNING, &priv->status) &&
-           priv->scan_type != IWL_SCAN_NORMAL) {
-               IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
-               ret = -EAGAIN;
-               goto out_unlock;
-       }
-
-       /* mac80211 will only ask for one band at a time */
-       priv->scan_request = req;
-       priv->scan_vif = vif;
-
        /*
         * If an internal scan is in progress, just set
         * up the scan_request as per above.
         */
        if (priv->scan_type != IWL_SCAN_NORMAL) {
-               IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
+               IWL_DEBUG_SCAN(priv,
+                              "SCAN request during internal scan - defer\n");
+               priv->scan_request = req;
+               priv->scan_vif = vif;
                ret = 0;
-       } else
+       } else {
+               priv->scan_request = req;
+               priv->scan_vif = vif;
+               /*
+                * mac80211 will only ask for one band at a time
+                * so using channels[0] here is ok
+                */
                ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
                                        req->channels[0]->band);
+               if (ret) {
+                       priv->scan_request = NULL;
+                       priv->scan_vif = NULL;
+               }
+       }
 
        IWL_DEBUG_MAC80211(priv, "leave\n");
 
-out_unlock:
        mutex_unlock(&priv->mutex);
 
        return ret;
index a6b2b1d..222d410 100644 (file)
@@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];
 
+       txq->time_stamp = jiffies;
+
        iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
 
        /* Input error checking is done when commands are added to queue. */
index ef67f67..0019dfd 100644 (file)
@@ -3697,14 +3697,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
        rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
 
        /* Apparently the data is read from end to start */
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3,
-                                       (u32 *)&rt2x00dev->eeprom[i]);
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2,
-                                       (u32 *)&rt2x00dev->eeprom[i + 2]);
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1,
-                                       (u32 *)&rt2x00dev->eeprom[i + 4]);
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0,
-                                       (u32 *)&rt2x00dev->eeprom[i + 6]);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
+       /* The returned value is in CPU order, but eeprom is le */
+       rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
+       *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
+       *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
+       *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
 
        mutex_unlock(&rt2x00dev->csr_mutex);
 }
@@ -3870,19 +3871,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
                return -ENODEV;
        }
 
-       if (!rt2x00_rf(rt2x00dev, RF2820) &&
-           !rt2x00_rf(rt2x00dev, RF2850) &&
-           !rt2x00_rf(rt2x00dev, RF2720) &&
-           !rt2x00_rf(rt2x00dev, RF2750) &&
-           !rt2x00_rf(rt2x00dev, RF3020) &&
-           !rt2x00_rf(rt2x00dev, RF2020) &&
-           !rt2x00_rf(rt2x00dev, RF3021) &&
-           !rt2x00_rf(rt2x00dev, RF3022) &&
-           !rt2x00_rf(rt2x00dev, RF3052) &&
-           !rt2x00_rf(rt2x00dev, RF3320) &&
-           !rt2x00_rf(rt2x00dev, RF5370) &&
-           !rt2x00_rf(rt2x00dev, RF5390)) {
-               ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
+       switch (rt2x00dev->chip.rf) {
+       case RF2820:
+       case RF2850:
+       case RF2720:
+       case RF2750:
+       case RF3020:
+       case RF2020:
+       case RF3021:
+       case RF3022:
+       case RF3052:
+       case RF3320:
+       case RF5370:
+       case RF5390:
+               break;
+       default:
+               ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n",
+                     rt2x00dev->chip.rf);
                return -ENODEV;
        }
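
The EFUSE hunk above stops casting the little-endian EEPROM image to u32 and instead converts each register value from CPU order with cpu_to_le32() before storing it, which keeps the image correct on big-endian hosts. A small sketch of the same explicit conversion, with a hand-rolled byte store standing in for cpu_to_le32():

#include <stdint.h>
#include <stdio.h>

/* Store a CPU-order value into a little-endian byte image, independent
 * of host endianness -- the moral equivalent of cpu_to_le32(). */
static void put_le32(uint8_t *dst, uint32_t val)
{
        dst[0] = val & 0xff;
        dst[1] = (val >> 8) & 0xff;
        dst[2] = (val >> 16) & 0xff;
        dst[3] = (val >> 24) & 0xff;
}

int main(void)
{
        uint8_t eeprom[8];
        uint32_t reg = 0x11223344;      /* value read back in CPU order */

        put_le32(eeprom, reg);
        put_le32(eeprom + 4, reg + 1);

        printf("%02x %02x %02x %02x\n", eeprom[0], eeprom[1], eeprom[2], eeprom[3]);
        return 0;
}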
 
index 1bdc1aa..04c4e9e 100644 (file)
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
 
                        mac->link_state = MAC80211_NOLINK;
                        memset(mac->bssid, 0, 6);
+
+                       /* reset sec info */
+                       rtl_cam_reset_sec_info(hw);
+
+                       rtl_cam_reset_all_entry(hw);
                        mac->vendor = PEER_UNKNOWN;
 
                        RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                 *or clear all entry here.
                 */
                rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
+
+               rtl_cam_reset_sec_info(hw);
+
                break;
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index 906e7aa..3e52a54 100644 (file)
@@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
                               (tcb_desc->rts_use_shortpreamble ? 1 : 0)
                               : (tcb_desc->rts_use_shortgi ? 1 : 0)));
        if (mac->bw_40) {
-               if (tcb_desc->packet_bw) {
+               if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
                        SET_TX_DESC_DATA_BW(txdesc, 1);
                        SET_TX_DESC_DATA_SC(txdesc, 3);
+               } else if (rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+                       SET_TX_DESC_DATA_BW(txdesc, 1);
+                       SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
                } else {
                        SET_TX_DESC_DATA_BW(txdesc, 0);
-                               if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
-                                       SET_TX_DESC_DATA_SC(txdesc,
-                                                         mac->cur_40_prime_sc);
-                       }
+                       SET_TX_DESC_DATA_SC(txdesc, 0);
+               }
        } else {
                SET_TX_DESC_DATA_BW(txdesc, 0);
                SET_TX_DESC_DATA_SC(txdesc, 0);
index 8b1cef0..4bf3cf4 100644 (file)
@@ -863,6 +863,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
        u8 tid = 0;
        u16 seq_number = 0;
 
+       memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
        if (ieee80211_is_auth(fc)) {
                RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
                rtl_ips_nic_on(hw);
index 0ca86f9..1825629 100644 (file)
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
        xenvif_get(vif);
 
        rtnl_lock();
-       if (netif_running(vif->dev))
-               xenvif_up(vif);
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        netif_carrier_on(vif->dev);
+       if (netif_running(vif->dev))
+               xenvif_up(vif);
        rtnl_unlock();
 
        return 0;
index 753b21a..3ffd9c1 100644 (file)
@@ -169,7 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
                        (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
                return;
 
-       pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss);
+       if (dev->bus && dev->bus->self)
+               pcie_bus_configure_settings(dev->bus,
+                                           dev->bus->self->pcie_mpss);
 
        memset(&hpp, 0, sizeof(hpp));
        ret = pci_get_hp_params(dev, &hpp);
index 0ce6742..e9651f0 100644 (file)
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
 
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
 
 /*
  * The default CLS is used if arch didn't set CLS explicitly and not
@@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str)
                                pci_hotplug_io_size = memparse(str + 9, &str);
                        } else if (!strncmp(str, "hpmemsize=", 10)) {
                                pci_hotplug_mem_size = memparse(str + 10, &str);
+                       } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
+                               pcie_bus_config = PCIE_BUS_TUNE_OFF;
                        } else if (!strncmp(str, "pcie_bus_safe", 13)) {
                                pcie_bus_config = PCIE_BUS_SAFE;
                        } else if (!strncmp(str, "pcie_bus_perf", 13)) {
                                pcie_bus_config = PCIE_BUS_PERFORMANCE;
+                       } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
+                               pcie_bus_config = PCIE_BUS_PEER2PEER;
                        } else {
                                printk(KERN_ERR "PCI: Unknown option `%s'\n",
                                                str);
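
The pci_setup() hunk above adds pcie_bus_tune_off and pcie_bus_peer2peer as recognised pci= options and makes "tune off" the default policy again. A compact sketch of the same strncmp()-based matching; the parsing loop around it is simplified and the enum is local to the example:

#include <stdio.h>
#include <string.h>

enum pcie_bus_config_types {
        PCIE_BUS_TUNE_OFF, PCIE_BUS_SAFE, PCIE_BUS_PERFORMANCE, PCIE_BUS_PEER2PEER
};

static enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

static void parse_pci_option(const char *str)
{
        if (!strncmp(str, "pcie_bus_tune_off", 17))
                pcie_bus_config = PCIE_BUS_TUNE_OFF;
        else if (!strncmp(str, "pcie_bus_safe", 13))
                pcie_bus_config = PCIE_BUS_SAFE;
        else if (!strncmp(str, "pcie_bus_perf", 13))
                pcie_bus_config = PCIE_BUS_PERFORMANCE;
        else if (!strncmp(str, "pcie_bus_peer2peer", 18))
                pcie_bus_config = PCIE_BUS_PEER2PEER;
        else
                printf("PCI: Unknown option `%s'\n", str);
}

int main(void)
{
        parse_pci_option("pcie_bus_peer2peer");
        printf("pcie_bus_config=%d\n", pcie_bus_config);
        return 0;
}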
index 8473727..6ab6bd3 100644 (file)
@@ -1351,7 +1351,8 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)
         * will occur as normal.
         */
        if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
-           dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
+            (dev->bus->self &&
+             dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))
                *smpss = 0;
 
        if (*smpss > dev->pcie_mpss)
@@ -1396,34 +1397,37 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
 
 static void pcie_write_mrrs(struct pci_dev *dev, int mps)
 {
-       int rc, mrrs;
+       int rc, mrrs, dev_mpss;
 
-       if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
-               int dev_mpss = 128 << dev->pcie_mpss;
+       /* In the "safe" case, do not configure the MRRS.  There appear to be
+        * issues with setting MRRS to 0 on a number of devices.
+        */
 
-               /* For Max performance, the MRRS must be set to the largest
-                * supported value.  However, it cannot be configured larger
-                * than the MPS the device or the bus can support.  This assumes
-                * that the largest MRRS available on the device cannot be
-                * smaller than the device MPSS.
-                */
-               mrrs = mps < dev_mpss ? mps : dev_mpss;
-       } else
-               /* In the "safe" case, configure the MRRS for fairness on the
-                * bus by making all devices have the same size
-                */
-               mrrs = mps;
+       if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
+               return;
 
+       dev_mpss = 128 << dev->pcie_mpss;
+
+       /* For Max performance, the MRRS must be set to the largest supported
+        * value.  However, it cannot be configured larger than the MPS the
+        * device or the bus can support.  This assumes that the largest MRRS
+        * available on the device cannot be smaller than the device MPSS.
+        */
+       mrrs = min(mps, dev_mpss);
 
        /* MRRS is a R/W register.  Invalid values can be written, but a
-        * subsiquent read will verify if the value is acceptable or not.
+        * subsequent read will verify if the value is acceptable or not.
         * If the MRRS value provided is not acceptable (e.g., too large),
         * shrink the value until it is acceptable to the HW.
         */
        while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
+               dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
+                        " to %d.  If any issues are encountered, please try "
+                        "running with pci=pcie_bus_safe\n", mrrs);
                rc = pcie_set_readrq(dev, mrrs);
                if (rc)
-                       dev_err(&dev->dev, "Failed attempting to set the MRRS\n");
+                       dev_err(&dev->dev,
+                               "Failed attempting to set the MRRS\n");
 
                mrrs /= 2;
        }
@@ -1436,13 +1440,13 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
        if (!pci_is_pcie(dev))
                return 0;
 
-       dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+       dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
                 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
 
        pcie_write_mps(dev, mps);
        pcie_write_mrrs(dev, mps);
 
-       dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+       dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
                 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
 
        return 0;
@@ -1454,15 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
  */
 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
 {
-       u8 smpss = mpss;
+       u8 smpss;
 
-       if (!bus->self)
+       if (!pci_is_pcie(bus->self))
                return;
 
-       if (!pci_is_pcie(bus->self))
+       if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
                return;
 
+       /* FIXME - Peer to peer DMA is possible, though the endpoint would need
+        * to be aware of the MPS of the destination.  To work around this,
+        * simply force the MPS of the entire system to the smallest possible.
+        */
+       if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+               smpss = 0;
+
        if (pcie_bus_config == PCIE_BUS_SAFE) {
+               smpss = mpss;
+
                pcie_find_smpss(bus->self, &smpss);
                pci_walk_bus(bus, pcie_find_smpss, &smpss);
        }
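
pcie_write_mrrs() above now runs only in performance mode and, because MRRS keeps its old value when a write is rejected, it retries with progressively halved values. The sketch below keeps the shrink-until-accepted idea but stops at the first value the fake device takes, which is a simplification of the literal loop in the hunk; the 512-byte device limit is an assumption:

#include <stdio.h>

/* Hypothetical device: MRRS writes above 512 bytes are refused and the
 * register keeps its previous value.  Illustration only, not the real
 * pcie_set_readrq() behaviour. */
static int hw_readrq = 128;

static int set_readrq(int rq)
{
        if (rq > 512)
                return -1;
        hw_readrq = rq;
        return 0;
}

int main(void)
{
        int dev_mpss = 128 << 5;        /* a 4096-byte MPSS, encoded as 128 << pcie_mpss */
        int mps = 1024;
        int mrrs = mps < dev_mpss ? mps : dev_mpss;     /* min(mps, dev_mpss) */

        /* Shrink the read request size until the device accepts it. */
        while (mrrs >= 128) {
                printf("trying MRRS %d\n", mrrs);
                if (set_readrq(mrrs) == 0)
                        break;
                mrrs /= 2;
        }
        printf("final MRRS %d\n", hw_readrq);
        return 0;
}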
index 335551d..14a42a1 100644 (file)
@@ -36,6 +36,7 @@
  */
 struct ep93xx_rtc {
        void __iomem    *mmio_base;
+       struct rtc_device *rtc;
 };
 
 static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
 {
        struct ep93xx_rtc *ep93xx_rtc;
        struct resource *res;
-       struct rtc_device *rtc;
        int err;
 
        ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
                return -ENXIO;
 
        pdev->dev.platform_data = ep93xx_rtc;
-       platform_set_drvdata(pdev, rtc);
+       platform_set_drvdata(pdev, ep93xx_rtc);
 
-       rtc = rtc_device_register(pdev->name,
+       ep93xx_rtc->rtc = rtc_device_register(pdev->name,
                                &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
-       if (IS_ERR(rtc)) {
-               err = PTR_ERR(rtc);
+       if (IS_ERR(ep93xx_rtc->rtc)) {
+               err = PTR_ERR(ep93xx_rtc->rtc);
                goto exit;
        }
 
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
        return 0;
 
 fail:
-       rtc_device_unregister(rtc);
+       rtc_device_unregister(ep93xx_rtc->rtc);
 exit:
        platform_set_drvdata(pdev, NULL);
        pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:
 
 static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
 {
-       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
 
        sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
        platform_set_drvdata(pdev, NULL);
-       rtc_device_unregister(rtc);
+       rtc_device_unregister(ep93xx_rtc->rtc);
        pdev->dev.platform_data = NULL;
 
        return 0;
index 2dd3c01..d93a960 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/rtc.h>
+#include <linux/sched.h>
 #include <linux/workqueue.h>
 
 /* DryIce Register Definitions */
index 075f170..c4cf057 100644 (file)
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
        time -= tm->tm_hour * 3600;
        tm->tm_min = time / 60;
        tm->tm_sec = time - tm->tm_min * 60;
+
+       tm->tm_isdst = 0;
 }
 EXPORT_SYMBOL(rtc_time_to_tm);
 
index 4e7c04e..7639ab9 100644 (file)
@@ -51,6 +51,27 @@ static enum s3c_cpu_type s3c_rtc_cpu_type;
 
 static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
 
+static void s3c_rtc_alarm_clk_enable(bool enable)
+{
+       static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock);
+       static bool alarm_clk_enabled;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags);
+       if (enable) {
+               if (!alarm_clk_enabled) {
+                       clk_enable(rtc_clk);
+                       alarm_clk_enabled = true;
+               }
+       } else {
+               if (alarm_clk_enabled) {
+                       clk_disable(rtc_clk);
+                       alarm_clk_enabled = false;
+               }
+       }
+       spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags);
+}
+
 /* IRQ Handlers */
 
 static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
@@ -64,6 +85,9 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
                writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);
 
        clk_disable(rtc_clk);
+
+       s3c_rtc_alarm_clk_enable(false);
+
        return IRQ_HANDLED;
 }
 
@@ -97,6 +121,8 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
        writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
        clk_disable(rtc_clk);
 
+       s3c_rtc_alarm_clk_enable(enabled);
+
        return 0;
 }
 
index 9a81f77..20687d5 100644 (file)
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
        int res;
        u8 rd_reg;
 
-#ifdef CONFIG_LOCKDEP
-       /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
-        * we don't want and can't tolerate.  Although it might be
-        * friendlier not to borrow this thread context...
-        */
-       local_irq_enable();
-#endif
-
        res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
        if (res)
                goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
 static int __devinit twl_rtc_probe(struct platform_device *pdev)
 {
        struct rtc_device *rtc;
-       int ret = 0;
+       int ret = -EINVAL;
        int irq = platform_get_irq(pdev, 0);
        u8 rd_reg;
 
        if (irq <= 0)
-               return -EINVAL;
-
-       rtc = rtc_device_register(pdev->name,
-                                 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
-       if (IS_ERR(rtc)) {
-               ret = PTR_ERR(rtc);
-               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
-                       PTR_ERR(rtc));
-               goto out0;
-
-       }
-
-       platform_set_drvdata(pdev, rtc);
+               goto out1;
 
        ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
        if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        if (ret < 0)
                goto out1;
 
-       ret = request_irq(irq, twl_rtc_interrupt,
-                               IRQF_TRIGGER_RISING,
-                               dev_name(&rtc->dev), rtc);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "IRQ is not free.\n");
-               goto out1;
-       }
-
        if (twl_class_is_6030()) {
                twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
                        REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        /* Check RTC module status, Enable if it is off */
        ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
        if (ret < 0)
-               goto out2;
+               goto out1;
 
        if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
                dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
                rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
                ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
                if (ret < 0)
-                       goto out2;
+                       goto out1;
        }
 
        /* init cached IRQ enable bits */
        ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
        if (ret < 0)
+               goto out1;
+
+       rtc = rtc_device_register(pdev->name,
+                                 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
+       if (IS_ERR(rtc)) {
+               ret = PTR_ERR(rtc);
+               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
+                       PTR_ERR(rtc));
+               goto out1;
+       }
+
+       ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
+                                  IRQF_TRIGGER_RISING,
+                                  dev_name(&rtc->dev), rtc);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "IRQ is not free.\n");
                goto out2;
+       }
 
-       return ret;
+       platform_set_drvdata(pdev, rtc);
+       return 0;
 
 out2:
-       free_irq(irq, rtc);
-out1:
        rtc_device_unregister(rtc);
-out0:
+out1:
        return ret;
 }
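
The reordered probe above registers the RTC class device and requests the (now threaded) IRQ only after every hardware access has succeeded, then unwinds through the out2/out1 labels in reverse order, so a failed request_threaded_irq() no longer leaves a registered device behind. A small sketch of that probe-ordering and goto-unwind idiom follows; hw_init(), register_rtc() and request_irq_last() are hypothetical stubs standing in for the real calls.

#include <stdio.h>

static int hw_init(void)          { return 0; }  /* hypothetical hardware setup */
static int register_rtc(void)     { return 0; }  /* hypothetical device registration */
static int request_irq_last(void) { return 0; }  /* hypothetical IRQ request */
static void unregister_rtc(void)  { puts("unregistered"); }

/*
 * Talk to the hardware first, register the device only once that succeeds,
 * request the IRQ last, and unwind in reverse order on failure.
 */
static int probe(void)
{
        int ret;

        ret = hw_init();
        if (ret < 0)
                goto out;

        ret = register_rtc();
        if (ret < 0)
                goto out;

        ret = request_irq_last();
        if (ret < 0)
                goto out_unregister;

        return 0;

out_unregister:
        unregister_rtc();
out:
        return ret;
}

int main(void)
{
        return probe() ? 1 : 0;
}
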
 
index cbde448..eb3140e 100644 (file)
@@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv;
 static int console_subchannel_in_use;
 
 /*
- * Use tpi to get a pending interrupt, call the interrupt handler and
- * return a pointer to the subchannel structure.
+ * Use cio_tpi to get a pending interrupt and call the interrupt handler.
+ * Return non-zero if an interrupt was processed, zero otherwise.
  */
 static int cio_tpi(void)
 {
@@ -667,6 +667,10 @@ static int cio_tpi(void)
        tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
        if (tpi(NULL) != 1)
                return 0;
+       if (tpi_info->adapter_IO) {
+               do_adapter_IO(tpi_info->isc);
+               return 1;
+       }
        irb = (struct irb *)&S390_lowcore.irb;
        /* Store interrupt response block to lowcore. */
        if (tsch(tpi_info->schid, irb) != 0)
index b7bd5b0..3868ab2 100644 (file)
@@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
        switch (retval) {
        case SCSI_MLQUEUE_HOST_BUSY:
                twa_free_request_id(tw_dev, request_id);
+               twa_unmap_scsi_data(tw_dev, request_id);
                break;
        case 1:
                tw_dev->state[request_id] = TW_S_COMPLETED;
                twa_free_request_id(tw_dev, request_id);
+               twa_unmap_scsi_data(tw_dev, request_id);
                SCpnt->result = (DID_ERROR << 16);
                done(SCpnt);
                retval = 0;
index 8d9dae8..3878b73 100644 (file)
@@ -837,6 +837,7 @@ config SCSI_ISCI
        # (temporary): known alpha quality driver
        depends on EXPERIMENTAL
        select SCSI_SAS_LIBSAS
+       select SCSI_SAS_HOST_SMP
        ---help---
          This driver supports the 6Gb/s SAS capabilities of the storage
          control unit found in the Intel(R) C600 series chipset.
index 3c08f53..6153a66 100644 (file)
@@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o       qlogicfas.o
 obj-$(CONFIG_PCMCIA_QLOGIC)    += qlogicfas408.o
 obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o 
 obj-$(CONFIG_SCSI_QLA_FC)      += qla2xxx/
-obj-$(CONFIG_SCSI_QLA_ISCSI)   += qla4xxx/
+obj-$(CONFIG_SCSI_QLA_ISCSI)   += libiscsi.o qla4xxx/
 obj-$(CONFIG_SCSI_LPFC)                += lpfc/
 obj-$(CONFIG_SCSI_BFA_FC)      += bfa/
 obj-$(CONFIG_SCSI_PAS16)       += pas16.o
index e7d0d47..e5f2d7d 100644 (file)
@@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
        kfree(aac->queues);
        aac->queues = NULL;
        free_irq(aac->pdev->irq, aac);
+       if (aac->msi)
+               pci_disable_msi(aac->pdev);
        kfree(aac->fsa_dev);
        aac->fsa_dev = NULL;
        quirks = aac_get_driver_ident(index)->quirks;
index 9ae80cd..dba72a4 100644 (file)
@@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
        nopout_wqe->itt = ((u16)task->itt |
                           (ISCSI_TASK_TYPE_MPATH <<
                            ISCSI_TMF_REQUEST_TYPE_SHIFT));
-       nopout_wqe->ttt = nopout_hdr->ttt;
+       nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt);
        nopout_wqe->flags = 0;
        if (!unsol)
                nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
index bd22041..f586448 100644 (file)
@@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk)
        struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
 
        if (csk->l2t) {
-               l2t_release(L2DATA(t3dev), csk->l2t);
+               l2t_release(t3dev, csk->l2t);
                csk->l2t = NULL;
                cxgbi_sock_put(csk);
        }
index ba710e3..5d0e9a2 100644 (file)
@@ -432,6 +432,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
        u8 flogi_maddr[ETH_ALEN];
        const struct net_device_ops *ops;
 
+       rtnl_lock();
+
        /*
         * Don't listen for Ethernet packets anymore.
         * synchronize_net() ensures that the packet handlers are not running
@@ -461,6 +463,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
                                        " specific feature for LLD.\n");
        }
 
+       rtnl_unlock();
+
        /* Release the self-reference taken during fcoe_interface_create() */
        fcoe_interface_put(fcoe);
 }
@@ -1951,11 +1955,8 @@ static void fcoe_destroy_work(struct work_struct *work)
        fcoe_if_destroy(port->lport);
 
        /* Do not tear down the fcoe interface for NPIV port */
-       if (!npiv) {
-               rtnl_lock();
+       if (!npiv)
                fcoe_interface_cleanup(fcoe);
-               rtnl_unlock();
-       }
 
        mutex_unlock(&fcoe_config_mutex);
 }
@@ -2009,8 +2010,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
                printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
                       netdev->name);
                rc = -EIO;
+               rtnl_unlock();
                fcoe_interface_cleanup(fcoe);
-               goto out_nodev;
+               goto out_nortnl;
        }
 
        /* Make this the "master" N_Port */
@@ -2027,6 +2029,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 
 out_nodev:
        rtnl_unlock();
+out_nortnl:
        mutex_unlock(&fcoe_config_mutex);
        return rc;
 }
index ec61bdb..b200b73 100644 (file)
@@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
        BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;
+
+       /*
+        * New physical devices won't have target/lun assigned yet
+        * so we need to preserve the values in the slot we are replacing.
+        */
+       if (new_entry->target == -1) {
+               new_entry->target = h->dev[entry]->target;
+               new_entry->lun = h->dev[entry]->lun;
+       }
+
        h->dev[entry] = new_entry;
        added[*nadded] = new_entry;
        (*nadded)++;
@@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
 }
 
 static int hpsa_update_device_info(struct ctlr_info *h,
-       unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
+       unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
+       unsigned char *is_OBDR_device)
 {
-#define OBDR_TAPE_INQ_SIZE 49
+
+#define OBDR_SIG_OFFSET 43
+#define OBDR_TAPE_SIG "$DR-10"
+#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
+#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
+
        unsigned char *inq_buff;
+       unsigned char *obdr_sig;
 
        inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
        if (!inq_buff)
@@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,
        else
                this_device->raid_level = RAID_UNKNOWN;
 
+       if (is_OBDR_device) {
+               /* See if this is a One-Button-Disaster-Recovery device
+                * by looking for "$DR-10" at offset 43 in inquiry data.
+                */
+               obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
+               *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
+                                       strncmp(obdr_sig, OBDR_TAPE_SIG,
+                                               OBDR_SIG_LEN) == 0);
+       }
+
        kfree(inq_buff);
        return 0;
 
@@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
                return 0;
        }
 
-       if (hpsa_update_device_info(h, scsi3addr, this_device))
+       if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
                return 0;
        (*nmsa2xxx_enclosures)++;
        hpsa_set_bus_target_lun(this_device, bus, target, 0);
@@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
         */
        struct ReportLUNdata *physdev_list = NULL;
        struct ReportLUNdata *logdev_list = NULL;
-       unsigned char *inq_buff = NULL;
        u32 nphysicals = 0;
        u32 nlogicals = 0;
        u32 ndev_allocated = 0;
@@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                GFP_KERNEL);
        physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
        logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
-       inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
        tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
 
-       if (!currentsd || !physdev_list || !logdev_list ||
-               !inq_buff || !tmpdevice) {
+       if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
                dev_err(&h->pdev->dev, "out of memory\n");
                goto out;
        }
@@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
        /* adjust our table of devices */
        nmsa2xxx_enclosures = 0;
        for (i = 0; i < nphysicals + nlogicals + 1; i++) {
-               u8 *lunaddrbytes;
+               u8 *lunaddrbytes, is_OBDR = 0;
 
                /* Figure out where the LUN ID info is coming from */
                lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
@@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                        continue;
 
                /* Get device type, vendor, model, device id */
-               if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
+               if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
+                                                       &is_OBDR))
                        continue; /* skip it if we can't talk to it. */
                figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
                        tmpdevice);
@@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                hpsa_set_bus_target_lun(this_device, bus, target, lun);
 
                switch (this_device->devtype) {
-               case TYPE_ROM: {
+               case TYPE_ROM:
                        /* We don't *really* support actual CD-ROM devices,
                         * just "One Button Disaster Recovery" tape drive
                         * which temporarily pretends to be a CD-ROM drive.
@@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                         * device by checking for "$DR-10" in bytes 43-48 of
                         * the inquiry data.
                         */
-                               char obdr_sig[7];
-#define OBDR_TAPE_SIG "$DR-10"
-                               strncpy(obdr_sig, &inq_buff[43], 6);
-                               obdr_sig[6] = '\0';
-                               if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
-                                       /* Not OBDR device, ignore it. */
-                                       break;
-                       }
-                       ncurrent++;
+                       if (is_OBDR)
+                               ncurrent++;
                        break;
                case TYPE_DISK:
                        if (i < nphysicals)
@@ -1947,7 +1965,6 @@ out:
        for (i = 0; i < ndev_allocated; i++)
                kfree(currentsd[i]);
        kfree(currentsd);
-       kfree(inq_buff);
        kfree(physdev_list);
        kfree(logdev_list);
 }
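
With the change above, hpsa_update_device_info() reports One-Button-Disaster-Recovery devices by matching the "$DR-10" signature at offset 43 of the inquiry response it already fetches, and the caller additionally requires devtype == TYPE_ROM before counting the device. A standalone sketch of the signature test on a hypothetical inquiry buffer (the buffer contents here are made up for illustration):

#include <stdio.h>
#include <string.h>

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG   "$DR-10"
#define OBDR_SIG_LEN    (sizeof(OBDR_TAPE_SIG) - 1)

/* Return 1 if the inquiry buffer carries the OBDR signature. */
static int is_obdr(const unsigned char *inq, size_t len)
{
        if (len < OBDR_SIG_OFFSET + OBDR_SIG_LEN)
                return 0;
        return memcmp(inq + OBDR_SIG_OFFSET, OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0;
}

int main(void)
{
        unsigned char inq[64] = { 0 };

        memcpy(inq + OBDR_SIG_OFFSET, "$DR-10", OBDR_SIG_LEN);
        printf("OBDR device: %s\n", is_obdr(inq, sizeof(inq)) ? "yes" : "no");
        return 0;
}
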
index 26072f1..6981b77 100644 (file)
@@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost)
                        break;
 
                case SCU_COMPLETION_TYPE_EVENT:
+                       sci_controller_event_completion(ihost, ent);
+                       break;
+
                case SCU_COMPLETION_TYPE_NOTIFY: {
                        event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
                                       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
@@ -1091,6 +1094,7 @@ static void isci_host_completion_routine(unsigned long data)
        struct isci_request *request;
        struct isci_request *next_request;
        struct sas_task     *task;
+       u16 active;
 
        INIT_LIST_HEAD(&completed_request_list);
        INIT_LIST_HEAD(&errored_request_list);
@@ -1181,6 +1185,13 @@ static void isci_host_completion_routine(unsigned long data)
                }
        }
 
+       /* the coalescence timeout doubles at each encoding step, so
+        * update it based on the ilog2 value of the outstanding requests
+        */
+       active = isci_tci_active(ihost);
+       writel(SMU_ICC_GEN_VAL(NUMBER, active) |
+              SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
+              &ihost->smu_registers->interrupt_coalesce_control);
 }
 
 /**
@@ -1471,7 +1482,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
        struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
 
        /* set the default interrupt coalescence number and timeout value. */
-       sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+       sci_controller_set_interrupt_coalescence(ihost, 0, 0);
 }
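
Taken together, these isci hunks replace the fixed 0x10/250us coalescence setting with a dynamic one: the completion routine reprograms the coalesce control register with the number of active task contexts and a timer encoding of ISCI_COALESCE_BASE + ilog2(active), where each encoding step doubles the delay and the base value of 9 corresponds to roughly 3 to 5us per command (see host.h below). A small sketch of how that encoding scales with the number of outstanding requests; the request counts are arbitrary example values.

#include <stdio.h>

#define ISCI_COALESCE_BASE 9    /* encoding 9 ~= 3-5us per command, per host.h */

/* Integer log2 for n >= 1, matching how ilog2() is used here. */
static unsigned int ilog2_u32(unsigned int n)
{
        unsigned int r = 0;

        while (n >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int outstanding[] = { 1, 8, 64, 256 };

        /* Each +1 in the timer encoding doubles the coalesce delay. */
        for (unsigned int i = 0; i < 4; i++) {
                unsigned int n = outstanding[i];

                printf("%3u outstanding -> timer encoding %u\n",
                       n, ISCI_COALESCE_BASE + ilog2_u32(n));
        }
        return 0;
}
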
 
 static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
index 062101a..9f33831 100644 (file)
@@ -369,6 +369,9 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
 #define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
 #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
 
+/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
+#define ISCI_COALESCE_BASE 9
+
 /* expander attached sata devices require 3 rnc slots */
 static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
 {
index 61e0d09..29aa34e 100644 (file)
 #include <linux/firmware.h>
 #include <linux/efi.h>
 #include <asm/string.h>
+#include <scsi/scsi_host.h>
 #include "isci.h"
 #include "task.h"
 #include "probe_roms.h"
 
+#define MAJ 1
+#define MIN 0
+#define BUILD 0
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+       __stringify(BUILD)
+
+MODULE_VERSION(DRV_VERSION);
+
 static struct scsi_transport_template *isci_transport_template;
 
 static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
@@ -113,6 +122,22 @@ unsigned char max_concurr_spinup = 1;
 module_param(max_concurr_spinup, byte, 0);
 MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
 
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+       struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+struct device_attribute *isci_host_attrs[] = {
+       &dev_attr_isci_id,
+       NULL
+};
+
 static struct scsi_host_template isci_sht = {
 
        .module                         = THIS_MODULE,
@@ -138,6 +163,7 @@ static struct scsi_host_template isci_sht = {
        .slave_alloc                    = sas_slave_alloc,
        .target_destroy                 = sas_target_destroy,
        .ioctl                          = sas_ioctl,
+       .shost_attrs                    = isci_host_attrs,
 };
 
 static struct sas_domain_function_template isci_transport_ops  = {
@@ -232,17 +258,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
        return 0;
 }
 
-static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
-       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
-       struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
-
-       return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
-}
-
-static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
-
 static void isci_unregister(struct isci_host *isci_host)
 {
        struct Scsi_Host *shost;
@@ -251,7 +266,6 @@ static void isci_unregister(struct isci_host *isci_host)
                return;
 
        shost = isci_host->shost;
-       device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
 
        sas_unregister_ha(&isci_host->sas_ha);
 
@@ -415,14 +429,8 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
        if (err)
                goto err_shost_remove;
 
-       err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
-       if (err)
-               goto err_unregister_ha;
-
        return isci_host;
 
- err_unregister_ha:
-       sas_unregister_ha(&(isci_host->sas_ha));
  err_shost_remove:
        scsi_remove_host(shost);
  err_shost:
@@ -540,7 +548,8 @@ static __init int isci_init(void)
 {
        int err;
 
-       pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
+       pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
+               DRV_NAME, DRV_VERSION);
 
        isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
        if (!isci_transport_template)
index 79313a7..430fc8f 100644 (file)
@@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
        u32 parity_count = 0;
        u32 llctl, link_rate;
        u32 clksm_value = 0;
+       u32 sp_timeouts = 0;
 
        iphy->link_layer_registers = reg;
 
@@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
        llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
        writel(llctl, &iphy->link_layer_registers->link_layer_control);
 
+       sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+
+       /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
+       sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
+
+       /* Set RATE_CHANGE timeout value to 0x3B (59us).  This ensures SCU can
+        * lock with 3Gb drive when SCU max rate is set to 1.5Gb.
+        */
+       sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
+
+       writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+
        if (is_a2(ihost->pdev)) {
                /* Program the max ARB time for the PHY to 700us so we inter-operate with
                 * the PMC expander which shuts down PHYs if the expander PHY generates too
index 9b266c7..00afc73 100644 (file)
@@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers {
 #define SCU_AFE_XCVRCR_OFFSET       0x00DC
 #define SCU_AFE_LUTCR_OFFSET        0x00E0
 
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT          (0UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK           (0x000000FFUL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT                 (8UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK                  (0x0000FF00UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT         (16UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK          (0x00FF0000UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT              (24UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK               (0xFF000000UL)
+
+#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value)
+
 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT                  (0)
 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK                   (0x00000003)
 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1                   (0)
index a46e07a..b5d3a8c 100644 (file)
@@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq)
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                return SCI_SUCCESS;
        case SCI_REQ_TASK_WAIT_TC_RESP:
+               /* The task frame was already confirmed to have been
+                * sent by the SCU HW.  Since the state machine is
+                * now only waiting for the task response itself,
+                * abort the request and complete it immediately
+                * and don't wait for the task response.
+                */
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_ABORTING:
-               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
-               return SCI_SUCCESS;
+               /* If a request has a termination requested twice, return
+                * a failure indication, since HW confirmation of the first
+                * abort is still outstanding.
+                */
        case SCI_REQ_COMPLETED:
        default:
                dev_warn(&ireq->owning_controller->pdev->dev,
@@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion(
        }
 }
 
-static void isci_request_process_stp_response(struct sas_task *task,
-                                             void *response_buffer)
+static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
 {
-       struct dev_to_host_fis *d2h_reg_fis = response_buffer;
        struct task_status_struct *ts = &task->task_status;
        struct ata_task_resp *resp = (void *)&ts->buf[0];
 
-       resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
-       memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
+       resp->frame_len = sizeof(*fis);
+       memcpy(resp->ending_fis, fis, sizeof(*fis));
        ts->buf_valid_size = sizeof(*resp);
 
-       /**
-        * If the device fault bit is set in the status register, then
+       /* If the device fault bit is set in the status register, then
         * set the sense data and return.
         */
-       if (d2h_reg_fis->status & ATA_DF)
+       if (fis->status & ATA_DF)
                ts->stat = SAS_PROTO_RESPONSE;
        else
                ts->stat = SAM_STAT_GOOD;
@@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
 {
        struct sas_task *task = isci_request_access_task(request);
        struct ssp_response_iu *resp_iu;
-       void *resp_buf;
        unsigned long task_flags;
        struct isci_remote_device *idev = isci_lookup_device(task->dev);
        enum service_response response       = SAS_TASK_UNDELIVERED;
@@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
                                task);
 
                        if (sas_protocol_ata(task->task_proto)) {
-                               resp_buf = &request->stp.rsp;
-                               isci_request_process_stp_response(task,
-                                                                 resp_buf);
+                               isci_process_stp_response(task, &request->stp.rsp);
                        } else if (SAS_PROTOCOL_SSP == task->task_proto) {
 
                                /* crack the iu response buffer. */
index e9e1e2a..16f88ab 100644 (file)
@@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
         */
        buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
        header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
-       size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
+       size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
 
        /*
         * The Unsolicited Frame buffers are set at the start of the UF
index 31cb950..75d8966 100644 (file)
@@ -214,7 +214,7 @@ struct sci_uf_address_table_array {
         * starting address of the UF address table.
         * 64-bit pointers are required by the hardware.
         */
-       dma_addr_t *array;
+       u64 *array;
 
        /**
         * This field specifies the physical address location for the UF
index 01ff082..d261e98 100644 (file)
@@ -494,6 +494,9 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
         */
        error = lport->tt.frame_send(lport, fp);
 
+       if (fh->fh_type == FC_TYPE_BLS)
+               return error;
+
        /*
         * Update the exchange and sequence flags,
         * assuming all frames for the sequence have been sent.
@@ -575,42 +578,35 @@ static void fc_seq_set_resp(struct fc_seq *sp,
 }
 
 /**
- * fc_seq_exch_abort() - Abort an exchange and sequence
- * @req_sp:    The sequence to be aborted
+ * fc_exch_abort_locked() - Abort an exchange
+ * @ep:        The exchange to be aborted
  * @timer_msec: The period of time to wait before aborting
  *
- * Generally called because of a timeout or an abort from the upper layer.
+ * Locking notes:  Called with exch lock held
+ *
+ * Return value: 0 on success else error code
  */
-static int fc_seq_exch_abort(const struct fc_seq *req_sp,
-                            unsigned int timer_msec)
+static int fc_exch_abort_locked(struct fc_exch *ep,
+                               unsigned int timer_msec)
 {
        struct fc_seq *sp;
-       struct fc_exch *ep;
        struct fc_frame *fp;
        int error;
 
-       ep = fc_seq_exch(req_sp);
-
-       spin_lock_bh(&ep->ex_lock);
        if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
-           ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
-               spin_unlock_bh(&ep->ex_lock);
+           ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
                return -ENXIO;
-       }
 
        /*
         * Send the abort on a new sequence if possible.
         */
        sp = fc_seq_start_next_locked(&ep->seq);
-       if (!sp) {
-               spin_unlock_bh(&ep->ex_lock);
+       if (!sp)
                return -ENOMEM;
-       }
 
        ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
        if (timer_msec)
                fc_exch_timer_set_locked(ep, timer_msec);
-       spin_unlock_bh(&ep->ex_lock);
 
        /*
         * If not logged into the fabric, don't send ABTS but leave
@@ -632,6 +628,28 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
        return error;
 }
 
+/**
+ * fc_seq_exch_abort() - Abort an exchange and sequence
+ * @req_sp:    The sequence to be aborted
+ * @timer_msec: The period of time to wait before aborting
+ *
+ * Generally called because of a timeout or an abort from the upper layer.
+ *
+ * Return value: 0 on success else error code
+ */
+static int fc_seq_exch_abort(const struct fc_seq *req_sp,
+                            unsigned int timer_msec)
+{
+       struct fc_exch *ep;
+       int error;
+
+       ep = fc_seq_exch(req_sp);
+       spin_lock_bh(&ep->ex_lock);
+       error = fc_exch_abort_locked(ep, timer_msec);
+       spin_unlock_bh(&ep->ex_lock);
+       return error;
+}
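
The refactoring above splits the abort logic into fc_exch_abort_locked(), which assumes the exchange lock is already held, and a thin fc_seq_exch_abort() wrapper that takes the lock itself; this lets fc_exch_reset(), which already holds ep->ex_lock, abort any outstanding sequence without self-deadlocking. A minimal sketch of the _locked/wrapper split, using a hypothetical exchange structure and a pthread mutex in place of the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

struct exch {
        pthread_mutex_t lock;
        int aborted;
};

/* Core logic; caller must already hold ep->lock. */
static int exch_abort_locked(struct exch *ep)
{
        if (ep->aborted)
                return -1;      /* already being torn down */
        ep->aborted = 1;
        return 0;
}

/* Convenience wrapper for callers that do not hold the lock. */
static int exch_abort(struct exch *ep)
{
        int err;

        pthread_mutex_lock(&ep->lock);
        err = exch_abort_locked(ep);
        pthread_mutex_unlock(&ep->lock);
        return err;
}

/* A reset path that already holds the lock calls the _locked variant. */
static void exch_reset(struct exch *ep)
{
        pthread_mutex_lock(&ep->lock);
        exch_abort_locked(ep);
        /* ...further cleanup under the same lock... */
        pthread_mutex_unlock(&ep->lock);
}

int main(void)
{
        struct exch ep = { .lock = PTHREAD_MUTEX_INITIALIZER, .aborted = 0 };

        printf("abort: %d\n", exch_abort(&ep));
        exch_reset(&ep);
        return 0;
}
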
+
 /**
  * fc_exch_timeout() - Handle exchange timer expiration
  * @work: The work_struct identifying the exchange that timed out
@@ -1715,6 +1733,7 @@ static void fc_exch_reset(struct fc_exch *ep)
        int rc = 1;
 
        spin_lock_bh(&ep->ex_lock);
+       fc_exch_abort_locked(ep, 0);
        ep->state |= FC_EX_RST_CLEANUP;
        if (cancel_delayed_work(&ep->timeout_work))
                atomic_dec(&ep->ex_refcnt);     /* drop hold for timer */
@@ -1962,6 +1981,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
        struct fc_exch *ep;
        struct fc_seq *sp = NULL;
        struct fc_frame_header *fh;
+       struct fc_fcp_pkt *fsp = NULL;
        int rc = 1;
 
        ep = fc_exch_alloc(lport, fp);
@@ -1984,8 +2004,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
        fc_exch_setup_hdr(ep, fp, ep->f_ctl);
        sp->cnt++;
 
-       if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
+       if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
+               fsp = fr_fsp(fp);
                fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
+       }
 
        if (unlikely(lport->tt.frame_send(lport, fp)))
                goto err;
@@ -1999,7 +2021,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
        spin_unlock_bh(&ep->ex_lock);
        return sp;
 err:
-       fc_fcp_ddp_done(fr_fsp(fp));
+       if (fsp)
+               fc_fcp_ddp_done(fsp);
        rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
        if (!rc)
index afb63c8..4c41ee8 100644 (file)
@@ -2019,6 +2019,11 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
        struct fc_fcp_internal *si;
        int rc = FAILED;
        unsigned long flags;
+       int rval;
+
+       rval = fc_block_scsi_eh(sc_cmd);
+       if (rval)
+               return rval;
 
        lport = shost_priv(sc_cmd->device->host);
        if (lport->state != LPORT_ST_READY)
@@ -2068,9 +2073,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
        int rc = FAILED;
        int rval;
 
-       rval = fc_remote_port_chkready(rport);
+       rval = fc_block_scsi_eh(sc_cmd);
        if (rval)
-               goto out;
+               return rval;
 
        lport = shost_priv(sc_cmd->device->host);
 
@@ -2116,6 +2121,8 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
 
        FC_SCSI_DBG(lport, "Resetting host\n");
 
+       fc_block_scsi_eh(sc_cmd);
+
        lport->tt.lport_reset(lport);
        wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
        while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
index e55ed9c..628f347 100644 (file)
@@ -88,6 +88,7 @@
  */
 
 #include <linux/timer.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 
@@ -1029,8 +1030,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
                           FCH_EVT_LIPRESET, 0);
        fc_vports_linkchange(lport);
        fc_lport_reset_locked(lport);
-       if (lport->link_up)
+       if (lport->link_up) {
+               /*
+                * Wait up to the resource allocation timeout before
+                * re-logging in, since incomplete FIP exchanges from
+                * the last session may collide with exchanges in the
+                * new session.
+                */
+               msleep(lport->r_a_tov);
                fc_lport_enter_flogi(lport);
+       }
 }
 
 /**
index f84084b..16ad97d 100644 (file)
@@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
        list_for_each_entry(ch, &ex->children, siblings) {
                if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
                        res = sas_find_bcast_dev(ch, src_dev);
-                       if (src_dev)
+                       if (*src_dev)
                                return res;
                }
        }
@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
                sas_disable_routing(parent, phy->attached_sas_addr);
        }
        memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-       sas_port_delete_phy(phy->port, phy->phy);
-       if (phy->port->num_phys == 0)
-               sas_port_delete(phy->port);
-       phy->port = NULL;
+       if (phy->port) {
+               sas_port_delete_phy(phy->port, phy->phy);
+               if (phy->port->num_phys == 0)
+                       sas_port_delete(phy->port);
+               phy->port = NULL;
+       }
 }
 
 static int sas_discover_bfs_by_root_level(struct domain_device *root,
index 7836eb0..a31e05f 100644 (file)
@@ -1786,13 +1786,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
                        fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
        }
 
-       if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
+       if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                if (ha->fw_attributes & BIT_4) {
+                       int prot = 0;
                        vha->flags.difdix_supported = 1;
                        ql_dbg(ql_dbg_user, vha, 0x7082,
                            "Registered for DIF/DIX type 1 and 3 protection.\n");
+                       if (ql2xenabledif == 1)
+                               prot = SHOST_DIX_TYPE0_PROTECTION;
                        scsi_host_set_prot(vha->host,
-                           SHOST_DIF_TYPE1_PROTECTION
+                           prot | SHOST_DIF_TYPE1_PROTECTION
                            | SHOST_DIF_TYPE2_PROTECTION
                            | SHOST_DIF_TYPE3_PROTECTION
                            | SHOST_DIX_TYPE1_PROTECTION
index 2155071..d79cd8a 100644 (file)
@@ -8,24 +8,24 @@
 /*
  * Table for showing the current message id in use for particular level
  * Change this table for addition of log/debug messages.
- * -----------------------------------------------------
- * |             Level            |   Last Value Used  |
- * -----------------------------------------------------
- * | Module Init and Probe        |       0x0116       |
- * | Mailbox commands             |       0x111e       |
- * | Device Discovery             |       0x2083       |
- * | Queue Command and IO tracing |       0x302e       |
- * | DPC Thread                   |       0x401c       |
- * | Async Events                 |       0x5059       |
- * | Timer Routines               |       0x600d       |
- * | User Space Interactions      |       0x709c       |
- * | Task Management              |       0x8043       |
- * | AER/EEH                      |       0x900f       |
- * | Virtual Port                 |       0xa007       |
- * | ISP82XX Specific             |       0xb027       |
- * | MultiQ                       |       0xc00b       |
- * | Misc                         |       0xd00b       |
- * -----------------------------------------------------
+ * ----------------------------------------------------------------------
+ * |             Level            |   Last Value Used  |     Holes     |
+ * ----------------------------------------------------------------------
+ * | Module Init and Probe        |       0x0116       |               |
+ * | Mailbox commands             |       0x1126       |               |
+ * | Device Discovery             |       0x2083       |               |
+ * | Queue Command and IO tracing |       0x302e       |     0x3008     |
+ * | DPC Thread                   |       0x401c       |               |
+ * | Async Events                 |       0x5059       |               |
+ * | Timer Routines               |       0x600d       |               |
+ * | User Space Interactions      |       0x709d       |               |
+ * | Task Management              |       0x8041       |               |
+ * | AER/EEH                      |       0x900f       |               |
+ * | Virtual Port                 |       0xa007       |               |
+ * | ISP82XX Specific             |       0xb04f       |               |
+ * | MultiQ                       |       0xc00b       |               |
+ * | Misc                         |       0xd00b       |               |
+ * ----------------------------------------------------------------------
  */
 
 #include "qla_def.h"
index cc5a792..a03eaf4 100644 (file)
@@ -2529,6 +2529,7 @@ struct qla_hw_data {
 #define DT_ISP8021                     BIT_14
 #define DT_ISP_LAST                    (DT_ISP8021 << 1)
 
+#define DT_T10_PI                       BIT_25
 #define DT_IIDMA                        BIT_26
 #define DT_FWI2                         BIT_27
 #define DT_ZIO_SUPPORTED                BIT_28
@@ -2572,6 +2573,7 @@ struct qla_hw_data {
 #define IS_NOCACHE_VPD_TYPE(ha)        (IS_QLA81XX(ha))
 #define IS_ALOGIO_CAPABLE(ha)  (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
 
+#define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI)
 #define IS_IIDMA_CAPABLE(ha)    ((ha)->device_type & DT_IIDMA)
 #define IS_FWI2_CAPABLE(ha)     ((ha)->device_type & DT_FWI2)
 #define IS_ZIO_SUPPORTED(ha)    ((ha)->device_type & DT_ZIO_SUPPORTED)
index 691783a..aa69486 100644 (file)
@@ -537,6 +537,11 @@ struct sts_entry_24xx {
        /*
         * If DIF Error is set in comp_status, these additional fields are
         * defined:
+        *
+        * !!! NOTE: Firmware sends expected/actual DIF data in big endian
+        * format; but all of the "data" field gets swab32-d in the beginning
+        * of qla2x00_status_entry().
+        *
         * &data[10] : uint8_t report_runt_bg[2];       - computed guard
         * &data[12] : uint8_t actual_dif[8];           - DIF Data received
         * &data[20] : uint8_t expected_dif[8];         - DIF Data computed
index def6942..37da04d 100644 (file)
@@ -3838,15 +3838,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
                req = vha->req;
        rsp = req->rsp;
 
-       atomic_set(&vha->loop_state, LOOP_UPDATE);
        clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
        if (vha->flags.online) {
                if (!(rval = qla2x00_fw_ready(vha))) {
                        /* Wait at most MAX_TARGET RSCNs for a stable link. */
                        wait_time = 256;
                        do {
-                               atomic_set(&vha->loop_state, LOOP_UPDATE);
-
                                /* Issue a marker after FW becomes ready. */
                                qla2x00_marker(vha, req, rsp, 0, 0,
                                        MK_SYNC_ALL);
index d2e904b..9902834 100644 (file)
@@ -102,3 +102,32 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
                    fcport->d_id.b.al_pa);
        }
 }
+
+static inline int
+qla2x00_hba_err_chk_enabled(srb_t *sp)
+{
+       /*
+        * Uncomment when corresponding SCSI changes are done.
+        *
+       if (!sp->cmd->prot_chk)
+               return 0;
+        *
+        */
+
+       switch (scsi_get_prot_op(sp->cmd)) {
+       case SCSI_PROT_READ_STRIP:
+       case SCSI_PROT_WRITE_INSERT:
+               if (ql2xenablehba_err_chk >= 1)
+                       return 1;
+               break;
+       case SCSI_PROT_READ_PASS:
+       case SCSI_PROT_WRITE_PASS:
+               if (ql2xenablehba_err_chk >= 2)
+                       return 1;
+               break;
+       case SCSI_PROT_READ_INSERT:
+       case SCSI_PROT_WRITE_STRIP:
+               return 1;
+       }
+       return 0;
+}
index 49d6906..dbec896 100644 (file)
@@ -709,20 +709,28 @@ struct fw_dif_context {
  *
  */
 static inline void
-qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
+qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
     unsigned int protcnt)
 {
-       struct sd_dif_tuple *spt;
+       struct scsi_cmnd *cmd = sp->cmd;
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
-       unsigned char op = scsi_get_prot_op(cmd);
 
        switch (scsi_get_prot_type(cmd)) {
-       /* For TYPE 0 protection: no checking */
        case SCSI_PROT_DIF_TYPE0:
-               pkt->ref_tag_mask[0] = 0x00;
-               pkt->ref_tag_mask[1] = 0x00;
-               pkt->ref_tag_mask[2] = 0x00;
-               pkt->ref_tag_mask[3] = 0x00;
+               /*
+                * No check for ql2xenablehba_err_chk, as it would be an
+                * I/O error if hba tag generation is not done.
+                */
+               pkt->ref_tag = cpu_to_le32((uint32_t)
+                   (0xffffffff & scsi_get_lba(cmd)));
+
+               if (!qla2x00_hba_err_chk_enabled(sp))
+                       break;
+
+               pkt->ref_tag_mask[0] = 0xff;
+               pkt->ref_tag_mask[1] = 0xff;
+               pkt->ref_tag_mask[2] = 0xff;
+               pkt->ref_tag_mask[3] = 0xff;
                break;
 
        /*
@@ -730,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
-               if (!ql2xenablehba_err_chk)
-                       break;
-
-               if (scsi_prot_sg_count(cmd)) {
-                       spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
-                           scsi_prot_sglist(cmd)[0].offset;
-                       pkt->app_tag = swab32(spt->app_tag);
-                       pkt->app_tag_mask[0] =  0xff;
-                       pkt->app_tag_mask[1] =  0xff;
-               }
+               pkt->app_tag = __constant_cpu_to_le16(0);
+               pkt->app_tag_mask[0] = 0x0;
+               pkt->app_tag_mask[1] = 0x0;
 
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
 
+               if (!qla2x00_hba_err_chk_enabled(sp))
+                       break;
+
                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
@@ -763,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
-               if (!ql2xenablehba_err_chk)
+               pkt->ref_tag = cpu_to_le32((uint32_t)
+                   (0xffffffff & scsi_get_lba(cmd)));
+               pkt->app_tag = __constant_cpu_to_le16(0);
+               pkt->app_tag_mask[0] = 0x0;
+               pkt->app_tag_mask[1] = 0x0;
+
+               if (!qla2x00_hba_err_chk_enabled(sp))
                        break;
 
-               if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
-                   op == SCSI_PROT_WRITE_PASS)) {
-                       spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
-                           scsi_prot_sglist(cmd)[0].offset;
-                       ql_dbg(ql_dbg_io, vha, 0x3008,
-                           "LBA from user %p, lba = 0x%x for cmd=%p.\n",
-                           spt, (int)spt->ref_tag, cmd);
-                       pkt->ref_tag = swab32(spt->ref_tag);
-                       pkt->app_tag_mask[0] = 0x0;
-                       pkt->app_tag_mask[1] = 0x0;
-               } else {
-                       pkt->ref_tag = cpu_to_le32((uint32_t)
-                           (0xffffffff & scsi_get_lba(cmd)));
-                       pkt->app_tag = __constant_cpu_to_le16(0);
-                       pkt->app_tag_mask[0] = 0x0;
-                       pkt->app_tag_mask[1] = 0x0;
-               }
                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
@@ -798,7 +791,161 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
            scsi_get_prot_type(cmd), cmd);
 }
 
+struct qla2_sgx {
+       dma_addr_t              dma_addr;       /* OUT */
+       uint32_t                dma_len;        /* OUT */
+
+       uint32_t                tot_bytes;      /* IN */
+       struct scatterlist      *cur_sg;        /* IN */
+
+       /* for book keeping, bzero on initial invocation */
+       uint32_t                bytes_consumed;
+       uint32_t                num_bytes;
+       uint32_t                tot_partial;
+
+       /* for debugging */
+       uint32_t                num_sg;
+       srb_t                   *sp;
+};
 
+static int
+qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
+       uint32_t *partial)
+{
+       struct scatterlist *sg;
+       uint32_t cumulative_partial, sg_len;
+       dma_addr_t sg_dma_addr;
+
+       if (sgx->num_bytes == sgx->tot_bytes)
+               return 0;
+
+       sg = sgx->cur_sg;
+       cumulative_partial = sgx->tot_partial;
+
+       sg_dma_addr = sg_dma_address(sg);
+       sg_len = sg_dma_len(sg);
+
+       sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
+
+       if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
+               sgx->dma_len = (blk_sz - cumulative_partial);
+               sgx->tot_partial = 0;
+               sgx->num_bytes += blk_sz;
+               *partial = 0;
+       } else {
+               sgx->dma_len = sg_len - sgx->bytes_consumed;
+               sgx->tot_partial += sgx->dma_len;
+               *partial = 1;
+       }
+
+       sgx->bytes_consumed += sgx->dma_len;
+
+       if (sg_len == sgx->bytes_consumed) {
+               sg = sg_next(sg);
+               sgx->num_sg++;
+               sgx->cur_sg = sg;
+               sgx->bytes_consumed = 0;
+       }
+
+       return 1;
+}
+
+static int
+qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
+       uint32_t *dsd, uint16_t tot_dsds)
+{
+       void *next_dsd;
+       uint8_t avail_dsds = 0;
+       uint32_t dsd_list_len;
+       struct dsd_dma *dsd_ptr;
+       struct scatterlist *sg_prot;
+       uint32_t *cur_dsd = dsd;
+       uint16_t        used_dsds = tot_dsds;
+
+       uint32_t        prot_int;
+       uint32_t        partial;
+       struct qla2_sgx sgx;
+       dma_addr_t      sle_dma;
+       uint32_t        sle_dma_len, tot_prot_dma_len = 0;
+       struct scsi_cmnd *cmd = sp->cmd;
+
+       prot_int = cmd->device->sector_size;
+
+       memset(&sgx, 0, sizeof(struct qla2_sgx));
+       sgx.tot_bytes = scsi_bufflen(sp->cmd);
+       sgx.cur_sg = scsi_sglist(sp->cmd);
+       sgx.sp = sp;
+
+       sg_prot = scsi_prot_sglist(sp->cmd);
+
+       while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
+
+               sle_dma = sgx.dma_addr;
+               sle_dma_len = sgx.dma_len;
+alloc_and_fill:
+               /* Allocate additional continuation packets? */
+               if (avail_dsds == 0) {
+                       avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+                                       QLA_DSDS_PER_IOCB : used_dsds;
+                       dsd_list_len = (avail_dsds + 1) * 12;
+                       used_dsds -= avail_dsds;
+
+                       /* allocate tracking DS */
+                       dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+                       if (!dsd_ptr)
+                               return 1;
+
+                       /* allocate new list */
+                       dsd_ptr->dsd_addr = next_dsd =
+                           dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+                               &dsd_ptr->dsd_list_dma);
+
+                       if (!next_dsd) {
+                               /*
+                                * Need to cleanup only this dsd_ptr, rest
+                                * will be done by sp_free_dma()
+                                */
+                               kfree(dsd_ptr);
+                               return 1;
+                       }
+
+                       list_add_tail(&dsd_ptr->list,
+                           &((struct crc_context *)sp->ctx)->dsd_list);
+
+                       sp->flags |= SRB_CRC_CTX_DSD_VALID;
+
+                       /* add new list to cmd iocb or last list */
+                       *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+                       *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+                       *cur_dsd++ = dsd_list_len;
+                       cur_dsd = (uint32_t *)next_dsd;
+               }
+               *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+               *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+               *cur_dsd++ = cpu_to_le32(sle_dma_len);
+               avail_dsds--;
+
+               if (partial == 0) {
+                       /* Got a full protection interval */
+                       sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
+                       sle_dma_len = 8;
+
+                       tot_prot_dma_len += sle_dma_len;
+                       if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
+                               tot_prot_dma_len = 0;
+                               sg_prot = sg_next(sg_prot);
+                       }
+
+                       partial = 1; /* So as to not re-enter this block */
+                       goto alloc_and_fill;
+               }
+       }
+       /* Null termination */
+       *cur_dsd++ = 0;
+       *cur_dsd++ = 0;
+       *cur_dsd++ = 0;
+       return 0;
+}
 static int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
@@ -981,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        struct scsi_cmnd        *cmd;
        struct scatterlist      *cur_seg;
        int                     sgc;
-       uint32_t                total_bytes;
+       uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
@@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
                    __constant_cpu_to_le16(CF_READ_DATA);
        }
 
-       tot_prot_dsds = scsi_prot_sg_count(cmd);
-       if (!tot_prot_dsds)
+       if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
+           (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
+           (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
+           (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;
 
        /* Allocate CRC context from global pool */
@@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 
        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
 
-       qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
+       qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);
 
        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
@@ -1076,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
                fcp_cmnd->additional_cdb_len |= 2;
 
        int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
-       host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1107,15 +1255,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
 
        /* Compute dif len and adjust data len to include protection */
-       total_bytes = data_bytes;
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
-       if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
-               dif_bytes = (data_bytes / blk_size) * 8;
-               total_bytes += dif_bytes;
+       dif_bytes = (data_bytes / blk_size) * 8;
+
+       switch (scsi_get_prot_op(sp->cmd)) {
+       case SCSI_PROT_READ_INSERT:
+       case SCSI_PROT_WRITE_STRIP:
+           total_bytes = data_bytes;
+           data_bytes += dif_bytes;
+           break;
+
+       case SCSI_PROT_READ_STRIP:
+       case SCSI_PROT_WRITE_INSERT:
+       case SCSI_PROT_READ_PASS:
+       case SCSI_PROT_WRITE_PASS:
+           total_bytes = data_bytes + dif_bytes;
+           break;
+       default:
+           BUG();
        }
 
-       if (!ql2xenablehba_err_chk)
+       if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
 
        if (!bundling) {
@@ -1151,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
-       if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
+
+       if (!bundling && tot_prot_dsds) {
+               if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
+                   cur_dsd, tot_dsds))
+                       goto crc_queuing_error;
+       } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;
 
@@ -1414,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp)
                        goto queuing_error;
                else
                        sp->flags |= SRB_DMA_VALID;
+
+               if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+                   (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+                       struct qla2_sgx sgx;
+                       uint32_t        partial;
+
+                       memset(&sgx, 0, sizeof(struct qla2_sgx));
+                       sgx.tot_bytes = scsi_bufflen(cmd);
+                       sgx.cur_sg = scsi_sglist(cmd);
+                       sgx.sp = sp;
+
+                       nseg = 0;
+                       while (qla24xx_get_one_block_sg(
+                           cmd->device->sector_size, &sgx, &partial))
+                               nseg++;
+               }
        } else
                nseg = 0;
 
@@ -1428,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
                        goto queuing_error;
                else
                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+               if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+                   (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+                       nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+               }
        } else {
                nseg = 0;
        }
@@ -1454,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
        /* Build header part of command packet (excluding the OPCODE). */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
+       sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;
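
The qla2_sgx walker added in this file chops the command's data scatter/gather list into protection-interval-sized pieces: each call to qla24xx_get_one_block_sg() returns the next DMA chunk, flags it as partial until the bytes gathered since the last boundary reach the block size, and the DIF paths above count those chunks to size their DSD lists. A standalone sketch of the same splitting arithmetic over a hypothetical list of segment lengths (1000 + 300 + 748 bytes split on a 512-byte interval):

#include <stdio.h>
#include <stdint.h>

struct seg { uint32_t len; };   /* hypothetical S/G segment lengths */

/*
 * Emit DMA chunks that never cross a blk_sz boundary, mirroring the
 * bookkeeping in qla24xx_get_one_block_sg(): a chunk stays "partial"
 * until the bytes gathered since the last boundary reach blk_sz.
 */
static void walk_blocks(const struct seg *segs, int nsegs, uint32_t blk_sz)
{
        uint32_t consumed = 0, tot_partial = 0;
        int i = 0, blocks = 0;

        while (i < nsegs) {
                uint32_t avail = segs[i].len - consumed;
                uint32_t chunk;

                if (tot_partial + avail >= blk_sz) {
                        chunk = blk_sz - tot_partial;
                        tot_partial = 0;
                        blocks++;
                        printf("seg %d: %u bytes (block %d complete)\n",
                               i, chunk, blocks);
                } else {
                        chunk = avail;
                        tot_partial += chunk;
                        printf("seg %d: %u bytes (partial)\n", i, chunk);
                }
                consumed += chunk;
                if (consumed == segs[i].len) {  /* move to the next segment */
                        consumed = 0;
                        i++;
                }
        }
}

int main(void)
{
        struct seg segs[] = { { 1000 }, { 300 }, { 748 } };     /* 2048 bytes */

        walk_blocks(segs, 3, 512);      /* 512-byte protection interval */
        return 0;
}
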
 
index b16b772..8a7591f 100644 (file)
@@ -719,7 +719,6 @@ skip_rio:
                        vha->flags.rscn_queue_overflow = 1;
                }
 
-               atomic_set(&vha->loop_state, LOOP_UPDATE);
                atomic_set(&vha->loop_down_timer, 0);
                vha->flags.management_server_logged_in = 0;
 
@@ -1435,25 +1434,27 @@ struct scsi_dif_tuple {
  * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
  * to indicate to the kernel that the HBA detected error.
  */
-static inline void
+static inline int
 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
 {
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct scsi_cmnd *cmd = sp->cmd;
-       struct scsi_dif_tuple   *ep =
-                       (struct scsi_dif_tuple *)&sts24->data[20];
-       struct scsi_dif_tuple   *ap =
-                       (struct scsi_dif_tuple *)&sts24->data[12];
+       uint8_t         *ap = &sts24->data[12];
+       uint8_t         *ep = &sts24->data[20];
        uint32_t        e_ref_tag, a_ref_tag;
        uint16_t        e_app_tag, a_app_tag;
        uint16_t        e_guard, a_guard;
 
-       e_ref_tag = be32_to_cpu(ep->ref_tag);
-       a_ref_tag = be32_to_cpu(ap->ref_tag);
-       e_app_tag = be16_to_cpu(ep->app_tag);
-       a_app_tag = be16_to_cpu(ap->app_tag);
-       e_guard = be16_to_cpu(ep->guard);
-       a_guard = be16_to_cpu(ap->guard);
+       /*
+        * swab32 of the "data" field in the beginning of qla2x00_status_entry()
+        * would make guard field appear at offset 2
+        */
+       a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
+       a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
+       a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
+       e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
+       e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
+       e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
 
        ql_dbg(ql_dbg_io, vha, 0x3023,
            "iocb(s) %p Returned STATUS.\n", sts24);
@@ -1465,6 +1466,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
            cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
            a_app_tag, e_app_tag, a_guard, e_guard);
 
+       /*
+        * Ignore sector if:
+        * For type     3: ref & app tag is all 'f's
+        * For type 0,1,2: app tag is all 'f's
+        */
+       if ((a_app_tag == 0xffff) &&
+           ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
+            (a_ref_tag == 0xffffffff))) {
+               uint32_t blocks_done, resid;
+               sector_t lba_s = scsi_get_lba(cmd);
+
+               /* 2TB boundary case covered automatically with this */
+               blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
+
+               resid = scsi_bufflen(cmd) - (blocks_done *
+                   cmd->device->sector_size);
+
+               scsi_set_resid(cmd, resid);
+               cmd->result = DID_OK << 16;
+
+               /* Update protection tag */
+               if (scsi_prot_sg_count(cmd)) {
+                       uint32_t i, j = 0, k = 0, num_ent;
+                       struct scatterlist *sg;
+                       struct sd_dif_tuple *spt;
+
+                       /* Patch the corresponding protection tags */
+                       scsi_for_each_prot_sg(cmd, sg,
+                           scsi_prot_sg_count(cmd), i) {
+                               num_ent = sg_dma_len(sg) / 8;
+                               if (k + num_ent < blocks_done) {
+                                       k += num_ent;
+                                       continue;
+                               }
+                               j = blocks_done - k - 1;
+                               k = blocks_done;
+                               break;
+                       }
+
+                       if (k != blocks_done) {
+                               qla_printk(KERN_WARNING, sp->fcport->vha->hw,
+                                   "unexpected tag values tag:lba=%x:%llx)\n",
+                                   e_ref_tag, (unsigned long long)lba_s);
+                               return 1;
+                       }
+
+                       spt = page_address(sg_page(sg)) + sg->offset;
+                       spt += j;
+
+                       spt->app_tag = 0xffff;
+                       if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
+                               spt->ref_tag = 0xffffffff;
+               }
+
+               return 0;
+       }
+
        /* check guard */
        if (e_guard != a_guard) {
                scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
@@ -1472,28 +1530,30 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
                set_driver_byte(cmd, DRIVER_SENSE);
                set_host_byte(cmd, DID_ABORT);
                cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
-               return;
+               return 1;
        }
 
-       /* check appl tag */
-       if (e_app_tag != a_app_tag) {
+       /* check ref tag */
+       if (e_ref_tag != a_ref_tag) {
                scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
-                   0x10, 0x2);
+                   0x10, 0x3);
                set_driver_byte(cmd, DRIVER_SENSE);
                set_host_byte(cmd, DID_ABORT);
                cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
-               return;
+               return 1;
        }
 
-       /* check ref tag */
-       if (e_ref_tag != a_ref_tag) {
+       /* check appl tag */
+       if (e_app_tag != a_app_tag) {
                scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
-                   0x10, 0x3);
+                   0x10, 0x2);
                set_driver_byte(cmd, DRIVER_SENSE);
                set_host_byte(cmd, DID_ABORT);
                cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
-               return;
+               return 1;
        }
+
+       return 1;
 }
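
Note: because the status entry's data area has already been byte-swapped 32 bits at a time before this helper runs, the 8-byte protection tuple is read field by field instead of through the big-endian struct scsi_dif_tuple view it replaces. A standalone demonstration of why the application tag lands at offset 0, the guard at offset 2 and the reference tag at offset 4 (the values and the swab32() helper below are illustrative, not driver code; the wire layout assumed is the standard T10 tuple of 2-byte guard, 2-byte app tag, 4-byte ref tag, big-endian):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t swab32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0xff00) |
                   ((x << 8) & 0xff0000) | (x << 24);
    }

    int main(void)
    {
            /* Big-endian on the wire: guard 0xBEEF, app 0x1234, ref 0x01020304 */
            uint8_t t[8] = { 0xBE, 0xEF, 0x12, 0x34, 0x01, 0x02, 0x03, 0x04 };
            uint32_t w[2];
            uint16_t app, guard;
            uint32_t ref;

            memcpy(w, t, sizeof(t));
            w[0] = swab32(w[0]);            /* mimics the up-front word swap */
            w[1] = swab32(w[1]);
            memcpy(t, w, sizeof(t));

            app   = (uint16_t)(t[0] | t[1] << 8);   /* offset 0, little-endian */
            guard = (uint16_t)(t[2] | t[3] << 8);   /* offset 2 */
            ref   = (uint32_t)t[4] | (uint32_t)t[5] << 8 |
                    (uint32_t)t[6] << 16 | (uint32_t)t[7] << 24;   /* offset 4 */

            printf("app=%04x guard=%04x ref=%08x\n", app, guard, ref);
            return 0;
    }

Running this prints app=1234 guard=beef ref=01020304, matching the offsets the rewritten helper reads from.
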
 
 /**
@@ -1767,7 +1827,7 @@ check_scsi_status:
                break;
 
        case CS_DIF_ERROR:
-               qla2x00_handle_dif_error(sp, sts24);
+               logit = qla2x00_handle_dif_error(sp, sts24);
                break;
        default:
                cp->result = DID_ERROR << 16;
@@ -2468,11 +2528,10 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
                goto skip_msi;
        }
 
-       if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
-               !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
+       if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
                ql_log(ql_log_warn, vha, 0x0035,
                    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
-                   ha->pdev->revision, ha->fw_attributes);
+                   ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
                goto skip_msix;
        }
 
index c706ed3..f488cc6 100644 (file)
@@ -472,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
        host->can_queue = base_vha->req->length + 128;
        host->this_id = 255;
        host->cmd_per_lun = 3;
-       if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
+       if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
index 5cbf33a..049807c 100644 (file)
@@ -2208,6 +2208,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
        struct qla_hw_data *ha;
        struct rsp_que *rsp;
        struct device_reg_82xx __iomem *reg;
+       unsigned long flags;
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -2218,11 +2219,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
 
        ha = rsp->hw;
        reg = &ha->iobase->isp82;
-       spin_lock_irq(&ha->hardware_lock);
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
        WRT_REG_DWORD(&reg->host_int, 0);
-       spin_unlock_irq(&ha->hardware_lock);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return IRQ_HANDLED;
 }
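
Note: switching the MSI-X handler from spin_lock_irq() to spin_lock_irqsave() preserves whatever interrupt state the handler was entered with, instead of unconditionally re-enabling interrupts on unlock. A kernel-style sketch of the pattern, with placeholder names rather than the driver's own symbols:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct example_dev {
            spinlock_t lock;
    };

    static irqreturn_t example_msix_handler(int irq, void *dev_id)
    {
            struct example_dev *dev = dev_id;
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);
            /* ... process the response queue ... */
            spin_unlock_irqrestore(&dev->lock, flags);

            return IRQ_HANDLED;
    }
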
 
@@ -2838,6 +2839,16 @@ sufficient_dsds:
                int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
 
+               /* build FCP_CMND IU */
+               memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+               int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+               ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+               if (cmd->sc_data_direction == DMA_TO_DEVICE)
+                       ctx->fcp_cmnd->additional_cdb_len |= 1;
+               else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+                       ctx->fcp_cmnd->additional_cdb_len |= 2;
+
                /*
                 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
                 */
@@ -2854,16 +2865,6 @@ sufficient_dsds:
                        }
                }
 
-               /* build FCP_CMND IU */
-               memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
-               int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
-               ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
-               if (cmd->sc_data_direction == DMA_TO_DEVICE)
-                       ctx->fcp_cmnd->additional_cdb_len |= 1;
-               else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
-                       ctx->fcp_cmnd->additional_cdb_len |= 2;
-
                memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
 
                fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
index e02df27..1e69527 100644 (file)
@@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
                "Maximum queue depth to report for target devices.");
 
 /* Do not change the value of this after module load */
-int ql2xenabledif = 1;
+int ql2xenabledif = 0;
 module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xenabledif,
                " Enable T10-CRC-DIF "
-               " Default is 0 - No DIF Support. 1 - Enable it");
+               " Default is 0 - No DIF Support. 1 - Enable it"
+               ", 2 - Enable DIF for all types, except Type 0.");
 
-int ql2xenablehba_err_chk;
+int ql2xenablehba_err_chk = 2;
 module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xenablehba_err_chk,
-               " Enable T10-CRC-DIF Error isolation by HBA"
-               " Default is 0 - Error isolation disabled, 1 - Enable it");
+               " Enable T10-CRC-DIF Error isolation by HBA:\n"
+               " Default is 2.\n"
+               "  0 -- Error isolation disabled\n"
+               "  1 -- Error isolation enabled only for DIX Type 0\n"
+               "  2 -- Error isolation enabled for all Types\n");
 
 int ql2xiidmaenable=1;
 module_param(ql2xiidmaenable, int, S_IRUGO);
@@ -909,7 +913,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
                    "Abort command mbx success.\n");
                wait = 1;
        }
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        qla2x00_sp_compl(ha, sp);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       /* Did the command return during mailbox execution? */
+       if (ret == FAILED && !CMD_SP(cmd))
+               ret = SUCCESS;
 
        /* Wait for the command to be returned. */
        if (wait) {
@@ -1317,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                                        qla2x00_sp_compl(ha, sp);
                                } else {
                                        ctx = sp->ctx;
-                                       if (ctx->type == SRB_LOGIN_CMD ||
-                                           ctx->type == SRB_LOGOUT_CMD) {
-                                               ctx->u.iocb_cmd->free(sp);
-                                       } else {
+                                       if (ctx->type == SRB_ELS_CMD_RPT ||
+                                           ctx->type == SRB_ELS_CMD_HST ||
+                                           ctx->type == SRB_CT_CMD) {
                                                struct fc_bsg_job *bsg_job =
                                                    ctx->u.bsg_job;
                                                if (bsg_job->request->msgcode
@@ -1332,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                                                kfree(sp->ctx);
                                                mempool_free(sp,
                                                        ha->srb_mempool);
+                                       } else {
+                                               ctx->u.iocb_cmd->free(sp);
                                        }
                                }
                        }
@@ -2251,7 +2263,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        host->this_id = 255;
        host->cmd_per_lun = 3;
        host->unique_id = host->host_no;
-       if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
+       if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
@@ -2378,13 +2390,16 @@ skip_dpc:
            "Detected hba at address=%p.\n",
            ha);
 
-       if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
+       if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                if (ha->fw_attributes & BIT_4) {
+                       int prot = 0;
                        base_vha->flags.difdix_supported = 1;
                        ql_dbg(ql_dbg_init, base_vha, 0x00f1,
                            "Registering for DIF/DIX type 1 and 3 protection.\n");
+                       if (ql2xenabledif == 1)
+                               prot = SHOST_DIX_TYPE0_PROTECTION;
                        scsi_host_set_prot(host,
-                           SHOST_DIF_TYPE1_PROTECTION
+                           prot | SHOST_DIF_TYPE1_PROTECTION
                            | SHOST_DIF_TYPE2_PROTECTION
                            | SHOST_DIF_TYPE3_PROTECTION
                            | SHOST_DIX_TYPE1_PROTECTION
index 062c97b..13b6357 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.07.03-k"
+#define QLA2XXX_VERSION      "8.03.07.07-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   3
index 2c33ce6..0f5599e 100644 (file)
@@ -1,6 +1,6 @@
 config SCSI_QLA_ISCSI
        tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
-       depends on PCI && SCSI
+       depends on PCI && SCSI && NET
        select SCSI_ISCSI_ATTRS
        ---help---
        This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
index d240755..24cacff 100644 (file)
@@ -825,6 +825,9 @@ static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
 {
        struct device *dev = mspi->dev;
 
+       if (!(mspi->flags & SPI_CPM_MODE))
+               return;
+
        dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
        dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
        cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
index 8ac6542..fa594d6 100644 (file)
@@ -786,9 +786,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
                int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
                if (cs_gpio < 0)
                        cs_gpio = mxc_platform_info->chipselect[i];
+
+               spi_imx->chipselect[i] = cs_gpio;
                if (cs_gpio < 0)
                        continue;
-               spi_imx->chipselect[i] = cs_gpio;
+
                ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
                if (ret) {
                        while (i > 0) {
index 1d23f38..6a80749 100644 (file)
@@ -50,6 +50,8 @@
 #define PCH_RX_THOLD           7
 #define PCH_RX_THOLD_MAX       15
 
+#define PCH_TX_THOLD           2
+
 #define PCH_MAX_BAUDRATE       5000000
 #define PCH_MAX_FIFO_DEPTH     16
 
@@ -58,6 +60,7 @@
 #define PCH_SLEEP_TIME         10
 
 #define SSN_LOW                        0x02U
+#define SSN_HIGH               0x03U
 #define SSN_NO_CONTROL         0x00U
 #define PCH_MAX_CS             0xFF
 #define PCI_DEVICE_ID_GE_SPI   0x8816
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
 
        /* if transfer complete interrupt */
        if (reg_spsr_val & SPSR_FI_BIT) {
-               if (tx_index < bpw_len)
+               if ((tx_index == bpw_len) && (rx_index == tx_index)) {
+                       /* disable interrupts */
+                       pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+
+                       /* transfer is completed;
+                          inform pch_spi_process_messages */
+                       data->transfer_complete = true;
+                       data->transfer_active = false;
+                       wake_up(&data->wait);
+               } else {
                        dev_err(&data->master->dev,
                                "%s : Transfer is not completed", __func__);
-               /* disable interrupts */
-               pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
-
-               /* transfer is completed;inform pch_spi_process_messages */
-               data->transfer_complete = true;
-               data->transfer_active = false;
-               wake_up(&data->wait);
+               }
        }
 }
 
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)
                        "%s returning due to suspend\n", __func__);
                return IRQ_NONE;
        }
-       if (data->use_dma)
-               return IRQ_NONE;
 
        io_remap_addr = data->io_remap_addr;
        spsr = io_remap_addr + PCH_SPSR;
 
        reg_spsr_val = ioread32(spsr);
 
-       if (reg_spsr_val & SPSR_ORF_BIT)
-               dev_err(&board_dat->pdev->dev, "%s Over run error", __func__);
+       if (reg_spsr_val & SPSR_ORF_BIT) {
+               dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
+               if (data->current_msg->complete != 0) {
+                       data->transfer_complete = true;
+                       data->current_msg->status = -EIO;
+                       data->current_msg->complete(data->current_msg->context);
+                       data->bcurrent_msg_processing = false;
+                       data->current_msg = NULL;
+                       data->cur_trans = NULL;
+               }
+       }
+
+       if (data->use_dma)
+               return IRQ_NONE;
 
        /* Check if the interrupt is for SPI device */
        if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
 
        wait_event_interruptible(data->wait, data->transfer_complete);
 
-       pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
-       dev_dbg(&data->master->dev,
-               "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
-
        /* clear all interrupts */
        pch_spi_writereg(data->master, PCH_SPSR,
                         pch_spi_readreg(data->master, PCH_SPSR));
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
        }
 }
 
-static void pch_spi_start_transfer(struct pch_spi_data *data)
+static int pch_spi_start_transfer(struct pch_spi_data *data)
 {
        struct pch_spi_dma_ctrl *dma;
        unsigned long flags;
+       int rtn;
 
        dma = &data->dma;
 
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
                                 initiating the transfer. */
        dev_dbg(&data->master->dev,
                "%s:waiting for transfer to get over\n", __func__);
-       wait_event_interruptible(data->wait, data->transfer_complete);
+       rtn = wait_event_interruptible_timeout(data->wait,
+                                              data->transfer_complete,
+                                              msecs_to_jiffies(2 * HZ));
 
        dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
                            DMA_FROM_DEVICE);
+
+       dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
+                           DMA_FROM_DEVICE);
+       memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
+
        async_tx_ack(dma->desc_rx);
        async_tx_ack(dma->desc_tx);
        kfree(dma->sg_tx_p);
        kfree(dma->sg_rx_p);
 
        spin_lock_irqsave(&data->lock, flags);
-       pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
-       dev_dbg(&data->master->dev,
-               "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
 
        /* clear fifo threshold, disable interrupts, disable SPI transfer */
        pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
        pch_spi_clear_fifo(data->master);
 
        spin_unlock_irqrestore(&data->lock, flags);
+
+       return rtn;
 }
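
Note: pch_spi_start_transfer() now reports whether the DMA completion ever arrived. wait_event_interruptible_timeout() returns 0 on timeout, a positive remaining-jiffies count when the condition became true, and -ERESTARTSYS when a signal arrived; the zero case is what the caller checks before touching the received data. A placeholder sketch of that return convention (not driver code):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    struct example_data {
            wait_queue_head_t wait;
            bool done;
    };

    /* Placeholder helper: map the three possible outcomes to an errno. */
    static int example_wait_done(struct example_data *d)
    {
            long rtn = wait_event_interruptible_timeout(d->wait, d->done,
                                                        msecs_to_jiffies(2000));
            if (rtn > 0)
                    return 0;               /* completed in time */
            if (rtn == 0)
                    return -ETIMEDOUT;      /* timed out */
            return rtn;                     /* -ERESTARTSYS */
    }
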
 
 static void pch_dma_rx_complete(void *arg)
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
        /* set receive fifo threshold and transmit fifo threshold */
        pch_spi_setclr_reg(data->master, PCH_SPCR,
                           ((size - 1) << SPCR_RFIC_FIELD) |
-                          ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) <<
-                           SPCR_TFIC_FIELD),
+                          (PCH_TX_THOLD << SPCR_TFIC_FIELD),
                           MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
 
        spin_unlock_irqrestore(&data->lock, flags);
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
        /* offset, length setting */
        sg = dma->sg_rx_p;
        for (i = 0; i < num; i++, sg++) {
-               if (i == 0) {
-                       sg->offset = 0;
+               if (i == (num - 2)) {
+                       sg->offset = size * i;
+                       sg->offset = sg->offset * (*bpw / 8);
                        sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
                                    sg->offset);
                        sg_dma_len(sg) = rem;
+               } else if (i == (num - 1)) {
+                       sg->offset = size * (i - 1) + rem;
+                       sg->offset = sg->offset * (*bpw / 8);
+                       sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+                                   sg->offset);
+                       sg_dma_len(sg) = size;
                } else {
-                       sg->offset = rem + size * (i - 1);
+                       sg->offset = size * i;
                        sg->offset = sg->offset * (*bpw / 8);
                        sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
                                    sg->offset);
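
Note: the reworked receive scatterlist places the odd-sized remainder in the second-to-last entry and a full-sized chunk last, with every offset scaled by the word size in bytes. A standalone walk-through of that indexing, using made-up values for num, size, rem and bits-per-word but mirroring the branch structure above:

    #include <stdio.h>

    int main(void)
    {
            int num = 4, size = 12, rem = 5, bpw = 8, i;

            for (i = 0; i < num; i++) {
                    int off, len;

                    if (i == num - 2) {             /* remainder chunk */
                            off = size * i;
                            len = rem;
                    } else if (i == num - 1) {      /* final full chunk */
                            off = size * (i - 1) + rem;
                            len = size;
                    } else {                        /* leading full chunks */
                            off = size * i;
                            len = size;
                    }
                    printf("sg[%d]: offset=%d len=%d\n", i, off * (bpw / 8), len);
            }
            return 0;
    }
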
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
        dma->desc_rx = desc_rx;
 
        /* TX */
+       if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+               num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+               size = PCH_DMA_TRANS_SIZE;
+               rem = 16;
+       } else {
+               num = 1;
+               size = data->bpw_len;
+               rem = data->bpw_len;
+       }
+
        dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
        sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
        /* offset, length setting */
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
        if (data->use_dma)
                pch_spi_request_dma(data,
                                    data->current_msg->spi->bits_per_word);
+       pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
        do {
                /* If we are already processing a message get the next
                transfer structure from the message otherwise retrieve
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
 
                if (data->use_dma) {
                        pch_spi_handle_dma(data, &bpw);
-                       pch_spi_start_transfer(data);
+                       if (!pch_spi_start_transfer(data))
+                               goto out;
                        pch_spi_copy_rx_data_for_dma(data, bpw);
                } else {
                        pch_spi_set_tx(data, &bpw);
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
 
        } while (data->cur_trans != NULL);
 
+out:
+       pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
        if (data->use_dma)
                pch_spi_release_dma(data);
 }
index 6859af0..7611def 100644 (file)
@@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,
                                   struct comedi_insn *insn,
                                   unsigned int *data);
 static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd);
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_ISA_DMA_API
 static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd);
+#endif
+#ifdef CONFIG_COMEDI_PCI
 static int labpc_find_device(struct comedi_device *dev, int bus, int slot);
 #endif
 static int labpc_dio_mem_callback(int dir, int port, int data,
index a3f5162..462fbc2 100644 (file)
@@ -1242,7 +1242,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
        int ret = 0;
 
        BUG_ON(!is_ephemeral(pool));
-       zbud_decompress(virt_to_page(data), pampd);
+       zbud_decompress((struct page *)(data), pampd);
        zbud_free_and_delist((struct zbud_hdr *)pampd);
        atomic_dec(&zcache_curr_eph_pampd_count);
        return ret;
index 497b2e7..5b77316 100644 (file)
@@ -1430,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(
        u8 DataSequenceInOrder = 0;
        u8 ErrorRecoveryLevel = 0, SessionType = 0;
        u8 IFMarker = 0, OFMarker = 0;
-       u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
+       u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
        u32 FirstBurstLength = 0, MaxBurstLength = 0;
        struct iscsi_param *param = NULL;
 
index a0d23bc..f00137f 100644 (file)
@@ -874,40 +874,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
        spin_unlock_bh(&sess->session_usage_lock);
 }
 
-/*
- *     Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
- *     array counts needed for sync and steering.
- */
-static int iscsit_determine_sync_and_steering_counts(
-       struct iscsi_conn *conn,
-       struct iscsi_data_count *count)
-{
-       u32 length = count->data_length;
-       u32 marker, markint;
-
-       count->sync_and_steering = 1;
-
-       marker = (count->type == ISCSI_RX_DATA) ?
-                       conn->of_marker : conn->if_marker;
-       markint = (count->type == ISCSI_RX_DATA) ?
-                       (conn->conn_ops->OFMarkInt * 4) :
-                       (conn->conn_ops->IFMarkInt * 4);
-       count->ss_iov_count = count->iov_count;
-
-       while (length > 0) {
-               if (length >= marker) {
-                       count->ss_iov_count += 3;
-                       count->ss_marker_count += 2;
-
-                       length -= marker;
-                       marker = markint;
-               } else
-                       length = 0;
-       }
-
-       return 0;
-}
-
 /*
  *     Setup conn->if_marker and conn->of_marker values based upon
  *     the initial marker-less interval. (see iSCSI v19 A.2)
@@ -1290,7 +1256,7 @@ int iscsit_fe_sendpage_sg(
        struct kvec iov;
        u32 tx_hdr_size, data_len;
        u32 offset = cmd->first_data_sg_off;
-       int tx_sent;
+       int tx_sent, iov_off;
 
 send_hdr:
        tx_hdr_size = ISCSI_HDR_LEN;
@@ -1310,9 +1276,19 @@ send_hdr:
        }
 
        data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
-       if (conn->conn_ops->DataDigest)
+       /*
+        * Set iov_off used by padding and data digest tx_data() calls below
+        * in order to determine proper offset into cmd->iov_data[]
+        */
+       if (conn->conn_ops->DataDigest) {
                data_len -= ISCSI_CRC_LEN;
-
+               if (cmd->padding)
+                       iov_off = (cmd->iov_data_count - 2);
+               else
+                       iov_off = (cmd->iov_data_count - 1);
+       } else {
+               iov_off = (cmd->iov_data_count - 1);
+       }
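
Note: with the sync-and-steering markers gone, the padding and data-digest vectors are addressed by an index computed once: when a digest is present the CRC occupies the final cmd->iov_data[] slot and any padding sits immediately before it; otherwise the padding itself is the final slot. A trivial helper expressing that arithmetic (illustrative only, not part of the target code):

    #include <stdbool.h>

    /* Index of the padding kvec within an array of iov_data_count entries;
     * the data-digest kvec, when present, is always the last one. */
    static int padding_iov_index(int iov_data_count, bool has_data_digest)
    {
            return has_data_digest ? iov_data_count - 2 : iov_data_count - 1;
    }
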
        /*
         * Perform sendpage() for each page in the scatterlist
         */
@@ -1341,8 +1317,7 @@ send_pg:
 
 send_padding:
        if (cmd->padding) {
-               struct kvec *iov_p =
-                       &cmd->iov_data[cmd->iov_data_count-1];
+               struct kvec *iov_p = &cmd->iov_data[iov_off++];
 
                tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
                if (cmd->padding != tx_sent) {
@@ -1356,8 +1331,7 @@ send_padding:
 
 send_datacrc:
        if (conn->conn_ops->DataDigest) {
-               struct kvec *iov_d =
-                       &cmd->iov_data[cmd->iov_data_count];
+               struct kvec *iov_d = &cmd->iov_data[iov_off];
 
                tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
                if (ISCSI_CRC_LEN != tx_sent) {
@@ -1431,8 +1405,7 @@ static int iscsit_do_rx_data(
        struct iscsi_data_count *count)
 {
        int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
-       u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
-       struct kvec iov[count->ss_iov_count], *iov_p;
+       struct kvec *iov_p;
        struct msghdr msg;
 
        if (!conn || !conn->sock || !conn->conn_ops)
@@ -1440,93 +1413,8 @@ static int iscsit_do_rx_data(
 
        memset(&msg, 0, sizeof(struct msghdr));
 
-       if (count->sync_and_steering) {
-               int size = 0;
-               u32 i, orig_iov_count = 0;
-               u32 orig_iov_len = 0, orig_iov_loc = 0;
-               u32 iov_count = 0, per_iov_bytes = 0;
-               u32 *rx_marker, old_rx_marker = 0;
-               struct kvec *iov_record;
-
-               memset(&rx_marker_val, 0,
-                               count->ss_marker_count * sizeof(u32));
-               memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
-
-               iov_record = count->iov;
-               orig_iov_count = count->iov_count;
-               rx_marker = &conn->of_marker;
-
-               i = 0;
-               size = data;
-               orig_iov_len = iov_record[orig_iov_loc].iov_len;
-               while (size > 0) {
-                       pr_debug("rx_data: #1 orig_iov_len %u,"
-                       " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
-                       pr_debug("rx_data: #2 rx_marker %u, size"
-                               " %u\n", *rx_marker, size);
-
-                       if (orig_iov_len >= *rx_marker) {
-                               iov[iov_count].iov_len = *rx_marker;
-                               iov[iov_count++].iov_base =
-                                       (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &rx_marker_val[rx_marker_iov++];
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &rx_marker_val[rx_marker_iov++];
-                               old_rx_marker = *rx_marker;
-
-                               /*
-                                * OFMarkInt is in 32-bit words.
-                                */
-                               *rx_marker = (conn->conn_ops->OFMarkInt * 4);
-                               size -= old_rx_marker;
-                               orig_iov_len -= old_rx_marker;
-                               per_iov_bytes += old_rx_marker;
-
-                               pr_debug("rx_data: #3 new_rx_marker"
-                                       " %u, size %u\n", *rx_marker, size);
-                       } else {
-                               iov[iov_count].iov_len = orig_iov_len;
-                               iov[iov_count++].iov_base =
-                                       (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               per_iov_bytes = 0;
-                               *rx_marker -= orig_iov_len;
-                               size -= orig_iov_len;
-
-                               if (size)
-                                       orig_iov_len =
-                                       iov_record[++orig_iov_loc].iov_len;
-
-                               pr_debug("rx_data: #4 new_rx_marker"
-                                       " %u, size %u\n", *rx_marker, size);
-                       }
-               }
-               data += (rx_marker_iov * (MARKER_SIZE / 2));
-
-               iov_p   = &iov[0];
-               iov_len = iov_count;
-
-               if (iov_count > count->ss_iov_count) {
-                       pr_err("iov_count: %d, count->ss_iov_count:"
-                               " %d\n", iov_count, count->ss_iov_count);
-                       return -1;
-               }
-               if (rx_marker_iov > count->ss_marker_count) {
-                       pr_err("rx_marker_iov: %d, count->ss_marker"
-                               "_count: %d\n", rx_marker_iov,
-                               count->ss_marker_count);
-                       return -1;
-               }
-       } else {
-               iov_p = count->iov;
-               iov_len = count->iov_count;
-       }
+       iov_p = count->iov;
+       iov_len = count->iov_count;
 
        while (total_rx < data) {
                rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1541,16 +1429,6 @@ static int iscsit_do_rx_data(
                                rx_loop, total_rx, data);
        }
 
-       if (count->sync_and_steering) {
-               int j;
-               for (j = 0; j < rx_marker_iov; j++) {
-                       pr_debug("rx_data: #5 j: %d, offset: %d\n",
-                               j, rx_marker_val[j]);
-                       conn->of_marker_offset = rx_marker_val[j];
-               }
-               total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
-       }
-
        return total_rx;
 }
 
@@ -1559,8 +1437,7 @@ static int iscsit_do_tx_data(
        struct iscsi_data_count *count)
 {
        int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
-       u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
-       struct kvec iov[count->ss_iov_count], *iov_p;
+       struct kvec *iov_p;
        struct msghdr msg;
 
        if (!conn || !conn->sock || !conn->conn_ops)
@@ -1573,98 +1450,8 @@ static int iscsit_do_tx_data(
 
        memset(&msg, 0, sizeof(struct msghdr));
 
-       if (count->sync_and_steering) {
-               int size = 0;
-               u32 i, orig_iov_count = 0;
-               u32 orig_iov_len = 0, orig_iov_loc = 0;
-               u32 iov_count = 0, per_iov_bytes = 0;
-               u32 *tx_marker, old_tx_marker = 0;
-               struct kvec *iov_record;
-
-               memset(&tx_marker_val, 0,
-                       count->ss_marker_count * sizeof(u32));
-               memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
-
-               iov_record = count->iov;
-               orig_iov_count = count->iov_count;
-               tx_marker = &conn->if_marker;
-
-               i = 0;
-               size = data;
-               orig_iov_len = iov_record[orig_iov_loc].iov_len;
-               while (size > 0) {
-                       pr_debug("tx_data: #1 orig_iov_len %u,"
-                       " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
-                       pr_debug("tx_data: #2 tx_marker %u, size"
-                               " %u\n", *tx_marker, size);
-
-                       if (orig_iov_len >= *tx_marker) {
-                               iov[iov_count].iov_len = *tx_marker;
-                               iov[iov_count++].iov_base =
-                                       (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               tx_marker_val[tx_marker_iov] =
-                                               (size - *tx_marker);
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &tx_marker_val[tx_marker_iov++];
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &tx_marker_val[tx_marker_iov++];
-                               old_tx_marker = *tx_marker;
-
-                               /*
-                                * IFMarkInt is in 32-bit words.
-                                */
-                               *tx_marker = (conn->conn_ops->IFMarkInt * 4);
-                               size -= old_tx_marker;
-                               orig_iov_len -= old_tx_marker;
-                               per_iov_bytes += old_tx_marker;
-
-                               pr_debug("tx_data: #3 new_tx_marker"
-                                       " %u, size %u\n", *tx_marker, size);
-                               pr_debug("tx_data: #4 offset %u\n",
-                                       tx_marker_val[tx_marker_iov-1]);
-                       } else {
-                               iov[iov_count].iov_len = orig_iov_len;
-                               iov[iov_count++].iov_base
-                                       = (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               per_iov_bytes = 0;
-                               *tx_marker -= orig_iov_len;
-                               size -= orig_iov_len;
-
-                               if (size)
-                                       orig_iov_len =
-                                       iov_record[++orig_iov_loc].iov_len;
-
-                               pr_debug("tx_data: #5 new_tx_marker"
-                                       " %u, size %u\n", *tx_marker, size);
-                       }
-               }
-
-               data += (tx_marker_iov * (MARKER_SIZE / 2));
-
-               iov_p = &iov[0];
-               iov_len = iov_count;
-
-               if (iov_count > count->ss_iov_count) {
-                       pr_err("iov_count: %d, count->ss_iov_count:"
-                               " %d\n", iov_count, count->ss_iov_count);
-                       return -1;
-               }
-               if (tx_marker_iov > count->ss_marker_count) {
-                       pr_err("tx_marker_iov: %d, count->ss_marker"
-                               "_count: %d\n", tx_marker_iov,
-                               count->ss_marker_count);
-                       return -1;
-               }
-       } else {
-               iov_p = count->iov;
-               iov_len = count->iov_count;
-       }
+       iov_p = count->iov;
+       iov_len = count->iov_count;
 
        while (total_tx < data) {
                tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1679,9 +1466,6 @@ static int iscsit_do_tx_data(
                                        tx_loop, total_tx, data);
        }
 
-       if (count->sync_and_steering)
-               total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
-
        return total_tx;
 }
 
@@ -1702,12 +1486,6 @@ int rx_data(
        c.data_length = data;
        c.type = ISCSI_RX_DATA;
 
-       if (conn->conn_ops->OFMarker &&
-          (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
-               if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
-                       return -1;
-       }
-
        return iscsit_do_rx_data(conn, &c);
 }
 
@@ -1728,12 +1506,6 @@ int tx_data(
        c.data_length = data;
        c.type = ISCSI_TX_DATA;
 
-       if (conn->conn_ops->IFMarker &&
-          (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
-               if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
-                       return -1;
-       }
-
        return iscsit_do_tx_data(conn, &c);
 }
 
index 89ae923..f04d4ef 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/ctype.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 
@@ -154,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
        return 0;
 }
 
+static void
+target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
+{
+       unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+       unsigned char *buf = buf_off;
+       int cnt = 0, next = 1;
+       /*
+        * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
+        * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
+        * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
+        * to complete the payload.  These are based from VPD=0x80 PRODUCT SERIAL
+        * to complete the payload.  These are based on VPD=0x80 PRODUCT SERIAL
+        * per device uniqeness.
+        * per-device uniqueness.
+       while (*p != '\0') {
+               if (cnt >= 13)
+                       break;
+               if (!isxdigit(*p)) {
+                       p++;
+                       continue;
+               }
+               if (next != 0) {
+                       buf[cnt++] |= hex_to_bin(*p++);
+                       next = 0;
+               } else {
+                       buf[cnt] = hex_to_bin(*p++) << 4;
+                       next = 1;
+               }
+       }
+}
+
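
Note: the helper above walks the configured unit serial, skips anything that is not a hex digit, and packs the remaining digits two per byte, starting with the low nibble of the first byte so the NAA and IEEE company-ID bits already written there survive. A userspace rendition of the same loop with a made-up serial (hex_val() stands in for the kernel's hex_to_bin()):

    #include <ctype.h>
    #include <stdio.h>

    static unsigned char hex_val(char c)    /* stands in for hex_to_bin() */
    {
            return c <= '9' ? c - '0' : (c | 0x20) - 'a' + 10;
    }

    int main(void)
    {
            const char *serial = "5001-ab";
            unsigned char buf[13] = { 0x60 };   /* NAA 6 already in the high nibble */
            int cnt = 0, next = 1;

            for (; *serial && cnt < 13; serial++) {
                    if (!isxdigit((unsigned char)*serial))
                            continue;
                    if (next) {
                            buf[cnt++] |= hex_val(*serial);
                            next = 0;
                    } else {
                            buf[cnt] = hex_val(*serial) << 4;
                            next = 1;
                    }
            }
            printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
            return 0;
    }

This prints 65 00 1a b0: the '5' fills the low nibble of the first byte, the dash is skipped, and the remaining digits pack two per byte.
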
 /*
  * Device identification VPD, for a complete list of
  * DESIGNATOR TYPEs see spc4r17 Table 459.
@@ -219,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
         * VENDOR_SPECIFIC_IDENTIFIER and
         * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
         */
-       buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
-       hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
+       target_parse_naa_6h_vendor_specific(dev, &buf[off]);
 
        len = 20;
        off = (len + 4);
index 8d0c58e..a4b0a8d 100644 (file)
@@ -977,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work)
 {
        struct se_device *dev = container_of(work, struct se_device,
                                        qf_work_queue);
+       LIST_HEAD(qf_cmd_list);
        struct se_cmd *cmd, *cmd_tmp;
 
        spin_lock_irq(&dev->qf_cmd_lock);
-       list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
+       list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
+       spin_unlock_irq(&dev->qf_cmd_lock);
 
+       list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
                list_del(&cmd->se_qf_node);
                atomic_dec(&dev->dev_qf_count);
                smp_mb__after_atomic_dec();
-               spin_unlock_irq(&dev->qf_cmd_lock);
 
                pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
                        " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -997,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work)
                 * has been added to head of queue
                 */
                transport_add_cmd_to_queue(cmd, cmd->t_state);
-
-               spin_lock_irq(&dev->qf_cmd_lock);
        }
-       spin_unlock_irq(&dev->qf_cmd_lock);
 }
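
Note: rather than dropping and re-taking qf_cmd_lock for every entry, the queue-full worker now splices the whole pending list onto a stack-local head under the lock and walks it afterwards with the lock released. A kernel-style sketch of that drain idiom, with placeholder structure and field names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct example_item {
            struct list_head node;
    };

    struct example_dev {
            spinlock_t lock;
            struct list_head pending;
    };

    static void example_drain(struct example_dev *dev)
    {
            LIST_HEAD(local);
            struct example_item *item, *tmp;

            spin_lock_irq(&dev->lock);
            list_splice_init(&dev->pending, &local);
            spin_unlock_irq(&dev->lock);

            list_for_each_entry_safe(item, tmp, &local, node) {
                    list_del(&item->node);
                    /* ... requeue or free the entry, no lock needed ... */
            }
    }
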
 
 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
index bd4fe21..3749d8b 100644 (file)
@@ -98,8 +98,7 @@ struct ft_tpg {
        struct list_head list;          /* linkage in ft_lport_acl tpg_list */
        struct list_head lun_list;      /* head of LUNs */
        struct se_portal_group se_tpg;
-       struct task_struct *thread;     /* processing thread */
-       struct se_queue_obj qobj;       /* queue for processing thread */
+       struct workqueue_struct *workqueue;
 };
 
 struct ft_lport_acl {
@@ -110,16 +109,10 @@ struct ft_lport_acl {
        struct se_wwn fc_lport_wwn;
 };
 
-enum ft_cmd_state {
-       FC_CMD_ST_NEW = 0,
-       FC_CMD_ST_REJ
-};
-
 /*
  * Commands
  */
 struct ft_cmd {
-       enum ft_cmd_state state;
        u32 lun;                        /* LUN from request */
        struct ft_sess *sess;           /* session held for cmd */
        struct fc_seq *seq;             /* sequence in exchange mgr */
@@ -127,7 +120,7 @@ struct ft_cmd {
        struct fc_frame *req_frame;
        unsigned char *cdb;             /* pointer to CDB inside frame */
        u32 write_data_len;             /* data received on writes */
-       struct se_queue_req se_req;
+       struct work_struct work;
        /* Local sense buffer */
        unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
        u32 was_ddp_setup:1;            /* Set only if ddp is setup */
@@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);
 /*
  * other internal functions.
  */
-int ft_thread(void *);
 void ft_recv_req(struct ft_sess *, struct fc_frame *);
 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
 struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
index 5654dc2..80fbcde 100644 (file)
@@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
        int count;
 
        se_cmd = &cmd->se_cmd;
-       pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
-               caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
+       pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
+               caller, cmd, cmd->sess, cmd->seq, se_cmd);
        pr_debug("%s: cmd %p cdb %p\n",
                caller, cmd, cmd->cdb);
        pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
@@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
                16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
 }
 
-static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
-{
-       struct ft_tpg *tpg = sess->tport->tpg;
-       struct se_queue_obj *qobj = &tpg->qobj;
-       unsigned long flags;
-
-       qobj = &sess->tport->tpg->qobj;
-       spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
-       atomic_inc(&qobj->queue_cnt);
-       spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-       wake_up_process(tpg->thread);
-}
-
-static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
-{
-       unsigned long flags;
-       struct se_queue_req *qr;
-
-       spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       if (list_empty(&qobj->qobj_list)) {
-               spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-               return NULL;
-       }
-       qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
-       list_del(&qr->qr_list);
-       atomic_dec(&qobj->queue_cnt);
-       spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-       return container_of(qr, struct ft_cmd, se_req);
-}
-
 static void ft_free_cmd(struct ft_cmd *cmd)
 {
        struct fc_frame *fp;
@@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
 
 int ft_get_cmd_state(struct se_cmd *se_cmd)
 {
-       struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
-       return cmd->state;
+       return 0;
 }
 
 int ft_is_state_remove(struct se_cmd *se_cmd)
@@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
        return 0;
 }
 
+static void ft_send_work(struct work_struct *work);
+
 /*
  * Handle incoming FCP command.
  */
@@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
                goto busy;
        }
        cmd->req_frame = fp;            /* hold frame during cmd */
-       ft_queue_cmd(sess, cmd);
+
+       INIT_WORK(&cmd->work, ft_send_work);
+       queue_work(sess->tport->tpg->workqueue, &cmd->work);
        return;
 
 busy:
@@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
 /*
  * Send new command to target.
  */
-static void ft_send_cmd(struct ft_cmd *cmd)
+static void ft_send_work(struct work_struct *work)
 {
+       struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
        struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
        struct se_cmd *se_cmd;
        struct fcp_cmnd *fcp;
-       int data_dir;
+       int data_dir = 0;
        u32 data_len;
        int task_attr;
        int ret;
@@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd)
 err:
        ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
 }
-
-/*
- * Handle request in the command thread.
- */
-static void ft_exec_req(struct ft_cmd *cmd)
-{
-       pr_debug("cmd state %x\n", cmd->state);
-       switch (cmd->state) {
-       case FC_CMD_ST_NEW:
-               ft_send_cmd(cmd);
-               break;
-       default:
-               break;
-       }
-}
-
-/*
- * Processing thread.
- * Currently one thread per tpg.
- */
-int ft_thread(void *arg)
-{
-       struct ft_tpg *tpg = arg;
-       struct se_queue_obj *qobj = &tpg->qobj;
-       struct ft_cmd *cmd;
-
-       while (!kthread_should_stop()) {
-               schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
-               if (kthread_should_stop())
-                       goto out;
-
-               cmd = ft_dequeue_cmd(qobj);
-               if (cmd)
-                       ft_exec_req(cmd);
-       }
-
-out:
-       return 0;
-}
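
Note: the per-tpg dispatch thread and its se_queue_obj are replaced by a dedicated workqueue; each incoming command embeds a work_struct, ft_recv_cmd() queues it, and tpg teardown destroys the workqueue, which waits for any outstanding work. A condensed, placeholder-named sketch of that lifecycle:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct example_cmd {
            struct work_struct work;
            /* ... per-command state ... */
    };

    static void example_handle_cmd(struct work_struct *work)
    {
            struct example_cmd *cmd = container_of(work, struct example_cmd, work);

            pr_debug("handling cmd %p\n", cmd);
            /* ... hand the command to the target core ... */
    }

    /*
     * setup:    wq = alloc_workqueue("example_wq", 0, 1);
     * dispatch: INIT_WORK(&cmd->work, example_handle_cmd);
     *           queue_work(wq, &cmd->work);
     * teardown: destroy_workqueue(wq);   (flushes pending work first)
     */
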
index b15879d..8fa39b7 100644 (file)
@@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg(
        tpg->index = index;
        tpg->lport_acl = lacl;
        INIT_LIST_HEAD(&tpg->lun_list);
-       transport_init_queue_obj(&tpg->qobj);
 
        ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
                                tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg(
                return NULL;
        }
 
-       tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
-       if (IS_ERR(tpg->thread)) {
+       tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
+       if (!tpg->workqueue) {
                kfree(tpg);
                return NULL;
        }
@@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
        pr_debug("del tpg %s\n",
                    config_item_name(&tpg->se_tpg.tpg_group.cg_item));
 
-       kthread_stop(tpg->thread);
+       destroy_workqueue(tpg->workqueue);
 
        /* Wait for sessions to be freed thru RCU, for BUG_ON below */
        synchronize_rcu();
index c37f4cd..d35ea5a 100644 (file)
@@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
        if (cmd->was_ddp_setup) {
                BUG_ON(!ep);
                BUG_ON(!lport);
-       }
-
-       /*
-        * Doesn't expect payload if DDP is setup. Payload
-        * is expected to be copied directly to user buffers
-        * due to DDP (Large Rx offload),
-        */
-       buf = fc_frame_payload_get(fp, 1);
-       if (buf)
-               pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+               /*
+                * Since DDP (Large Rx offload) was setup for this request,
+                * payload is expected to be copied directly to user buffers.
+                */
+               buf = fc_frame_payload_get(fp, 1);
+               if (buf)
+                       pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
                                "cmd->sg_cnt 0x%x. DDP was setup"
                                " hence not expected to receive frame with "
-                               "payload, Frame will be dropped if "
-                               "'Sequence Initiative' bit in f_ctl is "
+                               "payload, Frame will be dropped if "
+                               "'Sequence Initiative' bit in f_ctl is "
                                "not set\n", __func__, ep->xid, f_ctl,
                                cmd->sg, cmd->sg_cnt);
-       /*
-        * Invalidate HW DDP context if it was setup for respective
-        * command. Invalidation of HW DDP context is requited in both
-        * situation (success and error). 
-        */
-       ft_invl_hw_context(cmd);
+               /*
+                * Invalidate HW DDP context if it was set up for the respective
+                * command. Invalidation of HW DDP context is required in both
+                * situations (success and error).
+                */
+               ft_invl_hw_context(cmd);
 
-       /*
-        * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
-        * write data frame is received successfully where payload is
-        * posted directly to user buffer and only the last frame's
-        * header is posted in receive queue.
-        *
-        * If "Sequence Initiative (TSI)" bit is not set, means error
-        * condition w.r.t. DDP, hence drop the packet and let explict
-        * ABORTS from other end of exchange timer trigger the recovery.
-        */
-       if (f_ctl & FC_FC_SEQ_INIT)
-               goto last_frame;
-       else
-               goto drop;
+               /*
+                * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
+                * write data frame is received successfully where payload is
+                * posted directly to user buffer and only the last frame's
+                * header is posted in receive queue.
+                *
+                * If "Sequence Initiative (TSI)" bit is not set, means error
+                * condition w.r.t. DDP, hence drop the packet and let explicit
+                * ABORTS from other end of exchange timer trigger the recovery.
+                */
+               if (f_ctl & FC_FC_SEQ_INIT)
+                       goto last_frame;
+               else
+                       goto drop;
+       }
 
        rel_off = ntohl(fh->fh_parm_offset);
        frame_len = fr_len(fp);
index 225123b..58be715 100644 (file)
@@ -4450,7 +4450,7 @@ static int __init rs_init(void)
 
 #if defined(CONFIG_ETRAX_RS485)
 #if defined(CONFIG_ETRAX_RS485_ON_PA)
-       if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit,
+       if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
                        rs485_pa_bit)) {
                printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
                        "RS485 pin\n");
@@ -4459,7 +4459,7 @@ static int __init rs_init(void)
        }
 #endif
 #if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
-       if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit,
+       if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
                        rs485_port_g_bit)) {
                printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
                        "RS485 pin\n");
index 1e96d1f..723f823 100644 (file)
@@ -761,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
        memset(buf, 0, retval);
        status = 0;
 
-       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC;
+       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
 
        spin_lock_irqsave(&xhci->lock, flags);
        /* For each port, did anything change?  If so, set that bit in buf. */
index 54139a2..952e2de 100644 (file)
@@ -1934,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
        int status = -EINPROGRESS;
        struct urb_priv *urb_priv;
        struct xhci_ep_ctx *ep_ctx;
+       struct list_head *tmp;
        u32 trb_comp_code;
        int ret = 0;
+       int td_num = 0;
 
        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        xdev = xhci->devs[slot_id];
@@ -1957,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                return -ENODEV;
        }
 
+       /* Count current td numbers if ep->skip is set */
+       if (ep->skip) {
+               list_for_each(tmp, &ep_ring->td_list)
+                       td_num++;
+       }
+
        event_dma = le64_to_cpu(event->buffer);
        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
        /* Look for common error cases */
@@ -2068,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                        goto cleanup;
                }
 
+               /* We've skipped all the TDs on the ep ring when ep->skip set */
+               if (ep->skip && td_num == 0) {
+                       ep->skip = false;
+                       xhci_dbg(xhci, "All tds on the ep_ring skipped. "
+                                               "Clear skip flag.\n");
+                       ret = 0;
+                       goto cleanup;
+               }
+
                td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+               if (ep->skip)
+                       td_num--;
 
                /* Is this a TRB in the currently executing TD? */
                event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
index 80d292f..7363c1b 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/backlight.h>
 #endif
 
-static const char const *backlight_types[] = {
+static const char *const backlight_types[] = {
        [BACKLIGHT_RAW] = "raw",
        [BACKLIGHT_PLATFORM] = "platform",
        [BACKLIGHT_FIRMWARE] = "firmware",
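
Note: the original declaration repeated const on the element type (const char const *), which in C merely duplicates the qualifier on the pointed-to characters and leaves the pointers themselves writable; the corrected form also makes the array of pointers read-only. A minimal illustration of the difference:

    /* Elements point to read-only strings, but the pointers can be reseated. */
    static const char *writable_ptrs[] = { "raw", "platform" };

    /* Both the strings and the pointer slots are read-only (the form adopted above). */
    static const char *const readonly_ptrs[] = { "raw", "platform" };
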
index 410fba4..809cbda 100644 (file)
@@ -494,15 +494,16 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
                asminline_call(&cmn_regs, cru_rom_addr);
        die_nmi_called = 1;
        spin_unlock_irqrestore(&rom_lock, rom_pl);
+
+       if (allow_kdump)
+               hpwdt_stop();
+
        if (!is_icru) {
                if (cmn_regs.u1.ral == 0) {
-                       printk(KERN_WARNING "hpwdt: An NMI occurred, "
+                       panic("An NMI occurred, "
                                "but unable to determine source.\n");
                }
        }
-
-       if (allow_kdump)
-               hpwdt_stop();
        panic("An NMI occurred, please see the Integrated "
                "Management Log for details.\n");
 
index 7d82ada..102aed0 100644 (file)
@@ -51,16 +51,16 @@ static int ltq_wdt_ok_to_close;
 static void
 ltq_wdt_enable(void)
 {
-       ltq_wdt_timeout = ltq_wdt_timeout *
+       unsigned long int timeout = ltq_wdt_timeout *
                        (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000;
-       if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT)
-               ltq_wdt_timeout = LTQ_MAX_TIMEOUT;
+       if (timeout > LTQ_MAX_TIMEOUT)
+               timeout = LTQ_MAX_TIMEOUT;
 
        /* write the first password magic */
        ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
        /* write the second magic plus the configuration and new timeout */
        ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV |
-               LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR);
+               LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);
 }
 
 static void
index 3066a51..eaca366 100644 (file)
@@ -173,7 +173,7 @@ static struct notifier_block epx_c3_notifier = {
        .notifier_call = epx_c3_notify_sys,
 };
 
-static const char banner[] __initdata = KERN_INFO PFX
+static const char banner[] __initconst = KERN_INFO PFX
        "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";
 
 static int __init watchdog_init(void)
index d33520d..1199da0 100644 (file)
@@ -59,7 +59,7 @@ static struct watchdog_device *wdd;
 
 static int watchdog_ping(struct watchdog_device *wddev)
 {
-       if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+       if (test_bit(WDOG_ACTIVE, &wddev->status)) {
                if (wddev->ops->ping)
                        return wddev->ops->ping(wddev);  /* ping the watchdog */
                else
@@ -81,12 +81,12 @@ static int watchdog_start(struct watchdog_device *wddev)
 {
        int err;
 
-       if (!test_bit(WDOG_ACTIVE, &wdd->status)) {
+       if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
                err = wddev->ops->start(wddev);
                if (err < 0)
                        return err;
 
-               set_bit(WDOG_ACTIVE, &wdd->status);
+               set_bit(WDOG_ACTIVE, &wddev->status);
        }
        return 0;
 }
@@ -105,18 +105,18 @@ static int watchdog_stop(struct watchdog_device *wddev)
 {
        int err = -EBUSY;
 
-       if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
+       if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
                pr_info("%s: nowayout prevents watchdog to be stopped!\n",
-                                                       wdd->info->identity);
+                                                       wddev->info->identity);
                return err;
        }
 
-       if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+       if (test_bit(WDOG_ACTIVE, &wddev->status)) {
                err = wddev->ops->stop(wddev);
                if (err < 0)
                        return err;
 
-               clear_bit(WDOG_ACTIVE, &wdd->status);
+               clear_bit(WDOG_ACTIVE, &wddev->status);
        }
        return 0;
 }
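
The watchdog core fix above makes each helper test and update the device it was passed (wddev) instead of the file-scope wdd pointer, so the API works with more than one registered watchdog. A hedged, self-contained sketch of the same pattern (structure and names are illustrative, not the kernel API):

#include <stdio.h>

struct wd_device {
        unsigned long status;           /* bit 0: active */
        const char *identity;
};

/* Operate on the argument, never on a global instance. */
static int wd_is_active(const struct wd_device *dev)
{
        return (int)(dev->status & 1UL);
}

int main(void)
{
        struct wd_device a = { .status = 1, .identity = "wd0" };
        struct wd_device b = { .status = 0, .identity = "wd1" };

        printf("%s active=%d, %s active=%d\n",
               a.identity, wd_is_active(&a),
               b.identity, wd_is_active(&b));
        return 0;
}
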
index da70f5c..7523719 100644 (file)
@@ -54,7 +54,7 @@
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.
  */
-static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static DEFINE_MUTEX(irq_mapping_update_lock);
 
 static LIST_HEAD(xen_irq_list_head);
 
@@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
        int irq = -1;
        struct physdev_irq irq_op;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        irq = find_irq_by_gsi(gsi);
        if (irq != -1) {
@@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                                handle_edge_irq, name);
 
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 
        return irq;
 }
@@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 {
        int irq, ret;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        irq = xen_allocate_irq_dynamic();
        if (irq == -1)
@@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
        if (ret < 0)
                goto error_irq;
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
        return irq;
 error_irq:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
        xen_free_irq(irq);
        return -1;
 }
@@ -740,7 +740,7 @@ int xen_destroy_irq(int irq)
        struct irq_info *info = info_for_irq(irq);
        int rc = -ENOENT;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        desc = irq_to_desc(irq);
        if (!desc)
@@ -766,7 +766,7 @@ int xen_destroy_irq(int irq)
        xen_free_irq(irq);
 
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
        return rc;
 }
 
@@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq)
 
        struct irq_info *info;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info == NULL || info->type != IRQT_PIRQ)
@@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq)
        }
        irq = -1;
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 
        return irq;
 }
@@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 {
        int irq;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        irq = evtchn_to_irq[evtchn];
 
@@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
        }
 
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 
        return irq;
 }
@@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        irq = per_cpu(ipi_to_irq, cpu)[ipi];
 
@@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
        }
 
  out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
        return irq;
 }
 
@@ -878,7 +878,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        irq = per_cpu(virq_to_irq, cpu)[virq];
 
@@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
        }
 
 out:
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 
        return irq;
 }
@@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq)
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
@@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq)
 
        xen_free_irq(irq);
 
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 }
 
 int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
           will also be masked. */
        disable_irq(irq);
 
-       spin_lock(&irq_mapping_update_lock);
+       mutex_lock(&irq_mapping_update_lock);
 
        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
@@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 
        xen_irq_info_evtchn_init(irq, evtchn);
 
-       spin_unlock(&irq_mapping_update_lock);
+       mutex_unlock(&irq_mapping_update_lock);
 
        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));
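
The xen/events hunks above replace a spinlock with a mutex for irq_mapping_update_lock; a mutex is the appropriate primitive when the critical sections may sleep (allocations, hypercalls). A rough userspace analogue of serializing updates to a mapping table with a sleepable lock, assuming pthreads (a sketch, not the kernel interface):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static int evtchn_to_irq[16];

static int bind_evtchn(int evtchn, int irq)
{
        pthread_mutex_lock(&map_lock);  /* may block; caller must be able to sleep */
        evtchn_to_irq[evtchn] = irq;
        pthread_mutex_unlock(&map_lock);
        return irq;
}

int main(void)
{
        printf("bound irq %d\n", bind_evtchn(3, 42));   /* build with -lpthread */
        return 0;
}
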
index e0c2807..181fa81 100644 (file)
@@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
        }
        platform_set_drvdata(pdev, bus);
 
-       /* Register all devices */
        pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
                 zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
 
+       /* First identify all devices ... */
        for (i = 0; i < zorro_num_autocon; i++) {
                z = &zorro_autocon[i];
                z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
@@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
                dev_set_name(&z->dev, "%02x", i);
                z->dev.parent = &bus->dev;
                z->dev.bus = &zorro_bus_type;
+       }
+
+       /* ... then register them */
+       for (i = 0; i < zorro_num_autocon; i++) {
+               z = &zorro_autocon[i];
                error = device_register(&z->dev);
                if (error) {
                        dev_err(&bus->dev, "Error registering device %s\n",
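
The zorro probe above is split into two passes: first initialize every zorro_autocon[] entry, then call device_register() on each, so nothing triggered by registration can observe a half-filled table. A schematic sketch of the two-pass shape (types and names invented for illustration):

#include <stdio.h>

struct fake_dev { int id; int initialized; };

static struct fake_dev devices[3];

static void register_dev(const struct fake_dev *d)
{
        /* In the real code this may immediately probe a driver that
         * walks the whole table, hence the need for the first pass. */
        printf("registering id=%d\n", d->id);
}

int main(void)
{
        int i;

        /* First identify all devices ... */
        for (i = 0; i < 3; i++) {
                devices[i].id = 100 + i;
                devices[i].initialized = 1;
        }

        /* ... then register them. */
        for (i = 0; i < 3; i++)
                register_dev(&devices[i]);
        return 0;
}
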
index 46ce357..410ffd6 100644 (file)
@@ -54,9 +54,9 @@ extern struct kmem_cache *v9fs_inode_cache;
 
 struct inode *v9fs_alloc_inode(struct super_block *sb);
 void v9fs_destroy_inode(struct inode *inode);
-struct inode *v9fs_get_inode(struct super_block *sb, int mode);
+struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t);
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
-                   struct inode *inode, int mode);
+                   struct inode *inode, int mode, dev_t);
 void v9fs_evict_inode(struct inode *inode);
 ino_t v9fs_qid2ino(struct p9_qid *qid);
 void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
@@ -83,4 +83,6 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
        v9inode->cache_validity |= V9FS_INO_INVALID_ATTR;
        return;
 }
+
+int v9fs_open_to_dotl_flags(int flags);
 #endif
index 3c173fc..62857a8 100644 (file)
@@ -65,7 +65,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
        v9inode = V9FS_I(inode);
        v9ses = v9fs_inode2v9ses(inode);
        if (v9fs_proto_dotl(v9ses))
-               omode = file->f_flags;
+               omode = v9fs_open_to_dotl_flags(file->f_flags);
        else
                omode = v9fs_uflags2omode(file->f_flags,
                                        v9fs_proto_dotu(v9ses));
@@ -169,7 +169,18 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
 
        /* convert posix lock to p9 tlock args */
        memset(&flock, 0, sizeof(flock));
-       flock.type = fl->fl_type;
+       /* map the lock type */
+       switch (fl->fl_type) {
+       case F_RDLCK:
+               flock.type = P9_LOCK_TYPE_RDLCK;
+               break;
+       case F_WRLCK:
+               flock.type = P9_LOCK_TYPE_WRLCK;
+               break;
+       case F_UNLCK:
+               flock.type = P9_LOCK_TYPE_UNLCK;
+               break;
+       }
        flock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                flock.length = 0;
@@ -245,7 +256,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
 
        /* convert posix lock to p9 tgetlock args */
        memset(&glock, 0, sizeof(glock));
-       glock.type = fl->fl_type;
+       glock.type  = P9_LOCK_TYPE_UNLCK;
        glock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                glock.length = 0;
@@ -257,17 +268,26 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
        res = p9_client_getlock_dotl(fid, &glock);
        if (res < 0)
                return res;
-       if (glock.type != F_UNLCK) {
-               fl->fl_type = glock.type;
+       /* map 9p lock type to os lock type */
+       switch (glock.type) {
+       case P9_LOCK_TYPE_RDLCK:
+               fl->fl_type = F_RDLCK;
+               break;
+       case P9_LOCK_TYPE_WRLCK:
+               fl->fl_type = F_WRLCK;
+               break;
+       case P9_LOCK_TYPE_UNLCK:
+               fl->fl_type = F_UNLCK;
+               break;
+       }
+       if (glock.type != P9_LOCK_TYPE_UNLCK) {
                fl->fl_start = glock.start;
                if (glock.length == 0)
                        fl->fl_end = OFFSET_MAX;
                else
                        fl->fl_end = glock.start + glock.length - 1;
                fl->fl_pid = glock.proc_id;
-       } else
-               fl->fl_type = F_UNLCK;
-
+       }
        return res;
 }
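
The v9fs locking hunks above stop sending raw F_RDLCK/F_WRLCK/F_UNLCK values over the wire and translate them to the protocol's own lock-type constants in both directions. A self-contained sketch of such a two-way mapping; the wire values below are invented for the example, the real P9_LOCK_TYPE_* constants live in the 9p headers:

#include <fcntl.h>
#include <stdio.h>

enum { WIRE_RDLCK = 0, WIRE_WRLCK = 1, WIRE_UNLCK = 2 };        /* hypothetical */

static int posix_to_wire(int fl_type)
{
        switch (fl_type) {
        case F_RDLCK: return WIRE_RDLCK;
        case F_WRLCK: return WIRE_WRLCK;
        default:      return WIRE_UNLCK;
        }
}

static int wire_to_posix(int type)
{
        switch (type) {
        case WIRE_RDLCK: return F_RDLCK;
        case WIRE_WRLCK: return F_WRLCK;
        default:         return F_UNLCK;
        }
}

int main(void)
{
        int t = posix_to_wire(F_WRLCK);
        printf("F_WRLCK -> %d -> %s\n", t,
               wire_to_posix(t) == F_WRLCK ? "F_WRLCK" : "?");
        return 0;
}
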
 
index 8bb5507..e3c03db 100644 (file)
@@ -95,15 +95,18 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
 /**
  * p9mode2unixmode- convert plan9 mode bits to unix mode bits
  * @v9ses: v9fs session information
- * @mode: mode to convert
+ * @stat: p9_wstat from which the mode needs to be derived
+ * @rdev: major/minor numbers in the case of device files.
  *
  */
-
-static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
+static int p9mode2unixmode(struct v9fs_session_info *v9ses,
+                          struct p9_wstat *stat, dev_t *rdev)
 {
        int res;
+       int mode = stat->mode;
 
-       res = mode & 0777;
+       res = mode & S_IALLUGO;
+       *rdev = 0;
 
        if ((mode & P9_DMDIR) == P9_DMDIR)
                res |= S_IFDIR;
@@ -116,9 +119,26 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
                 && (v9ses->nodev == 0))
                res |= S_IFIFO;
        else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses))
-                && (v9ses->nodev == 0))
-               res |= S_IFBLK;
-       else
+                && (v9ses->nodev == 0)) {
+               char type = 0, ext[32];
+               int major = -1, minor = -1;
+
+               strncpy(ext, stat->extension, sizeof(ext));
+               sscanf(ext, "%c %u %u", &type, &major, &minor);
+               switch (type) {
+               case 'c':
+                       res |= S_IFCHR;
+                       break;
+               case 'b':
+                       res |= S_IFBLK;
+                       break;
+               default:
+                       P9_DPRINTK(P9_DEBUG_ERROR,
+                               "Unknown special type %c %s\n", type,
+                               stat->extension);
+               };
+               *rdev = MKDEV(major, minor);
+       } else
                res |= S_IFREG;
 
        if (v9fs_proto_dotu(v9ses)) {
@@ -131,7 +151,6 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
                if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
                        res |= S_ISVTX;
        }
-
        return res;
 }
 
@@ -242,13 +261,13 @@ void v9fs_destroy_inode(struct inode *inode)
 }
 
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
-                   struct inode *inode, int mode)
+                   struct inode *inode, int mode, dev_t rdev)
 {
        int err = 0;
 
        inode_init_owner(inode, NULL, mode);
        inode->i_blocks = 0;
-       inode->i_rdev = 0;
+       inode->i_rdev = rdev;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_mapping->a_ops = &v9fs_addr_operations;
 
@@ -335,7 +354,7 @@ error:
  *
  */
 
-struct inode *v9fs_get_inode(struct super_block *sb, int mode)
+struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev)
 {
        int err;
        struct inode *inode;
@@ -348,7 +367,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
                P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
                return ERR_PTR(-ENOMEM);
        }
-       err = v9fs_init_inode(v9ses, inode, mode);
+       err = v9fs_init_inode(v9ses, inode, mode, rdev);
        if (err) {
                iput(inode);
                return ERR_PTR(err);
@@ -435,11 +454,12 @@ void v9fs_evict_inode(struct inode *inode)
 static int v9fs_test_inode(struct inode *inode, void *data)
 {
        int umode;
+       dev_t rdev;
        struct v9fs_inode *v9inode = V9FS_I(inode);
        struct p9_wstat *st = (struct p9_wstat *)data;
        struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
 
-       umode = p9mode2unixmode(v9ses, st->mode);
+       umode = p9mode2unixmode(v9ses, st, &rdev);
        /* don't match inode of different type */
        if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
                return 0;
@@ -473,6 +493,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
                                   struct p9_wstat *st,
                                   int new)
 {
+       dev_t rdev;
        int retval, umode;
        unsigned long i_ino;
        struct inode *inode;
@@ -496,8 +517,8 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
         * later.
         */
        inode->i_ino = i_ino;
-       umode = p9mode2unixmode(v9ses, st->mode);
-       retval = v9fs_init_inode(v9ses, inode, umode);
+       umode = p9mode2unixmode(v9ses, st, &rdev);
+       retval = v9fs_init_inode(v9ses, inode, umode, rdev);
        if (retval)
                goto error;
 
@@ -531,6 +552,19 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
        return inode;
 }
 
+/**
+ * v9fs_at_to_dotl_flags - convert Linux-specific AT flags to
+ * plan 9 AT flags.
+ * @flags: flags to convert
+ */
+static int v9fs_at_to_dotl_flags(int flags)
+{
+       int rflags = 0;
+       if (flags & AT_REMOVEDIR)
+               rflags |= P9_DOTL_AT_REMOVEDIR;
+       return rflags;
+}
+
 /**
  * v9fs_remove - helper function to remove files and directories
  * @dir: directory inode that is being deleted
@@ -558,7 +592,8 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
                return retval;
        }
        if (v9fs_proto_dotl(v9ses))
-               retval = p9_client_unlinkat(dfid, dentry->d_name.name, flags);
+               retval = p9_client_unlinkat(dfid, dentry->d_name.name,
+                                           v9fs_at_to_dotl_flags(flags));
        if (retval == -EOPNOTSUPP) {
                /* Try the one based on path */
                v9fid = v9fs_fid_clone(dentry);
@@ -645,13 +680,11 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
                P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       d_instantiate(dentry, inode);
        err = v9fs_fid_add(dentry, fid);
        if (err < 0)
                goto error;
-
+       d_instantiate(dentry, inode);
        return ofid;
-
 error:
        if (ofid)
                p9_client_clunk(ofid);
@@ -792,6 +825,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
                                      struct nameidata *nameidata)
 {
+       struct dentry *res;
        struct super_block *sb;
        struct v9fs_session_info *v9ses;
        struct p9_fid *dfid, *fid;
@@ -823,22 +857,35 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 
                return ERR_PTR(result);
        }
-
-       inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+       /*
+        * Make sure we don't use the wrong inode due to a parallel
+        * unlink. In cached mode the create path requests a new
+        * inode, but with caching disabled, lookup has to do it.
+        */
+       if (v9ses->cache)
+               inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+       else
+               inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
        if (IS_ERR(inode)) {
                result = PTR_ERR(inode);
                inode = NULL;
                goto error;
        }
-
        result = v9fs_fid_add(dentry, fid);
        if (result < 0)
                goto error_iput;
-
 inst_out:
-       d_add(dentry, inode);
-       return NULL;
-
+       /*
+        * If there was a rename on the server and a parallel lookup
+        * for the new name, make sure we instantiate with the new
+        * name, i.e. a lookup of a/b while, on the server, somebody
+        * moved b under k and the client did a parallel lookup of
+        * k/b.
+        */
+       res = d_materialise_unique(dentry, inode);
+       if (!IS_ERR(res))
+               return res;
+       result = PTR_ERR(res);
 error_iput:
        iput(inode);
 error:
@@ -1002,7 +1049,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
                return PTR_ERR(st);
 
        v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
-               generic_fillattr(dentry->d_inode, stat);
+       generic_fillattr(dentry->d_inode, stat);
 
        p9stat_free(st);
        kfree(st);
@@ -1086,6 +1133,7 @@ void
 v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
        struct super_block *sb)
 {
+       mode_t mode;
        char ext[32];
        char tag_name[14];
        unsigned int i_nlink;
@@ -1121,31 +1169,9 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
                                inode->i_nlink = i_nlink;
                }
        }
-       inode->i_mode = p9mode2unixmode(v9ses, stat->mode);
-       if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) {
-               char type = 0;
-               int major = -1;
-               int minor = -1;
-
-               strncpy(ext, stat->extension, sizeof(ext));
-               sscanf(ext, "%c %u %u", &type, &major, &minor);
-               switch (type) {
-               case 'c':
-                       inode->i_mode &= ~S_IFBLK;
-                       inode->i_mode |= S_IFCHR;
-                       break;
-               case 'b':
-                       break;
-               default:
-                       P9_DPRINTK(P9_DEBUG_ERROR,
-                               "Unknown special type %c %s\n", type,
-                               stat->extension);
-               };
-               inode->i_rdev = MKDEV(major, minor);
-               init_special_inode(inode, inode->i_mode, inode->i_rdev);
-       } else
-               inode->i_rdev = 0;
-
+       mode = stat->mode & S_IALLUGO;
+       mode |= inode->i_mode & ~S_IALLUGO;
+       inode->i_mode = mode;
        i_size_write(inode, stat->length);
 
        /* not real number of blocks, but 512 byte ones ... */
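
The mode handling above now refreshes only the permission bits and leaves the S_IFMT file-type bits alone, so a stat refresh cannot silently turn a device node into a regular file. A tiny sketch of the masking; S_IALLUGO is kernel-internal, so the sketch builds the same mask from the standard <sys/stat.h> macros:

#include <stdio.h>
#include <sys/stat.h>

#define ALLUGO (S_ISUID | S_ISGID | S_ISVTX | S_IRWXU | S_IRWXG | S_IRWXO)

int main(void)
{
        mode_t old_mode = S_IFCHR | 0600;       /* char device, rw------- */
        mode_t new_perm = 0644;                 /* freshly fetched permission bits */

        mode_t mode = (new_perm & ALLUGO) | (old_mode & ~ALLUGO);
        printf("0%o\n", (unsigned int)mode);    /* still S_IFCHR, now 0644 */
        return 0;
}
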
@@ -1411,6 +1437,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
 
 int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
 {
+       int umode;
+       dev_t rdev;
        loff_t i_size;
        struct p9_wstat *st;
        struct v9fs_session_info *v9ses;
@@ -1419,6 +1447,12 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
        st = p9_client_stat(fid);
        if (IS_ERR(st))
                return PTR_ERR(st);
+       /*
+        * Don't update inode if the file type is different
+        */
+       umode = p9mode2unixmode(v9ses, st, &rdev);
+       if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
+               goto out;
 
        spin_lock(&inode->i_lock);
        /*
@@ -1430,6 +1464,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
        if (v9ses->cache)
                inode->i_size = i_size;
        spin_unlock(&inode->i_lock);
+out:
        p9stat_free(st);
        kfree(st);
        return 0;
index b6c8ed2..aded79f 100644 (file)
@@ -153,7 +153,8 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
         * later.
         */
        inode->i_ino = i_ino;
-       retval = v9fs_init_inode(v9ses, inode, st->st_mode);
+       retval = v9fs_init_inode(v9ses, inode,
+                                st->st_mode, new_decode_dev(st->st_rdev));
        if (retval)
                goto error;
 
@@ -190,6 +191,58 @@ v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
        return inode;
 }
 
+struct dotl_openflag_map {
+       int open_flag;
+       int dotl_flag;
+};
+
+static int v9fs_mapped_dotl_flags(int flags)
+{
+       int i;
+       int rflags = 0;
+       struct dotl_openflag_map dotl_oflag_map[] = {
+               { O_CREAT,      P9_DOTL_CREATE },
+               { O_EXCL,       P9_DOTL_EXCL },
+               { O_NOCTTY,     P9_DOTL_NOCTTY },
+               { O_TRUNC,      P9_DOTL_TRUNC },
+               { O_APPEND,     P9_DOTL_APPEND },
+               { O_NONBLOCK,   P9_DOTL_NONBLOCK },
+               { O_DSYNC,      P9_DOTL_DSYNC },
+               { FASYNC,       P9_DOTL_FASYNC },
+               { O_DIRECT,     P9_DOTL_DIRECT },
+               { O_LARGEFILE,  P9_DOTL_LARGEFILE },
+               { O_DIRECTORY,  P9_DOTL_DIRECTORY },
+               { O_NOFOLLOW,   P9_DOTL_NOFOLLOW },
+               { O_NOATIME,    P9_DOTL_NOATIME },
+               { O_CLOEXEC,    P9_DOTL_CLOEXEC },
+               { O_SYNC,       P9_DOTL_SYNC},
+       };
+       for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
+               if (flags & dotl_oflag_map[i].open_flag)
+                       rflags |= dotl_oflag_map[i].dotl_flag;
+       }
+       return rflags;
+}
+
+/**
+ * v9fs_open_to_dotl_flags - convert Linux-specific open flags to
+ * plan 9 open flags.
+ * @flags: flags to convert
+ */
+int v9fs_open_to_dotl_flags(int flags)
+{
+       int rflags = 0;
+
+       /*
+        * We have the same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
+        * and P9_DOTL_NOACCESS
+        */
+       rflags |= flags & O_ACCMODE;
+       rflags |= v9fs_mapped_dotl_flags(flags);
+
+       return rflags;
+}
+
 /**
  * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
  * @dir: directory inode that is being created
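
The dotl_oflag_map added above translates Linux open(2) flags into the protocol's P9_DOTL_* encoding with a small lookup table rather than a chain of conditionals. A standalone sketch of the same table-driven translation; the wire constants are invented for the example:

#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>

#define W_CREAT   0x100         /* hypothetical wire encoding */
#define W_EXCL    0x200
#define W_TRUNC   0x400
#define W_APPEND  0x800

static const struct { int host; int wire; } flag_map[] = {
        { O_CREAT,  W_CREAT  },
        { O_EXCL,   W_EXCL   },
        { O_TRUNC,  W_TRUNC  },
        { O_APPEND, W_APPEND },
};

static int to_wire(int flags)
{
        int out = flags & O_ACCMODE;    /* access mode maps 1:1 in this sketch */
        size_t i;

        for (i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++)
                if (flags & flag_map[i].host)
                        out |= flag_map[i].wire;
        return out;
}

int main(void)
{
        printf("0x%x\n", to_wire(O_WRONLY | O_CREAT | O_TRUNC));
        return 0;
}
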
@@ -258,7 +311,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
                           "Failed to get acl values in creat %d\n", err);
                goto error;
        }
-       err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
+       err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
+                                   mode, gid, &qid);
        if (err < 0) {
                P9_DPRINTK(P9_DEBUG_VFS,
                                "p9_client_open_dotl failed in creat %d\n",
@@ -281,10 +335,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
                P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       d_instantiate(dentry, inode);
        err = v9fs_fid_add(dentry, fid);
        if (err < 0)
                goto error;
+       d_instantiate(dentry, inode);
 
        /* Now set the ACL based on the default value */
        v9fs_set_create_acl(dentry, &dacl, &pacl);
@@ -403,10 +457,10 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                                err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /*
@@ -414,7 +468,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                 * inode with stat. We need to get an inode
                 * so that we can set the acl with dentry
                 */
-               inode = v9fs_get_inode(dir->i_sb, mode);
+               inode = v9fs_get_inode(dir->i_sb, mode, 0);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -540,6 +594,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
 void
 v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
 {
+       mode_t mode;
        struct v9fs_inode *v9inode = V9FS_I(inode);
 
        if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
@@ -552,11 +607,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
                inode->i_uid = stat->st_uid;
                inode->i_gid = stat->st_gid;
                inode->i_nlink = stat->st_nlink;
-               inode->i_mode = stat->st_mode;
-               inode->i_rdev = new_decode_dev(stat->st_rdev);
 
-               if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
-                       init_special_inode(inode, inode->i_mode, inode->i_rdev);
+               mode = stat->st_mode & S_IALLUGO;
+               mode |= inode->i_mode & ~S_IALLUGO;
+               inode->i_mode = mode;
 
                i_size_write(inode, stat->st_size);
                inode->i_blocks = stat->st_blocks;
@@ -657,14 +711,14 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
                                        err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /* Not in cached mode. No need to populate inode with stat */
-               inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
+               inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -810,17 +864,17 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
                                err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /*
                 * Not in cached mode. No need to populate inode with stat.
                 * socket syscall returns a fd, so we need instantiate
                 */
-               inode = v9fs_get_inode(dir->i_sb, mode);
+               inode = v9fs_get_inode(dir->i_sb, mode, rdev);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -886,6 +940,11 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
        st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
        if (IS_ERR(st))
                return PTR_ERR(st);
+       /*
+        * Don't update inode if the file type is different
+        */
+       if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
+               goto out;
 
        spin_lock(&inode->i_lock);
        /*
@@ -897,6 +956,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
        if (v9ses->cache)
                inode->i_size = i_size;
        spin_unlock(&inode->i_lock);
+out:
        kfree(st);
        return 0;
 }
index feef6cd..c70251d 100644 (file)
@@ -149,7 +149,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
        else
                sb->s_d_op = &v9fs_dentry_operations;
 
-       inode = v9fs_get_inode(sb, S_IFDIR | mode);
+       inode = v9fs_get_inode(sb, S_IFDIR | mode, 0);
        if (IS_ERR(inode)) {
                retval = PTR_ERR(inode);
                goto release_sb;
index ff77262..95f786e 100644 (file)
@@ -1429,6 +1429,11 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                WARN_ON_ONCE(bdev->bd_holders);
                sync_blockdev(bdev);
                kill_bdev(bdev);
+               /* ->release can cause the old bdi to disappear,
+                * so it must be switched out first
+                */
+               bdev_inode_switch_bdi(bdev->bd_inode,
+                                       &default_backing_dev_info);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
@@ -1442,8 +1447,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                disk_put_part(bdev->bd_part);
                bdev->bd_part = NULL;
                bdev->bd_disk = NULL;
-               bdev_inode_switch_bdi(bdev->bd_inode,
-                                       &default_backing_dev_info);
                if (bdev != bdev->bd_contains)
                        victim = bdev->bd_contains;
                bdev->bd_contains = NULL;
index 502b9e9..d9f99a1 100644 (file)
@@ -176,7 +176,11 @@ static inline u64 btrfs_ino(struct inode *inode)
 {
        u64 ino = BTRFS_I(inode)->location.objectid;
 
-       if (ino <= BTRFS_FIRST_FREE_OBJECTID)
+       /*
+        * !ino: btree_inode
+        * type == BTRFS_ROOT_ITEM_KEY: subvol dir
+        */
+       if (!ino || BTRFS_I(inode)->location.type == BTRFS_ROOT_ITEM_KEY)
                ino = inode->i_ino;
        return ino;
 }
index b910694..a1cb782 100644 (file)
@@ -183,8 +183,10 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
         * read from the commit root and sidestep a nasty deadlock
         * between reading the free space cache and updating the csum tree.
         */
-       if (btrfs_is_free_space_inode(root, inode))
+       if (btrfs_is_free_space_inode(root, inode)) {
                path->search_commit_root = 1;
+               path->skip_locking = 1;
+       }
 
        disk_bytenr = (u64)bio->bi_sector << 9;
        if (dio)
index e7872e4..e4e57d5 100644 (file)
@@ -1036,11 +1036,13 @@ out:
  * on error we return an unlocked page and the error value
  * on success we return a locked page and 0
  */
-static int prepare_uptodate_page(struct page *page, u64 pos)
+static int prepare_uptodate_page(struct page *page, u64 pos,
+                                bool force_uptodate)
 {
        int ret = 0;
 
-       if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
+       if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
+           !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
@@ -1061,7 +1063,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos)
 static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
-                        size_t write_bytes)
+                        size_t write_bytes, bool force_uptodate)
 {
        struct extent_state *cached_state = NULL;
        int i;
@@ -1075,12 +1077,6 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
 
-       if (start_pos > inode->i_size) {
-               err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
-               if (err)
-                       return err;
-       }
-
 again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
@@ -1092,10 +1088,11 @@ again:
                }
 
                if (i == 0)
-                       err = prepare_uptodate_page(pages[i], pos);
+                       err = prepare_uptodate_page(pages[i], pos,
+                                                   force_uptodate);
                if (i == num_pages - 1)
                        err = prepare_uptodate_page(pages[i],
-                                                   pos + write_bytes);
+                                                   pos + write_bytes, false);
                if (err) {
                        page_cache_release(pages[i]);
                        faili = i - 1;
@@ -1164,6 +1161,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
        size_t num_written = 0;
        int nrptrs;
        int ret = 0;
+       bool force_page_uptodate = false;
 
        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
@@ -1206,7 +1204,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                 * contents of pages from loop to loop
                 */
                ret = prepare_pages(root, file, pages, num_pages,
-                                   pos, first_index, write_bytes);
+                                   pos, first_index, write_bytes,
+                                   force_page_uptodate);
                if (ret) {
                        btrfs_delalloc_release_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
@@ -1223,12 +1222,15 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                if (copied < write_bytes)
                        nrptrs = 1;
 
-               if (copied == 0)
+               if (copied == 0) {
+                       force_page_uptodate = true;
                        dirty_pages = 0;
-               else
+               } else {
+                       force_page_uptodate = false;
                        dirty_pages = (copied + offset +
                                       PAGE_CACHE_SIZE - 1) >>
                                       PAGE_CACHE_SHIFT;
+               }
 
                /*
                 * If we had a short copy we need to release the excess delaloc
@@ -1338,6 +1340,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        loff_t *ppos = &iocb->ki_pos;
+       u64 start_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
        size_t count, ocount;
@@ -1386,6 +1389,15 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        file_update_time(file);
        BTRFS_I(inode)->sequence++;
 
+       start_pos = round_down(pos, root->sectorsize);
+       if (start_pos > i_size_read(inode)) {
+               err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
+               if (err) {
+                       mutex_unlock(&inode->i_mutex);
+                       goto out;
+               }
+       }
+
        if (unlikely(file->f_flags & O_DIRECT)) {
                num_written = __btrfs_direct_write(iocb, iov, nr_segs,
                                                   pos, ppos, count, ocount);
@@ -1813,6 +1825,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
                goto out;
        case SEEK_DATA:
        case SEEK_HOLE:
+               if (offset >= i_size_read(inode)) {
+                       mutex_unlock(&inode->i_mutex);
+                       return -ENXIO;
+               }
+
                ret = find_desired_extent(inode, &offset, origin);
                if (ret) {
                        mutex_unlock(&inode->i_mutex);
@@ -1821,11 +1838,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
        }
 
        if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
-               ret = -EINVAL;
+               offset = -EINVAL;
                goto out;
        }
        if (offset > inode->i_sb->s_maxbytes) {
-               ret = -EINVAL;
+               offset = -EINVAL;
                goto out;
        }
 
index 6a265b9..41ac927 100644 (file)
@@ -190,9 +190,11 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct inode *inode)
 {
+       struct btrfs_block_rsv *rsv;
        loff_t oldsize;
        int ret = 0;
 
+       rsv = trans->block_rsv;
        trans->block_rsv = root->orphan_block_rsv;
        ret = btrfs_block_rsv_check(trans, root,
                                    root->orphan_block_rsv,
@@ -210,6 +212,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
         */
        ret = btrfs_truncate_inode_items(trans, root, inode,
                                         0, BTRFS_EXTENT_DATA_KEY);
+
+       trans->block_rsv = rsv;
        if (ret) {
                WARN_ON(1);
                return ret;
index 0ccc743..b2d004a 100644 (file)
@@ -1786,7 +1786,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                          &ordered_extent->list);
 
        ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
-       if (!ret) {
+       if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
                ret = btrfs_update_inode(trans, root, inode);
                BUG_ON(ret);
        }
@@ -3510,15 +3510,19 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        err = btrfs_drop_extents(trans, inode, cur_offset,
                                                 cur_offset + hole_size,
                                                 &hint_byte, 1);
-                       if (err)
+                       if (err) {
+                               btrfs_end_transaction(trans, root);
                                break;
+                       }
 
                        err = btrfs_insert_file_extent(trans, root,
                                        btrfs_ino(inode), cur_offset, 0,
                                        0, hole_size, 0, hole_size,
                                        0, 0, 0);
-                       if (err)
+                       if (err) {
+                               btrfs_end_transaction(trans, root);
                                break;
+                       }
 
                        btrfs_drop_extent_cache(inode, hole_start,
                                        last_byte - 1, 0);
@@ -3952,7 +3956,6 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                         struct btrfs_root *root, int *new)
 {
        struct inode *inode;
-       int bad_inode = 0;
 
        inode = btrfs_iget_locked(s, location->objectid, root);
        if (!inode)
@@ -3968,15 +3971,12 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                        if (new)
                                *new = 1;
                } else {
-                       bad_inode = 1;
+                       unlock_new_inode(inode);
+                       iput(inode);
+                       inode = ERR_PTR(-ESTALE);
                }
        }
 
-       if (bad_inode) {
-               iput(inode);
-               inode = ERR_PTR(-ESTALE);
-       }
-
        return inode;
 }
 
@@ -4018,7 +4018,8 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
                memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
                kfree(dentry->d_fsdata);
                dentry->d_fsdata = NULL;
-               d_clear_need_lookup(dentry);
+               /* This thing is hashed, drop it for now */
+               d_drop(dentry);
        } else {
                ret = btrfs_inode_by_name(dir, dentry, &location);
        }
@@ -4085,7 +4086,15 @@ static void btrfs_dentry_release(struct dentry *dentry)
 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
                                   struct nameidata *nd)
 {
-       return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
+       struct dentry *ret;
+
+       ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
+       if (unlikely(d_need_lookup(dentry))) {
+               spin_lock(&dentry->d_lock);
+               dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+               spin_unlock(&dentry->d_lock);
+       }
+       return ret;
 }
 
 unsigned char btrfs_filetype_table[] = {
@@ -4125,7 +4134,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
 
        /* special case for "." */
        if (filp->f_pos == 0) {
-               over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
+               over = filldir(dirent, ".", 1,
+                              filp->f_pos, btrfs_ino(inode), DT_DIR);
                if (over)
                        return 0;
                filp->f_pos = 1;
@@ -4134,7 +4144,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        if (filp->f_pos == 1) {
                u64 pino = parent_ino(filp->f_path.dentry);
                over = filldir(dirent, "..", 2,
-                              2, pino, DT_DIR);
+                              filp->f_pos, pino, DT_DIR);
                if (over)
                        return 0;
                filp->f_pos = 2;
@@ -5823,7 +5833,7 @@ again:
 
        add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
        ret = btrfs_ordered_update_i_size(inode, 0, ordered);
-       if (!ret)
+       if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
                btrfs_update_inode(trans, root, inode);
        ret = 0;
 out_unlock:
index 970977a..538f65a 100644 (file)
@@ -2177,6 +2177,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
        if (!(src_file->f_mode & FMODE_READ))
                goto out_fput;
 
+       /* don't make the dst file partly checksummed */
+       if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
+           (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
+               goto out_fput;
+
        ret = -EISDIR;
        if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
                goto out_fput;
@@ -2220,6 +2225,16 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
            !IS_ALIGNED(destoff, bs))
                goto out_unlock;
 
+       if (destoff > inode->i_size) {
+               ret = btrfs_cont_expand(inode, inode->i_size, destoff);
+               if (ret)
+                       goto out_unlock;
+       }
+
+       /* truncate page cache pages from target inode range */
+       truncate_inode_pages_range(&inode->i_data, destoff,
+                                  PAGE_CACHE_ALIGN(destoff + len) - 1);
+
        /* do any pending delalloc/csum calc on src, one way or
           another, and lock file content */
        while (1) {
@@ -2236,10 +2251,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                btrfs_wait_ordered_range(src, off, len);
        }
 
-       /* truncate page cache pages from target inode range */
-       truncate_inode_pages_range(&inode->i_data, off,
-                                  ALIGN(off + len, PAGE_CACHE_SIZE) - 1);
-
        /* clone data */
        key.objectid = btrfs_ino(src);
        key.type = BTRFS_EXTENT_DATA_KEY;
@@ -2317,7 +2328,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        else
                                new_key.offset = destoff;
 
-                       trans = btrfs_start_transaction(root, 1);
+                       /*
+                        * 1 - adjusting old extent (we may have to split it)
+                        * 1 - add new extent
+                        * 1 - inode update
+                        */
+                       trans = btrfs_start_transaction(root, 3);
                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
                                goto out;
@@ -2325,14 +2341,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
                        if (type == BTRFS_FILE_EXTENT_REG ||
                            type == BTRFS_FILE_EXTENT_PREALLOC) {
+                               /*
+                                *    a  | --- range to clone ---|  b
+                                * | ------------- extent ------------- |
+                                */
+
+                               /* subtract range b */
+                               if (key.offset + datal > off + len)
+                                       datal = off + len - key.offset;
+
+                               /* subtract range a */
                                if (off > key.offset) {
                                        datao += off - key.offset;
                                        datal -= off - key.offset;
                                }
 
-                               if (key.offset + datal > off + len)
-                                       datal = off + len - key.offset;
-
                                ret = btrfs_drop_extents(trans, inode,
                                                         new_key.offset,
                                                         new_key.offset + datal,
@@ -2429,7 +2452,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        if (endoff > inode->i_size)
                                btrfs_i_size_write(inode, endoff);
 
-                       BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
                        ret = btrfs_update_inode(trans, root, inode);
                        BUG_ON(ret);
                        btrfs_end_transaction(trans, root);
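
The reordered clamping above trims the part of the extent that runs past the end of the clone window (range b) before trimming the part ahead of its start (range a); the end clamp uses key.offset as its base, so it has to run while the start is still unadjusted. A small arithmetic sketch of the clipping with plain integers (not btrfs structures):

#include <stdio.h>

/* Clip extent [ext_off, ext_off + ext_len) to the window [off, off + len). */
static void clip(unsigned long long ext_off, unsigned long long ext_len,
                 unsigned long long off, unsigned long long len,
                 unsigned long long *skip, unsigned long long *out_len)
{
        unsigned long long datao = 0, datal = ext_len;

        if (ext_off + datal > off + len)        /* subtract range b first */
                datal = off + len - ext_off;
        if (off > ext_off) {                    /* then subtract range a */
                datao += off - ext_off;
                datal -= off - ext_off;
        }
        *skip = datao;
        *out_len = datal;
}

int main(void)
{
        unsigned long long skip, n;

        clip(0, 100, 30, 40, &skip, &n);        /* extent 0..100, window 30..70 */
        printf("skip=%llu len=%llu\n", skip, n);        /* skip=30 len=40 */
        return 0;
}
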
index 7dc36fa..e24b796 100644 (file)
@@ -884,6 +884,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
+       struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct dentry *parent;
        struct dentry *dentry;
@@ -895,6 +896,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        u64 objectid;
        u64 root_flags;
 
+       rsv = trans->block_rsv;
+
        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                pending->error = -ENOMEM;
@@ -1002,6 +1005,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        btrfs_orphan_post_snapshot(trans, pending);
 fail:
        kfree(new_root_item);
+       trans->block_rsv = rsv;
        btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
        return 0;
 }
index d733b9c..69565e5 100644 (file)
@@ -116,6 +116,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
                if (ret)
                        goto out;
                btrfs_release_path(path);
+
+               /*
+                * remove the attribute
+                */
+               if (!value)
+                       goto out;
        }
 
 again:
@@ -158,6 +164,9 @@ out:
        return ret;
 }
 
+/*
+ * @value: "" sets the attribute to empty, NULL removes it
+ */
 int __btrfs_setxattr(struct btrfs_trans_handle *trans,
                     struct inode *inode, const char *name,
                     const void *value, size_t size, int flags)
index fee028b..86c59e1 100644 (file)
@@ -1595,7 +1595,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
                dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
                     *ppath);
-       } else if (rpath) {
+       } else if (rpath || rino) {
                *ino = rino;
                *ppath = rpath;
                *pathlen = strlen(rpath);
index d47c5ec..88bacaf 100644 (file)
@@ -813,8 +813,8 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        fsc = create_fs_client(fsopt, opt);
        if (IS_ERR(fsc)) {
                res = ERR_CAST(fsc);
-               kfree(fsopt);
-               kfree(opt);
+               destroy_mount_options(fsopt);
+               ceph_destroy_options(opt);
                goto out_final;
        }
 
index e76bfeb..30acd22 100644 (file)
@@ -351,9 +351,7 @@ static int
 build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
 {
        unsigned int dlen;
-       unsigned int wlen;
-       unsigned int size = 6 * sizeof(struct ntlmssp2_name);
-       __le64  curtime;
+       unsigned int size = 2 * sizeof(struct ntlmssp2_name);
        char *defdmname = "WORKGROUP";
        unsigned char *blobptr;
        struct ntlmssp2_name *attrptr;
@@ -365,15 +363,14 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
        }
 
        dlen = strlen(ses->domainName);
-       wlen = strlen(ses->server->hostname);
 
-       /* The length of this blob is a size which is
-        * six times the size of a structure which holds name/size +
-        * two times the unicode length of a domain name +
-        * two times the unicode length of a server name +
-        * size of a timestamp (which is 8 bytes).
+       /*
+        * The length of this blob is twice the size of the av pair
+        * structure which holds name/size
+        * (for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL) +
+        * the unicode length of the netbios domain name
         */
-       ses->auth_key.len = size + 2 * (2 * dlen) + 2 * (2 * wlen) + 8;
+       ses->auth_key.len = size + 2 * dlen;
        ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
        if (!ses->auth_key.response) {
                ses->auth_key.len = 0;
@@ -384,44 +381,15 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
        blobptr = ses->auth_key.response;
        attrptr = (struct ntlmssp2_name *) blobptr;
 
+       /*
+        * As defined in MS-NTLM 3.3.2, just this av pair field
+        * is sufficient as part of the temp
+        */
        attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
        attrptr->length = cpu_to_le16(2 * dlen);
        blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
        cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
 
-       blobptr += 2 * dlen;
-       attrptr = (struct ntlmssp2_name *) blobptr;
-
-       attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_COMPUTER_NAME);
-       attrptr->length = cpu_to_le16(2 * wlen);
-       blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
-       cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
-
-       blobptr += 2 * wlen;
-       attrptr = (struct ntlmssp2_name *) blobptr;
-
-       attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_DOMAIN_NAME);
-       attrptr->length = cpu_to_le16(2 * dlen);
-       blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
-       cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
-
-       blobptr += 2 * dlen;
-       attrptr = (struct ntlmssp2_name *) blobptr;
-
-       attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_COMPUTER_NAME);
-       attrptr->length = cpu_to_le16(2 * wlen);
-       blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
-       cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
-
-       blobptr += 2 * wlen;
-       attrptr = (struct ntlmssp2_name *) blobptr;
-
-       attrptr->type = cpu_to_le16(NTLMSSP_AV_TIMESTAMP);
-       attrptr->length = cpu_to_le16(sizeof(__le64));
-       blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
-       curtime = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
-       memcpy(blobptr, &curtime, sizeof(__le64));
-
        return 0;
 }
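
The slimmed-down blob above carries a single name/length (av pair) entry, the NetBIOS domain name, instead of six. A rough sketch of laying out one such type/length/value pair in a buffer; it is simplified to an ASCII payload and a made-up type code, whereas the real code writes UTF-16 and the protocol-defined NTLMSSP_AV_* values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct av_pair {                /* name/size header */
        uint16_t type;
        uint16_t length;
} __attribute__((packed));

int main(void)
{
        const char *domain = "WORKGROUP";
        unsigned char blob[64];
        struct av_pair hdr = { .type = 2 /* illustrative */,
                               .length = (uint16_t)strlen(domain) };

        memcpy(blob, &hdr, sizeof(hdr));
        memcpy(blob + sizeof(hdr), domain, hdr.length);

        printf("blob is %zu bytes\n", sizeof(hdr) + (size_t)hdr.length);
        return 0;
}
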
 
index f93eb94..54b8f1e 100644 (file)
@@ -548,6 +548,12 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
                struct inode *dir = dentry->d_inode;
                struct dentry *child;
 
+               if (!dir) {
+                       dput(dentry);
+                       dentry = ERR_PTR(-ENOENT);
+                       break;
+               }
+
                /* skip separators */
                while (*s == sep)
                        s++;
@@ -563,10 +569,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
                mutex_unlock(&dir->i_mutex);
                dput(dentry);
                dentry = child;
-               if (!dentry->d_inode) {
-                       dput(dentry);
-                       dentry = ERR_PTR(-ENOENT);
-               }
        } while (!IS_ERR(dentry));
        _FreeXid(xid);
        kfree(full_path);
index aac37d9..a80f7bd 100644 (file)
@@ -4079,7 +4079,8 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
        T2_FNEXT_RSP_PARMS *parms;
        char *response_data;
        int rc = 0;
-       int bytes_returned, name_len;
+       int bytes_returned;
+       unsigned int name_len;
        __u16 params, byte_count;
 
        cFYI(1, "In FindNext");
index 633c246..f4af4cc 100644 (file)
@@ -1298,7 +1298,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        /* ignore */
                } else if (strnicmp(data, "guest", 5) == 0) {
                        /* ignore */
-               } else if (strnicmp(data, "rw", 2) == 0) {
+               } else if (strnicmp(data, "rw", 2) == 0 && strlen(data) == 2) {
                        /* ignore */
                } else if (strnicmp(data, "ro", 2) == 0) {
                        /* ignore */
@@ -1401,7 +1401,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        vol->server_ino = 1;
                } else if (strnicmp(data, "noserverino", 9) == 0) {
                        vol->server_ino = 0;
-               } else if (strnicmp(data, "rwpidforward", 4) == 0) {
+               } else if (strnicmp(data, "rwpidforward", 12) == 0) {
                        vol->rwpidforward = 1;
                } else if (strnicmp(data, "cifsacl", 7) == 0) {
                        vol->cifs_acl = 1;
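
The two connect.c hunks above tighten mount-option matching: a bare strnicmp(data, "rw", 2) also accepts any option that merely begins with "rw" (such as "rwpidforward"), and comparing only 4 characters of "rwpidforward" lets any "rwpi..." string through. Below is a minimal userspace sketch of the prefix-match pitfall and the exact-match fix, using the standard strncasecmp/strlen; it is an illustration only, not the kernel parser itself.

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    /* Prefix-only match: the bug -- "rwpidforward" is mistaken for "rw". */
    static int matches_rw_prefix(const char *opt)
    {
        return strncasecmp(opt, "rw", 2) == 0;
    }

    /* Exact match: compare the prefix and also require the same length. */
    static int matches_rw_exact(const char *opt)
    {
        return strncasecmp(opt, "rw", 2) == 0 && strlen(opt) == 2;
    }

    int main(void)
    {
        const char *opts[] = { "rw", "rwpidforward", "ro" };

        for (size_t i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
            printf("%-14s prefix=%d exact=%d\n", opts[i],
                   matches_rw_prefix(opts[i]), matches_rw_exact(opts[i]));
        return 0;
    }
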
index 04da6ac..12661e1 100644 (file)
@@ -1134,7 +1134,7 @@ struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
                return bh;
        if (buffer_uptodate(bh))
                return bh;
-       ll_rw_block(READ_META, 1, &bh);
+       ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
@@ -2807,7 +2807,7 @@ make_io:
                trace_ext3_load_inode(inode);
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(READ_META, bh);
+               submit_bh(READ | REQ_META | REQ_PRIO, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        ext3_error(inode->i_sb, "ext3_get_inode_loc",
index 5571708..0629e09 100644 (file)
@@ -922,7 +922,8 @@ restart:
                                bh = ext3_getblk(NULL, dir, b++, 0, &err);
                                bh_use[ra_max] = bh;
                                if (bh)
-                                       ll_rw_block(READ_META, 1, &bh);
+                                       ll_rw_block(READ | REQ_META | REQ_PRIO,
+                                                   1, &bh);
                        }
                }
                if ((bh = bh_use[ra_ptr++]) == NULL)
index 18d2558..986e238 100644 (file)
@@ -647,7 +647,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                return bh;
        if (buffer_uptodate(bh))
                return bh;
-       ll_rw_block(READ_META, 1, &bh);
+       ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
@@ -3298,7 +3298,7 @@ make_io:
                trace_ext4_load_inode(inode);
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(READ_META, bh);
+               submit_bh(READ | REQ_META | REQ_PRIO, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        EXT4_ERROR_INODE_BLOCK(inode, block,
index f8068c7..1c924fa 100644 (file)
@@ -922,7 +922,8 @@ restart:
                                bh = ext4_getblk(NULL, dir, b++, 0, &err);
                                bh_use[ra_max] = bh;
                                if (bh)
-                                       ll_rw_block(READ_META, 1, &bh);
+                                       ll_rw_block(READ | REQ_META | REQ_PRIO,
+                                                   1, &bh);
                        }
                }
                if ((bh = bh_use[ra_ptr++]) == NULL)
index 168a80f..5cb8614 100644 (file)
@@ -258,10 +258,14 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
        forget->forget_one.nlookup = nlookup;
 
        spin_lock(&fc->lock);
-       fc->forget_list_tail->next = forget;
-       fc->forget_list_tail = forget;
-       wake_up(&fc->waitq);
-       kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+       if (fc->connected) {
+               fc->forget_list_tail->next = forget;
+               fc->forget_list_tail = forget;
+               wake_up(&fc->waitq);
+               kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+       } else {
+               kfree(forget);
+       }
        spin_unlock(&fc->lock);
 }
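
The fuse hunk above queues a forget request only while fc->connected is still set (checked under fc->lock) and frees it otherwise, so nothing is appended to a list that no reader will ever drain. A minimal single-threaded sketch of the same queue-or-free decision, with invented names and the locking left out:

    #include <stdio.h>
    #include <stdlib.h>

    struct forget_node { struct forget_node *next; };

    static struct forget_node *list_tail;   /* tail of the pending list */
    static int connected = 1;               /* cleared when the reader goes away */

    /* Queue the node while the reader is alive, otherwise free it on the spot.
     * In the kernel this decision is made with fc->lock held; the lock is
     * omitted from this single-threaded sketch. */
    static void queue_or_free(struct forget_node *n)
    {
        if (connected) {
            if (list_tail)
                list_tail->next = n;
            list_tail = n;
        } else {
            free(n);        /* nobody will ever drain the list again */
        }
    }

    int main(void)
    {
        struct forget_node *a = calloc(1, sizeof(*a));
        struct forget_node *b;

        queue_or_free(a);   /* still connected: queued */

        connected = 0;      /* connection torn down */
        b = calloc(1, sizeof(*b));
        queue_or_free(b);   /* freed instead of leaking on a dead queue */

        printf("queued tail: %p\n", (void *)list_tail);
        free(a);
        return 0;
    }
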
 
index 12b5029..add96f6 100644 (file)
@@ -812,6 +812,9 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                        if (arg->minor >= 17) {
                                if (!(arg->flags & FUSE_FLOCK_LOCKS))
                                        fc->no_flock = 1;
+                       } else {
+                               if (!(arg->flags & FUSE_POSIX_LOCKS))
+                                       fc->no_flock = 1;
                        }
                        if (arg->flags & FUSE_ATOMIC_O_TRUNC)
                                fc->atomic_o_trunc = 1;
index 85c6292..5986464 100644 (file)
@@ -624,9 +624,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
-               submit_bh(WRITE_SYNC | REQ_META, bh);
+               submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
        else
-               submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
+               submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
        wait_on_buffer(bh);
 
        if (!buffer_uptodate(bh))
index 747238c..be29858 100644 (file)
@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 {
        struct buffer_head *bh, *head;
        int nr_underway = 0;
-       int write_op = REQ_META |
+       int write_op = REQ_META | REQ_PRIO |
                (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 
        BUG_ON(!PageLocked(page));
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
        }
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(READ_SYNC | REQ_META, bh);
+       submit_bh(READ_SYNC | REQ_META | REQ_PRIO, bh);
        if (!(flags & DIO_WAIT))
                return 0;
 
@@ -435,7 +435,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
-               ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
+               ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);
 
        dblock++;
        extlen--;
index 3bc073a..079587e 100644 (file)
@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 
        bio->bi_end_io = end_bio_io_page;
        bio->bi_private = page;
-       submit_bio(READ_SYNC | REQ_META, bio);
+       submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio);
        wait_on_page_locked(page);
        bio_put(bio);
        if (!PageUptodate(page)) {
index 42e8d23..0e8bb13 100644 (file)
@@ -709,7 +709,7 @@ get_a_page:
                set_buffer_uptodate(bh);
 
        if (!buffer_uptodate(bh)) {
-               ll_rw_block(READ_META, 1, &bh);
+               ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto unlock_out;
index c106ca2..d24a9b6 100644 (file)
@@ -344,6 +344,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        struct inode *root, *inode;
        struct qstr str;
        struct nls_table *nls = NULL;
+       u64 last_fs_block, last_fs_page;
        int err;
 
        err = -EINVAL;
@@ -399,9 +400,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        if (!sbi->rsrc_clump_blocks)
                sbi->rsrc_clump_blocks = 1;
 
-       err = generic_check_addressable(sbi->alloc_blksz_shift,
-                                       sbi->total_blocks);
-       if (err) {
+       err = -EFBIG;
+       last_fs_block = sbi->total_blocks - 1;
+       last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
+                       PAGE_CACHE_SHIFT;
+
+       if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
+           (last_fs_page > (pgoff_t)(~0ULL))) {
                printk(KERN_ERR "hfs: filesystem size too large.\n");
                goto out_free_vhdr;
        }
@@ -525,8 +530,8 @@ out_close_cat_tree:
 out_close_ext_tree:
        hfs_btree_close(sbi->ext_tree);
 out_free_vhdr:
-       kfree(sbi->s_vhdr);
-       kfree(sbi->s_backup_vhdr);
+       kfree(sbi->s_vhdr_buf);
+       kfree(sbi->s_backup_vhdr_buf);
 out_unload_nls:
        unload_nls(sbi->nls);
        unload_nls(nls);
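
The hfsplus hunks replace generic_check_addressable() with an explicit test that the last filesystem block still fits in sector_t (512-byte units) and that the last page index fits in pgoff_t, failing the mount with -EFBIG otherwise. A standalone sketch of that bound check follows; the 32-bit sector_t/pgoff_t widths and the 4 KiB page size are assumptions chosen for illustration, not what every kernel configuration uses.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assumed 4 KiB pages */

    /* Pretend sector_t and pgoff_t are 32-bit, as on some 32-bit configs. */
    typedef uint32_t sector_t;
    typedef uint32_t pgoff_t;

    static int fs_addressable(uint64_t total_blocks, unsigned alloc_blksz_shift)
    {
        uint64_t last_fs_block = total_blocks - 1;
        uint64_t last_fs_page  = (last_fs_block << alloc_blksz_shift) >> PAGE_SHIFT;

        /* the last block, expressed in 512-byte sectors, must fit in sector_t ... */
        if (last_fs_block > ((sector_t)~0U) >> (alloc_blksz_shift - 9))
            return 0;
        /* ... and the last page index must fit in pgoff_t */
        if (last_fs_page > (pgoff_t)~0U)
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("small fs: %d\n", fs_addressable(1ULL << 20, 12));  /* fits    */
        printf("huge fs:  %d\n", fs_addressable(1ULL << 40, 12));  /* too big */
        return 0;
    }
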
index 10e515a..7daf4b8 100644 (file)
@@ -272,9 +272,9 @@ reread:
        return 0;
 
 out_free_backup_vhdr:
-       kfree(sbi->s_backup_vhdr);
+       kfree(sbi->s_backup_vhdr_buf);
 out_free_vhdr:
-       kfree(sbi->s_vhdr);
+       kfree(sbi->s_vhdr_buf);
 out:
        return error;
 }
index 2826db3..0b3138d 100644 (file)
@@ -721,31 +721,22 @@ static int follow_automount(struct path *path, unsigned flags,
        if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
                return -EREMOTE;
 
-       /* We don't want to mount if someone supplied AT_NO_AUTOMOUNT
-        * and this is the terminal part of the path.
+       /* We don't want to mount if someone's just doing a stat -
+        * unless they're stat'ing a directory and appended a '/' to
+        * the name.
+        *
+        * We do, however, want to mount if someone wants to open or
+        * create a file of any type under the mountpoint, wants to
+        * traverse through the mountpoint or wants to open the
+        * mounted directory.  Also, autofs may mark negative dentries
+        * as being automount points.  These will need the attentions
+        * of the daemon to instantiate them before they can be used.
         */
-       if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
-               return -EISDIR; /* we actually want to stop here */
+       if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
+                    LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
+           path->dentry->d_inode)
+               return -EISDIR;
 
-       /*
-        * We don't want to mount if someone's just doing a stat and they've
-        * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and
-        * appended a '/' to the name.
-        */
-       if (!(flags & LOOKUP_FOLLOW)) {
-               /* We do, however, want to mount if someone wants to open or
-                * create a file of any type under the mountpoint, wants to
-                * traverse through the mountpoint or wants to open the mounted
-                * directory.
-                * Also, autofs may mark negative dentries as being automount
-                * points.  These will need the attentions of the daemon to
-                * instantiate them before they can be used.
-                */
-               if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-                            LOOKUP_OPEN | LOOKUP_CREATE)) &&
-                   path->dentry->d_inode)
-                       return -EISDIR;
-       }
        current->total_link_count++;
        if (current->total_link_count >= 40)
                return -ELOOP;
@@ -2619,6 +2610,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
        if (!dir->i_op->rmdir)
                return -EPERM;
 
+       dget(dentry);
        mutex_lock(&dentry->d_inode->i_mutex);
 
        error = -EBUSY;
@@ -2639,6 +2631,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 
 out:
        mutex_unlock(&dentry->d_inode->i_mutex);
+       dput(dentry);
        if (!error)
                d_delete(dentry);
        return error;
@@ -3028,6 +3021,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
        if (error)
                return error;
 
+       dget(new_dentry);
        if (target)
                mutex_lock(&target->i_mutex);
 
@@ -3048,6 +3042,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
 out:
        if (target)
                mutex_unlock(&target->i_mutex);
+       dput(new_dentry);
        if (!error)
                if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
                        d_move(old_dentry,new_dentry);
index 22bfe82..b4febb2 100644 (file)
@@ -1757,7 +1757,7 @@ static int do_loopback(struct path *path, char *old_name,
                return err;
        if (!old_name || !*old_name)
                return -EINVAL;
-       err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
+       err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
        if (err)
                return err;
 
index 1ec1a85..3e93e9a 100644 (file)
@@ -56,6 +56,9 @@ enum nfs4_session_state {
        NFS4_SESSION_DRAINING,
 };
 
+#define NFS4_RENEW_TIMEOUT             0x01
+#define NFS4_RENEW_DELEGATION_CB       0x02
+
 struct nfs4_minor_version_ops {
        u32     minor_version;
 
@@ -225,7 +228,7 @@ struct nfs4_state_recovery_ops {
 };
 
 struct nfs4_state_maintenance_ops {
-       int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *);
+       int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned);
        struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
        int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
 };
@@ -237,8 +240,6 @@ extern const struct inode_operations nfs4_dir_inode_operations;
 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
 extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
-extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
@@ -349,6 +350,7 @@ extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
 extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
 extern void nfs4_schedule_lease_recovery(struct nfs_client *);
 extern void nfs4_schedule_state_manager(struct nfs_client *);
+extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
 extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
index 8c77039..4700fae 100644 (file)
@@ -3374,9 +3374,13 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
 
        if (task->tk_status < 0) {
                /* Unless we're shutting down, schedule state recovery! */
-               if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
+               if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
+                       return;
+               if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
                        nfs4_schedule_lease_recovery(clp);
-               return;
+                       return;
+               }
+               nfs4_schedule_path_down_recovery(clp);
        }
        do_renew_lease(clp, timestamp);
 }
@@ -3386,7 +3390,7 @@ static const struct rpc_call_ops nfs4_renew_ops = {
        .rpc_release = nfs4_renew_release,
 };
 
-int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
 {
        struct rpc_message msg = {
                .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
@@ -3395,9 +3399,11 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
        };
        struct nfs4_renewdata *data;
 
+       if (renew_flags == 0)
+               return 0;
        if (!atomic_inc_not_zero(&clp->cl_count))
                return -EIO;
-       data = kmalloc(sizeof(*data), GFP_KERNEL);
+       data = kmalloc(sizeof(*data), GFP_NOFS);
        if (data == NULL)
                return -ENOMEM;
        data->client = clp;
@@ -3406,7 +3412,7 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
                        &nfs4_renew_ops, data);
 }
 
-int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
 {
        struct rpc_message msg = {
                .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
@@ -5504,11 +5510,13 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_
        return rpc_run_task(&task_setup_data);
 }
 
-static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
 {
        struct rpc_task *task;
        int ret = 0;
 
+       if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+               return 0;
        task = _nfs41_proc_sequence(clp, cred);
        if (IS_ERR(task))
                ret = PTR_ERR(task);
index df8e7f3..dc484c0 100644 (file)
@@ -60,6 +60,7 @@ nfs4_renew_state(struct work_struct *work)
        struct rpc_cred *cred;
        long lease;
        unsigned long last, now;
+       unsigned renew_flags = 0;
 
        ops = clp->cl_mvops->state_renewal_ops;
        dprintk("%s: start\n", __func__);
@@ -72,18 +73,23 @@ nfs4_renew_state(struct work_struct *work)
        last = clp->cl_last_renewal;
        now = jiffies;
        /* Are we close to a lease timeout? */
-       if (time_after(now, last + lease/3)) {
+       if (time_after(now, last + lease/3))
+               renew_flags |= NFS4_RENEW_TIMEOUT;
+       if (nfs_delegations_present(clp))
+               renew_flags |= NFS4_RENEW_DELEGATION_CB;
+
+       if (renew_flags != 0) {
                cred = ops->get_state_renewal_cred_locked(clp);
                spin_unlock(&clp->cl_lock);
                if (cred == NULL) {
-                       if (!nfs_delegations_present(clp)) {
+                       if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) {
                                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                                goto out;
                        }
                        nfs_expire_all_delegations(clp);
                } else {
                        /* Queue an asynchronous RENEW. */
-                       ops->sched_state_renewal(clp, cred);
+                       ops->sched_state_renewal(clp, cred, renew_flags);
                        put_rpccred(cred);
                        goto out_exp;
                }
index 72ab97e..39914be 100644 (file)
@@ -1038,6 +1038,12 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
        nfs4_schedule_state_manager(clp);
 }
 
+void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
+{
+       nfs_handle_cb_pathdown(clp);
+       nfs4_schedule_state_manager(clp);
+}
+
 static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
 {
 
index b961cea..5b19b6a 100644 (file)
@@ -2035,9 +2035,6 @@ static inline void nfs_initialise_sb(struct super_block *sb)
                sb->s_blocksize = nfs_block_bits(server->wsize,
                                                 &sb->s_blocksize_bits);
 
-       if (server->flags & NFS_MOUNT_NOAC)
-               sb->s_flags |= MS_SYNCHRONOUS;
-
        sb->s_bdi = &server->backing_dev_info;
 
        nfs_super_set_maxbytes(sb, server->maxfilesize);
@@ -2249,6 +2246,10 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
        if (server->flags & NFS_MOUNT_UNSHARED)
                compare_super = NULL;
 
+       /* -o noac implies -o sync */
+       if (server->flags & NFS_MOUNT_NOAC)
+               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
        if (IS_ERR(s)) {
@@ -2361,6 +2362,10 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags,
        if (server->flags & NFS_MOUNT_UNSHARED)
                compare_super = NULL;
 
+       /* -o noac implies -o sync */
+       if (server->flags & NFS_MOUNT_NOAC)
+               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
        if (IS_ERR(s)) {
@@ -2628,6 +2633,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
        if (server->flags & NFS4_MOUNT_UNSHARED)
                compare_super = NULL;
 
+       /* -o noac implies -o sync */
+       if (server->flags & NFS_MOUNT_NOAC)
+               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
        if (IS_ERR(s)) {
@@ -2789,7 +2798,7 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
                goto out_put_mnt_ns;
 
        ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
-                       export_path, LOOKUP_FOLLOW, &path);
+                       export_path, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
 
        nfs_referral_loop_unprotect();
        put_mnt_ns(ns_private);
@@ -2916,6 +2925,10 @@ nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
        if (server->flags & NFS4_MOUNT_UNSHARED)
                compare_super = NULL;
 
+       /* -o noac implies -o sync */
+       if (server->flags & NFS_MOUNT_NOAC)
+               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
        if (IS_ERR(s)) {
@@ -3003,6 +3016,10 @@ nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
        if (server->flags & NFS4_MOUNT_UNSHARED)
                compare_super = NULL;
 
+       /* -o noac implies -o sync */
+       if (server->flags & NFS_MOUNT_NOAC)
+               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
        if (IS_ERR(s)) {
index b39b37f..c9bd2a6 100644 (file)
@@ -958,7 +958,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head
                if (!data)
                        goto out_bad;
                data->pagevec[0] = page;
-               nfs_write_rpcsetup(req, data, wsize, offset, desc->pg_ioflags);
+               nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
                list_add(&data->list, res);
                requests++;
                nbytes -= len;
index 25b6a88..5afaa58 100644 (file)
@@ -877,30 +877,54 @@ struct numa_maps_private {
        struct numa_maps md;
 };
 
-static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
+static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
+                       unsigned long nr_pages)
 {
        int count = page_mapcount(page);
 
-       md->pages++;
+       md->pages += nr_pages;
        if (pte_dirty || PageDirty(page))
-               md->dirty++;
+               md->dirty += nr_pages;
 
        if (PageSwapCache(page))
-               md->swapcache++;
+               md->swapcache += nr_pages;
 
        if (PageActive(page) || PageUnevictable(page))
-               md->active++;
+               md->active += nr_pages;
 
        if (PageWriteback(page))
-               md->writeback++;
+               md->writeback += nr_pages;
 
        if (PageAnon(page))
-               md->anon++;
+               md->anon += nr_pages;
 
        if (count > md->mapcount_max)
                md->mapcount_max = count;
 
-       md->node[page_to_nid(page)]++;
+       md->node[page_to_nid(page)] += nr_pages;
+}
+
+static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
+               unsigned long addr)
+{
+       struct page *page;
+       int nid;
+
+       if (!pte_present(pte))
+               return NULL;
+
+       page = vm_normal_page(vma, addr, pte);
+       if (!page)
+               return NULL;
+
+       if (PageReserved(page))
+               return NULL;
+
+       nid = page_to_nid(page);
+       if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
+               return NULL;
+
+       return page;
 }
 
 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
@@ -912,26 +936,32 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
        pte_t *pte;
 
        md = walk->private;
-       orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-       do {
-               struct page *page;
-               int nid;
+       spin_lock(&walk->mm->page_table_lock);
+       if (pmd_trans_huge(*pmd)) {
+               if (pmd_trans_splitting(*pmd)) {
+                       spin_unlock(&walk->mm->page_table_lock);
+                       wait_split_huge_page(md->vma->anon_vma, pmd);
+               } else {
+                       pte_t huge_pte = *(pte_t *)pmd;
+                       struct page *page;
 
-               if (!pte_present(*pte))
-                       continue;
+                       page = can_gather_numa_stats(huge_pte, md->vma, addr);
+                       if (page)
+                               gather_stats(page, md, pte_dirty(huge_pte),
+                                               HPAGE_PMD_SIZE/PAGE_SIZE);
+                       spin_unlock(&walk->mm->page_table_lock);
+                       return 0;
+               }
+       } else {
+               spin_unlock(&walk->mm->page_table_lock);
+       }
 
-               page = vm_normal_page(md->vma, addr, *pte);
+       orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+       do {
+               struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
                if (!page)
                        continue;
-
-               if (PageReserved(page))
-                       continue;
-
-               nid = page_to_nid(page);
-               if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
-                       continue;
-
-               gather_stats(page, md, pte_dirty(*pte));
+               gather_stats(page, md, pte_dirty(*pte), 1);
 
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_pte, ptl);
@@ -952,7 +982,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
                return 0;
 
        md = walk->private;
-       gather_stats(page, md, pte_dirty(*pte));
+       gather_stats(page, md, pte_dirty(*pte), 1);
        return 0;
 }
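
The task_mmu.c hunks teach the /proc/<pid>/numa_maps walker about transparent huge pages: a huge pmd is accounted in one call by passing nr_pages (HPAGE_PMD_SIZE/PAGE_SIZE) to gather_stats(), with the per-pte validity checks factored into can_gather_numa_stats(). The toy below shows only the accounting change, not the page-table walk or its locking; the structure and field names are simplified stand-ins.

    #include <stdio.h>

    #define MAX_NODES 4

    struct numa_stats {
        unsigned long pages;
        unsigned long anon;
        unsigned long node[MAX_NODES];
    };

    /* Account one mapping: a normal pte contributes 1 page, a huge pmd
     * contributes nr_pages (e.g. 512 for a 2 MiB THP with 4 KiB pages). */
    static void gather(struct numa_stats *md, int nid, int is_anon,
                       unsigned long nr_pages)
    {
        md->pages += nr_pages;
        if (is_anon)
            md->anon += nr_pages;
        md->node[nid] += nr_pages;
    }

    int main(void)
    {
        struct numa_stats md = { 0 };

        gather(&md, 0, 1, 1);    /* ordinary 4 KiB anonymous page on node 0 */
        gather(&md, 1, 1, 512);  /* one 2 MiB THP on node 1, counted as 512 pages */

        printf("pages=%lu anon=%lu node0=%lu node1=%lu\n",
               md.pages, md.anon, md.node[0], md.node[1]);
        return 0;
    }
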
 
index b34bdb2..10b6be3 100644 (file)
@@ -355,7 +355,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
         * resolution (think about autofs) and thus deadlocks could arise.
         */
        if (cmds == Q_QUOTAON) {
-               ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path);
+               ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
                if (ret)
                        pathp = ERR_PTR(ret);
                else
index ba5316f..78a3aa8 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -81,8 +81,6 @@ int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
 
        if (!(flag & AT_SYMLINK_NOFOLLOW))
                lookup_flags |= LOOKUP_FOLLOW;
-       if (flag & AT_NO_AUTOMOUNT)
-               lookup_flags |= LOOKUP_NO_AUTOMOUNT;
        if (flag & AT_EMPTY_PATH)
                lookup_flags |= LOOKUP_EMPTY;
 
index 45174b5..feb361e 100644 (file)
@@ -335,9 +335,9 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
 #define DBGKEY(key)  ((char *)(key))
 #define DBGKEY1(key) ((char *)(key))
 
-#define ubifs_dbg_msg(fmt, ...) do {               \
-       if (0)                                     \
-               pr_debug(fmt "\n", ##__VA_ARGS__); \
+#define ubifs_dbg_msg(fmt, ...) do {                        \
+       if (0)                                              \
+               printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
 } while (0)
 
 #define dbg_dump_stack()
index 63e971e..8c37dde 100644 (file)
@@ -1300,6 +1300,7 @@ xfs_end_io_direct_write(
        bool                    is_async)
 {
        struct xfs_ioend        *ioend = iocb->private;
+       struct inode            *inode = ioend->io_inode;
 
        /*
         * blockdev_direct_IO can return an error even after the I/O
@@ -1331,7 +1332,7 @@ xfs_end_io_direct_write(
        }
 
        /* XXX: probably should move into the real I/O completion handler */
-       inode_dio_done(ioend->io_inode);
+       inode_dio_done(inode);
 }
 
 STATIC ssize_t
index 98999cf..feb9121 100644 (file)
@@ -63,15 +63,10 @@ static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
        return container_of(gc, struct bgpio_chip, gc);
 }
 
-int __devexit bgpio_remove(struct bgpio_chip *bgc);
-int __devinit bgpio_init(struct bgpio_chip *bgc,
-                        struct device *dev,
-                        unsigned long sz,
-                        void __iomem *dat,
-                        void __iomem *set,
-                        void __iomem *clr,
-                        void __iomem *dirout,
-                        void __iomem *dirin,
-                        bool big_endian);
+int bgpio_remove(struct bgpio_chip *bgc);
+int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
+              unsigned long sz, void __iomem *dat, void __iomem *set,
+              void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
+              bool big_endian);
 
 #endif /* __BASIC_MMIO_GPIO_H */
index 32f0076..71fc53b 100644 (file)
@@ -124,6 +124,7 @@ enum rq_flag_bits {
 
        __REQ_SYNC,             /* request is sync (sync write or read) */
        __REQ_META,             /* metadata io request */
+       __REQ_PRIO,             /* boost priority in cfq */
        __REQ_DISCARD,          /* request to discard sectors */
        __REQ_SECURE,           /* secure discard (used with __REQ_DISCARD) */
 
@@ -161,14 +162,15 @@ enum rq_flag_bits {
 #define REQ_FAILFAST_DRIVER    (1 << __REQ_FAILFAST_DRIVER)
 #define REQ_SYNC               (1 << __REQ_SYNC)
 #define REQ_META               (1 << __REQ_META)
+#define REQ_PRIO               (1 << __REQ_PRIO)
 #define REQ_DISCARD            (1 << __REQ_DISCARD)
 #define REQ_NOIDLE             (1 << __REQ_NOIDLE)
 
 #define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
-       (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
-        REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+       (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
+        REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
 #define REQ_CLONE_MASK         REQ_COMMON_MASK
 
 #define REQ_RAHEAD             (1 << __REQ_RAHEAD)
index 84b15d5..7fbaa91 100644 (file)
@@ -873,7 +873,6 @@ struct blk_plug {
        struct list_head list;
        struct list_head cb_list;
        unsigned int should_sort;
-       unsigned int count;
 };
 #define BLK_MAX_REQUEST_COUNT 16
 
index 3fa1f3d..99e3e50 100644 (file)
@@ -197,6 +197,11 @@ struct dm_target {
         * whether or not its underlying devices have support.
         */
        unsigned discards_supported:1;
+
+       /*
+        * Set if this target does not return zeroes on discarded blocks.
+        */
+       unsigned discard_zeroes_data_unsupported:1;
 };
 
 /* Each target can link one of these into the table */
index c2bd68f..277f497 100644 (file)
@@ -162,10 +162,8 @@ struct inodes_stat_t {
 #define READA                  RWA_MASK
 
 #define READ_SYNC              (READ | REQ_SYNC)
-#define READ_META              (READ | REQ_META)
 #define WRITE_SYNC             (WRITE | REQ_SYNC | REQ_NOIDLE)
 #define WRITE_ODIRECT          (WRITE | REQ_SYNC)
-#define WRITE_META             (WRITE | REQ_META)
 #define WRITE_FLUSH            (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
 #define WRITE_FUA              (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
 #define WRITE_FLUSH_FUA                (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
index 9cf8e7a..6fb743d 100644 (file)
@@ -312,6 +312,7 @@ struct hid_item {
 #define HID_QUIRK_BADPAD                       0x00000020
 #define HID_QUIRK_MULTI_INPUT                  0x00000040
 #define HID_QUIRK_HIDINPUT_FORCE               0x00000080
+#define HID_QUIRK_MULTITOUCH                   0x00000100
 #define HID_QUIRK_SKIP_OUTPUT_REPORTS          0x00010000
 #define HID_QUIRK_FULLSPEED_INTERVAL           0x10000000
 #define HID_QUIRK_NO_INIT_REPORTS              0x20000000
index e807ad6..3ad553e 100644 (file)
@@ -80,6 +80,7 @@ extern void irq_domain_del(struct irq_domain *domain);
 #endif /* CONFIG_IRQ_DOMAIN */
 
 #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
+extern struct irq_domain_ops irq_domain_simple_ops;
 extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
 extern void irq_domain_generate_simple(const struct of_device_id *match,
                                        u64 phys_base, unsigned int irq_start);
index 2c366b5..aace6b8 100644 (file)
@@ -553,6 +553,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_SPAPR_TCE 63
 #define KVM_CAP_PPC_SMT 64
 #define KVM_CAP_PPC_RMA        65
+#define KVM_CAP_S390_GMAP 71
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 3b535db..343bd76 100644 (file)
@@ -39,16 +39,6 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct mem_cgroup *mem_cont,
                                        int active, int file);
 
-struct memcg_scanrecord {
-       struct mem_cgroup *mem; /* scanend memory cgroup */
-       struct mem_cgroup *root; /* scan target hierarchy root */
-       int context;            /* scanning context (see memcontrol.c) */
-       unsigned long nr_scanned[2]; /* the number of scanned pages */
-       unsigned long nr_rotated[2]; /* the number of rotated pages */
-       unsigned long nr_freed[2]; /* the number of freed pages */
-       unsigned long elapsed; /* nsec of time elapsed while scanning */
-};
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -127,15 +117,6 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                        struct task_struct *p);
 
-extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
-                                                 gfp_t gfp_mask, bool noswap,
-                                                 struct memcg_scanrecord *rec);
-extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
-                                               gfp_t gfp_mask, bool noswap,
-                                               struct zone *zone,
-                                               struct memcg_scanrecord *rec,
-                                               unsigned long *nr_scanned);
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
index d12f8d6..97cf4f2 100644 (file)
@@ -26,7 +26,7 @@ struct wm8994_ldo_pdata {
        struct regulator_init_data *init_data;
 };
 
-#define WM8994_CONFIGURE_GPIO 0x8000
+#define WM8994_CONFIGURE_GPIO 0x10000
 
 #define WM8994_DRC_REGS 5
 #define WM8994_EQ_REGS  20
index 76fe2c6..409328d 100644 (file)
@@ -48,11 +48,12 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
  */
 #define LOOKUP_FOLLOW          0x0001
 #define LOOKUP_DIRECTORY       0x0002
+#define LOOKUP_AUTOMOUNT       0x0004
 
 #define LOOKUP_PARENT          0x0010
 #define LOOKUP_REVAL           0x0020
 #define LOOKUP_RCU             0x0040
-#define LOOKUP_NO_AUTOMOUNT    0x0080
+
 /*
  * Intent data
  */
index 8c230cb..9fc0122 100644 (file)
@@ -621,8 +621,9 @@ struct pci_driver {
 extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
 
 enum pcie_bus_config_types {
-       PCIE_BUS_PERFORMANCE,
+       PCIE_BUS_TUNE_OFF,
        PCIE_BUS_SAFE,
+       PCIE_BUS_PERFORMANCE,
        PCIE_BUS_PEER2PEER,
 };
 
index 245bafd..c816075 100644 (file)
@@ -944,8 +944,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched_in(struct task_struct *task);
-extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *prev,
+                                      struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *prev,
+                                       struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1059,17 +1061,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct jump_label_key perf_sched_events;
 
-static inline void perf_event_task_sched_in(struct task_struct *task)
+static inline void perf_event_task_sched_in(struct task_struct *prev,
+                                           struct task_struct *task)
 {
        if (static_branch(&perf_sched_events))
-               __perf_event_task_sched_in(task);
+               __perf_event_task_sched_in(prev, task);
 }
 
-static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+static inline void perf_event_task_sched_out(struct task_struct *prev,
+                                            struct task_struct *next)
 {
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-       __perf_event_task_sched_out(task, next);
+       if (static_branch(&perf_sched_events))
+               __perf_event_task_sched_out(prev, next);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1139,10 +1144,11 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task)                     { }
+perf_event_task_sched_in(struct task_struct *prev,
+                        struct task_struct *task)                      { }
 static inline void
-perf_event_task_sched_out(struct task_struct *task,
-                           struct task_struct *next)                   { }
+perf_event_task_sched_out(struct task_struct *prev,
+                         struct task_struct *next)                     { }
 static inline int perf_event_init_task(struct task_struct *child)      { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)     { }
 static inline void perf_event_free_task(struct task_struct *task)      { }
index e07e274..1dc420b 100644 (file)
@@ -51,6 +51,7 @@
 #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
 
 #define PTP_EV_PORT 319
+#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
 
 #define OFF_ETYPE      12
 #define OFF_IHL                14
@@ -116,14 +117,20 @@ static inline int ptp_filter_init(struct sock_filter *f, int len)
        {OP_OR,         0,   0, PTP_CLASS_IPV6          }, /*              */ \
        {OP_RETA,       0,   0, 0                       }, /*              */ \
 /*L3x*/        {OP_RETK,       0,   0, PTP_CLASS_NONE          }, /*              */ \
-/*L40*/        {OP_JEQ,        0,   6, ETH_P_8021Q             }, /* f goto L50   */ \
+/*L40*/        {OP_JEQ,        0,   9, ETH_P_8021Q             }, /* f goto L50   */ \
        {OP_LDH,        0,   0, OFF_ETYPE + 4           }, /*              */ \
-       {OP_JEQ,        0,   9, ETH_P_1588              }, /* f goto L60   */ \
+       {OP_JEQ,        0,  15, ETH_P_1588              }, /* f goto L60   */ \
+       {OP_LDB,        0,   0, ETH_HLEN + VLAN_HLEN    }, /*              */ \
+       {OP_AND,        0,   0, PTP_GEN_BIT             }, /*              */ \
+       {OP_JEQ,        0,  12, 0                       }, /* f goto L6x   */ \
        {OP_LDH,        0,   0, ETH_HLEN + VLAN_HLEN    }, /*              */ \
        {OP_AND,        0,   0, PTP_CLASS_VMASK         }, /*              */ \
        {OP_OR,         0,   0, PTP_CLASS_VLAN          }, /*              */ \
        {OP_RETA,       0,   0, 0                       }, /*              */ \
-/*L50*/        {OP_JEQ,        0,   4, ETH_P_1588              }, /* f goto L61   */ \
+/*L50*/        {OP_JEQ,        0,   7, ETH_P_1588              }, /* f goto L61   */ \
+       {OP_LDB,        0,   0, ETH_HLEN                }, /*              */ \
+       {OP_AND,        0,   0, PTP_GEN_BIT             }, /*              */ \
+       {OP_JEQ,        0,   4, 0                       }, /* f goto L6x   */ \
        {OP_LDH,        0,   0, ETH_HLEN                }, /*              */ \
        {OP_AND,        0,   0, PTP_CLASS_VMASK         }, /*              */ \
        {OP_OR,         0,   0, PTP_CLASS_L2            }, /*              */ \
index 26f6ea4..b47771a 100644 (file)
@@ -123,7 +123,7 @@ struct regulator_bulk_data {
        const char *supply;
        struct regulator *consumer;
 
-       /* Internal use */
+       /* private: Internal use */
        int ret;
 };
 
index 4ac2c05..41d0237 100644 (file)
@@ -1956,7 +1956,6 @@ static inline void disable_sched_clock_irqtime(void) {}
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
-extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
index 7b996ed..8bd383c 100644 (file)
@@ -524,6 +524,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
                                 gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
index 12b2b18..e16557a 100644 (file)
@@ -231,6 +231,8 @@ enum
        LINUX_MIB_TCPDEFERACCEPTDROP,
        LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
        LINUX_MIB_TCPTIMEWAITOVERFLOW,          /* TCPTimeWaitOverflow */
+       LINUX_MIB_TCPREQQFULLDOCOOKIES,         /* TCPReqQFullDoCookies */
+       LINUX_MIB_TCPREQQFULLDROP,              /* TCPReqQFullDrop */
        __LINUX_MIB_MAX
 };
 
index 14d6249..c71f84b 100644 (file)
@@ -252,6 +252,12 @@ static inline void lru_cache_add_file(struct page *page)
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
 extern int __isolate_lru_page(struct page *page, int mode, int file);
+extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
+                                                 gfp_t gfp_mask, bool noswap);
+extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
+                                               gfp_t gfp_mask, bool noswap,
+                                               struct zone *zone,
+                                               unsigned long *nr_scanned);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
index 342dcf1..a6326ef 100644 (file)
@@ -288,6 +288,35 @@ enum p9_perm_t {
        P9_DMSETVTX = 0x00010000,
 };
 
+/* 9p2000.L open flags */
+#define P9_DOTL_RDONLY        00000000
+#define P9_DOTL_WRONLY        00000001
+#define P9_DOTL_RDWR          00000002
+#define P9_DOTL_NOACCESS      00000003
+#define P9_DOTL_CREATE        00000100
+#define P9_DOTL_EXCL          00000200
+#define P9_DOTL_NOCTTY        00000400
+#define P9_DOTL_TRUNC         00001000
+#define P9_DOTL_APPEND        00002000
+#define P9_DOTL_NONBLOCK      00004000
+#define P9_DOTL_DSYNC         00010000
+#define P9_DOTL_FASYNC        00020000
+#define P9_DOTL_DIRECT        00040000
+#define P9_DOTL_LARGEFILE     00100000
+#define P9_DOTL_DIRECTORY     00200000
+#define P9_DOTL_NOFOLLOW      00400000
+#define P9_DOTL_NOATIME       01000000
+#define P9_DOTL_CLOEXEC       02000000
+#define P9_DOTL_SYNC          04000000
+
+/* 9p2000.L at flags */
+#define P9_DOTL_AT_REMOVEDIR           0x200
+
+/* 9p2000.L lock type */
+#define P9_LOCK_TYPE_RDLCK 0
+#define P9_LOCK_TYPE_WRLCK 1
+#define P9_LOCK_TYPE_UNLCK 2
+
 /**
  * enum p9_qid_t - QID types
  * @P9_QTDIR: directory
index 408ae48..401d73b 100644 (file)
@@ -1744,6 +1744,8 @@ struct wiphy_wowlan_support {
  *     by default for perm_addr. In this case, the mask should be set to
  *     all-zeroes. In this case it is assumed that the device can handle
  *     the same number of arbitrary MAC addresses.
+ * @registered: protects ->resume and ->suspend sysfs callbacks against
+ *     unregister hardware
  * @debugfsdir: debugfs directory used for this wiphy, will be renamed
  *     automatically on wiphy renames
  * @dev: (virtual) struct device for this wiphy
index 78113da..a094477 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef _NET_FLOW_H
 #define _NET_FLOW_H
 
+#include <linux/socket.h>
 #include <linux/in6.h>
 #include <linux/atomic.h>
 
@@ -68,7 +69,7 @@ struct flowi4 {
 #define fl4_ipsec_spi          uli.spi
 #define fl4_mh_type            uli.mht.type
 #define fl4_gre_key            uli.gre_key
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
                                      __u32 mark, __u8 tos, __u8 scope,
@@ -112,7 +113,7 @@ struct flowi6 {
 #define fl6_ipsec_spi          uli.spi
 #define fl6_mh_type            uli.mht.type
 #define fl6_gre_key            uli.gre_key
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 struct flowidn {
        struct flowi_common     __fl_common;
@@ -127,7 +128,7 @@ struct flowidn {
        union flowi_uli         uli;
 #define fld_sport              uli.ports.sport
 #define fld_dport              uli.ports.dport
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 struct flowi {
        union {
@@ -161,6 +162,24 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
        return container_of(fldn, struct flowi, u.dn);
 }
 
+typedef unsigned long flow_compare_t;
+
+static inline size_t flow_key_size(u16 family)
+{
+       switch (family) {
+       case AF_INET:
+               BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
+               return sizeof(struct flowi4) / sizeof(flow_compare_t);
+       case AF_INET6:
+               BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
+               return sizeof(struct flowi6) / sizeof(flow_compare_t);
+       case AF_DECnet:
+               BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
+               return sizeof(struct flowidn) / sizeof(flow_compare_t);
+       }
+       return 0;
+}
+
 #define FLOW_DIR_IN    0
 #define FLOW_DIR_OUT   1
 #define FLOW_DIR_FWD   2
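
The flow.h hunk aligns the flow key structures to the machine word and adds flow_key_size(), whose BUILD_BUG_ONs guarantee that each key is a whole number of flow_compare_t words, so the flow cache can compare keys word by word. Below is a hedged sketch of such a word-wise comparison; struct flow_key and flow_key_equal() are invented for the example and are not the kernel's types or helpers.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    typedef unsigned long flow_compare_t;

    /* A toy key, aligned to the word size so it can be scanned as longs. */
    struct flow_key {
        unsigned int saddr, daddr;
        unsigned short sport, dport;
        unsigned char proto, pad[3];
    } __attribute__((__aligned__(sizeof(flow_compare_t))));

    /* Compare two keys word by word; sz is the size in flow_compare_t units. */
    static int flow_key_equal(const void *a, const void *b, size_t sz)
    {
        const flow_compare_t *pa = a, *pb = b;

        while (sz--)
            if (*pa++ != *pb++)
                return 0;
        return 1;
    }

    int main(void)
    {
        struct flow_key k1, k2;
        size_t words = sizeof(struct flow_key) / sizeof(flow_compare_t);

        memset(&k1, 0, sizeof(k1));
        memset(&k2, 0, sizeof(k2));
        k1.saddr = k2.saddr = 0x0a000001;
        k2.dport = 80;          /* differs in one field */

        printf("equal=%d (over %zu words)\n",
               flow_key_equal(&k1, &k2, words), words);
        return 0;
    }
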
index 99e6e19..4c0766e 100644 (file)
@@ -96,7 +96,8 @@ extern int sysctl_max_syn_backlog;
  */
 struct listen_sock {
        u8                      max_qlen_log;
-       /* 3 bytes hole, try to use */
+       u8                      synflood_warned;
+       /* 2 bytes hole, try to use */
        int                     qlen;
        int                     qlen_young;
        int                     clock_hand;
index 6506458..712b3be 100644 (file)
@@ -109,6 +109,7 @@ typedef enum {
        SCTP_CMD_SEND_MSG,       /* Send the whole use message */
        SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
        SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
+       SCTP_CMD_SET_ASOC,       /* Restore association context */
        SCTP_CMD_LAST
 } sctp_verb_t;
 
index 149a415..acc620a 100644 (file)
@@ -431,17 +431,34 @@ extern int tcp_disconnect(struct sock *sk, int flags);
 extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 
                                    struct ip_options *opt);
+#ifdef CONFIG_SYN_COOKIES
 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 
                                     __u16 *mss);
+#else
+static inline __u32 cookie_v4_init_sequence(struct sock *sk,
+                                           struct sk_buff *skb,
+                                           __u16 *mss)
+{
+       return 0;
+}
+#endif
 
 extern __u32 cookie_init_timestamp(struct request_sock *req);
 extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
 
 /* From net/ipv6/syncookies.c */
 extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
+#ifdef CONFIG_SYN_COOKIES
 extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
                                     __u16 *mss);
-
+#else
+static inline __u32 cookie_v6_init_sequence(struct sock *sk,
+                                           struct sk_buff *skb,
+                                           __u16 *mss)
+{
+       return 0;
+}
+#endif
 /* tcp_output.c */
 
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
@@ -460,6 +477,9 @@ extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 extern int tcp_send_synack(struct sock *);
+extern int tcp_syn_flood_action(struct sock *sk,
+                               const struct sk_buff *skb,
+                               const char *proto);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
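
The tcp.h hunk adds static-inline stubs for cookie_v4_init_sequence()/cookie_v6_init_sequence() when CONFIG_SYN_COOKIES is compiled out, so callers need no #ifdefs of their own. A compile-time sketch of that stub pattern; the hashing in the "enabled" branch is a placeholder, not the real cookie algorithm.

    #include <stdio.h>

    /* Toggle this to mimic CONFIG_SYN_COOKIES being enabled or disabled. */
    /* #define CONFIG_SYN_COOKIES 1 */

    #ifdef CONFIG_SYN_COOKIES
    static unsigned int cookie_init_sequence(unsigned short *mss)
    {
        *mss = 1460;
        return 0x12345678u;     /* placeholder; the real code hashes the 4-tuple */
    }
    #else
    /* Compiled-out stub: callers stay #ifdef-free and simply get 0. */
    static inline unsigned int cookie_init_sequence(unsigned short *mss)
    {
        (void)mss;
        return 0;
    }
    #endif

    int main(void)
    {
        unsigned short mss = 0;
        printf("isn=%#x mss=%hu\n", cookie_init_sequence(&mss), mss);
        return 0;
    }
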
index 5271a74..498433d 100644 (file)
@@ -39,6 +39,7 @@ extern int                    datagram_recv_ctl(struct sock *sk,
                                                  struct sk_buff *skb);
 
 extern int                     datagram_send_ctl(struct net *net,
+                                                 struct sock *sk,
                                                  struct msghdr *msg,
                                                  struct flowi6 *fl6,
                                                  struct ipv6_txoptions *opt,
index 6bca4cc..5f17270 100644 (file)
@@ -298,7 +298,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
-               __field(unsigned long, age)
+               __field(unsigned long, dirtied_when)
                __field(unsigned long, writeback_index)
                __field(long, nr_to_write)
                __field(unsigned long, wrote)
@@ -309,19 +309,19 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
                        dev_name(inode->i_mapping->backing_dev_info->dev), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
-               __entry->age            = (jiffies - inode->dirtied_when) *
-                                                               1000 / HZ;
+               __entry->dirtied_when   = inode->dirtied_when;
                __entry->writeback_index = inode->i_mapping->writeback_index;
                __entry->nr_to_write    = nr_to_write;
                __entry->wrote          = nr_to_write - wbc->nr_to_write;
        ),
 
-       TP_printk("bdi %s: ino=%lu state=%s age=%lu "
+       TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
                  "index=%lu to_write=%ld wrote=%lu",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
-                 __entry->age,
+                 __entry->dirtied_when,
+                 (jiffies - __entry->dirtied_when) / HZ,
                  __entry->writeback_index,
                  __entry->nr_to_write,
                  __entry->wrote
index 9c51ee7..03b408d 100644 (file)
@@ -209,8 +209,19 @@ early_param("quiet", quiet_kernel);
 
 static int __init loglevel(char *str)
 {
-       get_option(&str, &console_loglevel);
-       return 0;
+       int newlevel;
+
+       /*
+        * Only update loglevel value when a correct setting was passed,
+        * to prevent blind crashes (when loglevel being set to 0) that
+        * are quite hard to debug
+        */
+       if (get_option(&str, &newlevel)) {
+               console_loglevel = newlevel;
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 early_param("loglevel", loglevel);
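
The init/main.c hunk stops writing garbage into console_loglevel: get_option()'s return value is now checked, and a malformed loglevel= argument is rejected with -EINVAL instead of silently zeroing the console log level. A userspace sketch of the same parse-then-assign pattern using strtol (get_option itself is kernel-only):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int console_loglevel = 7;        /* current (default) level */

    /* Update console_loglevel only if str really is an integer. */
    static int set_loglevel(const char *str)
    {
        char *end;
        long newlevel;

        errno = 0;
        newlevel = strtol(str, &end, 10);
        if (errno || end == str || *end != '\0')
            return -EINVAL;                 /* keep the old, known-good value */

        console_loglevel = (int)newlevel;
        return 0;
    }

    int main(void)
    {
        printf("\"8\"    -> rc=%d level=%d\n", set_loglevel("8"), console_loglevel);
        printf("\"oops\" -> rc=%d level=%d\n", set_loglevel("oops"), console_loglevel);
        return 0;
    }
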
@@ -370,9 +381,6 @@ static noinline void __init_refok rest_init(void)
        preempt_enable_no_resched();
        schedule();
 
-       /* At this point, we can enable user mode helper functionality */
-       usermodehelper_enable();
-
        /* Call into cpu_idle with preempt disabled */
        preempt_disable();
        cpu_idle();
@@ -722,6 +730,7 @@ static void __init do_basic_setup(void)
        driver_init();
        init_irq_proc();
        do_ctors();
+       usermodehelper_enable();
        do_initcalls();
 }
 
index b8785e2..0f85778 100644 (file)
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
        local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                        struct task_struct *next)
 {
-       perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+       struct perf_cgroup *cgrp1;
+       struct perf_cgroup *cgrp2 = NULL;
+
+       /*
+        * we come here when we know perf_cgroup_events > 0
+        */
+       cgrp1 = perf_cgroup_from_task(task);
+
+       /*
+        * next is NULL when called from perf_event_enable_on_exec()
+        * that will systematically cause a cgroup_switch()
+        */
+       if (next)
+               cgrp2 = perf_cgroup_from_task(next);
+
+       /*
+        * only schedule out current cgroup events if we know
+        * that we are switching to a different cgroup. Otherwise,
+        * do no touch the cgroup events.
+        */
+       if (cgrp1 != cgrp2)
+               perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                       struct task_struct *task)
 {
-       perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+       struct perf_cgroup *cgrp1;
+       struct perf_cgroup *cgrp2 = NULL;
+
+       /*
+        * we come here when we know perf_cgroup_events > 0
+        */
+       cgrp1 = perf_cgroup_from_task(task);
+
+       /* prev can never be NULL */
+       cgrp2 = perf_cgroup_from_task(prev);
+
+       /*
+        * only need to schedule in cgroup events if we are changing
+        * cgroup during ctxsw. Cgroup events were not scheduled
+        * out of ctxsw out if that was not the case.
+        */
+       if (cgrp1 != cgrp2)
+               perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
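
The hunks above hand both the previous and the next task to the cgroup sched-in/out helpers so perf_cgroup_switch() runs only when the two tasks belong to different perf cgroups; a context switch within one cgroup no longer pays for tearing events down and scheduling them back in. A toy model of that skip-if-same-owner check, with invented structures rather than the kernel's:

    #include <stdio.h>

    struct cgroup { const char *name; };
    struct task   { const char *comm; struct cgroup *cgrp; };

    /* Stand-in for the expensive perf_cgroup_switch() work. */
    static void cgroup_switch(struct task *t, const char *mode)
    {
        printf("  cgroup_switch(%s) for %s\n", mode, t->comm);
    }

    /* Only switch cgroup events when prev and next live in different cgroups. */
    static void sched_out(struct task *prev, struct task *next)
    {
        if (prev->cgrp != next->cgrp)
            cgroup_switch(prev, "out");
    }

    int main(void)
    {
        struct cgroup web = { "web" }, batch = { "batch" };
        struct task a = { "a", &web }, b = { "b", &web }, c = { "c", &batch };

        printf("a -> b (same cgroup, no work):\n");
        sched_out(&a, &b);
        printf("b -> c (different cgroup):\n");
        sched_out(&b, &c);
        return 0;
    }
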
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                        struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                       struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
         * cgroup event are system-wide mode only
         */
        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-               perf_cgroup_sched_out(task);
+               perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+                               struct task_struct *task)
 {
        struct perf_event_context *ctx;
        int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
         * cgroup event are system-wide mode only
         */
        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-               perf_cgroup_sched_in(task);
+               perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
         * ctxswin cgroup events which are already scheduled
         * in.
         */
-       perf_cgroup_sched_out(current);
+       perf_cgroup_sched_out(current, NULL);
 
        raw_spin_lock(&ctx->lock);
        task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-                               u64 *running,
-                               u64 *enabled)
+                               u64 *enabled,
+                               u64 *running)
 {
        u64 now, ctx_time;
 
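The comments added to perf_cgroup_sched_out()/perf_cgroup_sched_in() above boil down to one check: per-cgroup event state is switched only when the outgoing and incoming tasks belong to different cgroups. A minimal userspace sketch of that check follows; group_of() and switch_group_state() are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

struct group { const char *name; };
struct task  { const char *comm; struct group *grp; };

/* illustrative stand-ins for perf_cgroup_from_task()/perf_cgroup_switch() */
static struct group *group_of(struct task *t) { return t->grp; }

static void switch_group_state(struct task *t)
{
        printf("%s: switching cgroup events\n", t->comm);
}

static void sched_out(struct task *task, struct task *next)
{
        /* next == NULL forces a switch, mirroring the enable-on-exec path */
        if (!next || group_of(task) != group_of(next))
                switch_group_state(task);
}

int main(void)
{
        struct group web = { "web" }, db = { "db" };
        struct task a = { "httpd", &web }, b = { "httpd-2", &web }, c = { "mysqld", &db };

        sched_out(&a, &b);      /* same cgroup: nothing to do */
        sched_out(&a, &c);      /* different cgroup: events are switched out */
        sched_out(&a, NULL);    /* forced switch (enable-on-exec case) */
        return 0;
}

Only the second and third calls actually switch state; skipping the first is the saving this patch is after.
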
index d5a3009..dc5114b 100644 (file)
@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
        desc->depth = 1;
        if (desc->irq_data.chip->irq_shutdown)
                desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-       if (desc->irq_data.chip->irq_disable)
+       else if (desc->irq_data.chip->irq_disable)
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_mask(&desc->irq_data);
index d5828da..b57a377 100644 (file)
@@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain)
         */
        for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
                d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-               if (d || d->domain) {
+               if (!d) {
+                       WARN(1, "error: assigning domain to non-existent irq_desc");
+                       return;
+               }
+               if (d->domain) {
                        /* things are broken; just report, don't clean up */
                        WARN(1, "error: irq_desc already assigned to a domain");
                        return;
index 58f405b..c8008dd 100644 (file)
@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
        do {
                times->utime = cputime_add(times->utime, t->utime);
                times->stime = cputime_add(times->stime, t->stime);
-               times->sum_exec_runtime += t->se.sum_exec_runtime;
+               times->sum_exec_runtime += task_sched_runtime(t);
        } while_each_thread(tsk, t);
 out:
        rcu_read_unlock();
@@ -312,7 +312,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
-               cpu->sched = thread_group_sched_runtime(p);
+               thread_group_cputime(p, &cputime);
+               cpu->sched = cputime.sum_exec_runtime;
                break;
        }
        return 0;
index 9de3ecf..a70d2a5 100644 (file)
@@ -744,20 +744,17 @@ int ptrace_request(struct task_struct *child, long request,
                        break;
 
                si = child->last_siginfo;
-               if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
-                       break;
-
-               child->jobctl |= JOBCTL_LISTENING;
-
-               /*
-                * If NOTIFY is set, it means event happened between start
-                * of this trap and now.  Trigger re-trap immediately.
-                */
-               if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-                       signal_wake_up(child, true);
-
+               if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
+                       child->jobctl |= JOBCTL_LISTENING;
+                       /*
+                        * If NOTIFY is set, it means an event happened between
+                        * start of this trap and now.  Trigger re-trap.
+                        */
+                       if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+                               signal_wake_up(child, true);
+                       ret = 0;
+               }
                unlock_task_sighand(child, &flags);
-               ret = 0;
                break;
 
        case PTRACE_DETACH:      /* detach a process that was attached. */
index 3b3cedc..c8dc249 100644 (file)
@@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old,
                else
                        tmp.end = root->end;
 
+               if (tmp.end < tmp.start)
+                       goto next;
+
                resource_clip(&tmp, constraint->min, constraint->max);
                arch_remove_reservations(&tmp);
 
@@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old,
                                return 0;
                        }
                }
-               if (!this)
+
+next:          if (!this || this->end == root->end)
                        break;
+
                if (this != old)
                        tmp.start = this->end + 1;
                this = this->sibling;
index ccacdbd..b50b0f0 100644 (file)
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-       perf_event_task_sched_in(current);
+       perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -3724,30 +3724,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
        return ns;
 }
 
-/*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
-       struct task_cputime totals;
-       unsigned long flags;
-       struct rq *rq;
-       u64 ns;
-
-       rq = task_rq_lock(p, &flags);
-       thread_group_cputime(p, &totals);
-       ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
-       task_rq_unlock(rq, p, &flags);
-
-       return ns;
-}
-
 /*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
@@ -4279,9 +4255,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
@@ -4322,16 +4298,6 @@ need_resched:
                                if (to_wakeup)
                                        try_to_wake_up_local(to_wakeup);
                        }
-
-                       /*
-                        * If we are going to sleep and we have plugged IO
-                        * queued, make sure to submit it to avoid deadlocks.
-                        */
-                       if (blk_needs_flush_plug(prev)) {
-                               raw_spin_unlock(&rq->lock);
-                               blk_schedule_flush_plug(prev);
-                               raw_spin_lock(&rq->lock);
-                       }
                }
                switch_count = &prev->nvcsw;
        }
@@ -4369,6 +4335,26 @@ need_resched:
        if (need_resched())
                goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+       if (!tsk->state)
+               return;
+       /*
+        * If we are going to sleep and we have plugged IO queued,
+        * make sure to submit it to avoid deadlocks.
+        */
+       if (blk_needs_flush_plug(tsk))
+               blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void __sched schedule(void)
+{
+       struct task_struct *tsk = current;
+
+       sched_submit_work(tsk);
+       __schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4421,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
        do {
                add_preempt_count_notrace(PREEMPT_ACTIVE);
-               schedule();
+               __schedule();
                sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
                /*
@@ -4463,7 +4449,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                local_irq_enable();
-               schedule();
+               __schedule();
                local_irq_disable();
                sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5574,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
        add_preempt_count(PREEMPT_ACTIVE);
-       schedule();
+       __schedule();
        sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7429,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
                        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
                        if (sd && (sd->flags & SD_OVERLAP))
                                free_sched_groups(sd->groups, 0);
+                       kfree(*per_cpu_ptr(sdd->sd, j));
                        kfree(*per_cpu_ptr(sdd->sg, j));
                        kfree(*per_cpu_ptr(sdd->sgp, j));
                }
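The sched.c hunks above split schedule() into a thin entry point and the core __schedule(): blocking callers flush any plugged block I/O via sched_submit_work() before the runqueue lock is taken, while preemption paths (preempt_schedule(), __cond_resched(), and so on) call __schedule() directly. A rough standalone sketch of that shape, with stand-in names rather than the real block-layer helpers:

#include <stdbool.h>
#include <stdio.h>

struct task { int state; bool has_plugged_io; };

/* stand-ins for blk_needs_flush_plug()/blk_schedule_flush_plug() */
static bool needs_flush(struct task *t) { return t->has_plugged_io; }
static void flush_plug(struct task *t) { t->has_plugged_io = false; puts("flushed plugged I/O"); }

static void core_schedule(void) { puts("__schedule(): pick next task"); }

static void submit_work(struct task *t)
{
        if (!t->state)          /* still runnable: not going to sleep */
                return;
        if (needs_flush(t))     /* about to sleep: submit I/O to avoid deadlock */
                flush_plug(t);
}

static void schedule_blocking(struct task *t)
{
        submit_work(t);         /* done without any runqueue lock held */
        core_schedule();
}

int main(void)
{
        struct task t = { .state = 1, .has_plugged_io = true };

        schedule_blocking(&t);  /* blocking entry point: flushes first */
        core_schedule();        /* preemption paths call the core directly */
        return 0;
}

Running this prints the flush only on the blocking path, mirroring where sched_submit_work() sits in the patch.
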
index 97540f0..af11778 100644 (file)
@@ -1050,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
         */
        if (curr && unlikely(rt_task(curr)) &&
            (curr->rt.nr_cpus_allowed < 2 ||
-            curr->prio < p->prio) &&
+            curr->prio <= p->prio) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);
 
@@ -1581,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
            p->rt.nr_cpus_allowed > 1 &&
            rt_task(rq->curr) &&
            (rq->curr->rt.nr_cpus_allowed < 2 ||
-            rq->curr->prio < p->prio))
+            rq->curr->prio <= p->prio))
                push_rt_tasks(rq);
 }
 
index e19ce14..e660464 100644 (file)
@@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = {
        .cmd            = TASKSTATS_CMD_GET,
        .doit           = taskstats_user_cmd,
        .policy         = taskstats_cmd_get_policy,
+       .flags          = GENL_ADMIN_PERM,
 };
 
 static struct genl_ops cgroupstats_ops = {
index 59f369f..ea5e1a9 100644 (file)
@@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 static void alarm_timer_get(struct k_itimer *timr,
                                struct itimerspec *cur_setting)
 {
+       memset(cur_setting, 0, sizeof(struct itimerspec));
+
        cur_setting->it_interval =
                        ktime_to_timespec(timr->it.alarmtimer.period);
        cur_setting->it_value =
@@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
        if (!rtcdev)
                return -ENOTSUPP;
 
-       /* Save old values */
-       old_setting->it_interval =
-                       ktime_to_timespec(timr->it.alarmtimer.period);
-       old_setting->it_value =
-                       ktime_to_timespec(timr->it.alarmtimer.node.expires);
+       /*
+        * XXX HACK! Currently we can DOS a system if the interval
+        * period on alarmtimers is too small. Cap the interval here
+        * to 100us and solve this properly in a future patch! -jstultz
+        */
+       if ((new_setting->it_interval.tv_sec == 0) &&
+                       (new_setting->it_interval.tv_nsec < 100000))
+               new_setting->it_interval.tv_nsec = 100000;
+
+       if (old_setting)
+               alarm_timer_get(timr, old_setting);
 
        /* If the timer was already set, cancel it */
        alarm_cancel(&timr->it.alarmtimer);
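The alarmtimer hunk above caps very small periodic intervals at 100us to avoid a denial of service. A self-contained sketch of the same clamp on a struct timespec (values chosen for illustration):

#include <stdio.h>
#include <time.h>

#define MIN_INTERVAL_NS 100000L         /* the 100us cap from the hunk above */

static void clamp_interval(struct timespec *it)
{
        if (it->tv_sec == 0 && it->tv_nsec < MIN_INTERVAL_NS)
                it->tv_nsec = MIN_INTERVAL_NS;
}

int main(void)
{
        struct timespec iv = { .tv_sec = 0, .tv_nsec = 5000 }; /* 5us requested */

        clamp_interval(&iv);
        printf("interval now %ld ns\n", iv.tv_nsec);           /* -> 100000 */
        return 0;
}
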
index 24dc60d..5bbfac8 100644 (file)
@@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 
 #define KB 1024
 #define MB (1024*KB)
+#define KB_MASK (~(KB-1))
 /*
  * fill in extended accounting fields
  */
@@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
                stats->hiwater_vm    = get_mm_hiwater_vm(mm)  * PAGE_SIZE / KB;
                mmput(mm);
        }
-       stats->read_char        = p->ioac.rchar;
-       stats->write_char       = p->ioac.wchar;
-       stats->read_syscalls    = p->ioac.syscr;
-       stats->write_syscalls   = p->ioac.syscw;
+       stats->read_char        = p->ioac.rchar & KB_MASK;
+       stats->write_char       = p->ioac.wchar & KB_MASK;
+       stats->read_syscalls    = p->ioac.syscr & KB_MASK;
+       stats->write_syscalls   = p->ioac.syscw & KB_MASK;
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-       stats->read_bytes       = p->ioac.read_bytes;
-       stats->write_bytes      = p->ioac.write_bytes;
-       stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes;
+       stats->read_bytes       = p->ioac.read_bytes & KB_MASK;
+       stats->write_bytes      = p->ioac.write_bytes & KB_MASK;
+       stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
 #else
        stats->read_bytes       = 0;
        stats->write_bytes      = 0;
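KB_MASK above is ~(KB-1), so AND-ing a counter with it truncates the value down to a whole multiple of 1024 before it is reported. A tiny worked example (standalone, not the kernel code):

#include <stdio.h>

#define KB 1024
#define KB_MASK (~(KB - 1))

int main(void)
{
        unsigned long long rchar = 5300;                        /* 5 KiB plus 180 bytes */

        printf("%llu -> %llu\n", rchar, rchar & KB_MASK);       /* 5300 -> 5120 */
        return 0;
}
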
index 25fb1b0..1783aab 100644 (file)
@@ -2412,8 +2412,13 @@ reflush:
 
        for_each_cwq_cpu(cpu, wq) {
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+               bool drained;
 
-               if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+               spin_lock_irq(&cwq->gcwq->lock);
+               drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+               spin_unlock_irq(&cwq->gcwq->lock);
+
+               if (drained)
                        continue;
 
                if (++flush_cnt == 10 ||
index f33271d..1de509a 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
+#include <linux/cryptohash.h>
 #include <asm/unaligned.h>
 
 /*
index e51e255..a768e6d 100644 (file)
@@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
         * next filter in the chain. Apply the BCJ filter on the new data
         * in the output buffer. If everything cannot be filtered, copy it
         * to temp and rewind the output buffer position accordingly.
+        *
+        * This always needs to be run when temp.size == 0 to handle a special
+        * case where the output buffer is full and the next filter has no
+        * more output coming but hasn't returned XZ_STREAM_END yet.
         */
-       if (s->temp.size < b->out_size - b->out_pos) {
+       if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
                out_start = b->out_pos;
                memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
                b->out_pos += s->temp.size;
@@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
                s->temp.size = b->out_pos - out_start;
                b->out_pos -= s->temp.size;
                memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
+
+               /*
+                * If there wasn't enough input to the next filter to fill
+                * the output buffer with unfiltered data, there's no point
+                * in trying to decode more data to temp.
+                */
+               if (b->out_pos + s->temp.size < b->out_size)
+                       return XZ_OK;
        }
 
        /*
-        * If we have unfiltered data in temp, try to fill by decoding more
-        * data from the next filter. Apply the BCJ filter on temp. Then we
-        * hopefully can fill the actual output buffer by copying filtered
-        * data from temp. A mix of filtered and unfiltered data may be left
-        * in temp; it will be taken care on the next call to this function.
+        * We have unfiltered data in temp. If the output buffer isn't full
+        * yet, try to fill the temp buffer by decoding more data from the
+        * next filter. Apply the BCJ filter on temp. Then we hopefully can
+        * fill the actual output buffer by copying filtered data from temp.
+        * A mix of filtered and unfiltered data may be left in temp; it will
+        * be taken care of on the next call to this function.
         */
-       if (s->temp.size > 0) {
+       if (b->out_pos < b->out_size) {
                /* Make b->out{,_pos,_size} temporarily point to s->temp. */
                s->out = b->out;
                s->out_pos = b->out_pos;
index d6edf8d..a87da52 100644 (file)
@@ -359,6 +359,17 @@ static unsigned long bdi_longest_inactive(void)
        return max(5UL * 60 * HZ, interval);
 }
 
+/*
+ * Clear pending bit and wakeup anybody waiting for flusher thread creation or
+ * shutdown
+ */
+static void bdi_clear_pending(struct backing_dev_info *bdi)
+{
+       clear_bit(BDI_pending, &bdi->state);
+       smp_mb__after_clear_bit();
+       wake_up_bit(&bdi->state, BDI_pending);
+}
+
 static int bdi_forker_thread(void *ptr)
 {
        struct bdi_writeback *me = ptr;
@@ -390,6 +401,13 @@ static int bdi_forker_thread(void *ptr)
                }
 
                spin_lock_bh(&bdi_lock);
+               /*
+                * In the following loop we are going to check whether we have
+                * some work to do without any synchronization with tasks
+                * waking us up to do work for them. So we have to set task
+                * state already here so that we don't miss wakeups coming
+                * after we verify some condition.
+                */
                set_current_state(TASK_INTERRUPTIBLE);
 
                list_for_each_entry(bdi, &bdi_list, bdi_list) {
@@ -469,11 +487,13 @@ static int bdi_forker_thread(void *ptr)
                                spin_unlock_bh(&bdi->wb_lock);
                                wake_up_process(task);
                        }
+                       bdi_clear_pending(bdi);
                        break;
 
                case KILL_THREAD:
                        __set_current_state(TASK_RUNNING);
                        kthread_stop(task);
+                       bdi_clear_pending(bdi);
                        break;
 
                case NO_ACTION:
@@ -489,16 +509,8 @@ static int bdi_forker_thread(void *ptr)
                        else
                                schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
                        try_to_freeze();
-                       /* Back to the main loop */
-                       continue;
+                       break;
                }
-
-               /*
-                * Clear pending bit and wakeup anybody waiting to tear us down.
-                */
-               clear_bit(BDI_pending, &bdi->state);
-               smp_mb__after_clear_bit();
-               wake_up_bit(&bdi->state, BDI_pending);
        }
 
        return 0;
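The new comment in bdi_forker_thread() is about lost wakeups: the task state is set to TASK_INTERRUPTIBLE before the work-checking loop, so a wakeup that arrives after a condition has been checked still prevents the thread from sleeping through it. A loose userspace analogue of the same ordering concern, using a mutex and condition variable (illustrative only, not the kernel mechanism; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int have_work;

static void *forker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!have_work)                       /* check the condition under the lock... */
                pthread_cond_wait(&cond, &lock); /* ...and sleep atomically, so a wakeup
                                                  * between check and sleep cannot be lost */
        puts("forker: work found");
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, forker, NULL);

        pthread_mutex_lock(&lock);
        have_work = 1;                  /* publish work... */
        pthread_cond_signal(&cond);     /* ...then wake the waiter */
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
}
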
index 645a080..7771871 100644 (file)
@@ -827,13 +827,14 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 {
        unsigned int i;
        unsigned int ret;
-       unsigned int nr_found;
+       unsigned int nr_found, nr_skip;
 
        rcu_read_lock();
 restart:
        nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
                                (void ***)pages, NULL, start, nr_pages);
        ret = 0;
+       nr_skip = 0;
        for (i = 0; i < nr_found; i++) {
                struct page *page;
 repeat:
@@ -856,6 +857,7 @@ repeat:
                         * here as an exceptional entry: so skip over it -
                         * we only reach this from invalidate_mapping_pages().
                         */
+                       nr_skip++;
                        continue;
                }
 
@@ -876,7 +878,7 @@ repeat:
         * If all entries were removed before we could secure them,
         * try again, because callers stop trying once 0 is returned.
         */
-       if (unlikely(!ret && nr_found))
+       if (unlikely(!ret && nr_found > nr_skip))
                goto restart;
        rcu_read_unlock();
        return ret;
index ebd1e86..3508777 100644 (file)
@@ -204,50 +204,6 @@ struct mem_cgroup_eventfd_list {
 static void mem_cgroup_threshold(struct mem_cgroup *mem);
 static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
 
-enum {
-       SCAN_BY_LIMIT,
-       SCAN_BY_SYSTEM,
-       NR_SCAN_CONTEXT,
-       SCAN_BY_SHRINK, /* not recorded now */
-};
-
-enum {
-       SCAN,
-       SCAN_ANON,
-       SCAN_FILE,
-       ROTATE,
-       ROTATE_ANON,
-       ROTATE_FILE,
-       FREED,
-       FREED_ANON,
-       FREED_FILE,
-       ELAPSED,
-       NR_SCANSTATS,
-};
-
-struct scanstat {
-       spinlock_t      lock;
-       unsigned long   stats[NR_SCAN_CONTEXT][NR_SCANSTATS];
-       unsigned long   rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS];
-};
-
-const char *scanstat_string[NR_SCANSTATS] = {
-       "scanned_pages",
-       "scanned_anon_pages",
-       "scanned_file_pages",
-       "rotated_pages",
-       "rotated_anon_pages",
-       "rotated_file_pages",
-       "freed_pages",
-       "freed_anon_pages",
-       "freed_file_pages",
-       "elapsed_ns",
-};
-#define SCANSTAT_WORD_LIMIT    "_by_limit"
-#define SCANSTAT_WORD_SYSTEM   "_by_system"
-#define SCANSTAT_WORD_HIERARCHY        "_under_hierarchy"
-
-
 /*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
@@ -313,8 +269,7 @@ struct mem_cgroup {
 
        /* For oom notifier event fd */
        struct list_head oom_notify;
-       /* For recording LRU-scan statistics */
-       struct scanstat scanstat;
+
        /*
         * Should we move charges of a task when a task is moved into this
         * mem_cgroup ? And what type of charges should we move ?
@@ -1678,44 +1633,6 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
 }
 #endif
 
-static void __mem_cgroup_record_scanstat(unsigned long *stats,
-                          struct memcg_scanrecord *rec)
-{
-
-       stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1];
-       stats[SCAN_ANON] += rec->nr_scanned[0];
-       stats[SCAN_FILE] += rec->nr_scanned[1];
-
-       stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1];
-       stats[ROTATE_ANON] += rec->nr_rotated[0];
-       stats[ROTATE_FILE] += rec->nr_rotated[1];
-
-       stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1];
-       stats[FREED_ANON] += rec->nr_freed[0];
-       stats[FREED_FILE] += rec->nr_freed[1];
-
-       stats[ELAPSED] += rec->elapsed;
-}
-
-static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec)
-{
-       struct mem_cgroup *mem;
-       int context = rec->context;
-
-       if (context >= NR_SCAN_CONTEXT)
-               return;
-
-       mem = rec->mem;
-       spin_lock(&mem->scanstat.lock);
-       __mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec);
-       spin_unlock(&mem->scanstat.lock);
-
-       mem = rec->root;
-       spin_lock(&mem->scanstat.lock);
-       __mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec);
-       spin_unlock(&mem->scanstat.lock);
-}
-
 /*
  * Scan the hierarchy if needed to reclaim memory. We remember the last child
  * we reclaimed from, so that we don't end up penalizing one child extensively
@@ -1740,9 +1657,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
        bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
        bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
-       struct memcg_scanrecord rec;
        unsigned long excess;
-       unsigned long scanned;
+       unsigned long nr_scanned;
 
        excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
 
@@ -1750,15 +1666,6 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        if (!check_soft && !shrink && root_mem->memsw_is_minimum)
                noswap = true;
 
-       if (shrink)
-               rec.context = SCAN_BY_SHRINK;
-       else if (check_soft)
-               rec.context = SCAN_BY_SYSTEM;
-       else
-               rec.context = SCAN_BY_LIMIT;
-
-       rec.root = root_mem;
-
        while (1) {
                victim = mem_cgroup_select_victim(root_mem);
                if (victim == root_mem) {
@@ -1799,23 +1706,14 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                        css_put(&victim->css);
                        continue;
                }
-               rec.mem = victim;
-               rec.nr_scanned[0] = 0;
-               rec.nr_scanned[1] = 0;
-               rec.nr_rotated[0] = 0;
-               rec.nr_rotated[1] = 0;
-               rec.nr_freed[0] = 0;
-               rec.nr_freed[1] = 0;
-               rec.elapsed = 0;
                /* we use swappiness of local cgroup */
                if (check_soft) {
                        ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
-                               noswap, zone, &rec, &scanned);
-                       *total_scanned += scanned;
+                               noswap, zone, &nr_scanned);
+                       *total_scanned += nr_scanned;
                } else
                        ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
-                                               noswap, &rec);
-               mem_cgroup_record_scanstat(&rec);
+                                               noswap);
                css_put(&victim->css);
                /*
                 * At shrinking usage, we can't check we should stop here or
@@ -3854,18 +3752,14 @@ try_to_free:
        /* try to free all pages in this cgroup */
        shrink = 1;
        while (nr_retries && mem->res.usage > 0) {
-               struct memcg_scanrecord rec;
                int progress;
 
                if (signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }
-               rec.context = SCAN_BY_SHRINK;
-               rec.mem = mem;
-               rec.root = mem;
                progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
-                                               false, &rec);
+                                               false);
                if (!progress) {
                        nr_retries--;
                        /* maybe some writeback is necessary */
@@ -4709,54 +4603,6 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
 }
 #endif /* CONFIG_NUMA */
 
-static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp,
-                               struct cftype *cft,
-                               struct cgroup_map_cb *cb)
-{
-       struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
-       char string[64];
-       int i;
-
-       for (i = 0; i < NR_SCANSTATS; i++) {
-               strcpy(string, scanstat_string[i]);
-               strcat(string, SCANSTAT_WORD_LIMIT);
-               cb->fill(cb, string,  mem->scanstat.stats[SCAN_BY_LIMIT][i]);
-       }
-
-       for (i = 0; i < NR_SCANSTATS; i++) {
-               strcpy(string, scanstat_string[i]);
-               strcat(string, SCANSTAT_WORD_SYSTEM);
-               cb->fill(cb, string,  mem->scanstat.stats[SCAN_BY_SYSTEM][i]);
-       }
-
-       for (i = 0; i < NR_SCANSTATS; i++) {
-               strcpy(string, scanstat_string[i]);
-               strcat(string, SCANSTAT_WORD_LIMIT);
-               strcat(string, SCANSTAT_WORD_HIERARCHY);
-               cb->fill(cb, string,  mem->scanstat.rootstats[SCAN_BY_LIMIT][i]);
-       }
-       for (i = 0; i < NR_SCANSTATS; i++) {
-               strcpy(string, scanstat_string[i]);
-               strcat(string, SCANSTAT_WORD_SYSTEM);
-               strcat(string, SCANSTAT_WORD_HIERARCHY);
-               cb->fill(cb, string,  mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]);
-       }
-       return 0;
-}
-
-static int mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp,
-                               unsigned int event)
-{
-       struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
-
-       spin_lock(&mem->scanstat.lock);
-       memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats));
-       memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats));
-       spin_unlock(&mem->scanstat.lock);
-       return 0;
-}
-
-
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
@@ -4827,11 +4673,6 @@ static struct cftype mem_cgroup_files[] = {
                .mode = S_IRUGO,
        },
 #endif
-       {
-               .name = "vmscan_stat",
-               .read_map = mem_cgroup_vmscan_stat_read,
-               .trigger = mem_cgroup_reset_vmscan_stat,
-       },
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -5095,7 +4936,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        atomic_set(&mem->refcnt, 1);
        mem->move_charge_at_immigrate = 0;
        mutex_init(&mem->thresholds_lock);
-       spin_lock_init(&mem->scanstat.lock);
        return &mem->css;
 free_out:
        __mem_cgroup_free(mem);
index 8b57173..9c51f9f 100644 (file)
@@ -636,7 +636,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
-       pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;
 
@@ -649,9 +648,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                vmstart = max(start, vma->vm_start);
                vmend   = min(end, vma->vm_end);
 
-               pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-                                 vma->anon_vma, vma->vm_file, pgoff, new_pol);
+                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+                                 new_pol);
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
@@ -1412,7 +1411,9 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy,
        err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
 
        if (!err && nmask) {
-               err = copy_from_user(bm, nm, alloc_size);
+               unsigned long copy_size;
+               copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
+               err = copy_from_user(bm, nm, copy_size);
                /* ensure entire bitmap is zeroed */
                err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
                err |= compat_put_bitmap(nmask, bm, nr_bits);
index 9f662d7..7c54fe8 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 */
                if (unlikely(!prior)) {
                        remove_full(s, page);
-                       add_partial(n, page, 0);
+                       add_partial(n, page, 1);
                        stat(s, FREE_ADD_PARTIAL);
                }
        }
index 7ef0903..5016f19 100644 (file)
@@ -2140,6 +2140,14 @@ struct vm_struct *alloc_vm_area(size_t size)
                return NULL;
        }
 
+       /*
+        * If the allocated address space is passed to a hypercall
+        * before being used then we cannot rely on a page fault to
+        * trigger an update of the page tables.  So sync all the page
+        * tables here.
+        */
+       vmalloc_sync_all();
+
        return area;
 }
 EXPORT_SYMBOL_GPL(alloc_vm_area);
index b7719ec..b55699c 100644 (file)
@@ -105,7 +105,6 @@ struct scan_control {
 
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
-       struct memcg_scanrecord *memcg_record;
 
        /*
         * Nodemask of nodes allowed by the caller. If NULL, all nodes
@@ -1349,8 +1348,6 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
                        int file = is_file_lru(lru);
                        int numpages = hpage_nr_pages(page);
                        reclaim_stat->recent_rotated[file] += numpages;
-                       if (!scanning_global_lru(sc))
-                               sc->memcg_record->nr_rotated[file] += numpages;
                }
                if (!pagevec_add(&pvec, page)) {
                        spin_unlock_irq(&zone->lru_lock);
@@ -1394,10 +1391,6 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
 
        reclaim_stat->recent_scanned[0] += *nr_anon;
        reclaim_stat->recent_scanned[1] += *nr_file;
-       if (!scanning_global_lru(sc)) {
-               sc->memcg_record->nr_scanned[0] += *nr_anon;
-               sc->memcg_record->nr_scanned[1] += *nr_file;
-       }
 }
 
 /*
@@ -1511,9 +1504,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                nr_reclaimed += shrink_page_list(&page_list, zone, sc);
        }
 
-       if (!scanning_global_lru(sc))
-               sc->memcg_record->nr_freed[file] += nr_reclaimed;
-
        local_irq_disable();
        if (current_is_kswapd())
                __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
@@ -1613,8 +1603,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        }
 
        reclaim_stat->recent_scanned[file] += nr_taken;
-       if (!scanning_global_lru(sc))
-               sc->memcg_record->nr_scanned[file] += nr_taken;
 
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        if (file)
@@ -1666,8 +1654,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * get_scan_ratio.
         */
        reclaim_stat->recent_rotated[file] += nr_rotated;
-       if (!scanning_global_lru(sc))
-               sc->memcg_record->nr_rotated[file] += nr_rotated;
 
        move_active_pages_to_lru(zone, &l_active,
                                                LRU_ACTIVE + file * LRU_FILE);
@@ -1808,23 +1794,15 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        u64 fraction[2], denominator;
        enum lru_list l;
        int noswap = 0;
-       int force_scan = 0;
+       bool force_scan = false;
        unsigned long nr_force_scan[2];
 
-
-       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
-       if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
-               /* kswapd does zone balancing and need to scan this zone */
-               if (scanning_global_lru(sc) && current_is_kswapd())
-                       force_scan = 1;
-               /* memcg may have small limit and need to avoid priority drop */
-               if (!scanning_global_lru(sc))
-                       force_scan = 1;
-       }
+       /* kswapd does zone balancing and needs to scan this zone */
+       if (scanning_global_lru(sc) && current_is_kswapd())
+               force_scan = true;
+       /* memcg may have small limit and need to avoid priority drop */
+       if (!scanning_global_lru(sc))
+               force_scan = true;
 
        /* If we have no swap space, do not bother scanning anon pages. */
        if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1837,6 +1815,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                goto out;
        }
 
+       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
        if (scanning_global_lru(sc)) {
                free  = zone_page_state(zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
@@ -2268,10 +2251,9 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
-                                       gfp_t gfp_mask, bool noswap,
-                                       struct zone *zone,
-                                       struct memcg_scanrecord *rec,
-                                       unsigned long *scanned)
+                                               gfp_t gfp_mask, bool noswap,
+                                               struct zone *zone,
+                                               unsigned long *nr_scanned)
 {
        struct scan_control sc = {
                .nr_scanned = 0,
@@ -2281,9 +2263,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                .may_swap = !noswap,
                .order = 0,
                .mem_cgroup = mem,
-               .memcg_record = rec,
        };
-       ktime_t start, end;
 
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2292,7 +2272,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                      sc.may_writepage,
                                                      sc.gfp_mask);
 
-       start = ktime_get();
        /*
         * NOTE: Although we can get the priority field, using it
         * here is not a good idea, since it limits the pages we can scan.
@@ -2301,25 +2280,19 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
         * the priority and make it zero.
         */
        shrink_zone(0, zone, &sc);
-       end = ktime_get();
-
-       if (rec)
-               rec->elapsed += ktime_to_ns(ktime_sub(end, start));
-       *scanned = sc.nr_scanned;
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
+       *nr_scanned = sc.nr_scanned;
        return sc.nr_reclaimed;
 }
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                           gfp_t gfp_mask,
-                                          bool noswap,
-                                          struct memcg_scanrecord *rec)
+                                          bool noswap)
 {
        struct zonelist *zonelist;
        unsigned long nr_reclaimed;
-       ktime_t start, end;
        int nid;
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
@@ -2328,7 +2301,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .order = 0,
                .mem_cgroup = mem_cont,
-               .memcg_record = rec,
                .nodemask = NULL, /* we don't care the placement */
                .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                                (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
@@ -2337,7 +2309,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .gfp_mask = sc.gfp_mask,
        };
 
-       start = ktime_get();
        /*
         * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
         * take care of from where we get pages. So the node where we start the
@@ -2352,9 +2323,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                            sc.gfp_mask);
 
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
-       end = ktime_get();
-       if (rec)
-               rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
index 20c18b7..d52b13d 100644 (file)
@@ -659,7 +659,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 }
 #endif
 
-#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS)
+#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
 #ifdef CONFIG_ZONE_DMA
 #define TEXT_FOR_DMA(xx) xx "_dma",
 #else
@@ -788,7 +788,7 @@ const char * const vmstat_text[] = {
 
 #endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
-#endif /* CONFIG_PROC_FS || CONFIG_SYSFS */
+#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
 
 
 #ifdef CONFIG_PROC_FS
index 175b513..e317583 100644 (file)
@@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 {
        int in, out, inp, outp;
        struct virtio_chan *chan = client->trans;
-       char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
        unsigned long flags;
        size_t pdata_off = 0;
        struct trans_rpage_info *rpinfo = NULL;
@@ -346,7 +345,8 @@ req_retry_pinned:
                 * Arrange in such a way that server places header in the
                 * alloced memory and payload onto the user buffer.
                 */
-               inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
+               inp = pack_sg_list(chan->sg, out,
+                                  VIRTQUEUE_NUM, req->rc->sdata, 11);
                /*
                 * Running executables in the filesystem may result in
                 * a read request with kernel buffer as opposed to user buffer.
@@ -366,8 +366,8 @@ req_retry_pinned:
                }
                in += inp;
        } else {
-               in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
-                               req->rc->capacity);
+               in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
+                                 req->rc->sdata, req->rc->capacity);
        }
 
        err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
        .close = p9_virtio_close,
        .request = p9_virtio_request,
        .cancel = p9_virtio_cancel,
-       .maxsize = PAGE_SIZE*VIRTQUEUE_NUM,
+
+       /*
+        * We leave one entry for input and one entry for response
+        * headers. We also skip one more entry to accommodate addresses
+        * that are not at a page boundary, which can result in an extra
+        * page in zero copy.
+        */
+       .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
        .pref = P9_TRANS_PREF_PAYLOAD_SEP,
        .def = 0,
        .owner = THIS_MODULE,
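The maxsize comment above reserves three ring entries out of the virtqueue. As a rough worked example, assuming VIRTQUEUE_NUM is 128 and pages are 4 KiB (assumptions for illustration, not values taken from this diff):

#include <stdio.h>

#define PAGE_SIZE     4096      /* assumed 4 KiB pages */
#define VIRTQUEUE_NUM 128       /* assumed ring size */

int main(void)
{
        printf("old maxsize: %d bytes\n", PAGE_SIZE * VIRTQUEUE_NUM);           /* 524288 */
        printf("new maxsize: %d bytes\n", PAGE_SIZE * (VIRTQUEUE_NUM - 3));     /* 512000 */
        return 0;
}
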
index 3e2f91f..05dd351 100644 (file)
@@ -565,7 +565,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
        struct orig_node *orig_node = NULL;
        int data_len = skb->len, ret;
        short vid = -1;
-       bool do_bcast = false;
+       bool do_bcast;
 
        if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
                goto dropped;
@@ -598,15 +598,15 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
        tt_local_add(soft_iface, ethhdr->h_source);
 
        orig_node = transtable_search(bat_priv, ethhdr->h_dest);
-       if (is_multicast_ether_addr(ethhdr->h_dest) ||
-                               (orig_node && orig_node->gw_flags)) {
+       do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
+       if (do_bcast || (orig_node && orig_node->gw_flags)) {
                ret = gw_is_target(bat_priv, skb, orig_node);
 
                if (ret < 0)
                        goto dropped;
 
-               if (ret == 0)
-                       do_bcast = true;
+               if (ret)
+                       do_bcast = false;
        }
 
        /* ethernet packet should be broadcasted */
index a40170e..7ef4eb4 100644 (file)
@@ -58,8 +58,8 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
        if (status)
                return;
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                               test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+                       test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 0);
 
        hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -76,8 +76,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
        if (status)
                return;
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                               test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+                               test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 0);
 
        hci_conn_check_pending(hdev);
@@ -959,9 +959,8 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
                return;
        }
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                                       !test_and_set_bit(HCI_INQUIRY,
-                                                       &hdev->flags))
+       if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
+                               test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 1);
 }
 
@@ -1340,8 +1339,8 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
 
        BT_DBG("%s status %d", hdev->name, status);
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                               test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+                               test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 0);
 
        hci_req_complete(hdev, HCI_OP_INQUIRY, status);
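The hci_event.c hunks above swap the operands of &&, so the HCI_INQUIRY bit is always updated and only the mgmt notification remains conditional on HCI_MGMT. A small standalone demonstration of why operand order matters with short-circuit evaluation (the helpers are illustrative, not Bluetooth APIs):

#include <stdbool.h>
#include <stdio.h>

static bool flag = true;

static bool test_and_clear(void)        /* side effect: clears the flag */
{
        bool was = flag;
        flag = false;
        return was;
}

int main(void)
{
        bool mgmt_enabled = false;

        /* old order: when mgmt_enabled is false, test_and_clear() never runs
         * and the flag stays set */
        if (mgmt_enabled && test_and_clear())
                puts("report discovery stopped");
        printf("flag after old order: %d\n", flag);     /* still 1 */

        /* new order: the flag is always cleared, reporting stays conditional */
        if (test_and_clear() && mgmt_enabled)
                puts("report discovery stopped");
        printf("flag after new order: %d\n", flag);     /* 0 */
        return 0;
}
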
index 32b8f9f..ff3ed60 100644 (file)
@@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
 
-       netif_carrier_off(dev);
        netdev_update_features(dev);
        netif_start_queue(dev);
        br_stp_enable_bridge(br);
@@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
 
-       netif_carrier_off(dev);
-
        br_stp_disable_bridge(br);
        br_multicast_stop(br);
 
index ba6f73e..a9aff9c 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig BRIDGE_NF_EBTABLES
        tristate "Ethernet Bridge tables (ebtables) support"
-       depends on BRIDGE && BRIDGE_NETFILTER
+       depends on BRIDGE && NETFILTER
        select NETFILTER_XTABLES
        help
          ebtables is a general, extensible frame/packet identification
index 7c2fa0a..7f9ac07 100644 (file)
@@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
        caifdevs = caif_device_list(dev_net(dev));
        BUG_ON(!caifdevs);
 
-       caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+       caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
                return NULL;
        caifd->pcpu_refcnt = alloc_percpu(int);
+       if (!caifd->pcpu_refcnt) {
+               kfree(caifd);
+               return NULL;
+       }
        caifd->netdev = dev;
        dev_hold(dev);
        return caifd;
index 8ce926d..9b0c32a 100644 (file)
@@ -857,7 +857,7 @@ static __exit void can_exit(void)
        struct net_device *dev;
 
        if (stats_timer)
-               del_timer(&can_stattimer);
+               del_timer_sync(&can_stattimer);
 
        can_remove_proc();
 
index d6c8ae5..c84963d 100644 (file)
@@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
        }
 }
 
+static void bcm_tx_start_timer(struct bcm_op *op)
+{
+       if (op->kt_ival1.tv64 && op->count)
+               hrtimer_start(&op->timer,
+                             ktime_add(ktime_get(), op->kt_ival1),
+                             HRTIMER_MODE_ABS);
+       else if (op->kt_ival2.tv64)
+               hrtimer_start(&op->timer,
+                             ktime_add(ktime_get(), op->kt_ival2),
+                             HRTIMER_MODE_ABS);
+}
+
 static void bcm_tx_timeout_tsklet(unsigned long data)
 {
        struct bcm_op *op = (struct bcm_op *)data;
@@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 
                        bcm_send_to_user(op, &msg_head, NULL, 0);
                }
-       }
-
-       if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-               /* send (next) frame */
                bcm_can_tx(op);
-               hrtimer_start(&op->timer,
-                             ktime_add(ktime_get(), op->kt_ival1),
-                             HRTIMER_MODE_ABS);
 
-       } else {
-               if (op->kt_ival2.tv64) {
+       } else if (op->kt_ival2.tv64)
+               bcm_can_tx(op);
 
-                       /* send (next) frame */
-                       bcm_can_tx(op);
-                       hrtimer_start(&op->timer,
-                                     ktime_add(ktime_get(), op->kt_ival2),
-                                     HRTIMER_MODE_ABS);
-               }
-       }
+       bcm_tx_start_timer(op);
 }
 
 /*
@@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                        hrtimer_cancel(&op->timer);
        }
 
-       if ((op->flags & STARTTIMER) &&
-           ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
-
+       if (op->flags & STARTTIMER) {
+               hrtimer_cancel(&op->timer);
                /* spec: send can_frame when starting timer */
                op->flags |= TX_ANNOUNCE;
-
-               if (op->kt_ival1.tv64 && (op->count > 0)) {
-                       /* op->count-- is done in bcm_tx_timeout_handler */
-                       hrtimer_start(&op->timer, op->kt_ival1,
-                                     HRTIMER_MODE_REL);
-               } else
-                       hrtimer_start(&op->timer, op->kt_ival2,
-                                     HRTIMER_MODE_REL);
        }
 
-       if (op->flags & TX_ANNOUNCE)
+       if (op->flags & TX_ANNOUNCE) {
                bcm_can_tx(op);
+               if (op->count)
+                       op->count--;
+       }
+
+       if (op->flags & STARTTIMER)
+               bcm_tx_start_timer(op);
 
        return msg_head->nframes * CFSIZ + MHSIZ;
 }
index 132963a..2883ea0 100644 (file)
@@ -232,6 +232,7 @@ void ceph_destroy_options(struct ceph_options *opt)
                ceph_crypto_key_destroy(opt->key);
                kfree(opt->key);
        }
+       kfree(opt->mon_addr);
        kfree(opt);
 }
 EXPORT_SYMBOL(ceph_destroy_options);
index c340e2e..9918e9e 100644 (file)
@@ -2307,6 +2307,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
        m->front_max = front_len;
        m->front_is_vmalloc = false;
        m->more_to_follow = false;
+       m->ack_stamp = 0;
        m->pool = NULL;
 
        /* middle */
index d5f2d97..1f4cb30 100644 (file)
@@ -7,27 +7,37 @@
 
 #include <linux/ceph/msgpool.h>
 
-static void *alloc_fn(gfp_t gfp_mask, void *arg)
+static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
 {
        struct ceph_msgpool *pool = arg;
-       void *p;
+       struct ceph_msg *msg;
 
-       p = ceph_msg_new(0, pool->front_len, gfp_mask);
-       if (!p)
-               pr_err("msgpool %s alloc failed\n", pool->name);
-       return p;
+       msg = ceph_msg_new(0, pool->front_len, gfp_mask);
+       if (!msg) {
+               dout("msgpool_alloc %s failed\n", pool->name);
+       } else {
+               dout("msgpool_alloc %s %p\n", pool->name, msg);
+               msg->pool = pool;
+       }
+       return msg;
 }
 
-static void free_fn(void *element, void *arg)
+static void msgpool_free(void *element, void *arg)
 {
-       ceph_msg_put(element);
+       struct ceph_msgpool *pool = arg;
+       struct ceph_msg *msg = element;
+
+       dout("msgpool_release %s %p\n", pool->name, msg);
+       msg->pool = NULL;
+       ceph_msg_put(msg);
 }
 
 int ceph_msgpool_init(struct ceph_msgpool *pool,
                      int front_len, int size, bool blocking, const char *name)
 {
+       dout("msgpool %s init\n", name);
        pool->front_len = front_len;
-       pool->pool = mempool_create(size, alloc_fn, free_fn, pool);
+       pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
        if (!pool->pool)
                return -ENOMEM;
        pool->name = name;
@@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool,
 
 void ceph_msgpool_destroy(struct ceph_msgpool *pool)
 {
+       dout("msgpool %s destroy\n", pool->name);
        mempool_destroy(pool->pool);
 }
 
 struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
                                  int front_len)
 {
+       struct ceph_msg *msg;
+
        if (front_len > pool->front_len) {
-               pr_err("msgpool_get pool %s need front %d, pool size is %d\n",
+               dout("msgpool_get %s need front %d, pool size is %d\n",
                       pool->name, front_len, pool->front_len);
                WARN_ON(1);
 
@@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
                return ceph_msg_new(0, front_len, GFP_NOFS);
        }
 
-       return mempool_alloc(pool->pool, GFP_NOFS);
+       msg = mempool_alloc(pool->pool, GFP_NOFS);
+       dout("msgpool_get %s %p\n", pool->name, msg);
+       return msg;
 }
 
 void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
 {
+       dout("msgpool_put %s %p\n", pool->name, msg);
+
        /* reset msg front_len; user may have changed it */
        msg->front.iov_len = pool->front_len;
        msg->hdr.front_len = cpu_to_le32(pool->front_len);
 
        kref_init(&msg->kref);  /* retake single ref */
+       mempool_free(msg, pool->pool);
 }
index ce310ee..88ad8a2 100644 (file)
@@ -217,6 +217,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
        INIT_LIST_HEAD(&req->r_unsafe_item);
        INIT_LIST_HEAD(&req->r_linger_item);
        INIT_LIST_HEAD(&req->r_linger_osd);
+       INIT_LIST_HEAD(&req->r_req_lru_item);
        req->r_flags = flags;
 
        WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
@@ -685,6 +686,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
        put_osd(osd);
 }
 
+static void remove_all_osds(struct ceph_osd_client *osdc)
+{
+       dout("remove_all_osds %p\n", osdc);
+       mutex_lock(&osdc->request_mutex);
+       while (!RB_EMPTY_ROOT(&osdc->osds)) {
+               struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+                                               struct ceph_osd, o_node);
+               __remove_osd(osdc, osd);
+       }
+       mutex_unlock(&osdc->request_mutex);
+}
+
 static void __move_osd_to_lru(struct ceph_osd_client *osdc,
                              struct ceph_osd *osd)
 {
@@ -701,14 +714,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd)
                list_del_init(&osd->o_osd_lru);
 }
 
-static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
+static void remove_old_osds(struct ceph_osd_client *osdc)
 {
        struct ceph_osd *osd, *nosd;
 
        dout("__remove_old_osds %p\n", osdc);
        mutex_lock(&osdc->request_mutex);
        list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
-               if (!remove_all && time_before(jiffies, osd->lru_ttl))
+               if (time_before(jiffies, osd->lru_ttl))
                        break;
                __remove_osd(osdc, osd);
        }
@@ -751,6 +764,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
        struct rb_node *parent = NULL;
        struct ceph_osd *osd = NULL;
 
+       dout("__insert_osd %p osd%d\n", new, new->o_osd);
        while (*p) {
                parent = *p;
                osd = rb_entry(parent, struct ceph_osd, o_node);
@@ -803,13 +817,10 @@ static void __register_request(struct ceph_osd_client *osdc,
 {
        req->r_tid = ++osdc->last_tid;
        req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
-       INIT_LIST_HEAD(&req->r_req_lru_item);
-
        dout("__register_request %p tid %lld\n", req, req->r_tid);
        __insert_request(osdc, req);
        ceph_osdc_get_request(req);
        osdc->num_requests++;
-
        if (osdc->num_requests == 1) {
                dout(" first request, scheduling timeout\n");
                __schedule_osd_timeout(osdc);
@@ -1144,7 +1155,7 @@ static void handle_osds_timeout(struct work_struct *work)
 
        dout("osds timeout\n");
        down_read(&osdc->map_sem);
-       remove_old_osds(osdc, 0);
+       remove_old_osds(osdc);
        up_read(&osdc->map_sem);
 
        schedule_delayed_work(&osdc->osds_timeout_work,
@@ -1862,8 +1873,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
                ceph_osdmap_destroy(osdc->osdmap);
                osdc->osdmap = NULL;
        }
-       remove_old_osds(osdc, 1);
-       WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
+       remove_all_osds(osdc);
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
index e97c358..fd863fe 100644 (file)
@@ -339,6 +339,7 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new,
        struct ceph_pg_mapping *pg = NULL;
        int c;
 
+       dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
        while (*p) {
                parent = *p;
                pg = rb_entry(parent, struct ceph_pg_mapping, node);
@@ -366,16 +367,33 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
        while (n) {
                pg = rb_entry(n, struct ceph_pg_mapping, node);
                c = pgid_cmp(pgid, pg->pgid);
-               if (c < 0)
+               if (c < 0) {
                        n = n->rb_left;
-               else if (c > 0)
+               } else if (c > 0) {
                        n = n->rb_right;
-               else
+               } else {
+                       dout("__lookup_pg_mapping %llx got %p\n",
+                            *(u64 *)&pgid, pg);
                        return pg;
+               }
        }
        return NULL;
 }
 
+static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
+{
+       struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);
+
+       if (pg) {
+               dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
+               rb_erase(&pg->node, root);
+               kfree(pg);
+               return 0;
+       }
+       dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
+       return -ENOENT;
+}
+
 /*
  * rbtree of pg pool info
  */
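
The new __remove_pg_mapping() simply reuses __lookup_pg_mapping() and erases the node if it exists, returning -ENOENT otherwise; that is what lets the incremental-map code further down drop a pg_temp entry in place when the new mapping length is zero, instead of sweeping the whole tree. The same lookup-then-unlink shape, sketched over a plain singly linked list with invented names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
        int pgid;
        struct mapping *next;
};

/* Find the link that points at the entry with the given id, or NULL. */
static struct mapping **lookup(struct mapping **head, int pgid)
{
        for (struct mapping **p = head; *p; p = &(*p)->next)
                if ((*p)->pgid == pgid)
                        return p;
        return NULL;
}

/* Remove an entry by id: 0 on success, -ENOENT if it does not exist,
 * mirroring the return convention of __remove_pg_mapping() above. */
static int remove_mapping(struct mapping **head, int pgid)
{
        struct mapping **p = lookup(head, pgid);
        struct mapping *victim;

        if (!p)
                return -ENOENT;
        victim = *p;
        *p = victim->next;
        free(victim);
        return 0;
}

int main(void)
{
        struct mapping *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct mapping *m = malloc(sizeof(*m));
                m->pgid = i;
                m->next = head;
                head = m;
        }
        printf("remove 1 -> %d\n", remove_mapping(&head, 1));  /* 0 */
        printf("remove 9 -> %d\n", remove_mapping(&head, 9));  /* -ENOENT, i.e. -2 */

        while (head)
                remove_mapping(&head, head->pgid);
        return 0;
}
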
@@ -711,7 +729,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
        void *start = *p;
        int err = -EINVAL;
        u16 version;
-       struct rb_node *rbp;
 
        ceph_decode_16_safe(p, end, version, bad);
        if (version > CEPH_OSDMAP_INC_VERSION) {
@@ -861,7 +878,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
        }
 
        /* new_pg_temp */
-       rbp = rb_first(&map->pg_temp);
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                struct ceph_pg_mapping *pg;
@@ -872,18 +888,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                ceph_decode_copy(p, &pgid, sizeof(pgid));
                pglen = ceph_decode_32(p);
 
-               /* remove any? */
-               while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
-                                               node)->pgid, pgid) <= 0) {
-                       struct ceph_pg_mapping *cur =
-                               rb_entry(rbp, struct ceph_pg_mapping, node);
-
-                       rbp = rb_next(rbp);
-                       dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
-                       rb_erase(&cur->node, &map->pg_temp);
-                       kfree(cur);
-               }
-
                if (pglen) {
                        /* insert */
                        ceph_decode_need(p, end, pglen*sizeof(u32), bad);
@@ -903,17 +907,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                        }
                        dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
                             pglen);
+               } else {
+                       /* remove */
+                       __remove_pg_mapping(&map->pg_temp, pgid);
                }
        }
-       while (rbp) {
-               struct ceph_pg_mapping *cur =
-                       rb_entry(rbp, struct ceph_pg_mapping, node);
-
-               rbp = rb_next(rbp);
-               dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
-               rb_erase(&cur->node, &map->pg_temp);
-               kfree(cur);
-       }
 
        /* ignore the rest */
        *p = end;
@@ -1046,10 +1044,25 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
        struct ceph_pg_mapping *pg;
        struct ceph_pg_pool_info *pool;
        int ruleno;
-       unsigned poolid, ps, pps;
+       unsigned poolid, ps, pps, t;
        int preferred;
 
+       poolid = le32_to_cpu(pgid.pool);
+       ps = le16_to_cpu(pgid.ps);
+       preferred = (s16)le16_to_cpu(pgid.preferred);
+
+       pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
+       if (!pool)
+               return NULL;
+
        /* pg_temp? */
+       if (preferred >= 0)
+               t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
+                                   pool->lpgp_num_mask);
+       else
+               t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
+                                   pool->pgp_num_mask);
+       pgid.ps = cpu_to_le16(t);
        pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
        if (pg) {
                *num = pg->len;
@@ -1057,18 +1070,6 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
        }
 
        /* crush */
-       poolid = le32_to_cpu(pgid.pool);
-       ps = le16_to_cpu(pgid.ps);
-       preferred = (s16)le16_to_cpu(pgid.preferred);
-
-       /* don't forcefeed bad device ids to crush */
-       if (preferred >= osdmap->max_osd ||
-           preferred >= osdmap->crush->max_devices)
-               preferred = -1;
-
-       pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
-       if (!pool)
-               return NULL;
        ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
                                 pool->v.type, pool->v.size);
        if (ruleno < 0) {
@@ -1078,6 +1079,11 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                return NULL;
        }
 
+       /* don't forcefeed bad device ids to crush */
+       if (preferred >= osdmap->max_osd ||
+           preferred >= osdmap->crush->max_devices)
+               preferred = -1;
+
        if (preferred >= 0)
                pps = ceph_stable_mod(ps,
                                      le32_to_cpu(pool->v.lpgp_num),
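
calc_pg_raw() now folds the placement seed through ceph_stable_mod() before doing the pg_temp lookup, so the key used for the lookup matches the key under which the mappings were inserted. ceph_stable_mod() is a small helper defined in the ceph osdmap header; to the best of my knowledge it reduces x into [0, b) using the power-of-two mask bmask without reshuffling existing placements as b grows, roughly as in the standalone sketch below:

#include <stdio.h>

/* Believed to match the ceph helper: map x into [0, b) using bmask
 * (the next power of two minus one), in a way that keeps existing
 * placements stable while b grows toward that power of two. */
static int stable_mod(int x, int b, int bmask)
{
        if ((x & bmask) < b)
                return x & bmask;
        return x & (bmask >> 1);
}

int main(void)
{
        /* 12 buckets, mask 15 */
        for (int x = 0; x < 20; x++)
                printf("stable_mod(%2d, 12, 15) = %d\n", x, stable_mod(x, 12, 15));
        return 0;
}
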
index 17d67b5..b10ff0a 100644 (file)
@@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev,
  */
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
+       if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+               if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+                       atomic_long_inc(&dev->rx_dropped);
+                       kfree_skb(skb);
+                       return NET_RX_DROP;
+               }
+       }
+
        skb_orphan(skb);
        nf_reset(skb);
 
index e7ab0c0..3231b46 100644 (file)
@@ -384,8 +384,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                 */
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->action == FR_ACT_GOTO &&
-                           r->target == rule->pref) {
-                               BUG_ON(rtnl_dereference(r->ctarget) != NULL);
+                           r->target == rule->pref &&
+                           rtnl_dereference(r->ctarget) == NULL) {
                                rcu_assign_pointer(r->ctarget, rule);
                                if (--ops->unresolved_rules == 0)
                                        break;
index bf32c33..555a456 100644 (file)
@@ -30,6 +30,7 @@ struct flow_cache_entry {
                struct hlist_node       hlist;
                struct list_head        gc_list;
        } u;
+       struct net                      *net;
        u16                             family;
        u8                              dir;
        u32                             genid;
@@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 
 static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
-                         const struct flowi *key)
+                         const struct flowi *key,
+                         size_t keysize)
 {
        const u32 *k = (const u32 *) key;
+       const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
 
-       return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+       return jhash2(k, length, fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
 }
 
-typedef unsigned long flow_compare_t;
-
 /* I hear what you're saying, use memcmp.  But memcmp cannot make
- * important assumptions that we can here, such as alignment and
- * constant size.
+ * important assumptions that we can here, such as alignment.
  */
-static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
+                           size_t keysize)
 {
        const flow_compare_t *k1, *k1_lim, *k2;
-       const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
-
-       BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
        k1 = (const flow_compare_t *) key1;
-       k1_lim = k1 + n_elem;
+       k1_lim = k1 + keysize;
 
        k2 = (const flow_compare_t *) key2;
 
@@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
        struct flow_cache_entry *fle, *tfle;
        struct hlist_node *entry;
        struct flow_cache_object *flo;
+       size_t keysize;
        unsigned int hash;
 
        local_bh_disable();
@@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
        fle = NULL;
        flo = NULL;
+
+       keysize = flow_key_size(family);
+       if (!keysize)
+               goto nocache;
+
        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!fcp->hash_table)
@@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);
 
-       hash = flow_hash_code(fc, fcp, key);
+       hash = flow_hash_code(fc, fcp, key, keysize);
        hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
-               if (tfle->family == family &&
+               if (tfle->net == net &&
+                   tfle->family == family &&
                    tfle->dir == dir &&
-                   flow_key_compare(key, &tfle->key) == 0) {
+                   flow_key_compare(key, &tfle->key, keysize) == 0) {
                        fle = tfle;
                        break;
                }
@@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
+                       fle->net = net;
                        fle->family = family;
                        fle->dir = dir;
-                       memcpy(&fle->key, key, sizeof(*key));
+                       memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
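
The flow-cache rework hashes and compares only the first flow_key_size(family) words of the key (and records the owning netns in each entry), rather than always covering the whole struct flowi. The sketch below shows the general idea — hashing a length-limited prefix of a key so trailing bytes the family never uses cannot perturb the result — with a simple FNV-1a mix standing in for jhash2(), which is kernel-internal; all values are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hash only the first 'words' 32-bit words of the key.  FNV-1a is used
 * purely for illustration; the kernel uses jhash2() with a per-cpu
 * random seed. */
static uint32_t hash_key_prefix(const uint32_t *key, size_t words)
{
        uint32_t h = 2166136261u;

        for (size_t i = 0; i < words; i++) {
                h ^= key[i];
                h *= 16777619u;
        }
        return h;
}

int main(void)
{
        /* A 16-word "key" where only the first 4 words are meaningful
         * for this (made-up) address family. */
        uint32_t key[16];

        memset(key, 0, sizeof(key));
        key[0] = 0x0a000001;    /* pretend saddr */
        key[1] = 0x0a000002;    /* pretend daddr */
        key[2] = 80;            /* pretend dport */
        key[3] = 6;             /* pretend proto */
        key[15] = 0xdeadbeef;   /* trailing bytes this family never sets */

        /* Hashing the 4-word prefix ignores the trailing noise; hashing
         * the whole buffer would not. */
        printf("prefix hash: %08x\n", (unsigned)hash_key_prefix(key, 4));
        printf("full hash:   %08x\n", (unsigned)hash_key_prefix(key, 16));
        return 0;
}
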
index 27002df..387703f 100644 (file)
@@ -611,8 +611,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
-/* skb frags copy userspace buffers to kernel */
-static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+/*     skb_copy_ubufs  -       copy userspace skb frag buffers into kernel memory
+ *     @skb: the skb to modify
+ *     @gfp_mask: allocation priority
+ *
+ *     This must be called on an SKBTX_DEV_ZEROCOPY skb.
+ *     It copies all frags into kernel memory and drops the reference
+ *     to the userspace pages.
+ *
+ *     If this function is called from an interrupt, gfp_mask must be
+ *     %GFP_ATOMIC.
+ *
+ *     Returns 0 on success, or a negative error code if kernel memory
+ *     could not be allocated for the copy.
+ */
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 {
        int i;
        int num_frags = skb_shinfo(skb)->nr_frags;
@@ -652,6 +665,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
                skb_shinfo(skb)->frags[i - 1].page = head;
                head = (struct page *)head->private;
        }
+
+       skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
        return 0;
 }
 
@@ -677,7 +692,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                if (skb_copy_ubufs(skb, gfp_mask))
                        return NULL;
-               skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
        }
 
        n = skb + 1;
@@ -803,7 +817,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
                                n = NULL;
                                goto out;
                        }
-                       skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
                }
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -896,7 +909,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                        if (skb_copy_ubufs(skb, gfp_mask))
                                goto nofrags;
-                       skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
                }
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                        get_page(skb_shinfo(skb)->frags[i].page);
index 27997d3..a246836 100644 (file)
@@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev)
        dev->addr_len           = ETH_ALEN;
        dev->tx_queue_len       = 1000; /* Ethernet wants good queues */
        dev->flags              = IFF_BROADCAST|IFF_MULTICAST;
-       dev->priv_flags         = IFF_TX_SKB_SHARING;
+       dev->priv_flags         |= IFF_TX_SKB_SHARING;
 
        memset(dev->broadcast, 0xFF, ETH_ALEN);
 
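
The ether_setup() one-liner is the classic '=' versus '|=' flag bug: assigning IFF_TX_SKB_SHARING wiped out whatever priv_flags the caller had already set, while OR-ing it in preserves them. A trivial demonstration of the difference, with made-up flag names:

#include <stdio.h>

#define FLAG_A (1u << 0)        /* stand-in for a caller-set priv flag */
#define FLAG_B (1u << 1)        /* stand-in for IFF_TX_SKB_SHARING */

int main(void)
{
        unsigned int assigned = FLAG_A;
        unsigned int ored = FLAG_A;

        assigned = FLAG_B;      /* clobbers FLAG_A, as the old code did */
        ored |= FLAG_B;         /* keeps FLAG_A, as the fix does */

        printf("assigned=%#x ored=%#x\n", assigned, ored);
        return 0;
}
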
index 1b745d4..dd2b947 100644 (file)
@@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
 
        if (addr->sin_family != AF_INET) {
+               /* Compatibility games: accept AF_UNSPEC (mapped to AF_INET)
+                * only if s_addr is INADDR_ANY.
+                */
                err = -EAFNOSUPPORT;
-               goto out;
+               if (addr->sin_family != AF_UNSPEC ||
+                   addr->sin_addr.s_addr != htonl(INADDR_ANY))
+                       goto out;
        }
 
        chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
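
The inet_bind() hunk restores an old compatibility behaviour: bind() with sin_family == AF_UNSPEC is treated as AF_INET, but only when the requested address is INADDR_ANY; anything else still fails with EAFNOSUPPORT. A small userspace probe of that behaviour — the exact outcome naturally depends on the kernel it runs on:

#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void try_bind(in_addr_t addr, const char *what)
{
        struct sockaddr_in sin;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return;
        }
        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_UNSPEC;     /* deliberately not AF_INET */
        sin.sin_port = 0;
        sin.sin_addr.s_addr = addr;

        if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) == 0)
                printf("bind(AF_UNSPEC, %s): ok\n", what);
        else
                printf("bind(AF_UNSPEC, %s): %s\n", what, strerror(errno));
        close(fd);
}

int main(void)
{
        try_bind(htonl(INADDR_ANY), "INADDR_ANY");      /* accepted with this patch */
        try_bind(htonl(INADDR_LOOPBACK), "127.0.0.1");  /* still EAFNOSUPPORT */
        return 0;
}
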
index 33e2c35..80106d8 100644 (file)
@@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
 };
 
 /* Release a nexthop info record */
+static void free_fib_info_rcu(struct rcu_head *head)
+{
+       struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
+       if (fi->fib_metrics != (u32 *) dst_default_metrics)
+               kfree(fi->fib_metrics);
+       kfree(fi);
+}
 
 void free_fib_info(struct fib_info *fi)
 {
@@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi)
        } endfor_nexthops(fi);
        fib_info_cnt--;
        release_net(fi->fib_net);
-       kfree_rcu(fi, rcu);
+       call_rcu(&fi->rcu, free_fib_info_rcu);
 }
 
 void fib_release_info(struct fib_info *fi)
index 5c9b9d9..e59aabd 100644 (file)
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
        return skb;
 
 nlmsg_failure:
+       kfree_skb(skb);
        *errp = -EINVAL;
        printk(KERN_ERR "ip_queue: error creating packet message\n");
        return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 {
        struct nf_queue_entry *entry;
 
-       if (vmsg->value > NF_MAX_VERDICT)
+       if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
                return -EINVAL;
 
        entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
                break;
 
        case IPQM_VERDICT:
-               if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
-                       status = -EINVAL;
-               else
-                       status = ipq_set_verdict(&pmsg->msg.verdict,
-                                                len - sizeof(*pmsg));
-                       break;
+               status = ipq_set_verdict(&pmsg->msg.verdict,
+                                        len - sizeof(*pmsg));
+               break;
        default:
                status = -EINVAL;
        }
index b14ec7d..4bfad5d 100644 (file)
@@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
        SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
        SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
+       SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
+       SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
        SNMP_MIB_SENTINEL
 };
 
index ea0d218..d73aab3 100644 (file)
@@ -1124,7 +1124,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
                return 0;
 
        /* ...Then it's D-SACK, and must reside below snd_una completely */
-       if (!after(end_seq, tp->snd_una))
+       if (after(end_seq, tp->snd_una))
                return 0;
 
        if (!before(start_seq, tp->undo_marker))
@@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
        BUG_ON(!pcount);
 
-       /* Tweak before seqno plays */
-       if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
-           !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+       if (skb == tp->lost_skb_hint)
                tp->lost_cnt_hint += pcount;
 
        TCP_SKB_CB(prev)->end_seq += shifted;
index 1c12b8e..7963e03 100644 (file)
@@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
        kfree(inet_rsk(req)->opt);
 }
 
-static void syn_flood_warning(const struct sk_buff *skb)
+/*
+ * Return 1 if a syncookie should be sent
+ */
+int tcp_syn_flood_action(struct sock *sk,
+                        const struct sk_buff *skb,
+                        const char *proto)
 {
-       const char *msg;
+       const char *msg = "Dropping request";
+       int want_cookie = 0;
+       struct listen_sock *lopt;
+
+
 
 #ifdef CONFIG_SYN_COOKIES
-       if (sysctl_tcp_syncookies)
+       if (sysctl_tcp_syncookies) {
                msg = "Sending cookies";
-       else
+               want_cookie = 1;
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+       } else
 #endif
-               msg = "Dropping request";
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-       pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
-                               ntohs(tcp_hdr(skb)->dest), msg);
+       lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+       if (!lopt->synflood_warned) {
+               lopt->synflood_warned = 1;
+               pr_info("%s: Possible SYN flooding on port %d. %s. "
+                       " Check SNMP counters.\n",
+                       proto, ntohs(tcp_hdr(skb)->dest), msg);
+       }
+       return want_cookie;
 }
+EXPORT_SYMBOL(tcp_syn_flood_action);
 
 /*
  * Save and compile IPv4 options into the request_sock if needed.
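
With tcp_syn_flood_action(), the SYN-flood warning is emitted at most once per listening socket and the per-decision accounting moves into the two TcpExt counters added above (TCPReqQFullDoCookies and TCPReqQFullDrop), which are exported through /proc/net/netstat. A small reader for those counters, assuming the usual paired name/value "TcpExt:" line layout of that file:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/net/netstat", "r");
        char names[4096], values[4096];

        if (!f) {
                perror("/proc/net/netstat");
                return 1;
        }
        /* The file carries pairs of lines per prefix: one line of counter
         * names followed by one line of matching values. */
        while (fgets(names, sizeof(names), f) &&
               fgets(values, sizeof(values), f)) {
                char *np, *vp, *n, *v;

                if (strncmp(names, "TcpExt:", 7) != 0)
                        continue;

                n = strtok_r(names, " \n", &np);
                v = strtok_r(values, " \n", &vp);
                while (n && v) {
                        if (!strcmp(n, "TCPReqQFullDoCookies") ||
                            !strcmp(n, "TCPReqQFullDrop"))
                                printf("%s = %s\n", n, v);
                        n = strtok_r(NULL, " \n", &np);
                        v = strtok_r(NULL, " \n", &vp);
                }
        }
        fclose(f);
        return 0;
}
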
@@ -909,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
                        }
                        sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                }
-               if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+               md5sig = tp->md5sig_info;
+               if (md5sig->entries4 == 0 &&
+                   tcp_alloc_md5sig_pool(sk) == NULL) {
                        kfree(newkey);
                        return -ENOMEM;
                }
-               md5sig = tp->md5sig_info;
 
                if (md5sig->alloced4 == md5sig->entries4) {
                        keys = kmalloc((sizeof(*keys) *
                                        (md5sig->entries4 + 1)), GFP_ATOMIC);
                        if (!keys) {
                                kfree(newkey);
-                               tcp_free_md5sig_pool();
+                               if (md5sig->entries4 == 0)
+                                       tcp_free_md5sig_pool();
                                return -ENOMEM;
                        }
 
@@ -964,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
                                kfree(tp->md5sig_info->keys4);
                                tp->md5sig_info->keys4 = NULL;
                                tp->md5sig_info->alloced4 = 0;
+                               tcp_free_md5sig_pool();
                        } else if (tp->md5sig_info->entries4 != i) {
                                /* Need to do some manipulation */
                                memmove(&tp->md5sig_info->keys4[i],
@@ -971,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
                                        (tp->md5sig_info->entries4 - i) *
                                         sizeof(struct tcp4_md5sig_key));
                        }
-                       tcp_free_md5sig_pool();
                        return 0;
                }
        }
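
These MD5-pool hunks (and their IPv6 counterparts further down) tie the lifetime of the shared signature pool to the per-family key count: the pool is allocated only on the 0 -> 1 transition and released only when the last key goes away, instead of being bumped and dropped on every add/del. The general shape — a shared resource whose lifetime follows an entry count — looks like the sketch below; the names are illustrative, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

static void *shared_pool;       /* stands in for the md5sig crypto pool */
static int entries;             /* stands in for md5sig_info->entries4/6 */

static int add_key(void)
{
        if (entries == 0) {
                shared_pool = malloc(64);       /* allocate on 0 -> 1 only */
                if (!shared_pool)
                        return -1;
                printf("pool allocated\n");
        }
        entries++;
        return 0;
}

static void del_key(void)
{
        if (entries == 0)
                return;
        if (--entries == 0) {
                free(shared_pool);              /* release on last key only */
                shared_pool = NULL;
                printf("pool freed\n");
        }
}

int main(void)
{
        add_key();
        add_key();      /* no second allocation */
        del_key();      /* pool stays: one key left */
        del_key();      /* last key gone: pool freed */
        return 0;
}
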
@@ -1235,11 +1256,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        __be32 saddr = ip_hdr(skb)->saddr;
        __be32 daddr = ip_hdr(skb)->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
-#ifdef CONFIG_SYN_COOKIES
        int want_cookie = 0;
-#else
-#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
-#endif
 
        /* Never answer to SYNs send to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1250,14 +1267,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * evidently real one.
         */
        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-               if (net_ratelimit())
-                       syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-               if (sysctl_tcp_syncookies) {
-                       want_cookie = 1;
-               } else
-#endif
-               goto drop;
+               want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
+               if (!want_cookie)
+                       goto drop;
        }
 
        /* Accept backlog is full. If we have already queued enough
@@ -1303,9 +1315,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                while (l-- > 0)
                        *c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
                want_cookie = 0;        /* not our kind of cookie */
-#endif
                tmp_ext.cookie_out_never = 0; /* false */
                tmp_ext.cookie_plus = tmp_opt.cookie_plus;
        } else if (!tp->rx_opt.cookie_in_always) {
index f012ebd..12368c5 100644 (file)
@@ -374,8 +374,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
                        "%s(): cannot allocate memory for statistics; dev=%s.\n",
                        __func__, dev->name));
                neigh_parms_release(&nd_tbl, ndev->nd_parms);
-               ndev->dead = 1;
-               in6_dev_finish_destroy(ndev);
+               dev_put(dev);
+               kfree(ndev);
                return NULL;
        }
 
index 9ef1831..b46e9f8 100644 (file)
@@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
        return 0;
 }
 
-int datagram_send_ctl(struct net *net,
+int datagram_send_ctl(struct net *net, struct sock *sk,
                      struct msghdr *msg, struct flowi6 *fl6,
                      struct ipv6_txoptions *opt,
                      int *hlimit, int *tclass, int *dontfrag)
@@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net,
 
                        if (addr_type != IPV6_ADDR_ANY) {
                                int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
-                               if (!ipv6_chk_addr(net, &src_info->ipi6_addr,
+                               if (!inet_sk(sk)->transparent &&
+                                   !ipv6_chk_addr(net, &src_info->ipi6_addr,
                                                   strict ? dev : NULL, 0))
                                        err = -EINVAL;
                                else
index f3caf1b..5430394 100644 (file)
@@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
 }
 
 static struct ip6_flowlabel *
-fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
-         int optlen, int *err_p)
+fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
+         char __user *optval, int optlen, int *err_p)
 {
        struct ip6_flowlabel *fl = NULL;
        int olen;
@@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
                msg.msg_control = (void*)(fl->opt+1);
                memset(&flowi6, 0, sizeof(flowi6));
 
-               err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk,
+               err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
                                        &junk, &junk);
                if (err)
                        goto done;
@@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
                        return -EINVAL;
 
-               fl = fl_create(net, &freq, optval, optlen, &err);
+               fl = fl_create(net, sk, &freq, optval, optlen, &err);
                if (fl == NULL)
                        return err;
                sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
index 705c828..def0538 100644 (file)
@@ -696,8 +696,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
        int err;
 
        err = ip6mr_fib_lookup(net, &fl6, &mrt);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(skb);
                return err;
+       }
 
        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
@@ -2052,8 +2054,10 @@ int ip6_mr_input(struct sk_buff *skb)
        int err;
 
        err = ip6mr_fib_lookup(net, &fl6, &mrt);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(skb);
                return err;
+       }
 
        read_lock(&mrt_lock);
        cache = ip6mr_cache_find(mrt,
index 147ede3..2fbda5f 100644 (file)
@@ -475,7 +475,7 @@ sticky_done:
                msg.msg_controllen = optlen;
                msg.msg_control = (void*)(opt+1);
 
-               retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk,
+               retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
                                         &junk);
                if (retv)
                        goto done;
index 2493948..e63c397 100644 (file)
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
        return skb;
 
 nlmsg_failure:
+       kfree_skb(skb);
        *errp = -EINVAL;
        printk(KERN_ERR "ip6_queue: error creating packet message\n");
        return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 {
        struct nf_queue_entry *entry;
 
-       if (vmsg->value > NF_MAX_VERDICT)
+       if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
                return -EINVAL;
 
        entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
                break;
 
        case IPQM_VERDICT:
-               if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
-                       status = -EINVAL;
-               else
-                       status = ipq_set_verdict(&pmsg->msg.verdict,
-                                                len - sizeof(*pmsg));
-                       break;
+               status = ipq_set_verdict(&pmsg->msg.verdict,
+                                        len - sizeof(*pmsg));
+               break;
        default:
                status = -EINVAL;
        }
index 6a79f30..343852e 100644 (file)
@@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
-                                       &tclass, &dontfrag);
+               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                       &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index 9e69eb0..fb545ed 100644 (file)
@@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
        struct inet_peer *peer;
        u32 *p = NULL;
 
+       if (!(rt->dst.flags & DST_HOST))
+               return NULL;
+
        if (!rt->rt6i_peer)
                rt6_bind_peer(rt, 1);
 
@@ -241,7 +244,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
 {
        struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
 
-       memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
+       if (rt != NULL)
+               memset(&rt->rt6i_table, 0,
+                       sizeof(*rt) - sizeof(struct dst_entry));
 
        return rt;
 }
@@ -252,6 +257,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
        struct inet6_dev *idev = rt->rt6i_idev;
        struct inet_peer *peer = rt->rt6i_peer;
 
+       if (!(rt->dst.flags & DST_HOST))
+               dst_destroy_metrics_generic(dst);
+
        if (idev != NULL) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
@@ -723,9 +731,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
                        ipv6_addr_copy(&rt->rt6i_gateway, daddr);
                }
 
-               rt->rt6i_dst.plen = 128;
                rt->rt6i_flags |= RTF_CACHE;
-               rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
@@ -775,9 +781,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
        struct rt6_info *rt = ip6_rt_copy(ort, daddr);
 
        if (rt) {
-               rt->rt6i_dst.plen = 128;
                rt->rt6i_flags |= RTF_CACHE;
-               rt->dst.flags |= DST_HOST;
                dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
        }
        return rt;
@@ -1078,12 +1082,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
                        neigh = NULL;
        }
 
-       rt->rt6i_idev     = idev;
+       rt->dst.flags |= DST_HOST;
+       rt->dst.output  = ip6_output;
        dst_set_neighbour(&rt->dst, neigh);
        atomic_set(&rt->dst.__refcnt, 1);
-       ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
        dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
-       rt->dst.output  = ip6_output;
+
+       ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+       rt->rt6i_dst.plen = 128;
+       rt->rt6i_idev     = idev;
 
        spin_lock_bh(&icmp6_dst_lock);
        rt->dst.next = icmp6_dst_gc_list;
@@ -1261,6 +1268,14 @@ int ip6_route_add(struct fib6_config *cfg)
        if (rt->rt6i_dst.plen == 128)
               rt->dst.flags |= DST_HOST;
 
+       if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
+               u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+               if (!metrics) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+               dst_init_metrics(&rt->dst, metrics, 0);
+       }
 #ifdef CONFIG_IPV6_SUBTREES
        ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
        rt->rt6i_src.plen = cfg->fc_src_len;
@@ -1607,9 +1622,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
        if (on_link)
                nrt->rt6i_flags &= ~RTF_GATEWAY;
 
-       nrt->rt6i_dst.plen = 128;
-       nrt->dst.flags |= DST_HOST;
-
        ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
        dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
 
@@ -1754,9 +1766,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
        if (rt) {
                rt->dst.input = ort->dst.input;
                rt->dst.output = ort->dst.output;
+               rt->dst.flags |= DST_HOST;
 
                ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
-               rt->rt6i_dst.plen = ort->rt6i_dst.plen;
+               rt->rt6i_dst.plen = 128;
                dst_copy_metrics(&rt->dst, &ort->dst);
                rt->dst.error = ort->dst.error;
                rt->rt6i_idev = ort->rt6i_idev;
index d1fb63f..7b8fc57 100644 (file)
@@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
        return tcp_v6_send_synack(sk, req, rvp);
 }
 
-static inline void syn_flood_warning(struct sk_buff *skb)
-{
-#ifdef CONFIG_SYN_COOKIES
-       if (sysctl_tcp_syncookies)
-               printk(KERN_INFO
-                      "TCPv6: Possible SYN flooding on port %d. "
-                      "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
-       else
-#endif
-               printk(KERN_INFO
-                      "TCPv6: Possible SYN flooding on port %d. "
-                      "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
-}
-
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
        kfree_skb(inet6_rsk(req)->pktopts);
@@ -605,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
                        }
                        sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                }
-               if (tcp_alloc_md5sig_pool(sk) == NULL) {
+               if (tp->md5sig_info->entries6 == 0 &&
+                       tcp_alloc_md5sig_pool(sk) == NULL) {
                        kfree(newkey);
                        return -ENOMEM;
                }
@@ -614,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
                                       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
 
                        if (!keys) {
-                               tcp_free_md5sig_pool();
                                kfree(newkey);
+                               if (tp->md5sig_info->entries6 == 0)
+                                       tcp_free_md5sig_pool();
                                return -ENOMEM;
                        }
 
@@ -661,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
                                kfree(tp->md5sig_info->keys6);
                                tp->md5sig_info->keys6 = NULL;
                                tp->md5sig_info->alloced6 = 0;
+                               tcp_free_md5sig_pool();
                        } else {
                                /* shrink the database */
                                if (tp->md5sig_info->entries6 != i)
@@ -669,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
                                                (tp->md5sig_info->entries6 - i)
                                                * sizeof (tp->md5sig_info->keys6[0]));
                        }
-                       tcp_free_md5sig_pool();
                        return 0;
                }
        }
@@ -1179,11 +1167,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 isn = TCP_SKB_CB(skb)->when;
        struct dst_entry *dst = NULL;
-#ifdef CONFIG_SYN_COOKIES
        int want_cookie = 0;
-#else
-#define want_cookie 0
-#endif
 
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);
@@ -1192,14 +1176,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop;
 
        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-               if (net_ratelimit())
-                       syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-               if (sysctl_tcp_syncookies)
-                       want_cookie = 1;
-               else
-#endif
-               goto drop;
+               want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
+               if (!want_cookie)
+                       goto drop;
        }
 
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
@@ -1249,9 +1228,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                while (l-- > 0)
                        *c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
                want_cookie = 0;        /* not our kind of cookie */
-#endif
                tmp_ext.cookie_out_never = 0; /* false */
                tmp_ext.cookie_plus = tmp_opt.cookie_plus;
        } else if (!tp->rx_opt.cookie_in_always) {
@@ -1408,6 +1385,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
 #endif
 
+               newnp->ipv6_ac_list = NULL;
+               newnp->ipv6_fl_list = NULL;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = inet6_iif(skb);
@@ -1472,6 +1451,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
           First: no IPv4 options.
         */
        newinet->inet_opt = NULL;
+       newnp->ipv6_ac_list = NULL;
        newnp->ipv6_fl_list = NULL;
 
        /* Clone RX bits */
index 29213b5..bb95e8e 100644 (file)
@@ -1090,8 +1090,8 @@ do_udp_sendmsg:
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(*opt);
 
-               err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
-                                       &tclass, &dontfrag);
+               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                       &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index d0b70da..2615ffc 100644 (file)
@@ -40,9 +40,9 @@ extern int  sysctl_slot_timeout;
 extern int  sysctl_fast_poll_increase;
 extern char sysctl_devname[];
 extern int  sysctl_max_baud_rate;
-extern int  sysctl_min_tx_turn_time;
-extern int  sysctl_max_tx_data_size;
-extern int  sysctl_max_tx_window;
+extern unsigned int sysctl_min_tx_turn_time;
+extern unsigned int sysctl_max_tx_data_size;
+extern unsigned int sysctl_max_tx_window;
 extern int  sysctl_max_noreply_time;
 extern int  sysctl_warn_noreply_time;
 extern int  sysctl_lap_keepalive_time;
index 1b51bcf..4369f7f 100644 (file)
@@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12;
  * Default is 10us which means using the unmodified value given by the
  * peer except if it's 0 (0 is likely a bug in the other stack).
  */
-unsigned sysctl_min_tx_turn_time = 10;
+unsigned int sysctl_min_tx_turn_time = 10;
 /*
  * Maximum data size to be used in transmission in payload of LAP frame.
  * There is a bit of confusion in the IrDA spec :
@@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10;
  * bytes frames or all negotiated frame sizes, but you can use the sysctl
  * to play with this value anyway.
  * Jean II */
-unsigned sysctl_max_tx_data_size = 2042;
+unsigned int sysctl_max_tx_data_size = 2042;
 /*
  * Maximum transmit window, i.e. number of LAP frames between turn-around.
  * This allow to override what the peer told us. Some peers are buggy and
  * don't always support what they tell us.
  * Jean II */
-unsigned sysctl_max_tx_window = 7;
+unsigned int sysctl_max_tx_window = 7;
 
 static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
 static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
index 3db78b6..21070e9 100644 (file)
@@ -665,7 +665,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
                BUG_ON(!sdata->bss);
 
                atomic_dec(&sdata->bss->num_sta_ps);
-               __sta_info_clear_tim_bit(sdata->bss, sta);
+               sta_info_clear_tim_bit(sta);
        }
 
        local->num_sta--;
index 2b771dc..5290ac3 100644 (file)
@@ -3679,7 +3679,7 @@ int __net_init ip_vs_control_net_init(struct net *net)
        int idx;
        struct netns_ipvs *ipvs = net_ipvs(net);
 
-       ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+       rwlock_init(&ipvs->rs_lock);
 
        /* Initialize rs_table */
        for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
index 2fd4565..31d56b2 100644 (file)
@@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
                break;
 
        case PPTP_WAN_ERROR_NOTIFY:
+       case PPTP_SET_LINK_INFO:
        case PPTP_ECHO_REQUEST:
        case PPTP_ECHO_REPLY:
                /* I don't have to explain these ;) */
index 37bf943..8235b86 100644 (file)
@@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb,
                        if (opsize < 2) /* "silly options" */
                                return;
                        if (opsize > length)
-                               break;  /* don't parse partial options */
+                               return; /* don't parse partial options */
 
                        if (opcode == TCPOPT_SACK_PERM
                            && opsize == TCPOLEN_SACK_PERM)
@@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
        BUG_ON(ptr == NULL);
 
        /* Fast path for timestamp-only option */
-       if (length == TCPOLEN_TSTAMP_ALIGNED*4
+       if (length == TCPOLEN_TSTAMP_ALIGNED
            && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
                                       | (TCPOPT_NOP << 16)
                                       | (TCPOPT_TIMESTAMP << 8)
@@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
                        if (opsize < 2) /* "silly options" */
                                return;
                        if (opsize > length)
-                               break;  /* don't parse partial options */
+                               return; /* don't parse partial options */
 
                        if (opcode == TCPOPT_SACK
                            && opsize >= (TCPOLEN_SACK_BASE
index 00bd475..a80b0cb 100644 (file)
@@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[])
                return NULL;
 
        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
-       verdict = ntohl(vhdr->verdict);
-       if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT)
+       verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
+       if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
                return NULL;
        return vhdr;
 }
index 76a0831..ed0db15 100644 (file)
@@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 {
        struct xt_rateest_match_info *info = par->matchinfo;
        struct xt_rateest *est1, *est2;
-       int ret = false;
+       int ret = -EINVAL;
 
        if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
                                     XT_RATEEST_MATCH_REL)) != 1)
@@ -101,13 +101,12 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
        if (!est1)
                goto err1;
 
+       est2 = NULL;
        if (info->flags & XT_RATEEST_MATCH_REL) {
                est2 = xt_rateest_lookup(info->name2);
                if (!est2)
                        goto err2;
-       } else
-               est2 = NULL;
-
+       }
 
        info->est1 = est1;
        info->est2 = est2;
@@ -116,7 +115,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 err2:
        xt_rateest_put(est1);
 err1:
-       return -EINVAL;
+       return ret;
 }
 
 static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
index c698cec..fabb4fa 100644 (file)
@@ -961,7 +961,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        return 0;
 
 drop_n_acct:
-       po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
+       spin_lock(&sk->sk_receive_queue.lock);
+       po->stats.tp_drops++;
+       atomic_inc(&sk->sk_drops);
+       spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
        if (skb_head != skb->data && skb_shared(skb)) {
index 8b77edb..4e1de17 100644 (file)
@@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
                        struct list_head *unmap_list,
-                       struct list_head *kill_list);
+                       struct list_head *kill_list,
+                       int *unpinned);
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 
 static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
@@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
        LIST_HEAD(unmap_list);
        LIST_HEAD(kill_list);
        unsigned long flags;
-       unsigned int nfreed = 0, ncleaned = 0, free_goal;
+       unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
        int ret = 0;
 
        rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
@@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
         * will be destroyed by the unmap function.
         */
        if (!list_empty(&unmap_list)) {
-               ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
+               ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
+                                                    &kill_list, &unpinned);
                /* If we've been asked to destroy all MRs, move those
                 * that were simply cleaned to the kill list */
                if (free_all)
@@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
                spin_unlock_irqrestore(&pool->list_lock, flags);
        }
 
+       atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(ncleaned, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);
 
@@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
 
 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
                                struct list_head *unmap_list,
-                               struct list_head *kill_list)
+                               struct list_head *kill_list,
+                               int *unpinned)
 {
        struct rds_iw_mapping *mapping, *next;
        unsigned int ncleaned = 0;
@@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 
                spin_lock_irqsave(&pool->list_lock, flags);
                list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
+                       *unpinned += mapping->m_sg.len;
                        list_move(&mapping->m_list, &laundered);
                        ncleaned++;
                }
index be4505e..b014279 100644 (file)
@@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
        struct rsvp_filter *f, **fp;
        struct rsvp_session *s, **sp;
        struct tc_rsvp_pinfo *pinfo = NULL;
-       struct nlattr *opt = tca[TCA_OPTIONS-1];
+       struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_RSVP_MAX + 1];
        struct tcf_exts e;
        unsigned int h1, h2;
@@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
        if (err < 0)
                return err;
 
-       err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map);
+       err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
        if (err < 0)
                return err;
 
@@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 
                if (f->handle != handle && handle)
                        goto errout2;
-               if (tb[TCA_RSVP_CLASSID-1]) {
-                       f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+               if (tb[TCA_RSVP_CLASSID]) {
+                       f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
                        tcf_bind_filter(tp, &f->res, base);
                }
 
@@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
        err = -EINVAL;
        if (handle)
                goto errout2;
-       if (tb[TCA_RSVP_DST-1] == NULL)
+       if (tb[TCA_RSVP_DST] == NULL)
                goto errout2;
 
        err = -ENOBUFS;
@@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
                goto errout2;
 
        h2 = 16;
-       if (tb[TCA_RSVP_SRC-1]) {
-               memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src));
+       if (tb[TCA_RSVP_SRC]) {
+               memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
                h2 = hash_src(f->src);
        }
-       if (tb[TCA_RSVP_PINFO-1]) {
-               pinfo = nla_data(tb[TCA_RSVP_PINFO-1]);
+       if (tb[TCA_RSVP_PINFO]) {
+               pinfo = nla_data(tb[TCA_RSVP_PINFO]);
                f->spi = pinfo->spi;
                f->tunnelhdr = pinfo->tunnelhdr;
        }
-       if (tb[TCA_RSVP_CLASSID-1])
-               f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+       if (tb[TCA_RSVP_CLASSID])
+               f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
 
-       dst = nla_data(tb[TCA_RSVP_DST-1]);
+       dst = nla_data(tb[TCA_RSVP_DST]);
        h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
 
        err = -ENOMEM;
@@ -642,8 +642,7 @@ nla_put_failure:
        return -1;
 }
 
-static struct tcf_proto_ops RSVP_OPS = {
-       .next           =       NULL,
+static struct tcf_proto_ops RSVP_OPS __read_mostly = {
        .kind           =       RSVP_ID,
        .classify       =       rsvp_classify,
        .init           =       rsvp_init,
index 167c880..76388b0 100644 (file)
@@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                case SCTP_CMD_PURGE_ASCONF_QUEUE:
                        sctp_asconf_queue_teardown(asoc);
                        break;
+
+               case SCTP_CMD_SET_ASOC:
+                       asoc = cmd->obj.asoc;
+                       break;
+
                default:
                        pr_warn("Impossible command: %u, %p\n",
                                cmd->verb, cmd->obj.ptr);
index 49b847b..a0f31e6 100644 (file)
@@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
+       /* Restore association pointer to provide the SCTP command interpreter
+        * with a valid context in case it needs to manipulate
+        * the queues. */
+       sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
+                        SCTP_ASOC((struct sctp_association *)asoc));
+
        return retval;
 
 nomem:
index e83e7fe..ea40d54 100644 (file)
@@ -4113,9 +4113,12 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
                if (len % sizeof(u32))
                        return -EINVAL;
 
+               if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES)
+                       return -EINVAL;
+
                memcpy(settings->akm_suites, data, len);
 
-               for (i = 0; i < settings->n_ciphers_pairwise; i++)
+               for (i = 0; i < settings->n_akm_suites; i++)
                        if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
                                return -EINVAL;
        }
index 02751db..68a471b 100644 (file)
@@ -852,6 +852,7 @@ static void handle_channel(struct wiphy *wiphy,
                return;
        }
 
+       chan->beacon_found = false;
        chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
        chan->max_antenna_gain = min(chan->orig_mag,
                (int) MBI_TO_DBI(power_rule->max_antenna_gain));
index b7b6ff8..dec0fa2 100644 (file)
@@ -118,6 +118,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
                             i++, j++)
                                request->channels[i] =
                                        &wdev->wiphy->bands[band]->channels[j];
+                       request->rates[band] =
+                               (1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
                }
        }
        request->n_channels = n_channels;
index a026b0e..54a0dc2 100644 (file)
@@ -212,6 +212,11 @@ resume:
                /* only the first xfrm gets the encap type */
                encap_type = 0;
 
+               if (async && x->repl->check(x, skb, seq)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
+                       goto drop_unlock;
+               }
+
                x->repl->advance(x, seq);
 
                x->curlft.bytes += skb->len;
index 94fdcc7..552df27 100644 (file)
@@ -1349,14 +1349,16 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
                BUG();
        }
        xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
-       memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry));
-       xfrm_policy_put_afinfo(afinfo);
 
-       if (likely(xdst))
+       if (likely(xdst)) {
+               memset(&xdst->u.rt6.rt6i_table, 0,
+                       sizeof(*xdst) - sizeof(struct dst_entry));
                xdst->flo.ops = &xfrm_bundle_fc_ops;
-       else
+       } else
                xdst = ERR_PTR(-ENOBUFS);
 
+       xfrm_policy_put_afinfo(afinfo);
+
        return xdst;
 }
 
index 86d0caf..62e90b8 100644 (file)
@@ -1761,6 +1761,10 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
        snd_pcm_uframes_t avail = 0;
        long wait_time, tout;
 
+       init_waitqueue_entry(&wait, current);
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&runtime->tsleep, &wait);
+
        if (runtime->no_period_wakeup)
                wait_time = MAX_SCHEDULE_TIMEOUT;
        else {
@@ -1771,16 +1775,32 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
                }
                wait_time = msecs_to_jiffies(wait_time * 1000);
        }
-       init_waitqueue_entry(&wait, current);
-       add_wait_queue(&runtime->tsleep, &wait);
+
        for (;;) {
                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        break;
                }
+
+               /*
+                * We need to check if space already became available
+                * (and thus the wakeup already happened) first, to close
+                * the race of space having become available before we
+                * registered as a waiter.  This check must happen after
+                * having been added to the waitqueue and after setting
+                * the current state to INTERRUPTIBLE.
+                */
+               if (is_playback)
+                       avail = snd_pcm_playback_avail(runtime);
+               else
+                       avail = snd_pcm_capture_avail(runtime);
+               if (avail >= runtime->twake)
+                       break;
                snd_pcm_stream_unlock_irq(substream);
-               tout = schedule_timeout_interruptible(wait_time);
+
+               tout = schedule_timeout(wait_time);
+
                snd_pcm_stream_lock_irq(substream);
+               set_current_state(TASK_INTERRUPTIBLE);
                switch (runtime->status->state) {
                case SNDRV_PCM_STATE_SUSPENDED:
                        err = -ESTRPIPE;
@@ -1806,14 +1826,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
                        err = -EIO;
                        break;
                }
-               if (is_playback)
-                       avail = snd_pcm_playback_avail(runtime);
-               else
-                       avail = snd_pcm_capture_avail(runtime);
-               if (avail >= runtime->twake)
-                       break;
        }
  _endloop:
+       set_current_state(TASK_RUNNING);
        remove_wait_queue(&runtime->tsleep, &wait);
        *availp = avail;
        return err;
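
The reshuffle above is the classic lost-wakeup fix: register on the waitqueue and set TASK_INTERRUPTIBLE first, then re-check whether space is already available, and only then sleep. A small userspace analogue of the same ordering using pthreads (an illustration of the pattern, not the ALSA code) checks the predicate only after taking the lock that the waker also takes:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int avail;                       /* the "space became available" predicate */

static void *producer(void *arg)
{
        (void)arg;
        usleep(10000);
        pthread_mutex_lock(&lock);
        avail = 1;                      /* make space available ... */
        pthread_cond_signal(&cond);     /* ... then wake the waiter */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, producer, NULL);
        pthread_mutex_lock(&lock);
        while (!avail)                  /* re-check after becoming a waiter, */
                pthread_cond_wait(&cond, &lock);        /* never before sleeping */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        puts("woken without losing the wakeup");
        return 0;
}

Build with -lpthread. Checking the condition before registering as a waiter is exactly the window the hunk above closes.
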
index f9123f0..32b02d9 100644 (file)
@@ -68,6 +68,7 @@ MODULE_PARM_DESC(enable, "Enable FM801 soundcard.");
 module_param_array(tea575x_tuner, int, NULL, 0444);
 MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2=SF256-PCP, 3=SF64-PCR, 8=disable, +16=tuner-only).");
 
+#define TUNER_DISABLED         (1<<3)
 #define TUNER_ONLY             (1<<4)
 #define TUNER_TYPE_MASK                (~TUNER_ONLY & 0xFFFF)
 
@@ -1150,7 +1151,8 @@ static int snd_fm801_free(struct fm801 *chip)
 
       __end_hw:
 #ifdef CONFIG_SND_FM801_TEA575X_BOOL
-       snd_tea575x_exit(&chip->tea);
+       if (!(chip->tea575x_tuner & TUNER_DISABLED))
+               snd_tea575x_exit(&chip->tea);
 #endif
        if (chip->irq >= 0)
                free_irq(chip->irq, chip);
@@ -1236,7 +1238,6 @@ static int __devinit snd_fm801_create(struct snd_card *card,
            (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
                if (snd_tea575x_init(&chip->tea)) {
                        snd_printk(KERN_ERR "TEA575x radio not found\n");
-                       snd_fm801_free(chip);
                        return -ENODEV;
                }
        } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) {
@@ -1251,11 +1252,15 @@ static int __devinit snd_fm801_create(struct snd_card *card,
                }
                if (tea575x_tuner == 4) {
                        snd_printk(KERN_ERR "TEA575x radio not found\n");
-                       snd_fm801_free(chip);
-                       return -ENODEV;
+                       chip->tea575x_tuner = TUNER_DISABLED;
                }
        }
-       strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card));
+       if (!(chip->tea575x_tuner & TUNER_DISABLED)) {
+               strlcpy(chip->tea.card,
+                       snd_fm801_tea575x_gpios[(tea575x_tuner &
+                                                TUNER_TYPE_MASK) - 1].name,
+                       sizeof(chip->tea.card));
+       }
 #endif
 
        *rchip = chip;
index 3e7850c..f3aefef 100644 (file)
@@ -579,9 +579,13 @@ int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
                return -1;
        }
        recursive++;
-       for (i = 0; i < nums; i++)
+       for (i = 0; i < nums; i++) {
+               unsigned int type = get_wcaps_type(get_wcaps(codec, conn[i]));
+               if (type == AC_WID_PIN || type == AC_WID_AUD_OUT)
+                       continue;
                if (snd_hda_get_conn_index(codec, conn[i], nid, recursive) >= 0)
                        return i;
+       }
        return -1;
 }
 EXPORT_SYMBOL_HDA(snd_hda_get_conn_index);
index be69822..e9a2a87 100644 (file)
@@ -1924,7 +1924,8 @@ static unsigned int azx_via_get_position(struct azx *chip,
 }
 
 static unsigned int azx_get_position(struct azx *chip,
-                                    struct azx_dev *azx_dev)
+                                    struct azx_dev *azx_dev,
+                                    bool with_check)
 {
        unsigned int pos;
        int stream = azx_dev->substream->stream;
@@ -1940,7 +1941,7 @@ static unsigned int azx_get_position(struct azx *chip,
        default:
                /* use the position buffer */
                pos = le32_to_cpu(*azx_dev->posbuf);
-               if (chip->position_fix[stream] == POS_FIX_AUTO) {
+               if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
                        if (!pos || pos == (u32)-1) {
                                printk(KERN_WARNING
                                       "hda-intel: Invalid position buffer, "
@@ -1964,7 +1965,7 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
        struct azx *chip = apcm->chip;
        struct azx_dev *azx_dev = get_azx_dev(substream);
        return bytes_to_frames(substream->runtime,
-                              azx_get_position(chip, azx_dev));
+                              azx_get_position(chip, azx_dev, false));
 }
 
 /*
@@ -1987,7 +1988,7 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
                return -1;      /* bogus (too early) interrupt */
 
        stream = azx_dev->substream->stream;
-       pos = azx_get_position(chip, azx_dev);
+       pos = azx_get_position(chip, azx_dev, true);
 
        if (WARN_ONCE(!azx_dev->period_bytes,
                      "hda-intel: zero azx_dev->period_bytes"))
index d6c93d9..c45f3e6 100644 (file)
@@ -535,7 +535,7 @@ static int add_volume(struct hda_codec *codec, const char *name,
                      int index, unsigned int pval, int dir,
                      struct snd_kcontrol **kctlp)
 {
-       char tmp[32];
+       char tmp[44];
        struct snd_kcontrol_new knew =
                HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT);
        knew.private_value = pval;
index 7cabd73..7a73621 100644 (file)
@@ -168,7 +168,7 @@ struct alc_spec {
        unsigned int auto_mic_valid_imux:1;     /* valid imux for auto-mic */
        unsigned int automute:1;        /* HP automute enabled */
        unsigned int detect_line:1;     /* Line-out detection enabled */
-       unsigned int automute_lines:1;  /* automute line-out as well */
+       unsigned int automute_lines:1;  /* automute line-out as well; NOP when automute_hp_lo isn't set */
        unsigned int automute_hp_lo:1;  /* both HP and LO available */
 
        /* other flags */
@@ -551,7 +551,7 @@ static void update_speakers(struct hda_codec *codec)
        if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0] ||
            spec->autocfg.line_out_pins[0] == spec->autocfg.speaker_pins[0])
                return;
-       if (!spec->automute_lines || !spec->automute)
+       if (!spec->automute || (spec->automute_hp_lo && !spec->automute_lines))
                on = 0;
        else
                on = spec->jack_present;
@@ -578,6 +578,10 @@ static void alc_line_automute(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
 
+       /* check LO jack only when it's different from HP */
+       if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0])
+               return;
+
        spec->line_jack_present =
                detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins),
                             spec->autocfg.line_out_pins);
@@ -803,7 +807,7 @@ static int alc_automute_mode_get(struct snd_kcontrol *kcontrol,
        unsigned int val;
        if (!spec->automute)
                val = 0;
-       else if (!spec->automute_lines)
+       else if (!spec->automute_hp_lo || !spec->automute_lines)
                val = 1;
        else
                val = 2;
@@ -824,7 +828,8 @@ static int alc_automute_mode_put(struct snd_kcontrol *kcontrol,
                spec->automute = 0;
                break;
        case 1:
-               if (spec->automute && !spec->automute_lines)
+               if (spec->automute &&
+                   (!spec->automute_hp_lo || !spec->automute_lines))
                        return 0;
                spec->automute = 1;
                spec->automute_lines = 0;
@@ -1320,7 +1325,9 @@ do_sku:
         * 15   : 1 --> enable the function "Mute internal speaker
         *              when the external headphone out jack is plugged"
         */
-       if (!spec->autocfg.hp_pins[0]) {
+       if (!spec->autocfg.hp_pins[0] &&
+           !(spec->autocfg.line_out_pins[0] &&
+             spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
                hda_nid_t nid;
                tmp = (ass >> 11) & 0x3;        /* HP to chassis */
                if (tmp == 0)
index 5145b66..987e3cf 100644 (file)
@@ -5630,6 +5630,7 @@ again:
        switch (codec->vendor_id) {
        case 0x111d76d1:
        case 0x111d76d9:
+       case 0x111d76df:
        case 0x111d76e5:
        case 0x111d7666:
        case 0x111d7667:
@@ -6573,6 +6574,7 @@ static const struct hda_codec_preset snd_hda_preset_sigmatel[] = {
        { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx },
        { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
        { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76df, .name = "92HD93BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
index a118a0f..5956584 100644 (file)
@@ -103,7 +103,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
                .cpu_dai_name = "bfin-tdm.0",
                .codec_dai_name ="ad193x-hifi",
                .platform_name = "bfin-tdm-pcm-audio",
-               .codec_name = "ad193x.5",
+               .codec_name = "spi0.5",
                .ops = &bf5xx_ad193x_ops,
        },
        {
@@ -112,7 +112,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
                .cpu_dai_name = "bfin-tdm.1",
                .codec_dai_name ="ad193x-hifi",
                .platform_name = "bfin-tdm-pcm-audio",
-               .codec_name = "ad193x.5",
+               .codec_name = "spi0.5",
                .ops = &bf5xx_ad193x_ops,
        },
 };
index 732a247..b94eb7e 100644 (file)
@@ -128,7 +128,7 @@ static int snd_ad73311_configure(void)
        return 0;
 }
 
-static int bf5xx_probe(struct platform_device *pdev)
+static int bf5xx_probe(struct snd_soc_card *card)
 {
        int err;
        if (gpio_request(GPIO_SE, "AD73311_SE")) {
index 84f4ad5..9801cd7 100644 (file)
@@ -431,7 +431,8 @@ static int ssm2602_set_dai_fmt(struct snd_soc_dai *codec_dai,
 static int ssm2602_set_bias_level(struct snd_soc_codec *codec,
                                 enum snd_soc_bias_level level)
 {
-       u16 reg = snd_soc_read(codec, SSM2602_PWR) & 0xff7f;
+       u16 reg = snd_soc_read(codec, SSM2602_PWR);
+       reg &= ~(PWR_POWER_OFF | PWR_OSC_PDN);
 
        switch (level) {
        case SND_SOC_BIAS_ON:
index ffa2ffe..aa091a0 100644 (file)
@@ -1454,8 +1454,8 @@ static int wm8753_probe(struct snd_soc_codec *codec)
        /* set the update bits */
        snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
        snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
-       snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
-       snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
+       snd_soc_update_bits(codec, WM8753_LADC, 0x0100, 0x0100);
+       snd_soc_update_bits(codec, WM8753_RADC, 0x0100, 0x0100);
        snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100);
        snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100);
        snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100);
index 1725550..d2c315f 100644 (file)
@@ -3479,31 +3479,6 @@ int wm8962_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
 }
 EXPORT_SYMBOL_GPL(wm8962_mic_detect);
 
-#ifdef CONFIG_PM
-static int wm8962_resume(struct snd_soc_codec *codec)
-{
-       u16 *reg_cache = codec->reg_cache;
-       int i;
-
-       /* Restore the registers */
-       for (i = 1; i < codec->driver->reg_cache_size; i++) {
-               switch (i) {
-               case WM8962_SOFTWARE_RESET:
-                       continue;
-               default:
-                       break;
-               }
-
-               if (reg_cache[i] != wm8962_reg[i])
-                       snd_soc_write(codec, i, reg_cache[i]);
-       }
-
-       return 0;
-}
-#else
-#define wm8962_resume NULL
-#endif
-
 #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
 static int beep_rates[] = {
        500, 1000, 2000, 4000,
@@ -4015,7 +3990,6 @@ static int wm8962_remove(struct snd_soc_codec *codec)
 static struct snd_soc_codec_driver soc_codec_dev_wm8962 = {
        .probe =        wm8962_probe,
        .remove =       wm8962_remove,
-       .resume =       wm8962_resume,
        .set_bias_level = wm8962_set_bias_level,
        .reg_cache_size = WM8962_MAX_REGISTER + 1,
        .reg_word_size = sizeof(u16),
index fd0dc46..5c6c245 100644 (file)
@@ -369,7 +369,7 @@ static struct snd_soc_platform_driver mpc5200_audio_dma_platform = {
        .pcm_free       = &psc_dma_free,
 };
 
-static int mpc5200_hpcd_probe(struct of_device *op)
+static int mpc5200_hpcd_probe(struct platform_device *op)
 {
        phys_addr_t fifo;
        struct psc_dma *psc_dma;
@@ -487,7 +487,7 @@ out_unmap:
        return ret;
 }
 
-static int mpc5200_hpcd_remove(struct of_device *op)
+static int mpc5200_hpcd_remove(struct platform_device *op)
 {
        struct psc_dma *psc_dma = dev_get_drvdata(&op->dev);
 
@@ -519,7 +519,7 @@ MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match);
 static struct platform_driver mpc5200_hpcd_of_driver = {
        .probe          = mpc5200_hpcd_probe,
        .remove         = mpc5200_hpcd_remove,
-       .dev = {
+       .driver = {
                .owner          = THIS_MODULE,
                .name           = "mpc5200-pcm-audio",
                .of_match_table    = mpc5200_hpcd_match,
index 309c59e..7945625 100644 (file)
@@ -240,7 +240,6 @@ static int ssi_irq = 0;
 
 static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
 {
-       struct snd_card *card = rtd->card->snd_card;
        struct snd_soc_dai *dai = rtd->cpu_dai;
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
index 8f16cd3..d0bcf3f 100644 (file)
@@ -424,7 +424,7 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)
        if (!priv->mem) {
                dev_err(&pdev->dev, "request_mem_region failed\n");
                err = -EBUSY;
-               goto error_alloc;
+               goto err_alloc;
        }
 
        priv->io = ioremap(priv->mem->start, SZ_16K);
index 928f037..50e5919 100644 (file)
@@ -449,7 +449,7 @@ exit:
        return ret;
 }
 
-int __devexit omap_mcpdm_remove(struct platform_device *pdev)
+int omap_mcpdm_remove(struct platform_device *pdev)
 {
        struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev);
 
index df3e16f..20c20a8 100644 (file)
@@ -150,4 +150,4 @@ extern int omap_mcpdm_request(void);
 extern void omap_mcpdm_free(void);
 extern int omap_mcpdm_set_offset(int offset1, int offset2);
 int __devinit omap_mcpdm_probe(struct platform_device *pdev);
-int __devexit omap_mcpdm_remove(struct platform_device *pdev);
+int omap_mcpdm_remove(struct platform_device *pdev);
index ebcc2d4..478d607 100644 (file)
@@ -516,6 +516,12 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
        struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
        int err = 0;
 
+       if (mcbsp_data->active)
+               if (freq == mcbsp_data->in_freq)
+                       return 0;
+               else
+                       return -EBUSY;
+
        /* The McBSP signal muxing functions are only available on McBSP1 */
        if (clk_id == OMAP_MCBSP_CLKR_SRC_CLKR ||
            clk_id == OMAP_MCBSP_CLKR_SRC_CLKX ||
index b644575..2b8350b 100644 (file)
@@ -196,20 +196,20 @@ static int zylonite_probe(struct snd_soc_card *card)
        if (clk_pout) {
                pout = clk_get(NULL, "CLK_POUT");
                if (IS_ERR(pout)) {
-                       dev_err(&pdev->dev, "Unable to obtain CLK_POUT: %ld\n",
+                       dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n",
                                PTR_ERR(pout));
                        return PTR_ERR(pout);
                }
 
                ret = clk_enable(pout);
                if (ret != 0) {
-                       dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n",
+                       dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
                                ret);
                        clk_put(pout);
                        return ret;
                }
 
-               dev_dbg(&pdev->dev, "MCLK enabled at %luHz\n",
+               dev_dbg(card->dev, "MCLK enabled at %luHz\n",
                        clk_get_rate(pout));
        }
 
@@ -241,7 +241,7 @@ static int zylonite_resume_pre(struct snd_soc_card *card)
        if (clk_pout) {
                ret = clk_enable(pout);
                if (ret != 0)
-                       dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n",
+                       dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
                                ret);
        }
 
index d9f8ade..20b7f3b 100644 (file)
@@ -203,14 +203,14 @@ static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
                rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
                for (i = 0; i < rbnode->blklen; ++i) {
                        regtmp = rbnode->base_reg + i;
-                       WARN_ON(codec->writable_register &&
-                               codec->writable_register(codec, regtmp));
                        val = snd_soc_rbtree_get_register(rbnode, i);
                        def = snd_soc_get_cache_val(codec->reg_def_copy, i,
                                                    rbnode->word_size);
                        if (val == def)
                                continue;
 
+                       WARN_ON(!snd_soc_codec_writable_register(codec, regtmp));
+
                        codec->cache_bypass = 1;
                        ret = snd_soc_write(codec, regtmp, val);
                        codec->cache_bypass = 0;
@@ -563,8 +563,7 @@ static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
 
        lzo_blocks = codec->reg_cache;
        for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
-               WARN_ON(codec->writable_register &&
-                       codec->writable_register(codec, i));
+               WARN_ON(!snd_soc_codec_writable_register(codec, i));
                ret = snd_soc_cache_read(codec, i, &val);
                if (ret)
                        return ret;
@@ -823,8 +822,6 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
 
        codec_drv = codec->driver;
        for (i = 0; i < codec_drv->reg_cache_size; ++i) {
-               WARN_ON(codec->writable_register &&
-                       codec->writable_register(codec, i));
                ret = snd_soc_cache_read(codec, i, &val);
                if (ret)
                        return ret;
@@ -832,6 +829,9 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
                        if (snd_soc_get_cache_val(codec->reg_def_copy,
                                                  i, codec_drv->reg_word_size) == val)
                                continue;
+
+               WARN_ON(!snd_soc_codec_writable_register(codec, i));
+
                ret = snd_soc_write(codec, i, val);
                if (ret)
                        return ret;
index b085d8e..ef69f5a 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/bitops.h>
 #include <linux/debugfs.h>
 #include <linux/platform_device.h>
+#include <linux/ctype.h>
 #include <linux/slab.h>
 #include <sound/ac97_codec.h>
 #include <sound/core.h>
@@ -1434,9 +1435,20 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
                 "%s", card->name);
        snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
                 "%s", card->long_name ? card->long_name : card->name);
-       if (card->driver_name)
-               strlcpy(card->snd_card->driver, card->driver_name,
-                       sizeof(card->snd_card->driver));
+       snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
+                "%s", card->driver_name ? card->driver_name : card->name);
+       for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) {
+               switch (card->snd_card->driver[i]) {
+               case '_':
+               case '-':
+               case '\0':
+                       break;
+               default:
+                       if (!isalnum(card->snd_card->driver[i]))
+                               card->snd_card->driver[i] = '_';
+                       break;
+               }
+       }
 
        if (card->late_probe) {
                ret = card->late_probe(card);
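
The loop added in the hunk above falls back to the card name for the ALSA driver string and then replaces anything that is not alphanumeric, '_' or '-' with an underscore, so the result stays usable as an identifier. A standalone sketch of that sanitisation (hypothetical helper name, not the soc-core function):

#include <ctype.h>
#include <stdio.h>

static void sanitize_driver_name(char *s)
{
        for (; *s; s++) {
                if (*s == '_' || *s == '-' || isalnum((unsigned char)*s))
                        continue;
                *s = '_';               /* anything else becomes an underscore */
        }
}

int main(void)
{
        char name[] = "My SoC Card 1.0";

        sanitize_driver_name(name);
        printf("%s\n", name);           /* prints "My_SoC_Card_1_0" */
        return 0;
}
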
@@ -1633,7 +1645,7 @@ int snd_soc_codec_readable_register(struct snd_soc_codec *codec,
        if (codec->readable_register)
                return codec->readable_register(codec, reg);
        else
-               return 0;
+               return 1;
 }
 EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register);
 
@@ -1651,7 +1663,7 @@ int snd_soc_codec_writable_register(struct snd_soc_codec *codec,
        if (codec->writable_register)
                return codec->writable_register(codec, reg);
        else
-               return 0;
+               return 1;
 }
 EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register);
 
index 7e15914..d67c637 100644 (file)
@@ -2763,7 +2763,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
 
 /**
  * snd_soc_dapm_free - free dapm resources
- * @card: SoC device
+ * @dapm: DAPM context
  *
  * Free all dapm widgets and resources.
  */
index 38b0013..fa31d9c 100644 (file)
@@ -105,7 +105,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
 
        snd_soc_dapm_sync(dapm);
 
-       snd_jack_report(jack->jack, status);
+       snd_jack_report(jack->jack, jack->status);
 
 out:
        mutex_unlock(&codec->mutex);
index 781d9e6..d8f2bf4 100644 (file)
@@ -530,8 +530,11 @@ snd_usb_audio_probe(struct usb_device *dev,
        return chip;
 
  __error:
-       if (chip && !chip->num_interfaces)
-               snd_card_free(chip->card);
+       if (chip) {
+               if (!chip->num_interfaces)
+                       snd_card_free(chip->card);
+               chip->probing = 0;
+       }
        mutex_unlock(&register_mutex);
  __err_val:
        return NULL;
index 3b8f7b8..e9d5c27 100644 (file)
@@ -30,6 +30,8 @@ endif
 # Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
 #
 # Define NO_DWARF if you do not want debug-info analysis feature at all.
+#
+# Define WERROR=0 to disable treating any warnings as errors.
 
 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
        @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
@@ -63,6 +65,11 @@ ifeq ($(ARCH),x86_64)
        endif
 endif
 
+# Treat warnings as errors unless directed not to
+ifneq ($(WERROR),0)
+       CFLAGS_WERROR := -Werror
+endif
+
 #
 # Include saner warnings here, which can catch bugs:
 #
@@ -95,7 +102,7 @@ ifndef PERF_DEBUG
   CFLAGS_OPTIMIZE = -O6
 endif
 
-CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
 ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 ALL_LDFLAGS = $(LDFLAGS)
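
With the two hunks above, warnings are still treated as errors by default, but the behaviour can now be switched off at build time, e.g. by running make WERROR=0 in the perf tool's source directory; leaving WERROR unset, or setting it to anything other than 0, keeps -Werror in CFLAGS as before.
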
index 6b0519f..f4c3fbe 100644 (file)
@@ -161,6 +161,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */
 
+       attr->disabled          = 1;
        attr->inherit           = !no_inherit;
        attr->read_format       = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                  PERF_FORMAT_TOTAL_TIME_RUNNING |
@@ -671,6 +672,8 @@ static int __cmd_record(int argc, const char **argv)
                }
        }
 
+       perf_evlist__enable(evsel_list);
+
        /*
         * Let the child rip
         */
index 55f4c76..efe696f 100644 (file)
@@ -561,7 +561,7 @@ static int test__basic_mmap(void)
                }
 
                err = perf_event__parse_sample(event, attr.sample_type, sample_size,
-                                              false, &sample);
+                                              false, &sample, false);
                if (err) {
                        pr_err("Can't parse sample, err = %d\n", err);
                        goto out_munmap;
index a43433f..d28013b 100644 (file)
@@ -191,7 +191,8 @@ static void __zero_source_counters(struct sym_entry *syme)
        symbol__annotate_zero_histograms(sym);
 }
 
-static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
+static void record_precise_ip(struct sym_entry *syme, struct map *map,
+                             int counter, u64 ip)
 {
        struct annotation *notes;
        struct symbol *sym;
@@ -205,8 +206,8 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
        if (pthread_mutex_trylock(&notes->lock))
                return;
 
-       ip = syme->map->map_ip(syme->map, ip);
-       symbol__inc_addr_samples(sym, syme->map, counter, ip);
+       ip = map->map_ip(map, ip);
+       symbol__inc_addr_samples(sym, map, counter, ip);
 
        pthread_mutex_unlock(&notes->lock);
 }
@@ -810,7 +811,7 @@ static void perf_event__process_sample(const union perf_event *event,
                evsel = perf_evlist__id2evsel(top.evlist, sample->id);
                assert(evsel != NULL);
                syme->count[evsel->idx]++;
-               record_precise_ip(syme, evsel->idx, ip);
+               record_precise_ip(syme, al.map, evsel->idx, ip);
                pthread_mutex_lock(&top.active_symbols_lock);
                if (list_empty(&syme->node) || !syme->node.next) {
                        static bool first = true;
index 3c1b8a6..437f8ca 100644 (file)
@@ -169,12 +169,17 @@ static int perf_event__synthesize_mmap_events(union perf_event *event,
                        continue;
                pbf += n + 3;
                if (*pbf == 'x') { /* vm_exec */
+                       char anonstr[] = "//anon\n";
                        char *execname = strchr(bf, '/');
 
                        /* Catch VDSO */
                        if (execname == NULL)
                                execname = strstr(bf, "[vdso]");
 
+                       /* Catch anonymous mmaps */
+                       if ((execname == NULL) && !strstr(bf, "["))
+                               execname = anonstr;
+
                        if (execname == NULL)
                                continue;
 
index 1d7f664..357a85b 100644 (file)
@@ -186,6 +186,6 @@ const char *perf_event__name(unsigned int id);
 
 int perf_event__parse_sample(const union perf_event *event, u64 type,
                             int sample_size, bool sample_id_all,
-                            struct perf_sample *sample);
+                            struct perf_sample *sample, bool swapped);
 
 #endif /* __PERF_RECORD_H */
index c12bd47..72e9f48 100644 (file)
@@ -113,6 +113,19 @@ void perf_evlist__disable(struct perf_evlist *evlist)
        }
 }
 
+void perf_evlist__enable(struct perf_evlist *evlist)
+{
+       int cpu, thread;
+       struct perf_evsel *pos;
+
+       for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+               list_for_each_entry(pos, &evlist->entries, node) {
+                       for (thread = 0; thread < evlist->threads->nr; thread++)
+                               ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
+               }
+       }
+}
+
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
        int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
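
Together with the attr->disabled = 1 change to builtin-record above, the new perf_evlist__enable() simply issues PERF_EVENT_IOC_ENABLE on every per-cpu, per-thread fd, so counting starts only once all events are set up. A minimal, self-contained Linux example of the same create-disabled-then-enable pattern for a single software counter (not perf's own code):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.disabled = 1;                      /* created disabled, as in the record change */

        fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);    /* what perf_evlist__enable() does per fd */
        for (volatile int i = 0; i < 1000000; i++)
                ;                               /* something to measure */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                printf("task-clock: %llu ns\n", (unsigned long long)count);
        close(fd);
        return 0;
}
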
index ce85ae9..f349150 100644 (file)
@@ -54,6 +54,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
 void perf_evlist__munmap(struct perf_evlist *evlist);
 
 void perf_evlist__disable(struct perf_evlist *evlist);
+void perf_evlist__enable(struct perf_evlist *evlist);
 
 static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
                                         struct cpu_map *cpus,
index a03a36b..e389815 100644 (file)
@@ -7,6 +7,8 @@
  * Released under the GPL v2. (and only v2, not any later version)
  */
 
+#include <byteswap.h>
+#include "asm/bug.h"
 #include "evsel.h"
 #include "evlist.h"
 #include "util.h"
@@ -342,10 +344,20 @@ static bool sample_overlap(const union perf_event *event,
 
 int perf_event__parse_sample(const union perf_event *event, u64 type,
                             int sample_size, bool sample_id_all,
-                            struct perf_sample *data)
+                            struct perf_sample *data, bool swapped)
 {
        const u64 *array;
 
+       /*
+        * used for cross-endian analysis. See git commit 65014ab3
+        * for why this goofiness is needed.
+        */
+       union {
+               u64 val64;
+               u32 val32[2];
+       } u;
+
+
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
 
@@ -366,9 +378,16 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
        }
 
        if (type & PERF_SAMPLE_TID) {
-               u32 *p = (u32 *)array;
-               data->pid = p[0];
-               data->tid = p[1];
+               u.val64 = *array;
+               if (swapped) {
+                       /* undo swap of u64, then swap on individual u32s */
+                       u.val64 = bswap_64(u.val64);
+                       u.val32[0] = bswap_32(u.val32[0]);
+                       u.val32[1] = bswap_32(u.val32[1]);
+               }
+
+               data->pid = u.val32[0];
+               data->tid = u.val32[1];
                array++;
        }
 
@@ -395,8 +414,15 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
        }
 
        if (type & PERF_SAMPLE_CPU) {
-               u32 *p = (u32 *)array;
-               data->cpu = *p;
+
+               u.val64 = *array;
+               if (swapped) {
+                       /* undo swap of u64, then swap on individual u32s */
+                       u.val64 = bswap_64(u.val64);
+                       u.val32[0] = bswap_32(u.val32[0]);
+               }
+
+               data->cpu = u.val32[0];
                array++;
        }
 
@@ -423,18 +449,27 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
        }
 
        if (type & PERF_SAMPLE_RAW) {
-               u32 *p = (u32 *)array;
+               const u64 *pdata;
+
+               u.val64 = *array;
+               if (WARN_ONCE(swapped,
+                             "Endianness of raw data not corrected!\n")) {
+                       /* undo swap of u64, then swap on individual u32s */
+                       u.val64 = bswap_64(u.val64);
+                       u.val32[0] = bswap_32(u.val32[0]);
+                       u.val32[1] = bswap_32(u.val32[1]);
+               }
 
                if (sample_overlap(event, array, sizeof(u32)))
                        return -EFAULT;
 
-               data->raw_size = *p;
-               p++;
+               data->raw_size = u.val32[0];
+               pdata = (void *) array + sizeof(u32);
 
-               if (sample_overlap(event, p, data->raw_size))
+               if (sample_overlap(event, pdata, data->raw_size))
                        return -EFAULT;
 
-               data->raw_data = p;
+               data->raw_data = (void *) pdata;
        }
 
        return 0;
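
The swapped-sample handling added above is needed because the generic reader byte-swaps every 64-bit word of the record wholesale, which also exchanges and mangles u64 slots that really carry two u32 values (pid/tid, cpu, raw size). A standalone demonstration of the undo-the-u64-swap-then-swap-each-u32 fix-up (illustrative values only):

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t pid = 0x12345678, tid = 0x9abcdef0;
        union { uint64_t val64; uint32_t val32[2]; } u;

        /* 1. what a foreign-endian writer stored: two byte-swapped u32s */
        u.val32[0] = bswap_32(pid);
        u.val32[1] = bswap_32(tid);

        /* 2. what the generic reader then does: swap the whole u64 */
        u.val64 = bswap_64(u.val64);

        /* 3. the fix-up above: undo the u64 swap, then swap each u32 */
        u.val64 = bswap_64(u.val64);
        u.val32[0] = bswap_32(u.val32[0]);
        u.val32[1] = bswap_32(u.val32[1]);

        printf("pid=%#x tid=%#x\n", (unsigned)u.val32[0], (unsigned)u.val32[1]);
        return 0;
}

Running it prints the original pid and tid, showing that the pair survives the round trip only because both halves are re-swapped individually after the wholesale swap is undone.
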
index 555fc38..5d73262 100644 (file)
@@ -659,7 +659,7 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
                if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
                        ret = -ENOENT;
        }
-       if (ret == 0)
+       if (ret >= 0)
                ret = convert_variable(&vr_die, pf);
 
        if (ret < 0)
index cbc8f21..7624324 100644 (file)
@@ -803,7 +803,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
                first = list_entry(evlist->entries.next, struct perf_evsel, node);
                err = perf_event__parse_sample(event, first->attr.sample_type,
                                               perf_evsel__sample_size(first),
-                                              sample_id_all, &pevent->sample);
+                                              sample_id_all, &pevent->sample, false);
                if (err)
                        return PyErr_Format(PyExc_OSError,
                                            "perf: can't parse sample, err=%d", err);
index 170601e..974d0cb 100644 (file)
@@ -162,7 +162,8 @@ static inline int perf_session__parse_sample(struct perf_session *session,
 {
        return perf_event__parse_sample(event, session->sample_type,
                                        session->sample_size,
-                                       session->sample_id_all, sample);
+                                       session->sample_id_all, sample,
+                                       session->header.needs_swap);
 }
 
 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
index 401e220..1ee8f1e 100644 (file)
@@ -151,11 +151,17 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
 {
        u64 ip_l, ip_r;
 
+       if (!left->ms.sym && !right->ms.sym)
+               return right->level - left->level;
+
+       if (!left->ms.sym || !right->ms.sym)
+               return cmp_null(left->ms.sym, right->ms.sym);
+
        if (left->ms.sym == right->ms.sym)
                return 0;
 
-       ip_l = left->ms.sym ? left->ms.sym->start : left->ip;
-       ip_r = right->ms.sym ? right->ms.sym->start : right->ip;
+       ip_l = left->ms.sym->start;
+       ip_r = right->ms.sym->start;
 
        return (int64_t)(ip_r - ip_l);
 }
index 469c026..40eeaf0 100644 (file)
@@ -74,16 +74,104 @@ static void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
 
 bool symbol_type__is_a(char symbol_type, enum map_type map_type)
 {
+       symbol_type = toupper(symbol_type);
+
        switch (map_type) {
        case MAP__FUNCTION:
                return symbol_type == 'T' || symbol_type == 'W';
        case MAP__VARIABLE:
-               return symbol_type == 'D' || symbol_type == 'd';
+               return symbol_type == 'D';
        default:
                return false;
        }
 }
 
+static int prefix_underscores_count(const char *str)
+{
+       const char *tail = str;
+
+       while (*tail == '_')
+               tail++;
+
+       return tail - str;
+}
+
+#define SYMBOL_A 0
+#define SYMBOL_B 1
+
+static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
+{
+       s64 a;
+       s64 b;
+
+       /* Prefer a symbol with non zero length */
+       a = syma->end - syma->start;
+       b = symb->end - symb->start;
+       if ((b == 0) && (a > 0))
+               return SYMBOL_A;
+       else if ((a == 0) && (b > 0))
+               return SYMBOL_B;
+
+       /* Prefer a non weak symbol over a weak one */
+       a = syma->binding == STB_WEAK;
+       b = symb->binding == STB_WEAK;
+       if (b && !a)
+               return SYMBOL_A;
+       if (a && !b)
+               return SYMBOL_B;
+
+       /* Prefer a global symbol over a non global one */
+       a = syma->binding == STB_GLOBAL;
+       b = symb->binding == STB_GLOBAL;
+       if (a && !b)
+               return SYMBOL_A;
+       if (b && !a)
+               return SYMBOL_B;
+
+       /* Prefer a symbol with less underscores */
+       a = prefix_underscores_count(syma->name);
+       b = prefix_underscores_count(symb->name);
+       if (b > a)
+               return SYMBOL_A;
+       else if (a > b)
+               return SYMBOL_B;
+
+       /* If all else fails, choose the symbol with the longest name */
+       if (strlen(syma->name) >= strlen(symb->name))
+               return SYMBOL_A;
+       else
+               return SYMBOL_B;
+}
+
+static void symbols__fixup_duplicate(struct rb_root *symbols)
+{
+       struct rb_node *nd;
+       struct symbol *curr, *next;
+
+       nd = rb_first(symbols);
+
+       while (nd) {
+               curr = rb_entry(nd, struct symbol, rb_node);
+again:
+               nd = rb_next(&curr->rb_node);
+               next = rb_entry(nd, struct symbol, rb_node);
+
+               if (!nd)
+                       break;
+
+               if (curr->start != next->start)
+                       continue;
+
+               if (choose_best_symbol(curr, next) == SYMBOL_A) {
+                       rb_erase(&next->rb_node, symbols);
+                       goto again;
+               } else {
+                       nd = rb_next(&curr->rb_node);
+                       rb_erase(&curr->rb_node, symbols);
+               }
+       }
+}
+
 static void symbols__fixup_end(struct rb_root *symbols)
 {
        struct rb_node *nd, *prevnd = rb_first(symbols);
@@ -438,18 +526,11 @@ int kallsyms__parse(const char *filename, void *arg,
        char *line = NULL;
        size_t n;
        int err = -1;
-       u64 prev_start = 0;
-       char prev_symbol_type = 0;
-       char *prev_symbol_name;
        FILE *file = fopen(filename, "r");
 
        if (file == NULL)
                goto out_failure;
 
-       prev_symbol_name = malloc(KSYM_NAME_LEN);
-       if (prev_symbol_name == NULL)
-               goto out_close;
-
        err = 0;
 
        while (!feof(file)) {
@@ -470,7 +551,7 @@ int kallsyms__parse(const char *filename, void *arg,
                if (len + 2 >= line_len)
                        continue;
 
-               symbol_type = toupper(line[len]);
+               symbol_type = line[len];
                len += 2;
                symbol_name = line + len;
                len = line_len - len;
@@ -480,24 +561,18 @@ int kallsyms__parse(const char *filename, void *arg,
                        break;
                }
 
-               if (prev_symbol_type) {
-                       u64 end = start;
-                       if (end != prev_start)
-                               --end;
-                       err = process_symbol(arg, prev_symbol_name,
-                                            prev_symbol_type, prev_start, end);
-                       if (err)
-                               break;
-               }
-
-               memcpy(prev_symbol_name, symbol_name, len + 1);
-               prev_symbol_type = symbol_type;
-               prev_start = start;
+               /*
+                * module symbols are not sorted so we add all
+                * symbols with zero length and rely on
+                * symbols__fixup_end() to fix it up.
+                */
+               err = process_symbol(arg, symbol_name,
+                                    symbol_type, start, start);
+               if (err)
+                       break;
        }
 
-       free(prev_symbol_name);
        free(line);
-out_close:
        fclose(file);
        return err;
 
@@ -703,6 +778,9 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
        if (dso__load_all_kallsyms(dso, filename, map) < 0)
                return -1;
 
+       symbols__fixup_duplicate(&dso->symbols[map->type]);
+       symbols__fixup_end(&dso->symbols[map->type]);
+
        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
                dso->symtab_type = SYMTAB__GUEST_KALLSYMS;
        else
@@ -1092,8 +1170,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
        if (dso->has_build_id) {
                u8 build_id[BUILD_ID_SIZE];
 
-               if (elf_read_build_id(elf, build_id,
-                                     BUILD_ID_SIZE) != BUILD_ID_SIZE)
+               if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
                        goto out_elf_end;
 
                if (!dso__build_id_equal(dso, build_id))
@@ -1111,6 +1188,8 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
        }
 
        opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
+       if (opdshdr.sh_type != SHT_PROGBITS)
+               opdsec = NULL;
        if (opdsec)
                opddata = elf_rawdata(opdsec, NULL);
 
@@ -1276,6 +1355,7 @@ new_symbol:
         * For misannotated, zeroed, ASM function sizes.
         */
        if (nr > 0) {
+               symbols__fixup_duplicate(&dso->symbols[map->type]);
                symbols__fixup_end(&dso->symbols[map->type]);
                if (kmap) {
                        /*
@@ -1362,8 +1442,8 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
        ptr = data->d_buf;
        while (ptr < (data->d_buf + data->d_size)) {
                GElf_Nhdr *nhdr = ptr;
-               int namesz = NOTE_ALIGN(nhdr->n_namesz),
-                   descsz = NOTE_ALIGN(nhdr->n_descsz);
+               size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
+                      descsz = NOTE_ALIGN(nhdr->n_descsz);
                const char *name;
 
                ptr += sizeof(*nhdr);
@@ -1372,8 +1452,10 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
                if (nhdr->n_type == NT_GNU_BUILD_ID &&
                    nhdr->n_namesz == sizeof("GNU")) {
                        if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
-                               memcpy(bf, ptr, BUILD_ID_SIZE);
-                               err = BUILD_ID_SIZE;
+                               size_t sz = min(size, descsz);
+                               memcpy(bf, ptr, sz);
+                               memset(bf + sz, 0, size - sz);
+                               err = descsz;
                                break;
                        }
                }
@@ -1425,7 +1507,7 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
        while (1) {
                char bf[BUFSIZ];
                GElf_Nhdr nhdr;
-               int namesz, descsz;
+               size_t namesz, descsz;
 
                if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
                        break;
@@ -1434,15 +1516,16 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
                descsz = NOTE_ALIGN(nhdr.n_descsz);
                if (nhdr.n_type == NT_GNU_BUILD_ID &&
                    nhdr.n_namesz == sizeof("GNU")) {
-                       if (read(fd, bf, namesz) != namesz)
+                       if (read(fd, bf, namesz) != (ssize_t)namesz)
                                break;
                        if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
-                               if (read(fd, build_id,
-                                   BUILD_ID_SIZE) == BUILD_ID_SIZE) {
+                               size_t sz = min(descsz, size);
+                               if (read(fd, build_id, sz) == (ssize_t)sz) {
+                                       memset(build_id + sz, 0, size - sz);
                                        err = 0;
                                        break;
                                }
-                       } else if (read(fd, bf, descsz) != descsz)
+                       } else if (read(fd, bf, descsz) != (ssize_t)descsz)
                                break;
                } else {
                        int n = namesz + descsz;